From 3fcde8088cb361a7088ab847725be08f5169166a Mon Sep 17 00:00:00 2001
From: Ryan Richard
Date: Thu, 25 Feb 2021 14:40:02 -0800
Subject: [PATCH] concierge_impersonation_proxy_test.go: Make it work on more
 clusters

Should work on clusters which have:
- load balancers not supported, has squid proxy (e.g. kind)
- load balancers supported, has squid proxy (e.g. EKS)
- load balancers supported, no squid proxy (e.g. GKE)

When testing with a load balancer, call the impersonation proxy through
the load balancer.

Also added a new library.RequireNeverWithoutError() helper.

Signed-off-by: Margo Crawford
---
 .../concierge_impersonation_proxy_test.go    | 186 ++++++++++++------
 test/library/assertions.go                   |  25 ++-
 2 files changed, 150 insertions(+), 61 deletions(-)

diff --git a/test/integration/concierge_impersonation_proxy_test.go b/test/integration/concierge_impersonation_proxy_test.go
index b59fe91d..aec7bcdd 100644
--- a/test/integration/concierge_impersonation_proxy_test.go
+++ b/test/integration/concierge_impersonation_proxy_test.go
@@ -31,16 +31,17 @@ import (
 
 const (
 	// TODO don't hard code "pinniped-concierge-" in these strings. It should be constructed from the env app name.
-	impersonationProxyConfigMapName = "pinniped-concierge-impersonation-proxy-config"
-	impersonationProxyTLSSecretName = "pinniped-concierge-impersonation-proxy-tls-serving-certificate" //nolint:gosec // this is not a credential
+	impersonationProxyConfigMapName    = "pinniped-concierge-impersonation-proxy-config"
+	impersonationProxyTLSSecretName    = "pinniped-concierge-impersonation-proxy-tls-serving-certificate" //nolint:gosec // this is not a credential
+	impersonationProxyLoadBalancerName = "pinniped-concierge-impersonation-proxy-load-balancer"
 )
 
+// Note that this test supports being run on all of our integration test cluster types:
+// - load balancers not supported, has squid proxy (e.g. kind)
+// - load balancers supported, has squid proxy (e.g. EKS)
+// - load balancers supported, no squid proxy (e.g. GKE)
 func TestImpersonationProxy(t *testing.T) {
 	env := library.IntegrationEnv(t)
-	if env.Proxy == "" {
-		t.Skip("this test can only run in environments with the in-cluster proxy right now")
-		return
-	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 	defer cancel()
@@ -51,14 +52,15 @@ func TestImpersonationProxy(t *testing.T) {
 	// Create a WebhookAuthenticator.
 	authenticator := library.CreateTestWebhookAuthenticator(ctx, t)
 
-	// The address of the ClusterIP service that points at the impersonation proxy's port
+	// The address of the ClusterIP service that points at the impersonation proxy's port (used when there is no load balancer).
 	proxyServiceEndpoint := fmt.Sprintf("%s-proxy.%s.svc.cluster.local", env.ConciergeAppName, env.ConciergeNamespace)
-	proxyServiceURL := fmt.Sprintf("https://%s", proxyServiceEndpoint)
-	t.Logf("making kubeconfig that points to %q", proxyServiceURL)
+	// The error message that will be returned by squid when the impersonation proxy port inside the cluster is not listening.
+ serviceUnavailableViaSquidError := fmt.Sprintf(`Get "https://%s/api/v1/namespaces": Service Unavailable`, proxyServiceEndpoint) - getImpersonationProxyClient := func(caData []byte) *kubernetes.Clientset { + impersonationProxyViaSquidClient := func(caData []byte) *kubernetes.Clientset { + t.Helper() kubeconfig := &rest.Config{ - Host: proxyServiceURL, + Host: fmt.Sprintf("https://%s", proxyServiceEndpoint), TLSClientConfig: rest.TLSClientConfig{Insecure: caData == nil, CAData: caData}, BearerToken: impersonationtoken.Make(t, env.TestUser.Token, &authenticator, env.APIGroupSuffix), Proxy: func(req *http.Request) (*url.URL, error) { @@ -68,37 +70,68 @@ func TestImpersonationProxy(t *testing.T) { return proxyURL, nil }, } + impersonationProxyClient, err := kubernetes.NewForConfig(kubeconfig) + require.NoError(t, err, "unexpected failure from kubernetes.NewForConfig()") + return impersonationProxyClient + } + impersonationProxyViaLoadBalancerClient := func(host string, caData []byte) *kubernetes.Clientset { + t.Helper() + kubeconfig := &rest.Config{ + Host: fmt.Sprintf("https://%s", host), + TLSClientConfig: rest.TLSClientConfig{Insecure: caData == nil, CAData: caData}, + BearerToken: impersonationtoken.Make(t, env.TestUser.Token, &authenticator, env.APIGroupSuffix), + } impersonationProxyClient, err := kubernetes.NewForConfig(kubeconfig) require.NoError(t, err, "unexpected failure from kubernetes.NewForConfig()") return impersonationProxyClient } oldConfigMap, err := adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Get(ctx, impersonationProxyConfigMapName, metav1.GetOptions{}) - if oldConfigMap.Data != nil { + if !k8serrors.IsNotFound(err) { + require.NoError(t, err) // other errors aside from NotFound are unexpected t.Logf("stashing a pre-existing configmap %s", oldConfigMap.Name) require.NoError(t, adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Delete(ctx, impersonationProxyConfigMapName, metav1.DeleteOptions{})) } - serviceUnavailableError := fmt.Sprintf(`Get "%s/api/v1/namespaces": Service Unavailable`, proxyServiceURL) - insecureImpersonationProxyClient := getImpersonationProxyClient(nil) + impersonationProxyLoadBalancerIngress := "" - if env.HasCapability(library.HasExternalLoadBalancerProvider) { - // Check that load balancer has been created - require.Eventually(t, func() bool { - return hasLoadBalancerService(ctx, t, adminClient, env.ConciergeNamespace) + if env.HasCapability(library.HasExternalLoadBalancerProvider) { //nolint:nestif // come on... it's just a test + // Check that load balancer has been created. + library.RequireEventuallyWithoutError(t, func() (bool, error) { + return hasImpersonationProxyLoadBalancerService(ctx, adminClient, env.ConciergeNamespace) }, 10*time.Second, 500*time.Millisecond) + + // Wait for the load balancer to get an ingress and make a note of its address. 
+ var ingress *corev1.LoadBalancerIngress + library.RequireEventuallyWithoutError(t, func() (bool, error) { + ingress, err = getImpersonationProxyLoadBalancerIngress(ctx, adminClient, env.ConciergeNamespace) + if err != nil { + return false, err + } + return ingress != nil, nil + }, 10*time.Second, 500*time.Millisecond) + if ingress.Hostname != "" { + impersonationProxyLoadBalancerIngress = ingress.Hostname + } else { + require.NotEmpty(t, ingress.IP, "the ingress should have either a hostname or IP, but it didn't") + impersonationProxyLoadBalancerIngress = ingress.IP + } } else { - // Check that no load balancer has been created - require.Never(t, func() bool { - return hasLoadBalancerService(ctx, t, adminClient, env.ConciergeNamespace) + require.NotEmpty(t, env.Proxy, + "test cluster does not support load balancers but also doesn't have a squid proxy... "+ + "this is not a supported configuration for test clusters") + + // Check that no load balancer has been created. + library.RequireNeverWithoutError(t, func() (bool, error) { + return hasImpersonationProxyLoadBalancerService(ctx, adminClient, env.ConciergeNamespace) }, 10*time.Second, 500*time.Millisecond) - // Check that we can't use the impersonation proxy to execute kubectl commands yet - _, err = insecureImpersonationProxyClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) - require.EqualError(t, err, serviceUnavailableError) + // Check that we can't use the impersonation proxy to execute kubectl commands yet. + _, err = impersonationProxyViaSquidClient(nil).CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + require.EqualError(t, err, serviceUnavailableViaSquidError) - // Create configuration to make the impersonation proxy turn on with a hard coded endpoint (without a LoadBalancer) + // Create configuration to make the impersonation proxy turn on with a hard coded endpoint (without a LoadBalancer). configMap := configMapForConfig(t, impersonator.Config{ Mode: impersonator.ModeEnabled, Endpoint: proxyServiceEndpoint, @@ -108,6 +141,7 @@ func TestImpersonationProxy(t *testing.T) { _, err = adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Create(ctx, &configMap, metav1.CreateOptions{}) require.NoError(t, err) + // At the end of the test, clean up the ConfigMap. t.Cleanup(func() { ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -134,8 +168,17 @@ func TestImpersonationProxy(t *testing.T) { return caSecret != nil && caSecret.Data["ca.crt"] != nil }, 5*time.Minute, 250*time.Millisecond) - // Create an impersonation proxy client with that ca data. - impersonationProxyClient := getImpersonationProxyClient(caSecret.Data["ca.crt"]) + // Create an impersonation proxy client with that CA data to use for the rest of this test. + // This client performs TLS checks, so it also provides test coverage that the impersonation proxy server is generating TLS certs correctly. + var impersonationProxyClient *kubernetes.Clientset + if env.HasCapability(library.HasExternalLoadBalancerProvider) { + impersonationProxyClient = impersonationProxyViaLoadBalancerClient(impersonationProxyLoadBalancerIngress, caSecret.Data["ca.crt"]) + } else { + impersonationProxyClient = impersonationProxyViaSquidClient(caSecret.Data["ca.crt"]) + } + + // Test that the user can perform basic actions through the client with their username and group membership + // influencing RBAC checks correctly. 
t.Run( "access as user", library.AccessAsUserTest(ctx, env.TestUser.ExpectedUsername, impersonationProxyClient), @@ -148,13 +191,14 @@ func TestImpersonationProxy(t *testing.T) { ) } - t.Run("watching all the verbs", func(t *testing.T) { - // Create a namespace, because it will be easier to deletecollection if we have a namespace. - // t.Cleanup Delete the namespace. + // Try more Kube API verbs through the impersonation proxy. + t.Run("watching all the basic verbs", func(t *testing.T) { + // Create a namespace, because it will be easier to exercise deletecollection if we have a namespace. namespace, err := adminClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{GenerateName: "impersonation-integration-test-"}, }, metav1.CreateOptions{}) require.NoError(t, err) + // Schedule the namespace for cleanup. t.Cleanup(func() { t.Logf("cleaning up test namespace %s", namespace.Name) err = adminClient.CoreV1().Namespaces().Delete(context.Background(), namespace.Name, metav1.DeleteOptions{}) @@ -175,6 +219,7 @@ func TestImpersonationProxy(t *testing.T) { Name: "cluster-admin", }, ) + // Wait for the above RBAC rule to take effect. library.WaitForUserToHaveAccess(t, env.TestUser.ExpectedUsername, []string{}, &v1.ResourceAttributes{ Namespace: namespace.Name, Verb: "create", @@ -183,7 +228,7 @@ func TestImpersonationProxy(t *testing.T) { Resource: "configmaps", }) - // Create and start informer. + // Create and start informer to exercise the "watch" verb for us. informerFactory := k8sinformers.NewSharedInformerFactoryWithOptions( impersonationProxyClient, 0, @@ -198,10 +243,13 @@ func TestImpersonationProxy(t *testing.T) { }) informerFactory.WaitForCacheSync(ctx.Done()) - // Test "create" verb through the impersonation proxy. + // Use labels on our created ConfigMaps to avoid accidentally listing other ConfigMaps that might + // exist in the namespace. In Kube 1.20+ there is a default ConfigMap in every namespace. configMapLabels := labels.Set{ "pinniped.dev/testConfigMap": library.RandHex(t, 8), } + + // Test "create" verb through the impersonation proxy. _, err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).Create(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-1", Labels: configMapLabels}}, metav1.CreateOptions{}, @@ -244,8 +292,7 @@ func TestImpersonationProxy(t *testing.T) { require.NoError(t, err) require.Equal(t, "bar", updateResult.Data["foo"]) - // Make sure that the updated ConfigMap shows up in the informer's cache to - // demonstrate that the informer's "watch" verb is working through the impersonation proxy. + // Make sure that the updated ConfigMap shows up in the informer's cache. require.Eventually(t, func() bool { configMap, err := informer.Lister().ConfigMaps(namespace.Name).Get("configmap-3") return err == nil && configMap.Data["foo"] == "bar" @@ -262,8 +309,7 @@ func TestImpersonationProxy(t *testing.T) { require.Equal(t, "bar", patchResult.Data["foo"]) require.Equal(t, "42", patchResult.Data["baz"]) - // Make sure that the patched ConfigMap shows up in the informer's cache to - // demonstrate that the informer's "watch" verb is working through the impersonation proxy. + // Make sure that the patched ConfigMap shows up in the informer's cache. 
 		require.Eventually(t, func() bool {
 			configMap, err := informer.Lister().ConfigMaps(namespace.Name).Get("configmap-3")
 			return err == nil && configMap.Data["foo"] == "bar" && configMap.Data["baz"] == "42"
 		}, 10*time.Second, 50*time.Millisecond)
@@ -273,8 +319,7 @@ func TestImpersonationProxy(t *testing.T) {
 		err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).Delete(ctx, "configmap-3", metav1.DeleteOptions{})
 		require.NoError(t, err)
 
-		// Make sure that the deleted ConfigMap shows up in the informer's cache to
-		// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
+		// Make sure that the deleted ConfigMap shows up in the informer's cache.
 		require.Eventually(t, func() bool {
 			_, getErr := informer.Lister().ConfigMaps(namespace.Name).Get("configmap-3")
 			list, listErr := informer.Lister().ConfigMaps(namespace.Name).List(configMapLabels.AsSelector())
@@ -285,13 +330,13 @@ func TestImpersonationProxy(t *testing.T) {
 		err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
 		require.NoError(t, err)
 
-		// Make sure that the deleted ConfigMaps shows up in the informer's cache to
-		// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
+		// Make sure that the deleted ConfigMaps show up in the informer's cache.
 		require.Eventually(t, func() bool {
 			list, listErr := informer.Lister().ConfigMaps(namespace.Name).List(configMapLabels.AsSelector())
 			return listErr == nil && len(list) == 0
 		}, 10*time.Second, 50*time.Millisecond)
 
+		// There should be no ConfigMaps left.
 		listResult, err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).List(ctx, metav1.ListOptions{
 			LabelSelector: configMapLabels.String(),
 		})
@@ -311,22 +356,30 @@ func TestImpersonationProxy(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	// Check that the impersonation proxy has shut down
-	require.Eventually(t, func() bool {
-		// It's okay if this returns RBAC errors because this user has no role bindings.
-		// What we want to see is that the proxy eventually shuts down entirely.
-		_, err = insecureImpersonationProxyClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
-		return err.Error() == serviceUnavailableError
-	}, 20*time.Second, 500*time.Millisecond)
-
 	if env.HasCapability(library.HasExternalLoadBalancerProvider) {
 		// The load balancer should not exist after we disable the impersonation proxy.
 		// Note that this can take kind of a long time on real cloud providers (e.g. ~22 seconds on EKS).
-		require.Eventually(t, func() bool {
-			return !hasLoadBalancerService(ctx, t, adminClient, env.ConciergeNamespace)
+		library.RequireEventuallyWithoutError(t, func() (bool, error) {
+			hasService, err := hasImpersonationProxyLoadBalancerService(ctx, adminClient, env.ConciergeNamespace)
+			return !hasService, err
 		}, time.Minute, 500*time.Millisecond)
 	}
 
+	// Check that the impersonation proxy port has shut down.
+	// Ideally we could always check that the impersonation proxy's port has shut down, but on clusters where we
+	// do not run the squid proxy, we have no easy way to see past the load balancer into the cluster,
+	// so we'll skip this check on clusters which have load balancers but don't run the squid proxy.
+	// The other cluster types that do run the squid proxy will give us sufficient coverage here.
+	if env.Proxy != "" {
+		require.Eventually(t, func() bool {
+			// It's okay if this returns RBAC errors because this user has no role bindings.
+ // What we want to see is that the proxy eventually shuts down entirely. + _, err = impersonationProxyViaSquidClient(nil).CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + return err.Error() == serviceUnavailableViaSquidError + }, 20*time.Second, 500*time.Millisecond) + } + + // Check that the generated TLS cert Secret was deleted by the controller. require.Eventually(t, func() bool { caSecret, err = adminClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName, metav1.GetOptions{}) return k8serrors.IsNotFound(err) @@ -346,15 +399,28 @@ func configMapForConfig(t *testing.T, config impersonator.Config) corev1.ConfigM return configMap } -func hasLoadBalancerService(ctx context.Context, t *testing.T, client kubernetes.Interface, namespace string) bool { - t.Helper() - - services, err := client.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - for _, service := range services.Items { - if service.Spec.Type == corev1.ServiceTypeLoadBalancer { - return true - } +func hasImpersonationProxyLoadBalancerService(ctx context.Context, client kubernetes.Interface, namespace string) (bool, error) { + service, err := client.CoreV1().Services(namespace).Get(ctx, impersonationProxyLoadBalancerName, metav1.GetOptions{}) + if k8serrors.IsNotFound(err) { + return false, nil } - return false + if err != nil { + return false, err + } + return service.Spec.Type == corev1.ServiceTypeLoadBalancer, nil +} + +func getImpersonationProxyLoadBalancerIngress(ctx context.Context, client kubernetes.Interface, namespace string) (*corev1.LoadBalancerIngress, error) { + service, err := client.CoreV1().Services(namespace).Get(ctx, impersonationProxyLoadBalancerName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + ingresses := service.Status.LoadBalancer.Ingress + if len(ingresses) > 1 { + return nil, fmt.Errorf("didn't expect multiple ingresses, but if it happens then maybe this test needs to be adjusted") + } + if len(ingresses) == 0 { + return nil, nil + } + return &ingresses[0], nil } diff --git a/test/library/assertions.go b/test/library/assertions.go index 4f42f5a7..4db0ee09 100644 --- a/test/library/assertions.go +++ b/test/library/assertions.go @@ -5,6 +5,7 @@ package library import ( "context" + "errors" "fmt" "testing" "time" @@ -15,7 +16,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) -// RequireEventuallyWithoutError is a wrapper around require.Eventually() that allows the caller to +// RequireEventuallyWithoutError is similar to require.Eventually() except that it also allows the caller to // return an error from the condition function. If the condition function returns an error at any // point, the assertion will immediately fail. func RequireEventuallyWithoutError( @@ -29,6 +30,28 @@ func RequireEventuallyWithoutError( require.NoError(t, wait.PollImmediate(tick, waitFor, f), msgAndArgs...) } +// RequireNeverWithoutError is similar to require.Never() except that it also allows the caller to +// return an error from the condition function. If the condition function returns an error at any +// point, the assertion will immediately fail. +func RequireNeverWithoutError( + t *testing.T, + f func() (bool, error), + waitFor time.Duration, + tick time.Duration, + msgAndArgs ...interface{}, +) { + t.Helper() + err := wait.PollImmediate(tick, waitFor, f) + isWaitTimeout := errors.Is(err, wait.ErrWaitTimeout) + if err != nil && !isWaitTimeout { + require.NoError(t, err, msgAndArgs...) 
// this will fail the test and report the underlying error
+	}
+	if err == nil {
+		// This prints the same error message that require.Never would print in this case.
+		require.Fail(t, "Condition satisfied", msgAndArgs...)
+	}
+}
+
 // NewRestartAssertion allows a caller to assert that there were no restarts for a Pod in the
 // provided namespace with the provided labelSelector during the lifetime of a test.
 func AssertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
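
Below is a usage sketch (not part of the patch above) showing how the new library.RequireNeverWithoutError() helper added in test/library/assertions.go can be called from another integration test. The wrapper function name requireServiceNeverAppears and its arguments are illustrative assumptions, and the import path go.pinniped.dev/test/library is assumed; only the RequireNeverWithoutError signature and semantics come from the diff: a condition that stays false for the whole window passes, a condition that returns true fails with "Condition satisfied", and a returned error fails the assertion immediately.

package integration

import (
	"context"
	"testing"
	"time"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"go.pinniped.dev/test/library"
)

// requireServiceNeverAppears (hypothetical helper) waits out the full window and fails if the named
// Service ever shows up, while an unexpected API error fails the assertion right away.
func requireServiceNeverAppears(ctx context.Context, t *testing.T, client kubernetes.Interface, namespace, name string) {
	library.RequireNeverWithoutError(t, func() (bool, error) {
		_, err := client.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
		if k8serrors.IsNotFound(err) {
			return false, nil // not found is the expected steady state, so keep polling
		}
		if err != nil {
			return false, err // any other error fails the assertion immediately
		}
		return true, nil // the Service exists, so the "never" condition has been violated
	}, 10*time.Second, 500*time.Millisecond)
}

This mirrors how the test above polls hasImpersonationProxyLoadBalancerService on clusters without load balancer support, where the Service is expected never to appear.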