concierge_impersonation_proxy_test.go: Make it work on more clusters
Should work on clusters which have:
- load balancers not supported, has squid proxy (e.g. kind)
- load balancers supported, has squid proxy (e.g. EKS)
- load balancers supported, no squid proxy (e.g. GKE)

When testing with a load balancer, call the impersonation proxy through the load balancer.

Also added a new library.RequireNeverWithoutError() helper.

Signed-off-by: Margo Crawford <margaretc@vmware.com>
parent f937ae2c07, commit 3fcde8088c
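The commit message mentions the new library.RequireNeverWithoutError() helper, which is added in the test/library hunks at the bottom of this diff. It follows the same call pattern as require.Never(), except that the condition callback may also return an error, which fails the assertion immediately. A minimal usage sketch; the test function name and the import path shown here are assumptions for illustration, not part of this commit:

package library_test

import (
	"testing"
	"time"

	"go.pinniped.dev/test/library" // assumed import path for the integration test helpers
)

func TestRequireNeverSketch(t *testing.T) {
	// Polls the callback every 500ms for 3 seconds. The assertion fails if the
	// callback ever returns true, and fails immediately if it returns a non-nil error.
	library.RequireNeverWithoutError(t, func() (bool, error) {
		return false, nil
	}, 3*time.Second, 500*time.Millisecond)
}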
@@ -33,14 +33,15 @@ const (
 	// TODO don't hard code "pinniped-concierge-" in these strings. It should be constructed from the env app name.
 	impersonationProxyConfigMapName    = "pinniped-concierge-impersonation-proxy-config"
 	impersonationProxyTLSSecretName    = "pinniped-concierge-impersonation-proxy-tls-serving-certificate" //nolint:gosec // this is not a credential
+	impersonationProxyLoadBalancerName = "pinniped-concierge-impersonation-proxy-load-balancer"
 )
 
+// Note that this test supports being run on all of our integration test cluster types:
+// - load balancers not supported, has squid proxy (e.g. kind)
+// - load balancers supported, has squid proxy (e.g. EKS)
+// - load balancers supported, no squid proxy (e.g. GKE)
 func TestImpersonationProxy(t *testing.T) {
 	env := library.IntegrationEnv(t)
-	if env.Proxy == "" {
-		t.Skip("this test can only run in environments with the in-cluster proxy right now")
-		return
-	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
 	defer cancel()
@@ -51,14 +52,15 @@ func TestImpersonationProxy(t *testing.T) {
 	// Create a WebhookAuthenticator.
 	authenticator := library.CreateTestWebhookAuthenticator(ctx, t)
 
-	// The address of the ClusterIP service that points at the impersonation proxy's port
+	// The address of the ClusterIP service that points at the impersonation proxy's port (used when there is no load balancer).
 	proxyServiceEndpoint := fmt.Sprintf("%s-proxy.%s.svc.cluster.local", env.ConciergeAppName, env.ConciergeNamespace)
-	proxyServiceURL := fmt.Sprintf("https://%s", proxyServiceEndpoint)
-	t.Logf("making kubeconfig that points to %q", proxyServiceURL)
+	// The error message that will be returned by squid when the impersonation proxy port inside the cluster is not listening.
+	serviceUnavailableViaSquidError := fmt.Sprintf(`Get "https://%s/api/v1/namespaces": Service Unavailable`, proxyServiceEndpoint)
 
-	getImpersonationProxyClient := func(caData []byte) *kubernetes.Clientset {
+	impersonationProxyViaSquidClient := func(caData []byte) *kubernetes.Clientset {
+		t.Helper()
 		kubeconfig := &rest.Config{
-			Host:            proxyServiceURL,
+			Host:            fmt.Sprintf("https://%s", proxyServiceEndpoint),
 			TLSClientConfig: rest.TLSClientConfig{Insecure: caData == nil, CAData: caData},
 			BearerToken:     impersonationtoken.Make(t, env.TestUser.Token, &authenticator, env.APIGroupSuffix),
 			Proxy: func(req *http.Request) (*url.URL, error) {
@@ -68,37 +70,68 @@ func TestImpersonationProxy(t *testing.T) {
 				return proxyURL, nil
 			},
 		}
+		impersonationProxyClient, err := kubernetes.NewForConfig(kubeconfig)
+		require.NoError(t, err, "unexpected failure from kubernetes.NewForConfig()")
+		return impersonationProxyClient
+	}
+
+	impersonationProxyViaLoadBalancerClient := func(host string, caData []byte) *kubernetes.Clientset {
+		t.Helper()
+		kubeconfig := &rest.Config{
+			Host:            fmt.Sprintf("https://%s", host),
+			TLSClientConfig: rest.TLSClientConfig{Insecure: caData == nil, CAData: caData},
+			BearerToken:     impersonationtoken.Make(t, env.TestUser.Token, &authenticator, env.APIGroupSuffix),
+		}
 		impersonationProxyClient, err := kubernetes.NewForConfig(kubeconfig)
 		require.NoError(t, err, "unexpected failure from kubernetes.NewForConfig()")
 		return impersonationProxyClient
 	}
 
 	oldConfigMap, err := adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Get(ctx, impersonationProxyConfigMapName, metav1.GetOptions{})
-	if oldConfigMap.Data != nil {
+	if !k8serrors.IsNotFound(err) {
+		require.NoError(t, err) // other errors aside from NotFound are unexpected
 		t.Logf("stashing a pre-existing configmap %s", oldConfigMap.Name)
 		require.NoError(t, adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Delete(ctx, impersonationProxyConfigMapName, metav1.DeleteOptions{}))
 	}
 
-	serviceUnavailableError := fmt.Sprintf(`Get "%s/api/v1/namespaces": Service Unavailable`, proxyServiceURL)
-	insecureImpersonationProxyClient := getImpersonationProxyClient(nil)
+	impersonationProxyLoadBalancerIngress := ""
 
-	if env.HasCapability(library.HasExternalLoadBalancerProvider) {
-		// Check that load balancer has been created
-		require.Eventually(t, func() bool {
-			return hasLoadBalancerService(ctx, t, adminClient, env.ConciergeNamespace)
+	if env.HasCapability(library.HasExternalLoadBalancerProvider) { //nolint:nestif // come on... it's just a test
+		// Check that load balancer has been created.
+		library.RequireEventuallyWithoutError(t, func() (bool, error) {
+			return hasImpersonationProxyLoadBalancerService(ctx, adminClient, env.ConciergeNamespace)
 		}, 10*time.Second, 500*time.Millisecond)
+
+		// Wait for the load balancer to get an ingress and make a note of its address.
+		var ingress *corev1.LoadBalancerIngress
+		library.RequireEventuallyWithoutError(t, func() (bool, error) {
+			ingress, err = getImpersonationProxyLoadBalancerIngress(ctx, adminClient, env.ConciergeNamespace)
+			if err != nil {
+				return false, err
+			}
+			return ingress != nil, nil
+		}, 10*time.Second, 500*time.Millisecond)
+		if ingress.Hostname != "" {
+			impersonationProxyLoadBalancerIngress = ingress.Hostname
+		} else {
+			require.NotEmpty(t, ingress.IP, "the ingress should have either a hostname or IP, but it didn't")
+			impersonationProxyLoadBalancerIngress = ingress.IP
+		}
 	} else {
-		// Check that no load balancer has been created
-		require.Never(t, func() bool {
-			return hasLoadBalancerService(ctx, t, adminClient, env.ConciergeNamespace)
+		require.NotEmpty(t, env.Proxy,
+			"test cluster does not support load balancers but also doesn't have a squid proxy... "+
+				"this is not a supported configuration for test clusters")
+
+		// Check that no load balancer has been created.
+		library.RequireNeverWithoutError(t, func() (bool, error) {
+			return hasImpersonationProxyLoadBalancerService(ctx, adminClient, env.ConciergeNamespace)
 		}, 10*time.Second, 500*time.Millisecond)
 	}
 
-	// Check that we can't use the impersonation proxy to execute kubectl commands yet
-	_, err = insecureImpersonationProxyClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
-	require.EqualError(t, err, serviceUnavailableError)
+	// Check that we can't use the impersonation proxy to execute kubectl commands yet.
+	_, err = impersonationProxyViaSquidClient(nil).CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+	require.EqualError(t, err, serviceUnavailableViaSquidError)
 
-	// Create configuration to make the impersonation proxy turn on with a hard coded endpoint (without a LoadBalancer)
+	// Create configuration to make the impersonation proxy turn on with a hard coded endpoint (without a LoadBalancer).
 	configMap := configMapForConfig(t, impersonator.Config{
 		Mode:     impersonator.ModeEnabled,
 		Endpoint: proxyServiceEndpoint,
@@ -108,6 +141,7 @@ func TestImpersonationProxy(t *testing.T) {
 	_, err = adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Create(ctx, &configMap, metav1.CreateOptions{})
 	require.NoError(t, err)
 
+	// At the end of the test, clean up the ConfigMap.
 	t.Cleanup(func() {
 		ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()
@@ -134,8 +168,17 @@ func TestImpersonationProxy(t *testing.T) {
 		return caSecret != nil && caSecret.Data["ca.crt"] != nil
 	}, 5*time.Minute, 250*time.Millisecond)
 
-	// Create an impersonation proxy client with that ca data.
-	impersonationProxyClient := getImpersonationProxyClient(caSecret.Data["ca.crt"])
+	// Create an impersonation proxy client with that CA data to use for the rest of this test.
+	// This client performs TLS checks, so it also provides test coverage that the impersonation proxy server is generating TLS certs correctly.
+	var impersonationProxyClient *kubernetes.Clientset
+	if env.HasCapability(library.HasExternalLoadBalancerProvider) {
+		impersonationProxyClient = impersonationProxyViaLoadBalancerClient(impersonationProxyLoadBalancerIngress, caSecret.Data["ca.crt"])
+	} else {
+		impersonationProxyClient = impersonationProxyViaSquidClient(caSecret.Data["ca.crt"])
+	}
+
+	// Test that the user can perform basic actions through the client with their username and group membership
+	// influencing RBAC checks correctly.
 	t.Run(
 		"access as user",
 		library.AccessAsUserTest(ctx, env.TestUser.ExpectedUsername, impersonationProxyClient),
@@ -148,13 +191,14 @@ func TestImpersonationProxy(t *testing.T) {
 		)
 	}
 
-	t.Run("watching all the verbs", func(t *testing.T) {
-		// Create a namespace, because it will be easier to deletecollection if we have a namespace.
-		// t.Cleanup Delete the namespace.
+	// Try more Kube API verbs through the impersonation proxy.
+	t.Run("watching all the basic verbs", func(t *testing.T) {
+		// Create a namespace, because it will be easier to exercise deletecollection if we have a namespace.
 		namespace, err := adminClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
 			ObjectMeta: metav1.ObjectMeta{GenerateName: "impersonation-integration-test-"},
 		}, metav1.CreateOptions{})
 		require.NoError(t, err)
+		// Schedule the namespace for cleanup.
 		t.Cleanup(func() {
 			t.Logf("cleaning up test namespace %s", namespace.Name)
 			err = adminClient.CoreV1().Namespaces().Delete(context.Background(), namespace.Name, metav1.DeleteOptions{})
@@ -175,6 +219,7 @@ func TestImpersonationProxy(t *testing.T) {
 				Name:     "cluster-admin",
 			},
 		)
+		// Wait for the above RBAC rule to take effect.
 		library.WaitForUserToHaveAccess(t, env.TestUser.ExpectedUsername, []string{}, &v1.ResourceAttributes{
 			Namespace: namespace.Name,
 			Verb:      "create",
@@ -183,7 +228,7 @@ func TestImpersonationProxy(t *testing.T) {
 			Resource:  "configmaps",
 		})
 
-		// Create and start informer.
+		// Create and start informer to exercise the "watch" verb for us.
 		informerFactory := k8sinformers.NewSharedInformerFactoryWithOptions(
 			impersonationProxyClient,
 			0,
@@ -198,10 +243,13 @@ func TestImpersonationProxy(t *testing.T) {
 		})
 		informerFactory.WaitForCacheSync(ctx.Done())
 
-		// Test "create" verb through the impersonation proxy.
+		// Use labels on our created ConfigMaps to avoid accidentally listing other ConfigMaps that might
+		// exist in the namespace. In Kube 1.20+ there is a default ConfigMap in every namespace.
 		configMapLabels := labels.Set{
 			"pinniped.dev/testConfigMap": library.RandHex(t, 8),
 		}
+
+		// Test "create" verb through the impersonation proxy.
 		_, err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).Create(ctx,
 			&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-1", Labels: configMapLabels}},
 			metav1.CreateOptions{},
@@ -244,8 +292,7 @@ func TestImpersonationProxy(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, "bar", updateResult.Data["foo"])
 
-		// Make sure that the updated ConfigMap shows up in the informer's cache to
-		// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
+		// Make sure that the updated ConfigMap shows up in the informer's cache.
 		require.Eventually(t, func() bool {
 			configMap, err := informer.Lister().ConfigMaps(namespace.Name).Get("configmap-3")
 			return err == nil && configMap.Data["foo"] == "bar"
@@ -262,8 +309,7 @@ func TestImpersonationProxy(t *testing.T) {
 		require.Equal(t, "bar", patchResult.Data["foo"])
 		require.Equal(t, "42", patchResult.Data["baz"])
 
-		// Make sure that the patched ConfigMap shows up in the informer's cache to
-		// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
+		// Make sure that the patched ConfigMap shows up in the informer's cache.
 		require.Eventually(t, func() bool {
 			configMap, err := informer.Lister().ConfigMaps(namespace.Name).Get("configmap-3")
 			return err == nil && configMap.Data["foo"] == "bar" && configMap.Data["baz"] == "42"
@@ -273,8 +319,7 @@ func TestImpersonationProxy(t *testing.T) {
 		err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).Delete(ctx, "configmap-3", metav1.DeleteOptions{})
 		require.NoError(t, err)
 
-		// Make sure that the deleted ConfigMap shows up in the informer's cache to
-		// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
+		// Make sure that the deleted ConfigMap shows up in the informer's cache.
 		require.Eventually(t, func() bool {
 			_, getErr := informer.Lister().ConfigMaps(namespace.Name).Get("configmap-3")
 			list, listErr := informer.Lister().ConfigMaps(namespace.Name).List(configMapLabels.AsSelector())
@@ -285,13 +330,13 @@ func TestImpersonationProxy(t *testing.T) {
 		err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
 		require.NoError(t, err)
 
-		// Make sure that the deleted ConfigMaps shows up in the informer's cache to
-		// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
+		// Make sure that the deleted ConfigMaps shows up in the informer's cache.
 		require.Eventually(t, func() bool {
 			list, listErr := informer.Lister().ConfigMaps(namespace.Name).List(configMapLabels.AsSelector())
 			return listErr == nil && len(list) == 0
 		}, 10*time.Second, 50*time.Millisecond)
 
+		// There should be no ConfigMaps left.
 		listResult, err = impersonationProxyClient.CoreV1().ConfigMaps(namespace.Name).List(ctx, metav1.ListOptions{
 			LabelSelector: configMapLabels.String(),
 		})
@@ -311,22 +356,30 @@ func TestImpersonationProxy(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	// Check that the impersonation proxy has shut down
-	require.Eventually(t, func() bool {
-		// It's okay if this returns RBAC errors because this user has no role bindings.
-		// What we want to see is that the proxy eventually shuts down entirely.
-		_, err = insecureImpersonationProxyClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
-		return err.Error() == serviceUnavailableError
-	}, 20*time.Second, 500*time.Millisecond)
-
 	if env.HasCapability(library.HasExternalLoadBalancerProvider) {
 		// The load balancer should not exist after we disable the impersonation proxy.
 		// Note that this can take kind of a long time on real cloud providers (e.g. ~22 seconds on EKS).
-		require.Eventually(t, func() bool {
-			return !hasLoadBalancerService(ctx, t, adminClient, env.ConciergeNamespace)
+		library.RequireEventuallyWithoutError(t, func() (bool, error) {
+			hasService, err := hasImpersonationProxyLoadBalancerService(ctx, adminClient, env.ConciergeNamespace)
+			return !hasService, err
 		}, time.Minute, 500*time.Millisecond)
 	}
 
+	// Check that the impersonation proxy port has shut down.
+	// Ideally we could always check that the impersonation proxy's port has shut down, but on clusters where we
+	// do not run the squid proxy we have no easy way to see beyond the load balancer to see inside the cluster,
+	// so we'll skip this check on clusters which have load balancers but don't run the squid proxy.
+	// The other cluster types that do run the squid proxy will give us sufficient coverage here.
+	if env.Proxy != "" {
+		require.Eventually(t, func() bool {
+			// It's okay if this returns RBAC errors because this user has no role bindings.
+			// What we want to see is that the proxy eventually shuts down entirely.
+			_, err = impersonationProxyViaSquidClient(nil).CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+			return err.Error() == serviceUnavailableViaSquidError
+		}, 20*time.Second, 500*time.Millisecond)
+	}
+
+	// Check that the generated TLS cert Secret was deleted by the controller.
 	require.Eventually(t, func() bool {
 		caSecret, err = adminClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName, metav1.GetOptions{})
 		return k8serrors.IsNotFound(err)
@@ -346,15 +399,28 @@ func configMapForConfig(t *testing.T, config impersonator.Config) corev1.ConfigMap {
 	return configMap
 }
 
-func hasLoadBalancerService(ctx context.Context, t *testing.T, client kubernetes.Interface, namespace string) bool {
-	t.Helper()
-
-	services, err := client.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{})
-	require.NoError(t, err)
-	for _, service := range services.Items {
-		if service.Spec.Type == corev1.ServiceTypeLoadBalancer {
-			return true
-		}
-	}
-	return false
-}
+func hasImpersonationProxyLoadBalancerService(ctx context.Context, client kubernetes.Interface, namespace string) (bool, error) {
+	service, err := client.CoreV1().Services(namespace).Get(ctx, impersonationProxyLoadBalancerName, metav1.GetOptions{})
+	if k8serrors.IsNotFound(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return service.Spec.Type == corev1.ServiceTypeLoadBalancer, nil
+}
+
+func getImpersonationProxyLoadBalancerIngress(ctx context.Context, client kubernetes.Interface, namespace string) (*corev1.LoadBalancerIngress, error) {
+	service, err := client.CoreV1().Services(namespace).Get(ctx, impersonationProxyLoadBalancerName, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	ingresses := service.Status.LoadBalancer.Ingress
+	if len(ingresses) > 1 {
+		return nil, fmt.Errorf("didn't expect multiple ingresses, but if it happens then maybe this test needs to be adjusted")
+	}
+	if len(ingresses) == 0 {
+		return nil, nil
+	}
+	return &ingresses[0], nil
+}
@@ -5,6 +5,7 @@ package library
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"testing"
 	"time"
@@ -15,7 +16,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 )
 
-// RequireEventuallyWithoutError is a wrapper around require.Eventually() that allows the caller to
+// RequireEventuallyWithoutError is similar to require.Eventually() except that it also allows the caller to
 // return an error from the condition function. If the condition function returns an error at any
 // point, the assertion will immediately fail.
 func RequireEventuallyWithoutError(
@@ -29,6 +30,28 @@ func RequireEventuallyWithoutError(
 	require.NoError(t, wait.PollImmediate(tick, waitFor, f), msgAndArgs...)
 }
 
+// RequireNeverWithoutError is similar to require.Never() except that it also allows the caller to
+// return an error from the condition function. If the condition function returns an error at any
+// point, the assertion will immediately fail.
+func RequireNeverWithoutError(
+	t *testing.T,
+	f func() (bool, error),
+	waitFor time.Duration,
+	tick time.Duration,
+	msgAndArgs ...interface{},
+) {
+	t.Helper()
+	err := wait.PollImmediate(tick, waitFor, f)
+	isWaitTimeout := errors.Is(err, wait.ErrWaitTimeout)
+	if err != nil && !isWaitTimeout {
+		require.NoError(t, err, msgAndArgs...) // this will fail and throw the right error message
+	}
+	if err == nil {
+		// This prints the same error message that require.Never would print in this case.
+		require.Fail(t, "Condition satisfied", msgAndArgs...)
+	}
+}
+
 // NewRestartAssertion allows a caller to assert that there were no restarts for a Pod in the
 // provided namespace with the provided labelSelector during the lifetime of a test.
 func AssertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {