Merge branch 'main' into main

Nanci Lancaster 2021-03-29 12:29:25 -05:00 committed by GitHub
commit 017c891fb8
13 changed files with 232 additions and 167 deletions

View File

@@ -47,7 +47,7 @@ Care to kick the tires? It's easy to [install and try Pinniped](https://pinniped
## Community meetings
Pinniped is better because of our contributors and maintainers. It is because of you that we can bring great software to the community. Please join us during our online community meetings, occurring every first and third Thursday of the month at 9 AM PT / 12 PM ET. Use [this Zoom Link](https://vmware.zoom.us/j/93798188973?pwd=T3pIMWxReEQvcWljNm1admRoZTFSZz09) to attend and add any agenda items you wish to discuss to [the notes document](https://hackmd.io/rd_kVJhjQfOvfAWzK8A3tQ?view). Join our [Google Group](https://groups.google.com/u/1/g/project-pinniped) to receive invites to this meeting.
Pinniped is better because of our contributors and maintainers. It is because of you that we can bring great software to the community. Please join us during our online community meetings, occurring every first and third Thursday of the month at 9 AM PT / 12 PM ET. Use [this Zoom Link](https://vmware.zoom.us/j/93798188973?pwd=T3pIMWxReEQvcWljNm1admRoZTFSZz09) to attend and add any agenda items you wish to discuss to [the notes document](https://hackmd.io/rd_kVJhjQfOvfAWzK8A3tQ?view). Join our [Google Group](https://groups.google.com/g/project-pinniped) to receive invites to this meeting.
If the meeting day falls on a US holiday, please consider that occurrence of the meeting to be canceled.

View File

@@ -32,7 +32,10 @@ rules:
verbs: [ use ]
resourceNames: [ nonroot ]
- apiGroups: [ "" ]
resources: [ "users", "groups" ]
resources: [ "users", "groups", "serviceaccounts" ]
verbs: [ "impersonate" ]
- apiGroups: [ "authentication.k8s.io" ]
resources: [ "*" ] #! What we really want is userextras/* but the RBAC authorizer only supports */subresource, not resource/*
verbs: [ "impersonate" ]
- apiGroups: [ "" ]
resources: [ nodes ]

View File

@@ -210,12 +210,14 @@ if ! tilt_mode; then
#
# Deploy dex
#
dex_test_password="$(openssl rand -hex 16)"
pushd test/deploy/dex >/dev/null
log_note "Deploying Dex to the cluster..."
ytt --file . >"$manifest"
ytt --file . \
--data-value-yaml "supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]" \
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
>"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
@@ -328,7 +330,7 @@ export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE="${test_ca_bundle_pem}"
export PINNIPED_TEST_CLI_OIDC_CLIENT_ID=pinniped-cli
export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL=http://127.0.0.1:48095/callback
export PINNIPED_TEST_CLI_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_CLI_OIDC_PASSWORD=password
export PINNIPED_TEST_CLI_OIDC_PASSWORD=${dex_test_password}
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.dex.svc.cluster.local/dex
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE="${test_ca_bundle_pem}"
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES=email
@@ -338,7 +340,7 @@ export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_ID=pinniped-supervisor
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_SECRET=pinniped-supervisor-secret
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CALLBACK_URL=https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD=password
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD=${dex_test_password}
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_EXPECTED_GROUPS= # Dex's local user store does not let us configure groups.
export PINNIPED_TEST_API_GROUP_SUFFIX='${api_group_suffix}'

View File

@@ -7,6 +7,9 @@ capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: false
# Will the cluster successfully provision a load balancer if requested?
hasExternalLoadBalancerProvider: true
# Does the cluster allow requests without authentication?
# https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests
anonymousAuthenticationSupported: false
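# A quick manual check of this capability (an aside, not part of this file's schema):
# an unauthenticated request such as `curl -k https://<api-server>/` is attributed to
# system:anonymous and typically gets a 403 when anonymous requests are allowed, versus
# a 401 Unauthorized when they are disabled.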

View File

@@ -7,6 +7,9 @@ capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: false
# Will the cluster successfully provision a load balancer if requested?
hasExternalLoadBalancerProvider: true
# Does the cluster allow requests without authentication?
# https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests
anonymousAuthenticationSupported: true

View File

@@ -32,7 +32,7 @@ enablePasswordDB: true
staticPasswords:
- username: "pinny"
email: "pinny@example.com"
hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W" #! bcrypt("password")
hash: #@ data.values.pinny_bcrypt_passwd_hash
userID: "061d23d1-fe1e-4777-9ae9-59cd12abeaaa"
#@ end

View File

@@ -19,3 +19,6 @@ ports:
#! supervisor_redirect_uris is an array of redirect uris that dex will use for its pinniped-supervisor client.
#! usage: --data-value-yaml "supervisor_redirect_uris=[some-redirect.com,some-other-redirect.com]"
supervisor_redirect_uris: []
#! The bcrypt-hashed password of the pinny test user account.
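#! usage: --data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$password" | sed -e "s/^x://")"
#! (mirrors the deploy script's invocation shown earlier in this diff; $password is a placeholder)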
pinny_bcrypt_passwd_hash:

View File

@@ -42,7 +42,7 @@ import (
conciergev1alpha "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
identityv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/identity/v1alpha1"
loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
"go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
pinnipedconciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/concierge/impersonator"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/testutil"
@@ -74,12 +74,18 @@ func (sb *syncBuffer) Write(b []byte) (int, error) {
}
// Note that this test supports being run on all of our integration test cluster types:
// - load balancers not supported, has squid proxy (e.g. kind)
// - load balancers supported, has squid proxy (e.g. EKS)
// - load balancers supported, no squid proxy (e.g. GKE)
// - TKGS acceptance (long-running) cluster: auto mode will choose disabled, supports LBs, does not have squid.
// - GKE acceptance (long-running) cluster: auto mode will choose enabled, supports LBs, does not have squid.
// - kind: auto mode will choose disabled, does not support LBs, has squid.
// - GKE ephemeral clusters: auto mode will choose enabled, supports LBs, has squid.
// - AKS ephemeral clusters: auto mode will choose enabled, supports LBs, has squid.
// - EKS ephemeral clusters: auto mode will choose enabled, supports LBs, has squid.
func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's complex.
env := library.IntegrationEnv(t)
impersonatorShouldHaveStartedAutomaticallyByDefault := !env.HasCapability(library.ClusterSigningKeyIsAvailable)
clusterSupportsLoadBalancers := env.HasCapability(library.HasExternalLoadBalancerProvider)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
defer cancel()
@@ -96,8 +102,23 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// The address of the ClusterIP service that points at the impersonation proxy's port (used when there is no load balancer).
proxyServiceEndpoint := fmt.Sprintf("%s-proxy.%s.svc.cluster.local", env.ConciergeAppName, env.ConciergeNamespace)
newImpersonationProxyClientWithCredentials := func(credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
kubeconfig := impersonationProxyRestConfig(credentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
if !clusterSupportsLoadBalancers {
// Send the traffic through the Squid proxy only when there is no way to send it through a load balancer.
// Prefer to go through a load balancer because that's how the impersonator is intended to be used in the real world.
kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
}
return library.NewKubeclient(t, kubeconfig)
}
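// kubeconfigProxyFunc is defined near the end of this file; its body is outside this
// diff's context. Judging from the websocket dialer's proxy func later in this test, a
// plausible sketch (an assumption, not the verbatim implementation) is:
//
//	func kubeconfigProxyFunc(t *testing.T, squidProxyURL string) func(req *http.Request) (*url.URL, error) {
//		return func(req *http.Request) (*url.URL, error) {
//			proxyURL, err := url.Parse(squidProxyURL) // route the request through the Squid proxy
//			require.NoError(t, err)
//			return proxyURL, nil
//		}
//	}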
newAnonymousImpersonationProxyClient := func(impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
emptyCredentials := &loginv1alpha1.ClusterCredential{}
return newImpersonationProxyClientWithCredentials(emptyCredentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
}
var mostRecentTokenCredentialRequestResponse *loginv1alpha1.TokenCredentialRequest
refreshCredential := func() *loginv1alpha1.ClusterCredential {
refreshCredential := func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte) *loginv1alpha1.ClusterCredential {
if mostRecentTokenCredentialRequestResponse == nil || credentialAlmostExpired(t, mostRecentTokenCredentialRequestResponse) {
var err error
// Make a TokenCredentialRequest. This can either return a cert signed by the Kube API server's CA (e.g. on kind)
@@ -106,8 +127,14 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
//
// However, we issue short-lived certs, so this cert will only be valid for a few minutes.
// Cache it until it is almost expired and then refresh it whenever it is close to expired.
mostRecentTokenCredentialRequestResponse, err = library.CreateTokenCredentialRequest(ctx, t, credentialRequestSpecWithWorkingCredentials)
require.NoError(t, err)
//
// Also, use an anonymous client which goes through the impersonation proxy to make the request because that's
// what would normally happen when a user is using a kubeconfig where the server is the impersonation proxy,
// so it more closely simulates the normal use case, and also because we want this to work on AKS clusters
// which do not allow anonymous requests.
client := newAnonymousImpersonationProxyClient(impersonationProxyURL, impersonationProxyCACertPEM, "").PinnipedConcierge
mostRecentTokenCredentialRequestResponse, err = createTokenCredentialRequest(credentialRequestSpecWithWorkingCredentials, client)
require.NoError(t, err, library.Sdump(err))
require.Nil(t, mostRecentTokenCredentialRequestResponse.Status.Message,
"expected no error message but got: %s", library.Sdump(mostRecentTokenCredentialRequestResponse.Status.Message))
@@ -128,26 +155,12 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
return library.NewKubeclient(t, kubeconfig).Kubernetes
}
newImpersonationProxyClientWithCredentials := func(credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
kubeconfig := impersonationProxyRestConfig(credentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
if !env.HasCapability(library.HasExternalLoadBalancerProvider) {
// Send traffic through the Squid proxy
kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
}
return library.NewKubeclient(t, kubeconfig)
}
newImpersonationProxyClient := func(impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
refreshedCredentials := refreshCredential().DeepCopy()
newImpersonationProxyClient := func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
refreshedCredentials := refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM).DeepCopy()
refreshedCredentials.Token = "not a valid token" // demonstrates that client certs take precedence over tokens by setting both on the requests
return newImpersonationProxyClientWithCredentials(refreshedCredentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
}
newAnonymousImpersonationProxyClient := func(impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
emptyCredentials := &loginv1alpha1.ClusterCredential{}
return newImpersonationProxyClientWithCredentials(emptyCredentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
}
oldConfigMap, err := adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Get(ctx, impersonationProxyConfigMapName(env), metav1.GetOptions{})
if !k8serrors.IsNotFound(err) {
require.NoError(t, err) // other errors aside from NotFound are unexpected
@@ -180,20 +193,49 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// CredentialIssuer will be updated eventually with a successful impersonation proxy frontend.
// We do this to ensure that future tests that use the impersonation proxy (e.g.,
// TestE2EFullIntegration) will start with a known-good state.
if env.HasCapability(library.HasExternalLoadBalancerProvider) {
if clusterSupportsLoadBalancers {
performImpersonatorDiscovery(ctx, t, env, adminConciergeClient)
}
})
if env.HasCapability(library.HasExternalLoadBalancerProvider) { //nolint:nestif // come on... it's just a test
// Done with set-up and ready to get started with the test. There are several states that we could be in at
// this point depending on the capabilities of the cluster under test. We handle each possible case here.
switch {
case impersonatorShouldHaveStartedAutomaticallyByDefault && clusterSupportsLoadBalancers:
// Auto mode should have decided that the impersonator will run and should have started a load balancer,
// and we will be able to use the load balancer to access the impersonator. (e.g. GKE, AKS, EKS)
// Check that a load balancer has been automatically created by the impersonator's "auto" mode.
library.RequireEventuallyWithoutError(t, func() (bool, error) {
return hasImpersonationProxyLoadBalancerService(ctx, env, adminClient)
}, 30*time.Second, 500*time.Millisecond)
} else {
require.NotEmpty(t, env.Proxy,
"test cluster does not support load balancers but also doesn't have a squid proxy... "+
case impersonatorShouldHaveStartedAutomaticallyByDefault && !clusterSupportsLoadBalancers:
t.Fatal("None of the clusters types that we currently test against should automatically" +
"enable the impersonation proxy without also supporting load balancers. If we add such a" +
"cluster type in the future then we should enhance this test.")
case !impersonatorShouldHaveStartedAutomaticallyByDefault && clusterSupportsLoadBalancers:
// Auto mode should have decided that the impersonator will be disabled. We need to manually enable it.
// The cluster supports load balancers so we should enable it and let the impersonator create a load balancer
// automatically. (e.g. TKGS)
// The CredentialIssuer's strategies array should have been updated to include an unsuccessful impersonation
// strategy saying that it was automatically disabled.
requireDisabledStrategy(ctx, t, env, adminConciergeClient)
// Create configuration to make the impersonation proxy turn on with no endpoint (i.e. automatically create a load balancer).
configMap := impersonationProxyConfigMapForConfig(t, env, impersonator.Config{Mode: impersonator.ModeEnabled})
t.Logf("creating configmap %s", configMap.Name)
_, err = adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Create(ctx, &configMap, metav1.CreateOptions{})
require.NoError(t, err)
default:
// Auto mode should have decided that the impersonator will be disabled. We need to manually enable it.
// However, the cluster does not support load balancers so we should enable it without a load balancer
// and use squid to make requests. (e.g. kind)
if env.Proxy == "" {
t.Skip("test cluster does not support load balancers but also doesn't have a squid proxy... " +
"this is not a supported configuration for test clusters")
}
// Check that no load balancer has been created by the impersonator's "auto" mode.
library.RequireNeverWithoutError(t, func() (bool, error) {
@@ -206,7 +248,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
require.Truef(t, isErr, "wanted error %q to be service unavailable via squid error, but: %s", err, message)
// Create configuration to make the impersonation proxy turn on with a hard coded endpoint (without a load balancer).
configMap := configMapForConfig(t, env, impersonator.Config{
configMap := impersonationProxyConfigMapForConfig(t, env, impersonator.Config{
Mode: impersonator.ModeEnabled,
Endpoint: proxyServiceEndpoint,
})
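// For reference, impersonationProxyConfigMapForConfig (shown near the end of this file)
// simply yaml.Marshals this struct into the ConfigMap's data, so the stored value
// plausibly looks like the following (key names are an assumption based on the struct's
// YAML tags, which are not visible in this diff):
//
//	mode: enabled
//	endpoint: <concierge-app-name>-proxy.<concierge-namespace>.svc.cluster.local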
@@ -221,7 +263,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// in the strategies array or it may be included in an error state. It can be in an error state for
// a while when it is waiting for the load balancer to be assigned an IP/hostname.
impersonationProxyURL, impersonationProxyCACertPEM := performImpersonatorDiscovery(ctx, t, env, adminConciergeClient)
if !env.HasCapability(library.HasExternalLoadBalancerProvider) {
if !clusterSupportsLoadBalancers {
// In this case, we specified the endpoint in the configmap, so check that it was reported correctly in the CredentialIssuer.
require.Equal(t, "https://"+proxyServiceEndpoint, impersonationProxyURL)
}
@@ -230,8 +272,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// credentials before they expire. Create a closure to capture the arguments to newImpersonationProxyClient
// so we don't have to keep repeating them.
// This client performs TLS checks, so it also provides test coverage that the impersonation proxy server is generating TLS certs correctly.
impersonationProxyKubeClient := func() kubernetes.Interface {
return newImpersonationProxyClient(impersonationProxyURL, impersonationProxyCACertPEM, "").Kubernetes
impersonationProxyKubeClient := func(t *testing.T) kubernetes.Interface {
return newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, "").Kubernetes
}
t.Run("positive tests", func(t *testing.T) {
@@ -264,18 +306,75 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// influencing RBAC checks correctly.
t.Run(
"access as user",
library.AccessAsUserTest(ctx, env.TestUser.ExpectedUsername, impersonationProxyKubeClient()),
library.AccessAsUserTest(ctx, env.TestUser.ExpectedUsername, impersonationProxyKubeClient(t)),
)
for _, group := range env.TestUser.ExpectedGroups {
group := group
t.Run(
"access as group "+group,
library.AccessAsGroupTest(ctx, group, impersonationProxyKubeClient()),
library.AccessAsGroupTest(ctx, group, impersonationProxyKubeClient(t)),
)
}
t.Run("kubectl port-forward and keeping the connection open for over a minute", func(t *testing.T) {
kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM)
t.Run("kubectl port-forward and keeping the connection open for over a minute (non-idle)", func(t *testing.T) {
kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)
// Run the kubectl port-forward command.
timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
defer cancelFunc()
portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "8443:8443")
portForwardCmd.Env = envVarsWithProxy
// Start, but don't wait for the command to finish.
err := portForwardCmd.Start()
require.NoError(t, err, `"kubectl port-forward" failed`)
go func() {
assert.EqualErrorf(t, portForwardCmd.Wait(), "signal: killed", `wanted "kubectl port-forward" to get signaled because context was cancelled (stderr: %q)`, portForwardStderr.String())
}()
// The server should recognize that this
// is going to be a long-running command and keep the connection open as long as the client stays connected.
// curl the endpoint as many times as we can within 70 seconds.
// this will ensure that we don't run into idle timeouts.
var curlStdOut, curlStdErr bytes.Buffer
timeout, cancelFunc = context.WithTimeout(ctx, 75*time.Second)
defer cancelFunc()
startTime := time.Now()
for time.Now().Before(startTime.Add(70 * time.Second)) {
curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:8443") // -sS turns off the progressbar but still prints errors
curlCmd.Stdout = &curlStdOut
curlCmd.Stderr = &curlStdErr
curlErr := curlCmd.Run()
if curlErr != nil {
t.Log("curl error: " + curlErr.Error())
t.Log("curlStdErr: " + curlStdErr.String())
t.Log("stdout: " + curlStdOut.String())
}
t.Log("time: ", time.Now())
time.Sleep(1 * time.Second)
}
// curl the endpoint once more after 70 seconds have elapsed, to make sure the connection is still open.
timeout, cancelFunc = context.WithTimeout(ctx, 30*time.Second)
defer cancelFunc()
curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:8443") // -sS turns off the progressbar but still prints errors
curlCmd.Stdout = &curlStdOut
curlCmd.Stderr = &curlStdErr
curlErr := curlCmd.Run()
if curlErr != nil {
t.Log("curl error: " + curlErr.Error())
t.Log("curlStdErr: " + curlStdErr.String())
t.Log("stdout: " + curlStdOut.String())
}
// We expect this to 403, but all we care about is that it gets through.
require.NoError(t, curlErr)
require.Contains(t, curlStdOut.String(), "\"forbidden: User \\\"system:anonymous\\\" cannot get path \\\"/\\\"\"")
})
t.Run("kubectl port-forward and keeping the connection open for over a minute (idle)", func(t *testing.T) {
kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)
// Run the kubectl port-forward command.
timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
@@ -294,7 +393,6 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// is going to be a long-running command and keep the connection open as long as the client stays connected.
time.Sleep(70 * time.Second)
require.Eventually(t, func() bool {
timeout, cancelFunc = context.WithTimeout(ctx, 2*time.Minute)
defer cancelFunc()
curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:8443") // -sS turns off the progressbar but still prints errors
@@ -308,8 +406,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
t.Log("stdout: " + curlStdOut.String())
}
// We expect this to 403, but all we care about is that it gets through.
return err == nil && strings.Contains(curlStdOut.String(), "\"forbidden: User \\\"system:anonymous\\\" cannot get path \\\"/\\\"\"")
}, 1*time.Minute, 500*time.Millisecond)
require.NoError(t, err)
require.Contains(t, curlStdOut.String(), "\"forbidden: User \\\"system:anonymous\\\" cannot get path \\\"/\\\"\"")
})
t.Run("using and watching all the basic verbs", func(t *testing.T) {
@@ -318,7 +416,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// Create and start informer to exercise the "watch" verb for us.
informerFactory := k8sinformers.NewSharedInformerFactoryWithOptions(
impersonationProxyKubeClient(),
impersonationProxyKubeClient(t),
0,
k8sinformers.WithNamespace(namespaceName))
informer := informerFactory.Core().V1().ConfigMaps()
@@ -338,17 +436,17 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
}
// Test "create" verb through the impersonation proxy.
_, err := impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Create(ctx,
_, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Create(ctx,
&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-1", Labels: configMapLabels}},
metav1.CreateOptions{},
)
require.NoError(t, err)
_, err = impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Create(ctx,
_, err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Create(ctx,
&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-2", Labels: configMapLabels}},
metav1.CreateOptions{},
)
require.NoError(t, err)
_, err = impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Create(ctx,
_, err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Create(ctx,
&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-3", Labels: configMapLabels}},
metav1.CreateOptions{},
)
@@ -364,11 +462,11 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
}, 10*time.Second, 50*time.Millisecond)
// Test "get" verb through the impersonation proxy.
configMap3, err := impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Get(ctx, "configmap-3", metav1.GetOptions{})
configMap3, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Get(ctx, "configmap-3", metav1.GetOptions{})
require.NoError(t, err)
// Test "list" verb through the impersonation proxy.
listResult, err := impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).List(ctx, metav1.ListOptions{
listResult, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).List(ctx, metav1.ListOptions{
LabelSelector: configMapLabels.String(),
})
require.NoError(t, err)
@@ -376,7 +474,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// Test "update" verb through the impersonation proxy.
configMap3.Data = map[string]string{"foo": "bar"}
updateResult, err := impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Update(ctx, configMap3, metav1.UpdateOptions{})
updateResult, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Update(ctx, configMap3, metav1.UpdateOptions{})
require.NoError(t, err)
require.Equal(t, "bar", updateResult.Data["foo"])
@@ -387,7 +485,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
}, 10*time.Second, 50*time.Millisecond)
// Test "patch" verb through the impersonation proxy.
patchResult, err := impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Patch(ctx,
patchResult, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Patch(ctx,
"configmap-3",
types.MergePatchType,
[]byte(`{"data":{"baz":"42"}}`),
@@ -404,7 +502,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
}, 10*time.Second, 50*time.Millisecond)
// Test "delete" verb through the impersonation proxy.
err = impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).Delete(ctx, "configmap-3", metav1.DeleteOptions{})
err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Delete(ctx, "configmap-3", metav1.DeleteOptions{})
require.NoError(t, err)
// Make sure that the deleted ConfigMap shows up in the informer's cache.
@@ -415,7 +513,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
}, 10*time.Second, 50*time.Millisecond)
// Test "deletecollection" verb through the impersonation proxy.
err = impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
require.NoError(t, err)
// Make sure that the deleted ConfigMaps shows up in the informer's cache.
@@ -425,7 +523,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
}, 10*time.Second, 50*time.Millisecond)
// There should be no ConfigMaps left.
listResult, err = impersonationProxyKubeClient().CoreV1().ConfigMaps(namespaceName).List(ctx, metav1.ListOptions{
listResult, err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).List(ctx, metav1.ListOptions{
LabelSelector: configMapLabels.String(),
})
require.NoError(t, err)
@@ -435,11 +533,11 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
t.Run("double impersonation as a regular user is blocked", func(t *testing.T) {
// Make a client which will send requests through the impersonation proxy and will also add
// impersonate headers to the request.
doubleImpersonationKubeClient := newImpersonationProxyClient(impersonationProxyURL, impersonationProxyCACertPEM, "other-user-to-impersonate").Kubernetes
doubleImpersonationKubeClient := newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, "other-user-to-impersonate").Kubernetes
// Check that we can get some resource through the impersonation proxy without any impersonation headers on the request.
// We could use any resource for this, but we happen to know that this one should exist.
_, err := impersonationProxyKubeClient().CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
_, err := impersonationProxyKubeClient(t).CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
require.NoError(t, err)
// Now we'll see what happens when we add an impersonation header to the request. This should generate a
@@ -489,7 +587,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
t.Run("WhoAmIRequests and different kinds of authentication through the impersonation proxy", func(t *testing.T) {
// Test using the TokenCredentialRequest for authentication.
impersonationProxyPinnipedConciergeClient := newImpersonationProxyClient(
impersonationProxyPinnipedConciergeClient := newImpersonationProxyClient(t,
impersonationProxyURL, impersonationProxyCACertPEM, "",
).PinnipedConcierge
whoAmI, err := impersonationProxyPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
@@ -546,7 +644,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
})
t.Run("kubectl as a client", func(t *testing.T) {
kubeconfigPath, envVarsWithProxy, tempDir := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM)
kubeconfigPath, envVarsWithProxy, tempDir := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)
// Try "kubectl exec" through the impersonation proxy.
echoString := "hello world"
@@ -618,7 +716,10 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
t.Run("websocket client", func(t *testing.T) {
namespaceName := createTestNamespace(t, adminClient)
impersonationRestConfig := impersonationProxyRestConfig(refreshCredential(), impersonationProxyURL, impersonationProxyCACertPEM, "")
impersonationRestConfig := impersonationProxyRestConfig(
refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM),
impersonationProxyURL, impersonationProxyCACertPEM, "",
)
tlsConfig, err := rest.TLSConfigFor(impersonationRestConfig)
require.NoError(t, err)
@@ -634,7 +735,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
dialer := websocket.Dialer{
TLSClientConfig: tlsConfig,
}
if !env.HasCapability(library.HasExternalLoadBalancerProvider) {
if !clusterSupportsLoadBalancers {
dialer.Proxy = func(req *http.Request) (*url.URL, error) {
proxyURL, err := url.Parse(env.Proxy)
require.NoError(t, err)
@@ -702,14 +803,17 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
})
// create rest client
restConfig := impersonationProxyRestConfig(refreshCredential(), impersonationProxyURL, impersonationProxyCACertPEM, "")
restConfig := impersonationProxyRestConfig(
refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM),
impersonationProxyURL, impersonationProxyCACertPEM, "",
)
tlsConfig, err := rest.TLSConfigFor(restConfig)
require.NoError(t, err)
httpTransport := http.Transport{
TLSClientConfig: tlsConfig,
}
if !env.HasCapability(library.HasExternalLoadBalancerProvider) {
if !clusterSupportsLoadBalancers {
httpTransport.Proxy = func(req *http.Request) (*url.URL, error) {
proxyURL, err := url.Parse(env.Proxy)
require.NoError(t, err)
@@ -777,8 +881,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
t.Run("manually disabling the impersonation proxy feature", func(t *testing.T) {
// Update configuration to force the proxy to disabled mode
configMap := configMapForConfig(t, env, impersonator.Config{Mode: impersonator.ModeDisabled})
if env.HasCapability(library.HasExternalLoadBalancerProvider) {
configMap := impersonationProxyConfigMapForConfig(t, env, impersonator.Config{Mode: impersonator.ModeDisabled})
if clusterSupportsLoadBalancers {
t.Logf("creating configmap %s", configMap.Name)
_, err := adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Create(ctx, &configMap, metav1.CreateOptions{})
require.NoError(t, err)
@@ -788,7 +892,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
require.NoError(t, err)
}
if env.HasCapability(library.HasExternalLoadBalancerProvider) {
if clusterSupportsLoadBalancers {
// The load balancer should have been deleted when we disabled the impersonation proxy.
// Note that this can take kind of a long time on real cloud providers (e.g. ~22 seconds on EKS).
library.RequireEventuallyWithoutError(t, func() (bool, error) {
@@ -827,13 +931,15 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
// At this point the impersonator should be stopped. The CredentialIssuer's strategies array should be updated to
// include an unsuccessful impersonation strategy saying that it was manually configured to be disabled.
requireDisabledByConfigurationStrategy(ctx, t, env, adminConciergeClient)
requireDisabledStrategy(ctx, t, env, adminConciergeClient)
if !env.HasCapability(library.ClusterSigningKeyIsAvailable) {
if !env.HasCapability(library.ClusterSigningKeyIsAvailable) && env.HasCapability(library.AnonymousAuthenticationSupported) {
// This cluster does not support the cluster signing key strategy, so now that we've manually disabled the
// impersonation strategy, we should be left with no working strategies.
// Given that there are no working strategies, a TokenCredentialRequest which would otherwise work should now
// fail, because there is no point handing out credentials that are not going to work for any strategy.
// Note that library.CreateTokenCredentialRequest makes an unauthenticated request, so we can't meaningfully
// perform this part of the test on a cluster which does not allow anonymous authentication.
tokenCredentialRequestResponse, err := library.CreateTokenCredentialRequest(ctx, t, credentialRequestSpecWithWorkingCredentials)
require.NoError(t, err)
@@ -917,7 +1023,7 @@ func expectedWhoAmIRequestResponse(username string, groups []string) *identityv1
}
}
func performImpersonatorDiscovery(ctx context.Context, t *testing.T, env *library.TestEnv, adminConciergeClient versioned.Interface) (string, []byte) {
func performImpersonatorDiscovery(ctx context.Context, t *testing.T, env *library.TestEnv, adminConciergeClient pinnipedconciergeclientset.Interface) (string, []byte) {
t.Helper()
var impersonationProxyURL string
var impersonationProxyCACertPEM []byte
@@ -962,7 +1068,7 @@ func performImpersonatorDiscovery(ctx context.Context, t *testing.T, env *librar
return impersonationProxyURL, impersonationProxyCACertPEM
}
func requireDisabledByConfigurationStrategy(ctx context.Context, t *testing.T, env *library.TestEnv, adminConciergeClient versioned.Interface) {
func requireDisabledStrategy(ctx context.Context, t *testing.T, env *library.TestEnv, adminConciergeClient pinnipedconciergeclientset.Interface) {
t.Helper()
library.RequireEventuallyWithoutError(t, func() (bool, error) {
@@ -1039,7 +1145,7 @@ func kubeconfigProxyFunc(t *testing.T, squidProxyURL string) func(req *http.Requ
}
}
func configMapForConfig(t *testing.T, env *library.TestEnv, config impersonator.Config) corev1.ConfigMap {
func impersonationProxyConfigMapForConfig(t *testing.T, env *library.TestEnv, config impersonator.Config) corev1.ConfigMap {
t.Helper()
configString, err := yaml.Marshal(config)
require.NoError(t, err)
@@ -1084,7 +1190,7 @@ func credentialIssuerName(env *library.TestEnv) string {
return env.ConciergeAppName + "-config"
}
func getImpersonationKubeconfig(t *testing.T, env *library.TestEnv, impersonationProxyURL string, impersonationProxyCACertPEM []byte) (string, []string, string) {
func getImpersonationKubeconfig(t *testing.T, env *library.TestEnv, impersonationProxyURL string, impersonationProxyCACertPEM []byte, authenticator corev1.TypedLocalObjectReference) (string, []string, string) {
t.Helper()
pinnipedExe := library.PinnipedCLIPath(t)
@@ -1101,6 +1207,8 @@ func getImpersonationKubeconfig(t *testing.T, env *library.TestEnv, impersonatio
"--concierge-api-group-suffix", env.APIGroupSuffix,
"--oidc-skip-browser",
"--static-token", env.TestUser.Token,
"--concierge-authenticator-name", authenticator.Name,
"--concierge-authenticator-type", "webhook",
// Force the use of impersonation proxy strategy, but let it auto-discover the endpoint and CA.
"--concierge-mode", "ImpersonationProxy"}
t.Log("Running:", pinnipedExe, getKubeConfigCmd)
@@ -1192,3 +1300,14 @@ func requireClose(t *testing.T, c chan struct{}, timeout time.Duration) {
require.FailNow(t, "failed to receive from channel within "+timeout.String())
}
}
func createTokenCredentialRequest(
spec loginv1alpha1.TokenCredentialRequestSpec,
client pinnipedconciergeclientset.Interface,
) (*loginv1alpha1.TokenCredentialRequest, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
return client.LoginV1alpha1().TokenCredentialRequests().Create(ctx,
&loginv1alpha1.TokenCredentialRequest{Spec: spec}, metav1.CreateOptions{},
)
}

View File

@@ -42,10 +42,6 @@ import (
func TestE2EFullIntegration(t *testing.T) {
env := library.IntegrationEnv(t)
// If anything in this test crashes, dump out the supervisor and proxy pod logs.
defer library.DumpLogs(t, env.SupervisorNamespace, "")
defer library.DumpLogs(t, "dex", "app=proxy")
ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancelFunc()

View File

@@ -601,18 +601,25 @@ func requireDelete(t *testing.T, client pinnipedclientset.Interface, ns, name st
func requireStatus(t *testing.T, client pinnipedclientset.Interface, ns, name string, status v1alpha1.FederationDomainStatusCondition) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
var federationDomain *v1alpha1.FederationDomain
var err error
assert.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
federationDomain, err = client.ConfigV1alpha1().FederationDomains(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
t.Logf("error trying to get FederationDomain: %s", err.Error())
t.Logf("error trying to get FederationDomain %s/%s: %v", ns, name, err)
return false
}
return err == nil && federationDomain.Status.Status == status
}, time.Minute, 200*time.Millisecond)
if federationDomain.Status.Status != status {
t.Logf("found FederationDomain %s/%s with status %s", ns, name, federationDomain.Status.Status)
return false
}
return true
}, 5*time.Minute, 200*time.Millisecond)
require.NoError(t, err)
require.Equalf(t, status, federationDomain.Status.Status, "unexpected status (message = '%s')", federationDomain.Status.Message)
}

View File

@@ -39,10 +39,6 @@ import (
func TestSupervisorLogin(t *testing.T) {
env := library.IntegrationEnv(t)
// If anything in this test crashes, dump out the supervisor and proxy pod logs.
defer library.DumpLogs(t, env.SupervisorNamespace, "")
defer library.DumpLogs(t, "dex", "app=proxy")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()

View File

@@ -82,7 +82,7 @@ func assertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
}
// Expect the restart count to be the same as it was before the test.
if !assert.Equal(
assert.Equal(
t,
previousRestartCount,
currentRestartCount,
@@ -90,10 +90,7 @@ func assertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
key.String(),
currentRestartCount,
previousRestartCount,
) {
// Attempt to dump the logs from the previous container that crashed.
dumpContainerLogs(ctx, t, kubeClient, key.namespace, key.pod, key.container, true)
}
)
}
})
}

View File

@@ -1,64 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package library
import (
"bufio"
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// DumpLogs is meant to be called in a `defer` to dump the logs of components in the cluster on a test failure.
func DumpLogs(t *testing.T, namespace string, labelSelector string) {
// Only trigger on failed tests.
if !t.Failed() {
return
}
kubeClient := NewKubernetesClientset(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
require.NoError(t, err)
for _, pod := range pods.Items {
for _, container := range pod.Status.ContainerStatuses {
if container.RestartCount > 0 {
dumpContainerLogs(ctx, t, kubeClient, pod.Namespace, pod.Name, container.Name, true)
}
dumpContainerLogs(ctx, t, kubeClient, pod.Namespace, pod.Name, container.Name, false)
}
}
}
func dumpContainerLogs(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, pod, container string, prev bool) {
logTailLines := int64(40)
shortName := fmt.Sprintf("%s/%s/%s", namespace, pod, container)
logReader, err := kubeClient.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{
Container: container,
TailLines: &logTailLines,
Previous: prev,
}).Stream(ctx)
if !assert.NoErrorf(t, err, "failed to stream logs for container %s", shortName) {
return
}
scanner := bufio.NewScanner(logReader)
for scanner.Scan() {
prefix := shortName
if prev {
prefix += " (previous)"
}
t.Logf("%s > %s", prefix, scanner.Text())
}
assert.NoError(t, scanner.Err(), "failed to read logs from container %s", shortName)
}