From 6723ed9fd83d23cfad47284b93fb65cc92de2ca3 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Tue, 11 May 2021 13:55:46 -0700 Subject: [PATCH] Add end-to-end integration test for CLI-based LDAP login --- go.mod | 1 + go.sum | 1 + test/integration/e2e_test.go | 534 +++++++++++++++++++++++------------ 3 files changed, 349 insertions(+), 187 deletions(-) diff --git a/go.mod b/go.mod index 96f6e4db..d86141f4 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( cloud.google.com/go v0.60.0 // indirect github.com/MakeNowJust/heredoc/v2 v2.0.1 github.com/coreos/go-oidc/v3 v3.0.0 + github.com/creack/pty v1.1.11 github.com/davecgh/go-spew v1.1.1 github.com/go-ldap/ldap/v3 v3.3.0 github.com/go-logr/logr v0.4.0 diff --git a/go.sum b/go.sum index b5ac283a..e58f11bb 100644 --- a/go.sum +++ b/go.sum @@ -148,6 +148,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index 2d7731c6..59192796 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "errors" "fmt" + "io" "io/ioutil" "net/url" "os" @@ -21,6 +22,7 @@ import ( "time" coreosoidc "github.com/coreos/go-oidc/v3/oidc" + "github.com/creack/pty" "github.com/stretchr/testify/require" authorizationv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" @@ -30,6 +32,7 @@ import ( configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1" idpv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1" "go.pinniped.dev/internal/certauthority" + "go.pinniped.dev/internal/here" "go.pinniped.dev/internal/oidc" "go.pinniped.dev/internal/testutil" "go.pinniped.dev/pkg/oidcclient" @@ -92,24 +95,6 @@ func TestE2EFullIntegration(t *testing.T) { configv1alpha1.SuccessFederationDomainStatusCondition, ) - // Create upstream OIDC provider and wait for it to become ready. - library.CreateTestOIDCIdentityProvider(t, idpv1alpha1.OIDCIdentityProviderSpec{ - Issuer: env.SupervisorUpstreamOIDC.Issuer, - TLS: &idpv1alpha1.TLSSpec{ - CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamOIDC.CABundle)), - }, - AuthorizationConfig: idpv1alpha1.OIDCAuthorizationConfig{ - AdditionalScopes: env.SupervisorUpstreamOIDC.AdditionalScopes, - }, - Claims: idpv1alpha1.OIDCClaims{ - Username: env.SupervisorUpstreamOIDC.UsernameClaim, - Groups: env.SupervisorUpstreamOIDC.GroupsClaim, - }, - Client: idpv1alpha1.OIDCClient{ - SecretName: library.CreateClientCredsSecret(t, env.SupervisorUpstreamOIDC.ClientID, env.SupervisorUpstreamOIDC.ClientSecret).Name, - }, - }, idpv1alpha1.PhaseReady) - // Create a JWTAuthenticator that will validate the tokens from the downstream issuer. 
clusterAudience := "test-cluster-" + library.RandHex(t, 8) authenticator := library.CreateTestJWTAuthenticator(ctx, t, authv1alpha.JWTAuthenticatorSpec{ @@ -118,159 +103,314 @@ func TestE2EFullIntegration(t *testing.T) { TLS: &authv1alpha.TLSSpec{CertificateAuthorityData: testCABundleBase64}, }) - // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster. - library.CreateTestClusterRoleBinding(t, - rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: env.SupervisorUpstreamOIDC.Username}, - rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"}, - ) - library.WaitForUserToHaveAccess(t, env.SupervisorUpstreamOIDC.Username, []string{}, &authorizationv1.ResourceAttributes{ - Verb: "get", - Group: "", - Version: "v1", - Resource: "namespaces", + // Add an OIDC upstream IDP and try using it to authenticate during kubectl commands. + t.Run("with Supervisor OIDC upstream IDP", func(t *testing.T) { + expectedUsername := env.SupervisorUpstreamOIDC.Username + expectedGroups := env.SupervisorUpstreamOIDC.ExpectedGroups + + // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster. + library.CreateTestClusterRoleBinding(t, + rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: expectedUsername}, + rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"}, + ) + library.WaitForUserToHaveAccess(t, expectedUsername, []string{}, &authorizationv1.ResourceAttributes{ + Verb: "get", + Group: "", + Version: "v1", + Resource: "namespaces", + }) + + // Create upstream OIDC provider and wait for it to become ready. + library.CreateTestOIDCIdentityProvider(t, idpv1alpha1.OIDCIdentityProviderSpec{ + Issuer: env.SupervisorUpstreamOIDC.Issuer, + TLS: &idpv1alpha1.TLSSpec{ + CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamOIDC.CABundle)), + }, + AuthorizationConfig: idpv1alpha1.OIDCAuthorizationConfig{ + AdditionalScopes: env.SupervisorUpstreamOIDC.AdditionalScopes, + }, + Claims: idpv1alpha1.OIDCClaims{ + Username: env.SupervisorUpstreamOIDC.UsernameClaim, + Groups: env.SupervisorUpstreamOIDC.GroupsClaim, + }, + Client: idpv1alpha1.OIDCClient{ + SecretName: library.CreateClientCredsSecret(t, env.SupervisorUpstreamOIDC.ClientID, env.SupervisorUpstreamOIDC.ClientSecret).Name, + }, + }, idpv1alpha1.PhaseReady) + + // Use a specific session cache for this test. + sessionCachePath := tempDir + "/oidc-test-sessions.yaml" + + kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{ + "get", "kubeconfig", + "--concierge-api-group-suffix", env.APIGroupSuffix, + "--concierge-authenticator-type", "jwt", + "--concierge-authenticator-name", authenticator.Name, + "--oidc-skip-browser", + "--oidc-ca-bundle", testCABundlePath, + "--oidc-session-cache", sessionCachePath, + }) + + // Run "kubectl get namespaces" which should trigger a browser login via the plugin. + start := time.Now() + kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) + kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) 
+ stderrPipe, err := kubectlCmd.StderrPipe() + require.NoError(t, err) + stdoutPipe, err := kubectlCmd.StdoutPipe() + require.NoError(t, err) + + t.Logf("starting kubectl subprocess") + require.NoError(t, kubectlCmd.Start()) + t.Cleanup(func() { + err := kubectlCmd.Wait() + t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode()) + stdout, stdoutErr := ioutil.ReadAll(stdoutPipe) + if stdoutErr != nil { + stdout = []byte("") + } + stderr, stderrErr := ioutil.ReadAll(stderrPipe) + if stderrErr != nil { + stderr = []byte("") + } + require.NoErrorf(t, err, "kubectl process did not exit cleanly, stdout/stderr: %q/%q", string(stdout), string(stderr)) + }) + + // Start a background goroutine to read stderr from the CLI and parse out the login URL. + loginURLChan := make(chan string) + spawnTestGoroutine(t, func() (err error) { + defer func() { + closeErr := stderrPipe.Close() + if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { + return + } + if err == nil { + err = fmt.Errorf("stderr stream closed with error: %w", closeErr) + } + }() + + reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderrPipe)) + line, err := reader.ReadString('\n') + if err != nil { + return fmt.Errorf("could not read login URL line from stderr: %w", err) + } + const prompt = "Please log in: " + if !strings.HasPrefix(line, prompt) { + return fmt.Errorf("expected %q to have prefix %q", line, prompt) + } + loginURLChan <- strings.TrimPrefix(line, prompt) + return readAndExpectEmpty(reader) + }) + + // Start a background goroutine to read stdout from kubectl and return the result as a string. + kubectlOutputChan := make(chan string) + spawnTestGoroutine(t, func() (err error) { + defer func() { + closeErr := stdoutPipe.Close() + if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { + return + } + if err == nil { + err = fmt.Errorf("stdout stream closed with error: %w", closeErr) + } + }() + output, err := ioutil.ReadAll(stdoutPipe) + if err != nil { + return err + } + t.Logf("kubectl output:\n%s\n", output) + kubectlOutputChan <- string(output) + return nil + }) + + // Wait for the CLI to print out the login URL and open the browser to it. + t.Logf("waiting for CLI to output login URL") + var loginURL string + select { + case <-time.After(1 * time.Minute): + require.Fail(t, "timed out waiting for login URL") + case loginURL = <-loginURLChan: + } + t.Logf("navigating to login page") + require.NoError(t, page.Navigate(loginURL)) + + // Expect to be redirected to the upstream provider and log in. + browsertest.LoginToUpstream(t, page, env.SupervisorUpstreamOIDC) + + // Expect to be redirected to the localhost callback. + t.Logf("waiting for redirect to callback") + browsertest.WaitForURL(t, page, regexp.MustCompile(`\Ahttp://127\.0\.0\.1:[0-9]+/callback\?.+\z`)) + + // Wait for the "pre" element that gets rendered for a `text/plain` page, and + // assert that it contains the success message. + t.Logf("verifying success page") + browsertest.WaitForVisibleElements(t, page, "pre") + msg, err := page.First("pre").Text() + require.NoError(t, err) + require.Equal(t, "you have been logged in and may now close this tab", msg) + + // Expect the CLI to output a list of namespaces in JSON format. 
+ t.Logf("waiting for kubectl to output namespace list JSON") + var kubectlOutput string + select { + case <-time.After(10 * time.Second): + require.Fail(t, "timed out waiting for kubectl output") + case kubectlOutput = <-kubectlOutputChan: + } + require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned, got %q", kubectlOutput) + t.Logf("first kubectl command took %s", time.Since(start).String()) + + requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env, + downstream, + kubeconfigPath, + sessionCachePath, + pinnipedExe, + expectedUsername, + expectedGroups, + ) }) - // Use a specific session cache for this test. - sessionCachePath := tempDir + "/sessions.yaml" + // Add an LDAP upstream IDP and try using it to authenticate during kubectl commands. + t.Run("with Supervisor LDAP upstream IDP", func(t *testing.T) { + expectedUsername := env.SupervisorUpstreamLDAP.TestUserMailAttributeValue + expectedGroups := []string{} // LDAP groups are not implemented yet - // Run "pinniped get kubeconfig" to get a kubeconfig YAML. - envVarsWithProxy := append(os.Environ(), env.ProxyEnv()...) - kubeconfigYAML, stderr := runPinnipedCLI(t, envVarsWithProxy, pinnipedExe, "get", "kubeconfig", - "--concierge-api-group-suffix", env.APIGroupSuffix, - "--concierge-authenticator-type", "jwt", - "--concierge-authenticator-name", authenticator.Name, - "--oidc-skip-browser", - "--oidc-ca-bundle", testCABundlePath, - "--oidc-session-cache", sessionCachePath, - ) - t.Logf("stderr output from 'pinniped get kubeconfig':\n%s\n\n", stderr) - t.Logf("test kubeconfig:\n%s\n\n", kubeconfigYAML) + // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster. + library.CreateTestClusterRoleBinding(t, + rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: expectedUsername}, + rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"}, + ) + library.WaitForUserToHaveAccess(t, expectedUsername, []string{}, &authorizationv1.ResourceAttributes{ + Verb: "get", + Group: "", + Version: "v1", + Resource: "namespaces", + }) - restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML) - require.NotNil(t, restConfig.ExecProvider) - require.Equal(t, []string{"login", "oidc"}, restConfig.ExecProvider.Args[:2]) - kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml") - require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600)) + // Put the bind service account's info into a Secret. + bindSecret := library.CreateTestSecret(t, env.SupervisorNamespace, "ldap-service-account", corev1.SecretTypeBasicAuth, + map[string]string{ + corev1.BasicAuthUsernameKey: env.SupervisorUpstreamLDAP.BindUsername, + corev1.BasicAuthPasswordKey: env.SupervisorUpstreamLDAP.BindPassword, + }, + ) - // Run "kubectl get namespaces" which should trigger a browser login via the plugin. - start := time.Now() + // Create upstream LDAP provider and wait for it to become ready. 
+ library.CreateTestLDAPIdentityProvider(t, idpv1alpha1.LDAPIdentityProviderSpec{ + Host: env.SupervisorUpstreamLDAP.Host, + TLS: &idpv1alpha1.TLSSpec{ + CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamLDAP.CABundle)), + }, + Bind: idpv1alpha1.LDAPIdentityProviderBind{ + SecretName: bindSecret.Name, + }, + UserSearch: idpv1alpha1.LDAPIdentityProviderUserSearch{ + Base: env.SupervisorUpstreamLDAP.UserSearchBase, + Filter: "", + Attributes: idpv1alpha1.LDAPIdentityProviderUserSearchAttributes{ + Username: env.SupervisorUpstreamLDAP.TestUserMailAttributeName, + UID: env.SupervisorUpstreamLDAP.TestUserUniqueIDAttributeName, + }, + }, + }, idpv1alpha1.LDAPPhaseReady) + + // Use a specific session cache for this test. + sessionCachePath := tempDir + "/ldap-test-sessions.yaml" + + kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{ + "get", "kubeconfig", + "--concierge-api-group-suffix", env.APIGroupSuffix, + "--concierge-authenticator-type", "jwt", + "--concierge-authenticator-name", authenticator.Name, + "--oidc-session-cache", sessionCachePath, + }) + + // Run "kubectl get namespaces" which should trigger an LDAP-style login CLI prompt via the plugin. + start := time.Now() + kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) + kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) + ptyFile, err := pty.Start(kubectlCmd) + require.NoError(t, err) + + // Wait for the subprocess to print the username prompt, then type the user's username. + readFromFileUntilStringIsSeen(t, ptyFile, "Username: ") + _, err = ptyFile.WriteString(expectedUsername + "\n") + require.NoError(t, err) + + // Wait for the subprocess to print the password prompt, then type the user's password. + readFromFileUntilStringIsSeen(t, ptyFile, "Password: ") + _, err = ptyFile.WriteString(env.SupervisorUpstreamLDAP.TestUserPassword + "\n") + require.NoError(t, err) + + // Read all of the remaining output from the subprocess until EOF. + remainingOutput, err := ioutil.ReadAll(ptyFile) + require.NoError(t, err) + require.Greaterf(t, len(strings.Split(string(remainingOutput), "\n")), 2, "expected some namespaces to be returned, got %q", string(remainingOutput)) + t.Logf("first kubectl command took %s", time.Since(start).String()) + + requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env, + downstream, + kubeconfigPath, + sessionCachePath, + pinnipedExe, + expectedUsername, + expectedGroups, + ) + }) +} + +func readFromFileUntilStringIsSeen(t *testing.T, f *os.File, until string) string { + readFromFile := "" + + library.RequireEventuallyWithoutError(t, func() (bool, error) { + someOutput, foundEOF := readAvailableOutput(t, f) + readFromFile += someOutput + if strings.Contains(readFromFile, until) { + return true, nil // found it! finished. 
+ } + if foundEOF { + return false, fmt.Errorf("reached EOF of subcommand's output without seeing expected string %q", until) + } + return false, nil // keep waiting and reading + }, 1*time.Minute, 1*time.Second) + + return readFromFile +} + +func readAvailableOutput(t *testing.T, r io.Reader) (string, bool) { + buf := make([]byte, 1024) + n, err := r.Read(buf) + if err != nil { + if err == io.EOF { + return string(buf[:n]), true + } else { + require.NoError(t, err) + } + } + return string(buf[:n]), false +} + +func requireUserCanUseKubectlWithoutAuthenticatingAgain( + ctx context.Context, + t *testing.T, + env *library.TestEnv, + downstream *configv1alpha1.FederationDomain, + kubeconfigPath string, + sessionCachePath string, + pinnipedExe string, + expectedUsername string, + expectedGroups []string, +) { + // Run kubectl, which should work without any prompting for authentication. kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) - stderrPipe, err := kubectlCmd.StderrPipe() - require.NoError(t, err) - stdoutPipe, err := kubectlCmd.StdoutPipe() - require.NoError(t, err) - - t.Logf("starting kubectl subprocess") - require.NoError(t, kubectlCmd.Start()) - t.Cleanup(func() { - err := kubectlCmd.Wait() - t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode()) - stdout, stdoutErr := ioutil.ReadAll(stdoutPipe) - if stdoutErr != nil { - stdout = []byte("") - } - stderr, stderrErr := ioutil.ReadAll(stderrPipe) - if stderrErr != nil { - stderr = []byte("") - } - require.NoErrorf(t, err, "kubectl process did not exit cleanly, stdout/stderr: %q/%q", string(stdout), string(stderr)) - }) - - // Start a background goroutine to read stderr from the CLI and parse out the login URL. - loginURLChan := make(chan string) - spawnTestGoroutine(t, func() (err error) { - defer func() { - closeErr := stderrPipe.Close() - if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { - return - } - if err == nil { - err = fmt.Errorf("stderr stream closed with error: %w", closeErr) - } - }() - - reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderrPipe)) - line, err := reader.ReadString('\n') - if err != nil { - return fmt.Errorf("could not read login URL line from stderr: %w", err) - } - const prompt = "Please log in: " - if !strings.HasPrefix(line, prompt) { - return fmt.Errorf("expected %q to have prefix %q", line, prompt) - } - loginURLChan <- strings.TrimPrefix(line, prompt) - return readAndExpectEmpty(reader) - }) - - // Start a background goroutine to read stdout from kubectl and return the result as a string. - kubectlOutputChan := make(chan string) - spawnTestGoroutine(t, func() (err error) { - defer func() { - closeErr := stdoutPipe.Close() - if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { - return - } - if err == nil { - err = fmt.Errorf("stdout stream closed with error: %w", closeErr) - } - }() - output, err := ioutil.ReadAll(stdoutPipe) - if err != nil { - return err - } - t.Logf("kubectl output:\n%s\n", output) - kubectlOutputChan <- string(output) - return nil - }) - - // Wait for the CLI to print out the login URL and open the browser to it. 
- t.Logf("waiting for CLI to output login URL") - var loginURL string - select { - case <-time.After(1 * time.Minute): - require.Fail(t, "timed out waiting for login URL") - case loginURL = <-loginURLChan: - } - t.Logf("navigating to login page") - require.NoError(t, page.Navigate(loginURL)) - - // Expect to be redirected to the upstream provider and log in. - browsertest.LoginToUpstream(t, page, env.SupervisorUpstreamOIDC) - - // Expect to be redirected to the localhost callback. - t.Logf("waiting for redirect to callback") - browsertest.WaitForURL(t, page, regexp.MustCompile(`\Ahttp://127\.0\.0\.1:[0-9]+/callback\?.+\z`)) - - // Wait for the "pre" element that gets rendered for a `text/plain` page, and - // assert that it contains the success message. - t.Logf("verifying success page") - browsertest.WaitForVisibleElements(t, page, "pre") - msg, err := page.First("pre").Text() - require.NoError(t, err) - require.Equal(t, "you have been logged in and may now close this tab", msg) - - // Expect the CLI to output a list of namespaces in JSON format. - t.Logf("waiting for kubectl to output namespace list JSON") - var kubectlOutput string - select { - case <-time.After(10 * time.Second): - require.Fail(t, "timed out waiting for kubectl output") - case kubectlOutput = <-kubectlOutputChan: - } - require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned, got %q", kubectlOutput) - t.Logf("first kubectl command took %s", time.Since(start).String()) - - // Run kubectl again, which should work with no browser interaction. - kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) - kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...) - start = time.Now() - kubectlOutput2, err := kubectlCmd2.CombinedOutput() + startTime := time.Now() + kubectlOutput2, err := kubectlCmd.CombinedOutput() require.NoError(t, err) require.Greaterf(t, len(bytes.Split(kubectlOutput2, []byte("\n"))), 2, "expected some namespaces to be returned again") - t.Logf("second kubectl command took %s", time.Since(start).String()) + t.Logf("second kubectl command took %s", time.Since(startTime).String()) - // probe our cache for the current ID token as a proxy for a whoami API + // Probe our cache for the current ID token as a proxy for a whoami API. cache := filesession.New(sessionCachePath, filesession.WithErrorReporter(func(err error) { require.NoError(t, err) })) @@ -286,49 +426,52 @@ func TestE2EFullIntegration(t *testing.T) { require.NotNil(t, token) idTokenClaims := token.IDToken.Claims - require.Equal(t, env.SupervisorUpstreamOIDC.Username, idTokenClaims[oidc.DownstreamUsernameClaim]) + require.Equal(t, expectedUsername, idTokenClaims[oidc.DownstreamUsernameClaim]) // The groups claim in the file ends up as an []interface{}, so adjust our expectation to match. 
- expectedGroups := make([]interface{}, 0, len(env.SupervisorUpstreamOIDC.ExpectedGroups)) - for _, g := range env.SupervisorUpstreamOIDC.ExpectedGroups { - expectedGroups = append(expectedGroups, g) + expectedGroupsAsEmptyInterfaces := make([]interface{}, 0, len(expectedGroups)) + for _, g := range expectedGroups { + expectedGroupsAsEmptyInterfaces = append(expectedGroupsAsEmptyInterfaces, g) } - require.Equal(t, expectedGroups, idTokenClaims[oidc.DownstreamGroupsClaim]) + require.Equal(t, expectedGroupsAsEmptyInterfaces, idTokenClaims[oidc.DownstreamGroupsClaim]) - // confirm we are the right user according to Kube expectedYAMLGroups := func() string { var b strings.Builder - for _, g := range env.SupervisorUpstreamOIDC.ExpectedGroups { + for _, g := range expectedGroups { b.WriteString("\n") b.WriteString(` - `) b.WriteString(g) } return b.String() }() + + // Confirm we are the right user according to Kube by calling the whoami API. kubectlCmd3 := exec.CommandContext(ctx, "kubectl", "create", "-f", "-", "-o", "yaml", "--kubeconfig", kubeconfigPath) kubectlCmd3.Env = append(os.Environ(), env.ProxyEnv()...) - kubectlCmd3.Stdin = strings.NewReader(` -apiVersion: identity.concierge.` + env.APIGroupSuffix + `/v1alpha1 -kind: WhoAmIRequest -`) + kubectlCmd3.Stdin = strings.NewReader(here.Docf(` + apiVersion: identity.concierge.%s/v1alpha1 + kind: WhoAmIRequest + `, env.APIGroupSuffix)) + kubectlOutput3, err := kubectlCmd3.CombinedOutput() require.NoError(t, err) - require.Equal(t, - `apiVersion: identity.concierge.`+env.APIGroupSuffix+`/v1alpha1 -kind: WhoAmIRequest -metadata: - creationTimestamp: null -spec: {} -status: - kubernetesUserInfo: - user: - groups:`+expectedYAMLGroups+` - - system:authenticated - username: `+env.SupervisorUpstreamOIDC.Username+` -`, + + require.Equal(t, here.Docf(` + apiVersion: identity.concierge.%s/v1alpha1 + kind: WhoAmIRequest + metadata: + creationTimestamp: null + spec: {} + status: + kubernetesUserInfo: + user: + groups:%s + - system:authenticated + username: %s + `, env.APIGroupSuffix, expectedYAMLGroups, expectedUsername), string(kubectlOutput3)) - expectedGroupsPlusAuthenticated := append([]string{}, env.SupervisorUpstreamOIDC.ExpectedGroups...) + expectedGroupsPlusAuthenticated := append([]string{}, expectedGroups...) expectedGroupsPlusAuthenticated = append(expectedGroupsPlusAuthenticated, "system:authenticated") // Validate that `pinniped whoami` returns the correct identity. assertWhoami( @@ -337,7 +480,24 @@ status: true, pinnipedExe, kubeconfigPath, - env.SupervisorUpstreamOIDC.Username, + expectedUsername, expectedGroupsPlusAuthenticated, ) } + +func runPinnipedGetKubeconfig(t *testing.T, env *library.TestEnv, pinnipedExe string, tempDir string, pinnipedCLICommand []string) string { + // Run "pinniped get kubeconfig" to get a kubeconfig YAML. + envVarsWithProxy := append(os.Environ(), env.ProxyEnv()...) + kubeconfigYAML, stderr := runPinnipedCLI(t, envVarsWithProxy, pinnipedExe, pinnipedCLICommand...) + t.Logf("stderr output from 'pinniped get kubeconfig':\n%s\n\n", stderr) + t.Logf("test kubeconfig:\n%s\n\n", kubeconfigYAML) + + restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML) + require.NotNil(t, restConfig.ExecProvider) + require.Equal(t, []string{"login", "oidc"}, restConfig.ExecProvider.Args[:2]) + + kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml") + require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600)) + + return kubeconfigPath +}
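
Editorial note on the new dependency: github.com/creack/pty is pulled in so the LDAP subtest can run kubectl under a pseudo-terminal, since the credential plugin's "Username: "/"Password: " prompts are interactive and are answered by writing to the pty rather than to a plain pipe. Below is a minimal standalone sketch of that prompt-then-respond pattern, assuming a hypothetical helper name and a hypothetical "some-interactive-cli" command; it mirrors the technique used by readFromFileUntilStringIsSeen in the patch but is not code from the patch itself.

// Minimal sketch of driving an interactive prompt through a pseudo-terminal
// with github.com/creack/pty. Names below are illustrative assumptions.
package main

import (
	"fmt"
	"io/ioutil"
	"os/exec"
	"strings"

	"github.com/creack/pty"
)

// answerPrompt starts cmd attached to a pty (so the child believes it has a
// real terminal and issues its interactive prompt), waits for the prompt text
// to appear in the child's output, types the response, and returns whatever
// the child prints afterwards.
func answerPrompt(cmd *exec.Cmd, prompt, response string) (string, error) {
	ptyFile, err := pty.Start(cmd) // also starts the command
	if err != nil {
		return "", err
	}
	defer ptyFile.Close()

	// Accumulate output until the prompt is seen.
	seen := ""
	buf := make([]byte, 1024)
	for !strings.Contains(seen, prompt) {
		n, readErr := ptyFile.Read(buf)
		seen += string(buf[:n])
		if readErr != nil {
			break // stream ended before the prompt appeared
		}
	}
	if !strings.Contains(seen, prompt) {
		return "", fmt.Errorf("never saw prompt %q; output so far: %q", prompt, seen)
	}

	// The trailing newline simulates the user pressing enter.
	if _, err := ptyFile.WriteString(response + "\n"); err != nil {
		return "", err
	}

	// Drain the remaining output, then reap the child process.
	rest, _ := ioutil.ReadAll(ptyFile)
	_ = cmd.Wait()
	return string(rest), nil
}

func main() {
	// Hypothetical command; any CLI that prompts on its controlling terminal would do.
	out, err := answerPrompt(exec.Command("some-interactive-cli"), "Username: ", "pinny")
	fmt.Println(out, err)
}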