From 8ffd9fdc4e083b8abc6c168e185e1cabaa884e9a Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Tue, 6 Apr 2021 15:13:27 -0700 Subject: [PATCH 01/32] Started debug logging. --- cmd/pinniped/cmd/login_oidc.go | 21 ++++++++++++++++++--- cmd/pinniped/cmd/login_static.go | 5 +++++ 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 34ead8f8..9eaa433f 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -20,10 +20,12 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/transport" "k8s.io/klog/v2/klogr" "go.pinniped.dev/internal/execcredcache" "go.pinniped.dev/internal/groupsuffix" + "go.pinniped.dev/internal/plog" "go.pinniped.dev/pkg/conciergeclient" "go.pinniped.dev/pkg/oidcclient" "go.pinniped.dev/pkg/oidcclient/filesession" @@ -110,6 +112,8 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command { } func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error { + SetLogLevel() + // Initialize the session cache. var sessionOptions []filesession.Option @@ -153,6 +157,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // --skip-browser replaces the default "browser open" function with one that prints to stderr. if flags.skipBrowser { + plog.Debug("skipping browser.") opts = append(opts, oidcclient.WithBrowserOpen(func(url string) error { cmd.PrintErr("Please log in: ", url, "\n") return nil @@ -166,7 +171,6 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin } opts = append(opts, oidcclient.WithClient(client)) } - // Look up cached credentials based on a hash of all the CLI arguments and the cluster info. 
cacheKey := struct { Args []string `json:"args"` @@ -183,6 +187,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin } } + plog.Debug("performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID) // Do the basic login to get an OIDC token. token, err := deps.login(flags.issuer, flags.clientID, opts...) if err != nil { @@ -192,6 +197,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential. if concierge != nil { + plog.Debug("exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -224,7 +230,7 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er } pool.AppendCertsFromPEM(pem) } - return &http.Client{ + client := &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{ @@ -232,7 +238,10 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er MinVersion: tls.VersionTLS12, }, }, - }, nil + } + + client.Transport = transport.DebugWrappers(client.Transport) + return client, nil } func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential { @@ -251,6 +260,12 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential { return &cred } +func SetLogLevel() { + if os.Getenv("PINNIPED_DEBUG") == "true" { + _ = plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) + } +} + // mustGetConfigDir returns a directory that follows the XDG base directory convention: // $XDG_CONFIG_HOME defines the base directory relative to which user specific configuration files should // be stored. 
If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used. diff --git a/cmd/pinniped/cmd/login_static.go b/cmd/pinniped/cmd/login_static.go index 4b9ac2fd..4a6b0513 100644 --- a/cmd/pinniped/cmd/login_static.go +++ b/cmd/pinniped/cmd/login_static.go @@ -17,6 +17,7 @@ import ( "go.pinniped.dev/internal/execcredcache" "go.pinniped.dev/internal/groupsuffix" + "go.pinniped.dev/internal/plog" "go.pinniped.dev/pkg/conciergeclient" "go.pinniped.dev/pkg/oidcclient/oidctypes" ) @@ -83,6 +84,8 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command { } func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error { + SetLogLevel() + if flags.staticToken == "" && flags.staticTokenEnvName == "" { return fmt.Errorf("one of --token or --token-env must be set") } @@ -137,6 +140,7 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams // If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential. if concierge != nil { + plog.Debug("exchanging static token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -145,6 +149,7 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams if err != nil { return fmt.Errorf("could not complete Concierge credential exchange: %w", err) } + plog.Debug("exchanged static token for cluster credential") } // If there was a credential cache, save the resulting credential for future use. We only save to the cache if From 211d4fd0b6f5e312e62bc3e360c659c457b06408 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Wed, 7 Apr 2021 15:30:29 -0700 Subject: [PATCH 02/32] Add more logging, integration test checks that debug flag works. 
--- cmd/pinniped/cmd/login_oidc.go | 7 ++++--- pkg/oidcclient/login.go | 7 ++++++- test/integration/cli_test.go | 31 +++++++++++++++++++++++-------- 3 files changed, 33 insertions(+), 12 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 9eaa433f..88a5d36a 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -157,7 +157,6 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // --skip-browser replaces the default "browser open" function with one that prints to stderr. if flags.skipBrowser { - plog.Debug("skipping browser.") opts = append(opts, oidcclient.WithBrowserOpen(func(url string) error { cmd.PrintErr("Please log in: ", url, "\n") return nil @@ -187,7 +186,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin } } - plog.Debug("performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID) + plog.Debug("Pinniped: Performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID) // Do the basic login to get an OIDC token. token, err := deps.login(flags.issuer, flags.clientID, opts...) if err != nil { @@ -197,7 +196,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential. 
if concierge != nil { - plog.Debug("exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) + plog.Debug("Pinniped: Exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -205,6 +204,8 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin if err != nil { return fmt.Errorf("could not complete Concierge credential exchange: %w", err) } + } else { + plog.Debug("Pinniped: No concierge configured, skipping token credential exchange") } // If there was a credential cache, save the resulting credential for future use. diff --git a/pkg/oidcclient/login.go b/pkg/oidcclient/login.go index 53b6efd4..4d753944 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -1,4 +1,4 @@ -// Copyright 2020 the Pinniped contributors. All Rights Reserved. +// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package oidcclient implements a CLI OIDC login flow. @@ -24,6 +24,7 @@ import ( "go.pinniped.dev/internal/httputil/httperr" "go.pinniped.dev/internal/httputil/securityheader" "go.pinniped.dev/internal/oidc/provider" + "go.pinniped.dev/internal/plog" "go.pinniped.dev/internal/upstreamoidc" "go.pinniped.dev/pkg/oidcclient/nonce" "go.pinniped.dev/pkg/oidcclient/oidctypes" @@ -260,6 +261,7 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) { // If the ID token is still valid for a bit, return it immediately and skip the rest of the flow. 
cached := h.cache.GetToken(cacheKey) if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity { + plog.Debug("Pinniped: Found unexpired cached token") return cached, nil } @@ -327,6 +329,7 @@ func (h *handlerState) initOIDCDiscovery() error { return nil } + plog.Debug("Pinniped: Performing OIDC discovery", "issuer", h.issuer) var err error h.provider, err = oidc.NewProvider(h.ctx, h.issuer) if err != nil { @@ -343,6 +346,7 @@ func (h *handlerState) initOIDCDiscovery() error { } func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) { + plog.Debug("Pinniped: Performing RFC8693 token exchange", "requested audience", h.requestedAudience) // Perform OIDC discovery. This may have already been performed if there was not a cached base token. if err := h.initOIDCDiscovery(); err != nil { return nil, err @@ -413,6 +417,7 @@ func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidcty } func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) { + plog.Debug("refreshing cached token") refreshSource := h.oauth2Config.TokenSource(ctx, &oauth2.Token{RefreshToken: refreshToken.Token}) refreshed, err := refreshSource.Token() diff --git a/test/integration/cli_test.go b/test/integration/cli_test.go index 3bf37437..6f58ed61 100644 --- a/test/integration/cli_test.go +++ b/test/integration/cli_test.go @@ -237,6 +237,18 @@ func TestCLILoginOIDC(t *testing.T) { require.NoErrorf(t, json.Unmarshal(cmd3Output, &credOutput3), "command returned something other than an ExecCredential:\n%s", string(cmd2Output)) require.NotEqual(t, credOutput2.Status.Token, credOutput3.Status.Token) + + t.Logf("starting fourth CLI subprocess to test debug logging") + err = os.Setenv("PINNIPED_DEBUG", "true") + require.NoError(t, err) + command := oidcLoginCommand(ctx, t, pinnipedExe, sessionCachePath) + cmd4CombinedOutput, err := 
command.CombinedOutput() + require.NoError(t, err, string(cmd4CombinedOutput)) + + require.Contains(t, string(cmd4CombinedOutput), "Performing OIDC login") + require.Contains(t, string(cmd4CombinedOutput), "Found unexpired cached token") + require.Contains(t, string(cmd4CombinedOutput), "No concierge configured, skipping token credential exchange") + require.Contains(t, string(cmd4CombinedOutput), credOutput3.Status.Token) } func runPinnipedLoginOIDC( @@ -271,6 +283,7 @@ func runPinnipedLoginOIDC( // Start a background goroutine to read stderr from the CLI and parse out the login URL. loginURLChan := make(chan string) spawnTestGoroutine(t, func() (err error) { + t.Helper() defer func() { closeErr := stderr.Close() if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { @@ -282,16 +295,18 @@ func runPinnipedLoginOIDC( }() reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderr)) - line, err := reader.ReadString('\n') - if err != nil { - return fmt.Errorf("could not read login URL line from stderr: %w", err) - } + + scanner := bufio.NewScanner(reader) const prompt = "Please log in: " - if !strings.HasPrefix(line, prompt) { - return fmt.Errorf("expected %q to have prefix %q", line, prompt) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, prompt) { + loginURLChan <- strings.TrimPrefix(line, prompt) + return nil + } } - loginURLChan <- strings.TrimPrefix(line, prompt) - return readAndExpectEmpty(reader) + + return fmt.Errorf("expected stderr to contain %s", prompt) }) // Start a background goroutine to read stdout from the CLI and parse out an ExecCredential. From 6a21499ed31a5bf82cd488afb8d66f0dea3a9f1b Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Wed, 7 Apr 2021 15:54:48 -0700 Subject: [PATCH 03/32] Add check for number of log lines. 
--- test/integration/cli_test.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/test/integration/cli_test.go b/test/integration/cli_test.go index 6f58ed61..2181290a 100644 --- a/test/integration/cli_test.go +++ b/test/integration/cli_test.go @@ -208,6 +208,8 @@ func TestCLILoginOIDC(t *testing.T) { require.NoErrorf(t, json.Unmarshal(cmd2Output, &credOutput2), "command returned something other than an ExecCredential:\n%s", string(cmd2Output)) require.Equal(t, credOutput, credOutput2) + // the logs contain only the ExecCredential. There are 2 elements because the last one is "". + require.Len(t, strings.Split(string(cmd2Output), "\n"), 2) // Overwrite the cache entry to remove the access and ID tokens. t.Logf("overwriting cache to remove valid ID token") @@ -237,18 +239,23 @@ func TestCLILoginOIDC(t *testing.T) { require.NoErrorf(t, json.Unmarshal(cmd3Output, &credOutput3), "command returned something other than an ExecCredential:\n%s", string(cmd2Output)) require.NotEqual(t, credOutput2.Status.Token, credOutput3.Status.Token) + // the logs contain only the ExecCredential. There are 2 elements because the last one is "". 
+ require.Len(t, strings.Split(string(cmd3Output), "\n"), 2) t.Logf("starting fourth CLI subprocess to test debug logging") err = os.Setenv("PINNIPED_DEBUG", "true") require.NoError(t, err) command := oidcLoginCommand(ctx, t, pinnipedExe, sessionCachePath) cmd4CombinedOutput, err := command.CombinedOutput() - require.NoError(t, err, string(cmd4CombinedOutput)) + cmd4StringOutput := string(cmd4CombinedOutput) + require.NoError(t, err, cmd4StringOutput) - require.Contains(t, string(cmd4CombinedOutput), "Performing OIDC login") - require.Contains(t, string(cmd4CombinedOutput), "Found unexpired cached token") - require.Contains(t, string(cmd4CombinedOutput), "No concierge configured, skipping token credential exchange") - require.Contains(t, string(cmd4CombinedOutput), credOutput3.Status.Token) + // the logs contain only the 3 debug lines plus the ExecCredential. There are 5 elements because the last one is "". + require.Len(t, strings.Split(cmd4StringOutput, "\n"), 5) + require.Contains(t, cmd4StringOutput, "Performing OIDC login") + require.Contains(t, cmd4StringOutput, "Found unexpired cached token") + require.Contains(t, cmd4StringOutput, "No concierge configured, skipping token credential exchange") + require.Contains(t, cmd4StringOutput, credOutput3.Status.Token) } func runPinnipedLoginOIDC( From 45e4695444157f9c6b96db349e046197159b706f Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Thu, 8 Apr 2021 10:14:29 -0700 Subject: [PATCH 04/32] Unset pinniped debug environment variable at end of integration test Also log when setting the debug log level fails --- cmd/pinniped/cmd/login_oidc.go | 13 ++++++++++--- cmd/pinniped/cmd/login_static.go | 5 ++++- test/integration/cli_test.go | 2 ++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 88a5d36a..c7ad0594 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -112,7 +112,10 @@ func oidcLoginCommand(deps 
oidcLoginCommandDeps) *cobra.Command { } func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error { - SetLogLevel() + err := SetLogLevel() + if err != nil { + plog.WarningErr("Received error while setting log level", err) + } // Initialize the session cache. var sessionOptions []filesession.Option @@ -261,10 +264,14 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential { return &cred } -func SetLogLevel() { +func SetLogLevel() error { if os.Getenv("PINNIPED_DEBUG") == "true" { - _ = plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) + err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) + if err != nil { + return err + } } + return nil } // mustGetConfigDir returns a directory that follows the XDG base directory convention: diff --git a/cmd/pinniped/cmd/login_static.go b/cmd/pinniped/cmd/login_static.go index 4a6b0513..dcdc8c4d 100644 --- a/cmd/pinniped/cmd/login_static.go +++ b/cmd/pinniped/cmd/login_static.go @@ -84,7 +84,10 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command { } func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error { - SetLogLevel() + err := SetLogLevel() + if err != nil { + plog.WarningErr("Received error while setting log level", err) + } if flags.staticToken == "" && flags.staticTokenEnvName == "" { return fmt.Errorf("one of --token or --token-env must be set") diff --git a/test/integration/cli_test.go b/test/integration/cli_test.go index 2181290a..622e9ba5 100644 --- a/test/integration/cli_test.go +++ b/test/integration/cli_test.go @@ -256,6 +256,8 @@ func TestCLILoginOIDC(t *testing.T) { require.Contains(t, cmd4StringOutput, "Found unexpired cached token") require.Contains(t, cmd4StringOutput, "No concierge configured, skipping token credential exchange") require.Contains(t, cmd4StringOutput, credOutput3.Status.Token) + err = os.Unsetenv("PINNIPED_DEBUG") + require.NoError(t, err) } func runPinnipedLoginOIDC( From 
b5889f37ff68b10bc4fe3dca8a3c1d01913dcc16 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Fri, 16 Apr 2021 10:46:59 -0700 Subject: [PATCH 05/32] WIP on new plog --- cmd/pinniped/cmd/login_oidc.go | 15 ++-- cmd/pinniped/cmd/login_oidc_test.go | 28 ++++++ cmd/pinniped/cmd/login_static.go | 6 +- internal/plog/plog.go | 131 ++++++++++++++++++++++++---- pkg/oidcclient/login_test.go | 84 +++++++++++++++--- 5 files changed, 228 insertions(+), 36 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index c7ad0594..091827cc 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -112,7 +112,7 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command { } func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error { - err := SetLogLevel() + pLogger, err := SetLogLevel() if err != nil { plog.WarningErr("Received error while setting log level", err) } @@ -189,7 +189,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin } } - plog.Debug("Pinniped: Performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID) + pLogger.Debug("Performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID) // Do the basic login to get an OIDC token. token, err := deps.login(flags.issuer, flags.clientID, opts...) if err != nil { @@ -199,7 +199,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential. 
if concierge != nil { - plog.Debug("Pinniped: Exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) + pLogger.Debug("Exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -208,7 +208,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin return fmt.Errorf("could not complete Concierge credential exchange: %w", err) } } else { - plog.Debug("Pinniped: No concierge configured, skipping token credential exchange") + pLogger.Debug("No concierge configured, skipping token credential exchange") } // If there was a credential cache, save the resulting credential for future use. @@ -264,14 +264,15 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential { return &cred } -func SetLogLevel() error { +func SetLogLevel() (*plog.PLogger, error) { if os.Getenv("PINNIPED_DEBUG") == "true" { err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) if err != nil { - return err + return nil, err } } - return nil + logger := plog.New("Pinniped login: ") + return &logger, nil } // mustGetConfigDir returns a directory that follows the XDG base directory convention: diff --git a/cmd/pinniped/cmd/login_oidc_test.go b/cmd/pinniped/cmd/login_oidc_test.go index 8472f6af..541c6a6d 100644 --- a/cmd/pinniped/cmd/login_oidc_test.go +++ b/cmd/pinniped/cmd/login_oidc_test.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "fmt" "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -16,10 +17,12 @@ import ( "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/klog/v2" 
"go.pinniped.dev/internal/certauthority" "go.pinniped.dev/internal/here" "go.pinniped.dev/internal/testutil" + "go.pinniped.dev/internal/testutil/testlogger" "go.pinniped.dev/pkg/conciergeclient" "go.pinniped.dev/pkg/oidcclient" "go.pinniped.dev/pkg/oidcclient/oidctypes" @@ -41,10 +44,12 @@ func TestLoginOIDCCommand(t *testing.T) { args []string loginErr error conciergeErr error + envVars map[string]string wantError bool wantStdout string wantStderr string wantOptionsCount int + wantLogs []string }{ { name: "help flag passed", @@ -170,8 +175,13 @@ func TestLoginOIDCCommand(t *testing.T) { "--client-id", "test-client-id", "--issuer", "test-issuer", }, + envVars: map[string]string{"PINNIPED_DEBUG": "true"}, wantOptionsCount: 3, wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n", + wantLogs: []string{ + "\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"", + "\"level\"=0 \"msg\"=\"Pinniped login: No concierge configured, skipping token credential exchange\"", + }, }, { name: "success with all options", @@ -192,13 +202,29 @@ func TestLoginOIDCCommand(t *testing.T) { "--concierge-api-group-suffix", "some.suffix.com", "--credential-cache", testutil.TempDir(t) + "/credentials.yaml", }, + envVars: map[string]string{"PINNIPED_DEBUG": "true"}, wantOptionsCount: 7, wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"exchanged-token"}}` + "\n", + wantLogs: []string{ + "\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"", + "\"level\"=0 \"msg\"=\"Pinniped login: Exchanging token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1:1234/\"", + }, }, } 
for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.envVars { + kk := k + err := os.Setenv(kk, v) + require.NoError(t, err) + t.Cleanup(func() { + t.Log("cleaning up " + kk) + err = os.Unsetenv(kk) + }) + } + testLogger := testlogger.New(t) + klog.SetLogger(testLogger) var ( gotOptions []oidcclient.Option ) @@ -248,6 +274,8 @@ func TestLoginOIDCCommand(t *testing.T) { require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout") require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr") require.Len(t, gotOptions, tt.wantOptionsCount) + + require.Equal(t, tt.wantLogs, testLogger.Lines()) }) } } diff --git a/cmd/pinniped/cmd/login_static.go b/cmd/pinniped/cmd/login_static.go index dcdc8c4d..fb1df4d5 100644 --- a/cmd/pinniped/cmd/login_static.go +++ b/cmd/pinniped/cmd/login_static.go @@ -84,7 +84,7 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command { } func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error { - err := SetLogLevel() + pLogger, err := SetLogLevel() if err != nil { plog.WarningErr("Received error while setting log level", err) } @@ -143,7 +143,7 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams // If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential. 
if concierge != nil { - plog.Debug("exchanging static token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) + pLogger.Debug("exchanging static token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -152,7 +152,7 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams if err != nil { return fmt.Errorf("could not complete Concierge credential exchange: %w", err) } - plog.Debug("exchanged static token for cluster credential") + pLogger.Debug("exchanged static token for cluster credential") } // If there was a credential cache, save the resulting credential for future use. We only save to the cache if diff --git a/internal/plog/plog.go b/internal/plog/plog.go index b38e7179..cf6bc80b 100644 --- a/internal/plog/plog.go +++ b/internal/plog/plog.go @@ -1,4 +1,4 @@ -// Copyright 2020 the Pinniped contributors. All Rights Reserved. +// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package plog implements a thin layer over klog to help enforce pinniped's logging convention. @@ -26,56 +26,157 @@ // act of desperation to determine why the system is broken. package plog -import "k8s.io/klog/v2" +import ( + "k8s.io/klog/v2" +) const errorKey = "error" -// Use Error to log an unexpected system error. -func Error(msg string, err error, keysAndValues ...interface{}) { - klog.ErrorS(err, msg, keysAndValues...) 
+type _ interface { + Error(msg string, err error, keysAndValues ...interface{}) + Warning(msg string, keysAndValues ...interface{}) + WarningErr(msg string, err error, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...interface{}) + InfoErr(msg string, err error, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...interface{}) + DebugErr(msg string, err error, keysAndValues ...interface{}) + Trace(msg string, keysAndValues ...interface{}) + TraceErr(msg string, err error, keysAndValues ...interface{}) + All(msg string, keysAndValues ...interface{}) } -func Warning(msg string, keysAndValues ...interface{}) { +type PLogger struct { + prefix string + depth int +} + +func New(prefix string) PLogger { + return PLogger{ + depth: 0, + prefix: prefix, + } +} + +func (p *PLogger) Error(msg string, err error, keysAndValues ...interface{}) { + klog.ErrorSDepth(p.depth+1, err, p.prefix+msg, keysAndValues...) +} + +func (p *PLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) { // klog's structured logging has no concept of a warning (i.e. no WarningS function) // Thus we use info at log level zero as a proxy // klog's info logs have an I prefix and its warning logs have a W prefix // Since we lose the W prefix by using InfoS, just add a key to make these easier to find keysAndValues = append([]interface{}{"warning", "true"}, keysAndValues...) - klog.V(klogLevelWarning).InfoS(msg, keysAndValues...) + if klog.V(klogLevelWarning).Enabled() { + klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...) + } +} + +func (p *PLogger) Warning(msg string, keysAndValues ...interface{}) { + p.warningDepth(msg, p.depth+1, keysAndValues...) +} + +// Use WarningErr to issue a Warning message with an error object as part of the message. +func (p *PLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) { + p.warningDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...) 
+} + +func (p *PLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) { + if klog.V(klogLevelInfo).Enabled() { + klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...) + } +} + +func (p *PLogger) Info(msg string, keysAndValues ...interface{}) { + p.infoDepth(msg, p.depth+1, keysAndValues...) +} + +// Use InfoErr to log an expected error, e.g. validation failure of an http parameter. +func (p *PLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) { + p.infoDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...) +} + +func (p *PLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) { + if klog.V(klogLevelDebug).Enabled() { + klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...) + } +} + +func (p *PLogger) Debug(msg string, keysAndValues ...interface{}) { + p.debugDepth(msg, p.depth+1, keysAndValues...) +} + +// Use DebugErr to issue a Debug message with an error object as part of the message. +func (p *PLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) { + p.debugDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...) +} + +func (p *PLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) { + if klog.V(klogLevelTrace).Enabled() { + klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...) + } +} + +func (p *PLogger) Trace(msg string, keysAndValues ...interface{}) { + p.traceDepth(msg, p.depth+1, keysAndValues...) +} + +// Use TraceErr to issue a Trace message with an error object as part of the message. +func (p *PLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) { + p.traceDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...) +} + +func (p *PLogger) All(msg string, keysAndValues ...interface{}) { + if klog.V(klogLevelAll).Enabled() { + klog.InfoSDepth(p.depth+1, p.prefix+msg, keysAndValues...) 
+ } +} + +var pLogger = PLogger{ //nolint:gochecknoglobals + depth: 1, +} + +// Use Error to log an unexpected system error. +func Error(msg string, err error, keysAndValues ...interface{}) { + pLogger.Error(msg, err, keysAndValues...) +} + +func Warning(msg string, keysAndValues ...interface{}) { + pLogger.Warning(msg, keysAndValues...) } // Use WarningErr to issue a Warning message with an error object as part of the message. func WarningErr(msg string, err error, keysAndValues ...interface{}) { - Warning(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) + pLogger.WarningErr(msg, err, keysAndValues...) } func Info(msg string, keysAndValues ...interface{}) { - klog.V(klogLevelInfo).InfoS(msg, keysAndValues...) + pLogger.Info(msg, keysAndValues...) } // Use InfoErr to log an expected error, e.g. validation failure of an http parameter. func InfoErr(msg string, err error, keysAndValues ...interface{}) { - Info(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) + pLogger.InfoErr(msg, err, keysAndValues...) } func Debug(msg string, keysAndValues ...interface{}) { - klog.V(klogLevelDebug).InfoS(msg, keysAndValues...) + pLogger.Debug(msg, keysAndValues...) } // Use DebugErr to issue a Debug message with an error object as part of the message. func DebugErr(msg string, err error, keysAndValues ...interface{}) { - Debug(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) + pLogger.DebugErr(msg, err, keysAndValues...) } func Trace(msg string, keysAndValues ...interface{}) { - klog.V(klogLevelTrace).InfoS(msg, keysAndValues...) + pLogger.Trace(msg, keysAndValues...) } // Use TraceErr to issue a Trace message with an error object as part of the message. func TraceErr(msg string, err error, keysAndValues ...interface{}) { - Trace(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) + pLogger.TraceErr(msg, err, keysAndValues...) 
} func All(msg string, keysAndValues ...interface{}) { - klog.V(klogLevelAll).InfoS(msg, keysAndValues...) + pLogger.All(msg, keysAndValues...) } diff --git a/pkg/oidcclient/login_test.go b/pkg/oidcclient/login_test.go index 8005bcaf..f1f5f6cf 100644 --- a/pkg/oidcclient/login_test.go +++ b/pkg/oidcclient/login_test.go @@ -19,11 +19,14 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/oauth2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" "go.pinniped.dev/internal/httputil/httperr" "go.pinniped.dev/internal/mocks/mockupstreamoidcidentityprovider" "go.pinniped.dev/internal/oidc/provider" + "go.pinniped.dev/internal/plog" "go.pinniped.dev/internal/testutil" + "go.pinniped.dev/internal/testutil/testlogger" "go.pinniped.dev/pkg/oidcclient/nonce" "go.pinniped.dev/pkg/oidcclient/oidctypes" "go.pinniped.dev/pkg/oidcclient/pkce" @@ -205,6 +208,7 @@ func TestLogin(t *testing.T) { clientID string wantErr string wantToken *oidctypes.Token + wantLogs []string }{ { name: "option error", @@ -269,7 +273,8 @@ func TestLogin(t *testing.T) { return WithSessionCache(cache)(h) } }, - wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""}, + wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`, }, { name: "session cache hit with valid token", @@ -290,6 +295,7 @@ func TestLogin(t *testing.T) { return WithSessionCache(cache)(h) } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\""}, wantToken: &testToken, }, { @@ -297,8 +303,9 @@ func TestLogin(t *testing.T) { opt: func(t *testing.T) Option { return func(h *handlerState) error { return nil } }, - issuer: errorServer.URL, - wantErr: fmt.Sprintf("could not perform OIDC discovery 
for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), + issuer: errorServer.URL, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, + wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), }, { name: "session cache hit with refreshable token", @@ -337,6 +344,8 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"refreshing cached token\""}, wantToken: &testToken, }, { @@ -369,6 +378,8 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"refreshing cached token\""}, wantErr: "some validation error", }, { @@ -395,6 +406,8 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"refreshing cached token\""}, // Expect this to fall through to the authorization code flow, so it fails here. 
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", }, @@ -406,8 +419,9 @@ func TestLogin(t *testing.T) { return nil } }, - issuer: successServer.URL, - wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", + issuer: successServer.URL, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", }, { name: "browser open failure", @@ -416,8 +430,9 @@ func TestLogin(t *testing.T) { return fmt.Errorf("some browser open error") }) }, - issuer: successServer.URL, - wantErr: "could not open browser: some browser open error", + issuer: successServer.URL, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantErr: "could not open browser: some browser open error", }, { name: "timeout waiting for callback", @@ -433,8 +448,9 @@ func TestLogin(t *testing.T) { return nil } }, - issuer: successServer.URL, - wantErr: "timed out waiting for token callback: context canceled", + issuer: successServer.URL, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantErr: "timed out waiting for token callback: context canceled", }, { name: "callback returns error", @@ -449,8 +465,9 @@ func TestLogin(t *testing.T) { return nil } }, - issuer: successServer.URL, - wantErr: "error handling callback: some callback error", + issuer: successServer.URL, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantErr: "error handling callback: some callback error", }, { name: "callback returns success", @@ -510,6 +527,7 @@ func TestLogin(t *testing.T) { } }, issuer: 
successServer.URL, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantToken: &testToken, }, { @@ -533,6 +551,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"cluster-1234\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, wantErr: fmt.Sprintf("failed to exchange token: could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), }, { @@ -556,6 +577,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"cluster-1234\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""}, wantErr: `failed to exchange token: could not build RFC8693 request: parse "%": invalid URL escape "%"`, }, { @@ -579,6 +603,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-http-response\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: fmt.Sprintf(`failed to exchange token: Post "%s/token": failed to parse Location header "%%": parse "%%": invalid URL escape "%%"`, successServer.URL), }, { @@ -602,6 +629,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" 
\"requested audience\"=\"test-audience-produce-http-400\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: unexpected HTTP response status 400`, }, { @@ -625,6 +655,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-content-type\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: failed to decode content-type header: mime: invalid media parameter`, }, { @@ -648,6 +681,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-wrong-content-type\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: unexpected HTTP response content type "invalid"`, }, { @@ -671,6 +707,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-json\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: failed to decode response: unexpected EOF`, }, { @@ -694,6 +733,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested 
audience\"=\"test-audience-produce-invalid-tokentype\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: got unexpected token_type "invalid"`, }, { @@ -717,6 +759,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-issuedtokentype\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: got unexpected issued_token_type "invalid"`, }, { @@ -740,6 +785,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-jwt\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: received invalid JWT: oidc: malformed jwt: square/go-jose: compact JWS format must have three parts`, }, { @@ -769,6 +817,9 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantToken: &testExchangedToken, }, { @@ -816,18 +867,29 @@ func TestLogin(t *testing.T) { return nil } }, + wantLogs: []string{ + "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"refreshing cached token\"", + "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token 
exchange\" \"requested audience\"=\"test-audience\"", + }, wantToken: &testExchangedToken, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) + require.NoError(t, err) + testLogger := testlogger.New(t) + klog.SetLogger(testLogger) + tok, err := Login(tt.issuer, tt.clientID, WithContext(context.Background()), WithListenPort(0), WithScopes([]string{"test-scope"}), tt.opt(t), ) + require.Equal(t, tt.wantLogs, testLogger.Lines()) if tt.wantErr != "" { require.EqualError(t, err, tt.wantErr) require.Nil(t, tok) From 264778113d75ef6fb0014fa4d53fcd4eb6bdf92f Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Fri, 16 Apr 2021 14:38:05 -0700 Subject: [PATCH 06/32] lookupEnv in oidclogin same as for static --- cmd/pinniped/cmd/login_oidc.go | 11 +++++++---- cmd/pinniped/cmd/login_oidc_test.go | 20 +++++++------------- cmd/pinniped/cmd/login_static.go | 2 +- cmd/pinniped/cmd/login_static_test.go | 11 +++++++++++ 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 091827cc..37a19d58 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -38,13 +38,15 @@ func init() { } type oidcLoginCommandDeps struct { + lookupEnv func(string) (string, bool) login func(string, string, ...oidcclient.Option) (*oidctypes.Token, error) exchangeToken func(context.Context, *conciergeclient.Client, string) (*clientauthv1beta1.ExecCredential, error) } func oidcLoginCommandRealDeps() oidcLoginCommandDeps { return oidcLoginCommandDeps{ - login: oidcclient.Login, + lookupEnv: os.LookupEnv, + login: oidcclient.Login, exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) { return client.ExchangeToken(ctx, token) }, @@ -112,7 +114,7 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command { } func runOIDCLogin(cmd 
*cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error { - pLogger, err := SetLogLevel() + pLogger, err := SetLogLevel(deps.lookupEnv) if err != nil { plog.WarningErr("Received error while setting log level", err) } @@ -264,8 +266,9 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential { return &cred } -func SetLogLevel() (*plog.PLogger, error) { - if os.Getenv("PINNIPED_DEBUG") == "true" { +func SetLogLevel(lookupEnv func(string) (string, bool)) (*plog.PLogger, error) { + debug, _ := lookupEnv("PINNIPED_DEBUG") + if debug == "true" { err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) if err != nil { return nil, err diff --git a/cmd/pinniped/cmd/login_oidc_test.go b/cmd/pinniped/cmd/login_oidc_test.go index 541c6a6d..32179fde 100644 --- a/cmd/pinniped/cmd/login_oidc_test.go +++ b/cmd/pinniped/cmd/login_oidc_test.go @@ -9,7 +9,6 @@ import ( "encoding/base64" "fmt" "io/ioutil" - "os" "path/filepath" "testing" "time" @@ -44,7 +43,7 @@ func TestLoginOIDCCommand(t *testing.T) { args []string loginErr error conciergeErr error - envVars map[string]string + env map[string]string wantError bool wantStdout string wantStderr string @@ -175,7 +174,7 @@ func TestLoginOIDCCommand(t *testing.T) { "--client-id", "test-client-id", "--issuer", "test-issuer", }, - envVars: map[string]string{"PINNIPED_DEBUG": "true"}, + env: map[string]string{"PINNIPED_DEBUG": "true"}, wantOptionsCount: 3, wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n", wantLogs: []string{ @@ -202,7 +201,7 @@ func TestLoginOIDCCommand(t *testing.T) { "--concierge-api-group-suffix", "some.suffix.com", "--credential-cache", testutil.TempDir(t) + "/credentials.yaml", }, - envVars: map[string]string{"PINNIPED_DEBUG": "true"}, + env: map[string]string{"PINNIPED_DEBUG": "true"}, wantOptionsCount: 7, wantStdout: 
`{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"exchanged-token"}}` + "\n", wantLogs: []string{ @@ -214,21 +213,16 @@ func TestLoginOIDCCommand(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - for k, v := range tt.envVars { - kk := k - err := os.Setenv(kk, v) - require.NoError(t, err) - t.Cleanup(func() { - t.Log("cleaning up " + kk) - err = os.Unsetenv(kk) - }) - } testLogger := testlogger.New(t) klog.SetLogger(testLogger) var ( gotOptions []oidcclient.Option ) cmd := oidcLoginCommand(oidcLoginCommandDeps{ + lookupEnv: func(s string) (string, bool) { + v, ok := tt.env[s] + return v, ok + }, login: func(issuer string, clientID string, opts ...oidcclient.Option) (*oidctypes.Token, error) { require.Equal(t, "test-issuer", issuer) require.Equal(t, "test-client-id", clientID) diff --git a/cmd/pinniped/cmd/login_static.go b/cmd/pinniped/cmd/login_static.go index fb1df4d5..2b3fed1d 100644 --- a/cmd/pinniped/cmd/login_static.go +++ b/cmd/pinniped/cmd/login_static.go @@ -84,7 +84,7 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command { } func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error { - pLogger, err := SetLogLevel() + pLogger, err := SetLogLevel(deps.lookupEnv) if err != nil { plog.WarningErr("Received error while setting log level", err) } diff --git a/cmd/pinniped/cmd/login_static_test.go b/cmd/pinniped/cmd/login_static_test.go index c2e6d3ea..f391da48 100644 --- a/cmd/pinniped/cmd/login_static_test.go +++ b/cmd/pinniped/cmd/login_static_test.go @@ -12,6 +12,10 @@ import ( "testing" "time" + "k8s.io/klog/v2" + + "go.pinniped.dev/internal/testutil/testlogger" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" @@ -41,6 +45,7 @@ func TestLoginStaticCommand(t *testing.T) { wantStdout string wantStderr string 
wantOptionsCount int + wantLogs []string }{ { name: "help flag passed", @@ -126,10 +131,12 @@ func TestLoginStaticCommand(t *testing.T) { "--concierge-authenticator-name", "test-authenticator", }, conciergeErr: fmt.Errorf("some concierge error"), + env: map[string]string{"PINNIPED_DEBUG": "true"}, wantError: true, wantStderr: here.Doc(` Error: could not complete Concierge credential exchange: some concierge error `), + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped login: exchanging static token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1/\""}, }, { name: "invalid API group suffix", @@ -157,6 +164,8 @@ func TestLoginStaticCommand(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + testLogger := testlogger.New(t) + klog.SetLogger(testLogger) cmd := staticLoginCommand(staticLoginDeps{ lookupEnv: func(s string) (string, bool) { v, ok := tt.env[s] @@ -192,6 +201,8 @@ func TestLoginStaticCommand(t *testing.T) { } require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout") require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr") + + require.Equal(t, tt.wantLogs, testLogger.Lines()) }) } } From 09560fd8dc07c6d2ac01120b315db80cd7770243 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Mon, 19 Apr 2021 10:46:22 -0700 Subject: [PATCH 07/32] Log lines about using cached credential --- cmd/pinniped/cmd/login_oidc.go | 3 +++ cmd/pinniped/cmd/login_static.go | 1 + cmd/pinniped/cmd/login_static_test.go | 1 + 3 files changed, 5 insertions(+) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 37a19d58..07f871f3 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -187,6 +187,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin if flags.credentialCachePath != "" { credCache = execcredcache.New(flags.credentialCachePath) if cred 
:= credCache.Get(cacheKey); cred != nil { + pLogger.Debug("using cached cluster credential.") return json.NewEncoder(cmd.OutOrStdout()).Encode(cred) } } @@ -209,12 +210,14 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin if err != nil { return fmt.Errorf("could not complete Concierge credential exchange: %w", err) } + pLogger.Debug("Exchanged token for cluster credential") } else { pLogger.Debug("No concierge configured, skipping token credential exchange") } // If there was a credential cache, save the resulting credential for future use. if credCache != nil { + pLogger.Debug("caching cluster credential for future use.") credCache.Put(cacheKey, cred) } return json.NewEncoder(cmd.OutOrStdout()).Encode(cred) diff --git a/cmd/pinniped/cmd/login_static.go b/cmd/pinniped/cmd/login_static.go index 2b3fed1d..3642ffe1 100644 --- a/cmd/pinniped/cmd/login_static.go +++ b/cmd/pinniped/cmd/login_static.go @@ -137,6 +137,7 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams if flags.credentialCachePath != "" { credCache = execcredcache.New(flags.credentialCachePath) if cred := credCache.Get(cacheKey); cred != nil { + pLogger.Debug("using cached cluster credential.") return json.NewEncoder(out).Encode(cred) } } diff --git a/cmd/pinniped/cmd/login_static_test.go b/cmd/pinniped/cmd/login_static_test.go index f391da48..7db88e32 100644 --- a/cmd/pinniped/cmd/login_static_test.go +++ b/cmd/pinniped/cmd/login_static_test.go @@ -158,6 +158,7 @@ func TestLoginStaticCommand(t *testing.T) { args: []string{ "--token", "test-token", }, + env: map[string]string{"PINNIPED_DEBUG": "true"}, wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"test-token"}}` + "\n", }, } From c45d48d02758dd321413f08526a6034668ead86b Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Wed, 21 Apr 2021 10:58:48 -0700 Subject: [PATCH 08/32] Change test log expectations --- 
cmd/pinniped/cmd/login_oidc.go | 2 +- cmd/pinniped/cmd/login_oidc_test.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 07f871f3..27c6b22b 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -210,7 +210,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin if err != nil { return fmt.Errorf("could not complete Concierge credential exchange: %w", err) } - pLogger.Debug("Exchanged token for cluster credential") + pLogger.Debug("Successfully exchanged token for cluster credential.") } else { pLogger.Debug("No concierge configured, skipping token credential exchange") } diff --git a/cmd/pinniped/cmd/login_oidc_test.go b/cmd/pinniped/cmd/login_oidc_test.go index 32179fde..559d2c36 100644 --- a/cmd/pinniped/cmd/login_oidc_test.go +++ b/cmd/pinniped/cmd/login_oidc_test.go @@ -180,6 +180,7 @@ func TestLoginOIDCCommand(t *testing.T) { wantLogs: []string{ "\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"", "\"level\"=0 \"msg\"=\"Pinniped login: No concierge configured, skipping token credential exchange\"", + "\"level\"=0 \"msg\"=\"Pinniped login: caching cluster credential for future use.\"", }, }, { @@ -207,6 +208,8 @@ func TestLoginOIDCCommand(t *testing.T) { wantLogs: []string{ "\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"", "\"level\"=0 \"msg\"=\"Pinniped login: Exchanging token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1:1234/\"", + "\"level\"=0 \"msg\"=\"Pinniped login: Successfully exchanged token for cluster credential.\"", + "\"level\"=0 \"msg\"=\"Pinniped login: caching cluster credential for future use.\"", }, }, } From 5f3eab25388a53d0dba19238e08f3ad6abcbd387 Mon Sep 
17 00:00:00 2001 From: Margo Crawford Date: Wed, 21 Apr 2021 13:05:32 -0700 Subject: [PATCH 09/32] Fix expected number of log lines in TestCLILoginOIDC --- test/integration/cli_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/cli_test.go b/test/integration/cli_test.go index 622e9ba5..e840b34d 100644 --- a/test/integration/cli_test.go +++ b/test/integration/cli_test.go @@ -250,11 +250,12 @@ func TestCLILoginOIDC(t *testing.T) { cmd4StringOutput := string(cmd4CombinedOutput) require.NoError(t, err, cmd4StringOutput) - // the logs contain only the 3 debug lines plus the ExecCredential. There are 5 elements because the last one is "". - require.Len(t, strings.Split(cmd4StringOutput, "\n"), 5) + // the logs contain only the 4 debug lines plus the ExecCredential. There are 6 elements because the last one is "". + require.Len(t, strings.Split(cmd4StringOutput, "\n"), 6) require.Contains(t, cmd4StringOutput, "Performing OIDC login") require.Contains(t, cmd4StringOutput, "Found unexpired cached token") require.Contains(t, cmd4StringOutput, "No concierge configured, skipping token credential exchange") + require.Contains(t, cmd4StringOutput, "caching cluster credential for future use.") require.Contains(t, cmd4StringOutput, credOutput3.Status.Token) err = os.Unsetenv("PINNIPED_DEBUG") require.NoError(t, err) From 96fda6ed130e9c0ea98102d0bf46e3d2e96ed6c5 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Tue, 27 Apr 2021 16:18:30 -0700 Subject: [PATCH 10/32] Added documentation for how to configure the Supervisor with GitLab --- .../howto/configure-supervisor-with-gitlab.md | 83 +++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 site/content/docs/howto/configure-supervisor-with-gitlab.md diff --git a/site/content/docs/howto/configure-supervisor-with-gitlab.md b/site/content/docs/howto/configure-supervisor-with-gitlab.md new file mode 100644 index 00000000..b143fc4f --- /dev/null +++ 
b/site/content/docs/howto/configure-supervisor-with-gitlab.md @@ -0,0 +1,83 @@ +--- +title: Configure the Pinniped Supervisor to use Gitlab as an OIDC Provider +description: Set up the Pinniped Supervisor to use Gitlab login. +cascade: + layout: docs +menu: + docs: + name: Configure Supervisor With Gitlab + weight: 35 + parent: howtos +--- +The Supervisor is an [OpenID Connect (OIDC)](https://openid.net/connect/) issuer that supports connecting a single "upstream" OIDC identity provider to many "downstream" cluster clients. + +This guide shows you how to configure the supervisor so that users can authenticate to their Kubernetes +cluster using their Gitlab credentials. +## Prerequisites + +This how-to guide assumes that you have already installed the Pinniped Supervisor with working ingress, +and that you have configured a `FederationDomain` to issue tokens for your downstream clusters, as +described [here](https://pinniped.dev/docs/howto/configure-supervisor/). + +## Configuring your Gitlab Application +1. In Gitlab, navigate to User Settings > Applications +1. Create a new application: + 1. Enter the name of your application. + 1. Enter the redirect URI. This is the `issuer` you configured in your `FederationDomain` appended with `/callback`. + 1. Check the box saying that the application is Confidential. + 1. Select scope `openid`. Optionally select `profile` and `email`. + 1. Save the application and make note of the Application ID and Secret. + +## Configuring the Supervisor cluster +Create an [`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor. +```yaml +apiVersion: idp.supervisor.pinniped.dev/v1alpha1 +kind: OIDCIdentityProvider +metadata: + name: my-oidc-provider +spec: + # The upstream issuer name. + # This should be something like https://gitlab.com or https://gitlab.your-company-name.example.com. 
+ issuer: "" + # Optionally specify the CA bundle for the GitLab server as base64 encoded PEM data. + tls: + certificateAuthorityData: "" + authorizationConfig: + # Any scopes other than "openid" that you selected when creating your GitLab application. + additionalScopes: [ email, profile ] + # See here for a list of available claims: https://docs.gitlab.com/ee/integration/openid_connect_provider.html#shared-information + claims: + # The name of the claim in your GitLab token that will be mapped to the "username" claim in downstream + # tokens minted by the Supervisor. + # For example, "email" or "sub". + username: "" + # The name of the claim in your GitLab token that represents the groups that the user belongs to. + groups: "groups" + client: + # the name of the kubernetes secret that contains your GitLab application's client ID and client secret. + secretName: my-oidc-provider-client-secret +``` + +Then, create a `Secret` containing the Client ID and Client Secret in the same namespace as the Supervisor. +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: my-oidc-provider-client-secret +stringData: + # clientID should be the Application ID that you got from GitLab. + clientID: xxx + # clientSecret should be the Secret that you got from GitLab. + clientSecret: yyy +type: "secrets.pinniped.dev/oidc-client" +``` + +## Next Steps + +Now that you have configured the Supervisor to use GitLab, +you may want to check out [Configure Concierge JWT Authentication](https://pinniped.dev/docs/howto/configure-concierge-jwt/) +to learn how to configure the Concierge to use the JWTs that the Supervisor now issues. 
+ + + + From 90b2854032f0dcd103867acd5e4ddf43ea81044f Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Wed, 28 Apr 2021 09:34:42 -0700 Subject: [PATCH 11/32] Avoid using global logger in login.go --- cmd/pinniped/cmd/login_oidc.go | 1 + cmd/pinniped/cmd/login_oidc_test.go | 8 +-- pkg/oidcclient/login.go | 18 +++-- pkg/oidcclient/login_test.go | 102 ++++++++++++++-------------- 4 files changed, 71 insertions(+), 58 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index 27c6b22b..d52b71d8 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -134,6 +134,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // Initialize the login handler. opts := []oidcclient.Option{ oidcclient.WithContext(cmd.Context()), + oidcclient.WithLogger(*pLogger), oidcclient.WithScopes(flags.scopes), oidcclient.WithSessionCache(sessionCache), } diff --git a/cmd/pinniped/cmd/login_oidc_test.go b/cmd/pinniped/cmd/login_oidc_test.go index 559d2c36..68dc18d7 100644 --- a/cmd/pinniped/cmd/login_oidc_test.go +++ b/cmd/pinniped/cmd/login_oidc_test.go @@ -145,7 +145,7 @@ func TestLoginOIDCCommand(t *testing.T) { "--issuer", "test-issuer", }, loginErr: fmt.Errorf("some login error"), - wantOptionsCount: 3, + wantOptionsCount: 4, wantError: true, wantStderr: here.Doc(` Error: could not complete Pinniped login: some login error @@ -162,7 +162,7 @@ func TestLoginOIDCCommand(t *testing.T) { "--concierge-endpoint", "https://127.0.0.1:1234/", }, conciergeErr: fmt.Errorf("some concierge error"), - wantOptionsCount: 3, + wantOptionsCount: 4, wantError: true, wantStderr: here.Doc(` Error: could not complete Concierge credential exchange: some concierge error @@ -175,7 +175,7 @@ func TestLoginOIDCCommand(t *testing.T) { "--issuer", "test-issuer", }, env: map[string]string{"PINNIPED_DEBUG": "true"}, - wantOptionsCount: 3, + wantOptionsCount: 4, wantStdout: 
`{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n", wantLogs: []string{ "\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"", @@ -203,7 +203,7 @@ func TestLoginOIDCCommand(t *testing.T) { "--credential-cache", testutil.TempDir(t) + "/credentials.yaml", }, env: map[string]string{"PINNIPED_DEBUG": "true"}, - wantOptionsCount: 7, + wantOptionsCount: 8, wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"exchanged-token"}}` + "\n", wantLogs: []string{ "\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"", diff --git a/pkg/oidcclient/login.go b/pkg/oidcclient/login.go index 4d753944..d606dec7 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -50,6 +50,7 @@ const ( type handlerState struct { // Basic parameters. ctx context.Context + logger plog.PLogger issuer string clientID string scopes []string @@ -98,6 +99,15 @@ func WithContext(ctx context.Context) Option { } } +// WithLogger specifies a PLogger to use with the login. +// If not specified this will default to a new logger. +func WithLogger(logger plog.PLogger) Option { + return func(h *handlerState) error { + h.logger = logger + return nil + } +} + // WithListenPort specifies a TCP listen port on localhost, which will be used for the redirect_uri and to handle the // authorization code callback. By default, a random high port will be chosen which requires the authorization server // to support wildcard port numbers as described by https://tools.ietf.org/html/rfc8252: @@ -261,7 +271,7 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) { // If the ID token is still valid for a bit, return it immediately and skip the rest of the flow. 
cached := h.cache.GetToken(cacheKey) if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity { - plog.Debug("Pinniped: Found unexpired cached token") + h.logger.Debug("Found unexpired cached token.") return cached, nil } @@ -329,7 +339,7 @@ func (h *handlerState) initOIDCDiscovery() error { return nil } - plog.Debug("Pinniped: Performing OIDC discovery", "issuer", h.issuer) + h.logger.Debug("Performing OIDC discovery", "issuer", h.issuer) var err error h.provider, err = oidc.NewProvider(h.ctx, h.issuer) if err != nil { @@ -346,7 +356,7 @@ func (h *handlerState) initOIDCDiscovery() error { } func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) { - plog.Debug("Pinniped: Performing RFC8693 token exchange", "requested audience", h.requestedAudience) + h.logger.Debug("Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience) // Perform OIDC discovery. This may have already been performed if there was not a cached base token. 
if err := h.initOIDCDiscovery(); err != nil { return nil, err @@ -417,7 +427,7 @@ func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidcty } func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) { - plog.Debug("refreshing cached token") + h.logger.Debug("Refreshing cached token.") refreshSource := h.oauth2Config.TokenSource(ctx, &oauth2.Token{RefreshToken: refreshToken.Token}) refreshed, err := refreshSource.Token() diff --git a/pkg/oidcclient/login_test.go b/pkg/oidcclient/login_test.go index f1f5f6cf..89d25e84 100644 --- a/pkg/oidcclient/login_test.go +++ b/pkg/oidcclient/login_test.go @@ -273,7 +273,7 @@ func TestLogin(t *testing.T) { return WithSessionCache(cache)(h) } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""}, wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`, }, { @@ -295,7 +295,7 @@ func TestLogin(t *testing.T) { return WithSessionCache(cache)(h) } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\""}, wantToken: &testToken, }, { @@ -304,7 +304,7 @@ func TestLogin(t *testing.T) { return func(h *handlerState) error { return nil } }, issuer: errorServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), }, { 
@@ -344,8 +344,8 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"refreshing cached token\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\""}, wantToken: &testToken, }, { @@ -378,8 +378,8 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"refreshing cached token\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\""}, wantErr: "some validation error", }, { @@ -406,8 +406,8 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"refreshing cached token\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\""}, // Expect this to fall through to the authorization code flow, so it fails here. 
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", }, @@ -420,7 +420,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", }, { @@ -431,7 +431,7 @@ func TestLogin(t *testing.T) { }) }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "could not open browser: some browser open error", }, { @@ -449,7 +449,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "timed out waiting for token callback: context canceled", }, { @@ -466,7 +466,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "error handling callback: some callback error", }, { @@ -527,7 +527,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" 
\"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantToken: &testToken, }, { @@ -551,9 +551,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"cluster-1234\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, wantErr: fmt.Sprintf("failed to exchange token: could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), }, { @@ -577,9 +577,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"cluster-1234\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""}, wantErr: `failed to exchange token: could not build RFC8693 request: parse "%": invalid URL escape "%"`, }, { @@ -603,9 +603,9 @@ func TestLogin(t *testing.T) { return nil } }, - 
wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-http-response\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-http-response\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: fmt.Sprintf(`failed to exchange token: Post "%s/token": failed to parse Location header "%%": parse "%%": invalid URL escape "%%"`, successServer.URL), }, { @@ -629,9 +629,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-http-400\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-http-400\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: unexpected HTTP response status 400`, }, { @@ -655,9 +655,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested 
audience\"=\"test-audience-produce-invalid-content-type\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-content-type\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: failed to decode content-type header: mime: invalid media parameter`, }, { @@ -681,9 +681,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-wrong-content-type\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-wrong-content-type\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: unexpected HTTP response content type "invalid"`, }, { @@ -707,9 +707,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-json\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached 
token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-json\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: failed to decode response: unexpected EOF`, }, { @@ -733,9 +733,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-tokentype\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-tokentype\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: got unexpected token_type "invalid"`, }, { @@ -759,9 +759,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-issuedtokentype\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-issuedtokentype\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, 
wantErr: `failed to exchange token: got unexpected issued_token_type "invalid"`, }, { @@ -785,9 +785,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience-produce-invalid-jwt\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-jwt\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: received invalid JWT: oidc: malformed jwt: square/go-jose: compact JWS format must have three parts`, }, { @@ -817,9 +817,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped: Found unexpired cached token\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience\"", - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantToken: &testExchangedToken, }, { @@ -868,9 +868,9 @@ func TestLogin(t *testing.T) { } }, wantLogs: []string{ - "\"level\"=0 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"refreshing cached token\"", - "\"level\"=0 
\"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requested audience\"=\"test-audience\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\"", + "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"", }, wantToken: &testExchangedToken, }, @@ -881,6 +881,7 @@ func TestLogin(t *testing.T) { err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) require.NoError(t, err) testLogger := testlogger.New(t) + pLogger := plog.New("Pinniped Test Prefix: ") klog.SetLogger(testLogger) tok, err := Login(tt.issuer, tt.clientID, @@ -888,6 +889,7 @@ func TestLogin(t *testing.T) { WithListenPort(0), WithScopes([]string{"test-scope"}), tt.opt(t), + WithLogger(pLogger), ) require.Equal(t, tt.wantLogs, testLogger.Lines()) if tt.wantErr != "" { From bed2d2dd624aad74ab44ee5a7c1bf3adbeb60ca9 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Wed, 28 Apr 2021 13:34:36 -0700 Subject: [PATCH 12/32] Incorporated PR feedback --- .../howto/configure-supervisor-with-gitlab.md | 27 ++++++++++++------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/site/content/docs/howto/configure-supervisor-with-gitlab.md b/site/content/docs/howto/configure-supervisor-with-gitlab.md index b143fc4f..bf928b17 100644 --- a/site/content/docs/howto/configure-supervisor-with-gitlab.md +++ b/site/content/docs/howto/configure-supervisor-with-gitlab.md @@ -11,7 +11,7 @@ menu: --- The Supervisor is an [OpenID Connect (OIDC)](https://openid.net/connect/) issuer that supports connecting a single "upstream" OIDC identity provider to many "downstream" cluster clients. 
-This guide shows you how to configure the supervisor so that users can authenticate to their Kubernetes +This guide shows you how to configure the Supervisor so that users can authenticate to their Kubernetes cluster using their Gitlab credentials. ## Prerequisites @@ -30,6 +30,7 @@ described [here](https://pinniped.dev/docs/howto/configure-supervisor/). ## Configuring the Supervisor cluster Create an [`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor. +For example, here is an `OIDCIdentityProvider` that works against https://gitlab.com, and uses the email claim as the username. ```yaml apiVersion: idp.supervisor.pinniped.dev/v1alpha1 kind: OIDCIdentityProvider @@ -38,10 +39,10 @@ metadata: spec: # The upstream issuer name. # This should be something like https://gitlab.com or https://gitlab.your-company-name.example.com. - issuer: "" - # Optionally specify the CA bundle for the GitLab server as base64 encoded PEM data. - tls: - certificateAuthorityData: "" + issuer: "https://gitlab.com" + # If needed, specify the CA bundle for the GitLab server as base64 encoded PEM data. + #tls: + # certificateAuthorityData: "" authorizationConfig: # Any scopes other than "openid" that you selected when creating your GitLab application. additionalScopes: [ email, profile ] @@ -50,11 +51,12 @@ spec: # The name of the claim in your GitLab token that will be mapped to the "username" claim in downstream # tokens minted by the Supervisor. # For example, "email" or "sub". - username: "" - # The name of the claim in your GitLab token that represents the groups that the user belongs to. + username: "email" + # The name of the claim in GitLab that represents the groups that the user belongs to. + # Note that GitLab's "groups" claim comes from their /userinfo endpoint, not the token. 
groups: "groups" client: - # the name of the kubernetes secret that contains your GitLab application's client ID and client secret. + # The name of the Kubernetes secret that contains your GitLab application's client ID and client secret. secretName: my-oidc-provider-client-secret ``` @@ -65,13 +67,20 @@ kind: Secret metadata: name: my-oidc-provider-client-secret stringData: - # clientID should be the Application ID that you got from GitLab. + # clientID should be the Application ID that you got from GitLab. clientID: xxx # clientSecret should be the Secret that you got from GitLab. clientSecret: yyy type: "secrets.pinniped.dev/oidc-client" ``` +To validate your configuration, run +```shell +kubectl describe OIDCIdentityProvider my-oidc-identity-provider +``` + +Look at the `status` field. If it was configured correctly, you should see `phase: Ready`. + ## Next Steps Now that you have configured the Supervisor to use GitLab, From bb7e7fe81eea81081d08eb478fc6b350e97e4f6e Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Wed, 28 Apr 2021 13:49:42 -0400 Subject: [PATCH 13/32] webhookcachefiller: be stricter about CA bundle validation Signed-off-by: Monis Khan --- .../controller/authenticator/authenticator.go | 16 ++++++++++++++-- .../webhookcachefiller_test.go | 9 +++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/internal/controller/authenticator/authenticator.go b/internal/controller/authenticator/authenticator.go index 14af741d..2ac68500 100644 --- a/internal/controller/authenticator/authenticator.go +++ b/internal/controller/authenticator/authenticator.go @@ -5,7 +5,9 @@ package authenticator import ( + "crypto/x509" "encoding/base64" + "fmt" auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1" ) @@ -22,8 +24,18 @@ type Closer interface { // nil CA bundle will be returned. If the provided spec contains a CA bundle that is not properly // encoded, an error will be returned.
func CABundle(spec *auth1alpha1.TLSSpec) ([]byte, error) { - if spec == nil { + if spec == nil || len(spec.CertificateAuthorityData) == 0 { return nil, nil } - return base64.StdEncoding.DecodeString(spec.CertificateAuthorityData) + + pem, err := base64.StdEncoding.DecodeString(spec.CertificateAuthorityData) + if err != nil { + return nil, err + } + + if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok { + return nil, fmt.Errorf("certificateAuthorityData is not valid PEM") + } + + return pem, nil } diff --git a/internal/controller/authenticator/webhookcachefiller/webhookcachefiller_test.go b/internal/controller/authenticator/webhookcachefiller/webhookcachefiller_test.go index 4d6a0744..aae172d9 100644 --- a/internal/controller/authenticator/webhookcachefiller/webhookcachefiller_test.go +++ b/internal/controller/authenticator/webhookcachefiller/webhookcachefiller_test.go @@ -135,6 +135,15 @@ func TestNewWebhookAuthenticator(t *testing.T) { require.EqualError(t, err, "invalid TLS configuration: illegal base64 data at input byte 7") }) + t.Run("invalid pem data", func(t *testing.T) { + res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{ + Endpoint: "https://example.com", + TLS: &auth1alpha1.TLSSpec{CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte("bad data"))}, + }, ioutil.TempFile, clientcmd.WriteToFile) + require.Nil(t, res) + require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM") + }) + t.Run("valid config with no TLS spec", func(t *testing.T) { res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{ Endpoint: "https://example.com", From 62785674c33bcd773f1565be8b69fb1afedc7a63 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Tue, 20 Apr 2021 11:19:58 -0400 Subject: [PATCH 14/32] impersonator: add support for service account token authentication This change updates the impersonator logic to pass through requests that were authenticated via a bearer token that
asserts a UID. This allows us to support service account tokens (as well as any other form of token based authentication). Signed-off-by: Monis Khan --- internal/concierge/impersonator/doc.go | 7 + .../concierge/impersonator/impersonator.go | 194 +++++-- .../impersonator/impersonator_test.go | 475 +++++++++++++++--- .../authenticator/authncache/cache.go | 7 +- internal/valuelesscontext/valuelesscontext.go | 14 + .../concierge_impersonation_proxy_test.go | 160 +++++- 6 files changed, 748 insertions(+), 109 deletions(-) create mode 100644 internal/valuelesscontext/valuelesscontext.go diff --git a/internal/concierge/impersonator/doc.go b/internal/concierge/impersonator/doc.go index d8504603..d15c6715 100644 --- a/internal/concierge/impersonator/doc.go +++ b/internal/concierge/impersonator/doc.go @@ -35,6 +35,13 @@ only shares this information with the audit stack). To keep things simple, we use the fake audit backend at the Metadata level for all requests. This guarantees that we always have an audit event on every request. +One final wrinkle is that impersonation cannot impersonate UIDs (yet). This is +problematic because service account tokens always assert a UID. To handle this +case without losing authentication information, when we see an identity with a +UID that was asserted via a bearer token, we simply pass the request through +with the original bearer token and no impersonation headers set (as if the user +had made the request directly against the Kubernetes API server). + For all normal requests, we only use http/2.0 when proxying to the API server. For upgrade requests, we only use http/1.1 since these always go from http/1.1 to either websockets or SPDY. 
diff --git a/internal/concierge/impersonator/impersonator.go b/internal/concierge/impersonator/impersonator.go index 087329dd..5778b795 100644 --- a/internal/concierge/impersonator/impersonator.go +++ b/internal/concierge/impersonator/impersonator.go @@ -15,6 +15,8 @@ import ( "strings" "time" + authenticationv1 "k8s.io/api/authentication/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -26,6 +28,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit/policy" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/request/bearertoken" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/filterlatency" @@ -45,6 +49,7 @@ import ( "go.pinniped.dev/internal/httputil/securityheader" "go.pinniped.dev/internal/kubeclient" "go.pinniped.dev/internal/plog" + "go.pinniped.dev/internal/valuelesscontext" ) // FactoryFunc is a function which can create an impersonator server. @@ -176,6 +181,11 @@ func newInternal( //nolint:funlen // yeah, it's kind of long. // See the genericapiserver.DefaultBuildHandlerChain func for details. handler = defaultBuildHandlerChainFunc(handler, c) + // we need to grab the bearer token before WithAuthentication deletes it. + handler = filterlatency.TrackCompleted(handler) + handler = withBearerTokenPreservation(handler) + handler = filterlatency.TrackStarted(handler, "bearertokenpreservation") + // Always set security headers so browsers do the right thing. handler = filterlatency.TrackCompleted(handler) handler = securityheader.Wrap(handler) @@ -189,6 +199,9 @@ func newInternal( //nolint:funlen // yeah, it's kind of long. 
serverConfig.AuditPolicyChecker = policy.FakeChecker(auditinternal.LevelMetadata, nil) serverConfig.AuditBackend = &auditfake.Backend{} + // if we ever start unioning a TCR bearer token authenticator with serverConfig.Authenticator + // then we will need to update the related assumption in tokenPassthroughRoundTripper + delegatingAuthorizer := serverConfig.Authorization.Authorizer nestedImpersonationAuthorizer := &comparableAuthorizer{ authorizerFunc: func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { @@ -290,6 +303,35 @@ func (f authorizerFunc) Authorize(ctx context.Context, a authorizer.Attributes) return f(ctx, a) } +func withBearerTokenPreservation(delegate http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // this looks a bit hacky but lets us avoid writing any logic for parsing out the bearer token + var reqToken string + _, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) { + reqToken = token + return nil, false, nil + })).AuthenticateRequest(r) + + // smuggle the token through the context. this does mean that we need to avoid logging the context. + if len(reqToken) != 0 { + ctx := context.WithValue(r.Context(), tokenKey, reqToken) + r = r.WithContext(ctx) + } + + delegate.ServeHTTP(w, r) + }) +} + +func tokenFrom(ctx context.Context) string { + token, _ := ctx.Value(tokenKey).(string) + return token +} + +// contextKey type is unexported to prevent collisions. 
+type contextKey int + +const tokenKey contextKey = iota + func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapiserver.Config) http.Handler, error) { serverURL, err := url.Parse(restConfig.Host) if err != nil { @@ -300,11 +342,19 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi if err != nil { return nil, fmt.Errorf("could not get http/1.1 round tripper: %w", err) } + http1RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "http/1.1") + if err != nil { + return nil, fmt.Errorf("could not get http/1.1 anonymous round tripper: %w", err) + } http2RoundTripper, err := getTransportForProtocol(restConfig, "h2") if err != nil { return nil, fmt.Errorf("could not get http/2.0 round tripper: %w", err) } + http2RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "h2") + if err != nil { + return nil, fmt.Errorf("could not get http/2.0 anonymous round tripper: %w", err) + } return func(c *genericapiserver.Config) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -347,15 +397,18 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi return } + // grab the request's bearer token if present. this is optional and does not fail the request if missing. 
+ token := tokenFrom(r.Context()) + // KAS only supports upgrades via http/1.1 to websockets/SPDY (upgrades never use http/2.0) // Thus we default to using http/2.0 when the request is not an upgrade, otherwise we use http/1.1 - baseRT := http2RoundTripper + baseRT, baseRTAnonymous := http2RoundTripper, http2RoundTripperAnonymous isUpgradeRequest := httpstream.IsUpgradeRequest(r) if isUpgradeRequest { - baseRT = http1RoundTripper + baseRT, baseRTAnonymous = http1RoundTripper, http1RoundTripperAnonymous } - rt, err := getTransportForUser(userInfo, baseRT, ae) + rt, err := getTransportForUser(r.Context(), userInfo, baseRT, baseRTAnonymous, ae, token, c.Authentication.Authenticator) if err != nil { plog.WarningErr("rejecting request as we cannot act as the current user", err, "url", r.URL.String(), @@ -413,34 +466,117 @@ func ensureNoImpersonationHeaders(r *http.Request) error { return nil } -func getTransportForUser(userInfo user.Info, delegate http.RoundTripper, ae *auditinternal.Event) (http.RoundTripper, error) { - if len(userInfo.GetUID()) == 0 { - extra, err := buildExtra(userInfo.GetExtra(), ae) - if err != nil { - return nil, err - } - - impersonateConfig := transport.ImpersonationConfig{ - UserName: userInfo.GetName(), - Groups: userInfo.GetGroups(), - Extra: extra, - } - // transport.NewImpersonatingRoundTripper clones the request before setting headers - // thus it will not accidentally mutate the input request (see http.Handler docs) - return transport.NewImpersonatingRoundTripper(impersonateConfig, delegate), nil +func getTransportForUser(ctx context.Context, userInfo user.Info, delegate, delegateAnonymous http.RoundTripper, ae *auditinternal.Event, token string, authenticator authenticator.Request) (http.RoundTripper, error) { + if canImpersonateFully(userInfo) { + return standardImpersonationRoundTripper(userInfo, ae, delegate) } - // 0. in the case of a request that is not attempting to do nested impersonation - // 1. 
if we make the assumption that the TCR API does not issue tokens (or pass the TCR API bearer token - // authenticator into this func - we need to know the authentication cred is something KAS would honor) - // 2. then if preserve the incoming authorization header into the request's context - // 3. we could reauthenticate it here (it would be a free cache hit) - // 4. confirm that it matches the passed in user info (i.e. it was actually the cred used to authenticate and not a client cert) - // 5. then we could issue a reverse proxy request using an anonymous rest config and the bearer token - // 6. thus instead of impersonating the user, we would just be passing their request through - // 7. this would preserve the UID info and thus allow us to safely support all token based auth - // 8. the above would be safe even if in the future Kube started supporting UIDs asserted by client certs - return nil, constable.Error("unexpected uid") + return tokenPassthroughRoundTripper(ctx, delegateAnonymous, ae, token, authenticator) +} + +func canImpersonateFully(userInfo user.Info) bool { + // nolint: gosimple // this structure is on purpose because we plan to expand this function + if len(userInfo.GetUID()) == 0 { + return true + } + + // once kube supports UID impersonation, add logic to detect if the KAS is + // new enough to have this functionality and return true in that case as well + return false +} + +func standardImpersonationRoundTripper(userInfo user.Info, ae *auditinternal.Event, delegate http.RoundTripper) (http.RoundTripper, error) { + extra, err := buildExtra(userInfo.GetExtra(), ae) + if err != nil { + return nil, err + } + + impersonateConfig := transport.ImpersonationConfig{ + UserName: userInfo.GetName(), + Groups: userInfo.GetGroups(), + Extra: extra, + } + // transport.NewImpersonatingRoundTripper clones the request before setting headers + // thus it will not accidentally mutate the input request (see http.Handler docs) + return 
transport.NewImpersonatingRoundTripper(impersonateConfig, delegate), nil +} + +func tokenPassthroughRoundTripper(ctx context.Context, delegateAnonymous http.RoundTripper, ae *auditinternal.Event, token string, authenticator authenticator.Request) (http.RoundTripper, error) { + // all code below assumes KAS does not support UID impersonation because that case is handled in the standard path + + // it also assumes that the TCR API does not issue tokens - if this assumption changes, we will need + // some way to distinguish a token that is only valid against this impersonation proxy and not against KAS. + // this code will fail closed because said TCR token would not work against KAS and the request would fail. + + // if we get here we know the final user info had a UID + // if the original user is also performing a nested impersonation, it means that said nested + // impersonation is trying to impersonate a UID since final user info == ae.ImpersonatedUser + // we know this KAS does not support UID impersonation so this request must be rejected + if ae.ImpersonatedUser != nil { + return nil, constable.Error("unable to impersonate uid") + } + + // see what KAS thinks this token translates into + // this is important because certs have precedence over tokens and we want + // to make sure that we do not get confused and pass along the wrong token + tokenUser, err := tokenReview(ctx, token, authenticator) + if err != nil { + return nil, err + } + + // we want to compare the result of the token authentication with the original user that made the request + // if the user who made the request and the token do not match, we cannot go any further at this point + if !apiequality.Semantic.DeepEqual(ae.User, tokenUser) { + // this info leak seems fine for trace level logs + plog.Trace("failed to passthrough token due to user mismatch", + "original-username", ae.User.Username, + "original-uid", ae.User.UID, + "token-username", tokenUser.Username, + "token-uid", tokenUser.UID, + ) 
+ return nil, constable.Error("token authenticated as a different user") + } + + // now we know that if we send this token to KAS, it will authenticate correctly + return transport.NewBearerAuthRoundTripper(token, delegateAnonymous), nil +} + +func tokenReview(ctx context.Context, token string, authenticator authenticator.Request) (authenticationv1.UserInfo, error) { + if len(token) == 0 { + return authenticationv1.UserInfo{}, constable.Error("no token on request") + } + + // create a header that contains nothing but the token + // an astute observer may ask "but what about the token's audience?" + // in this case, we want to leave audiences unset per the token review docs: + // > If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver. + // i.e. we want to make sure that the given token is valid against KAS + fakeReq := &http.Request{Header: http.Header{}} + fakeReq.Header.Set("Authorization", "Bearer "+token) + + // propagate cancellation of parent context (without any values such as audience) + fakeReq = fakeReq.WithContext(valuelesscontext.New(ctx)) + + // this will almost always be a free call that hits our 10 second cache TTL + resp, ok, err := authenticator.AuthenticateRequest(fakeReq) + if err != nil { + return authenticationv1.UserInfo{}, err + } + if !ok { + return authenticationv1.UserInfo{}, constable.Error("token failed to authenticate") + } + + tokenUser := authenticationv1.UserInfo{ + Username: resp.User.GetName(), + UID: resp.User.GetUID(), + Groups: resp.User.GetGroups(), + Extra: make(map[string]authenticationv1.ExtraValue, len(resp.User.GetExtra())), + } + for k, v := range resp.User.GetExtra() { + tokenUser.Extra[k] = v + } + + return tokenUser, nil } func buildExtra(extra map[string][]string, ae *auditinternal.Event) (map[string][]string, error) { diff --git a/internal/concierge/impersonator/impersonator_test.go b/internal/concierge/impersonator/impersonator_test.go index dfd765c3..78fd1759 100644 --- 
a/internal/concierge/impersonator/impersonator_test.go +++ b/internal/concierge/impersonator/impersonator_test.go @@ -22,6 +22,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/httpstream" auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/request/bearertoken" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" @@ -33,6 +35,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "go.pinniped.dev/internal/certauthority" + "go.pinniped.dev/internal/constable" "go.pinniped.dev/internal/dynamiccert" "go.pinniped.dev/internal/here" "go.pinniped.dev/internal/httputil/roundtripper" @@ -176,6 +179,26 @@ func TestImpersonator(t *testing.T) { "X-Forwarded-For": {"127.0.0.1"}, }, }, + { + name: "when there is no client cert on request but it has basic auth, it is still an anonymous request", + clientCert: &clientCert{}, + clientMutateHeaders: func(header http.Header) { + header.Set("Test", "val") + req := &http.Request{Header: header} + req.SetBasicAuth("foo", "bar") + }, + kubeAPIServerClientBearerTokenFile: "required-to-be-set", + wantKubeAPIServerRequestHeaders: http.Header{ + "Impersonate-User": {"system:anonymous"}, + "Impersonate-Group": {"system:unauthenticated"}, + "Authorization": {"Bearer some-service-account-token"}, + "User-Agent": {"test-agent"}, + "Accept": {"application/vnd.kubernetes.protobuf,application/json"}, + "Accept-Encoding": {"gzip"}, + "X-Forwarded-For": {"127.0.0.1"}, + "Test": {"val"}, + }, + }, { name: "failed client cert authentication", clientCert: newClientCert(t, unrelatedCA, "test-username", []string{"test-group1"}), @@ -499,39 +522,12 @@ func TestImpersonatorHTTPHandler(t *testing.T) { "extra-2": {"some", "more", "extra", "stuff"}, } - validURL, _ := url.Parse("http://pinniped.dev/blah") - newRequest := func(h http.Header, userInfo 
user.Info, event *auditinternal.Event) *http.Request { - ctx := context.Background() - - if userInfo != nil { - ctx = request.WithUser(ctx, userInfo) - } - - ae := &auditinternal.Event{Level: auditinternal.LevelMetadata} - if event != nil { - ae = event - } - ctx = request.WithAuditEvent(ctx, ae) - - reqInfo := &request.RequestInfo{ - IsResourceRequest: false, - Path: validURL.Path, - Verb: "get", - } - ctx = request.WithRequestInfo(ctx, reqInfo) - - r, err := http.NewRequestWithContext(ctx, http.MethodGet, validURL.String(), nil) - require.NoError(t, err) - r.Header = h - - return r - } - tests := []struct { name string restConfig *rest.Config wantCreationErr string request *http.Request + authenticator authenticator.Request wantHTTPBody string wantHTTPStatus int wantKubeAPIServerRequestHeaders http.Header @@ -563,50 +559,50 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "Impersonate-User header already in request", - request: newRequest(map[string][]string{"Impersonate-User": {"some-user"}}, nil, nil), + request: newRequest(t, map[string][]string{"Impersonate-User": {"some-user"}}, nil, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "Impersonate-Group header already in request", - request: newRequest(map[string][]string{"Impersonate-Group": {"some-group"}}, nil, nil), + request: newRequest(t, map[string][]string{"Impersonate-Group": {"some-group"}}, nil, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "Impersonate-Extra 
header already in request", - request: newRequest(map[string][]string{"Impersonate-Extra-something": {"something"}}, nil, nil), + request: newRequest(t, map[string][]string{"Impersonate-Extra-something": {"something"}}, nil, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "Impersonate-* header already in request", - request: newRequest(map[string][]string{"Impersonate-Something": {"some-newfangled-impersonate-header"}}, nil, nil), + request: newRequest(t, map[string][]string{"Impersonate-Something": {"some-newfangled-impersonate-header"}}, nil, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "unexpected authorization header", - request: newRequest(map[string][]string{"Authorization": {"panda"}}, nil, nil), + request: newRequest(t, map[string][]string{"Authorization": {"panda"}}, nil, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid authorization header","reason":"InternalError","details":{"causes":[{"message":"invalid authorization header"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "missing user", - request: newRequest(map[string][]string{}, nil, nil), + request: newRequest(t, map[string][]string{}, nil, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid user","reason":"InternalError","details":{"causes":[{"message":"invalid 
user"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "unexpected UID", - request: newRequest(map[string][]string{}, &user.DefaultInfo{UID: "007"}, nil), + request: newRequest(t, map[string][]string{}, &user.DefaultInfo{UID: "007"}, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "authenticated user but missing audit event", request: func() *http.Request { - req := newRequest(map[string][]string{ + req := newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Connection": {"Upgrade"}, "Upgrade": {"some-upgrade"}, @@ -615,7 +611,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { Name: testUser, Groups: testGroups, Extra: testExtra, - }, nil) + }, nil, "") ctx := request.WithAuditEvent(req.Context(), nil) req = req.WithContext(ctx) return req @@ -625,7 +621,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "authenticated user with upper case extra", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Connection": {"Upgrade"}, "Upgrade": {"some-upgrade"}, @@ -639,13 +635,13 @@ func TestImpersonatorHTTPHandler(t *testing.T) { "valid-key": {"valid-value"}, "Invalid-key": {"still-valid-value"}, }, - }, nil), + }, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { 
name: "authenticated user with upper case extra across multiple lines", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Connection": {"Upgrade"}, "Upgrade": {"some-upgrade"}, @@ -659,13 +655,13 @@ func TestImpersonatorHTTPHandler(t *testing.T) { "valid-key": {"valid-value"}, "valid-data\nInvalid-key": {"still-valid-value"}, }, - }, nil), + }, nil, ""), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, { name: "authenticated user with reserved extra key", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Connection": {"Upgrade"}, "Upgrade": {"some-upgrade"}, @@ -679,14 +675,164 @@ func TestImpersonatorHTTPHandler(t *testing.T) { "valid-key": {"valid-value"}, "foo.impersonation-proxy.concierge.pinniped.dev": {"still-valid-value"}, }, - }, nil), + }, nil, ""), + wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", + wantHTTPStatus: http.StatusInternalServerError, + }, + { + name: "authenticated user with UID but no bearer token", + request: newRequest(t, map[string][]string{ + "User-Agent": {"test-user-agent"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Content-Length": {"some-length"}, + "Other-Header": {"test-header-value-1"}, + }, &user.DefaultInfo{ + UID: "-", // anything 
non-empty, rest of the fields get ignored in this code path + }, + &auditinternal.Event{ + User: authenticationv1.UserInfo{ + Username: testUser, + UID: "fancy-uid", + Groups: testGroups, + Extra: map[string]authenticationv1.ExtraValue{ + "extra-1": {"some", "extra", "stuff"}, + "extra-2": {"some", "more", "extra", "stuff"}, + }, + }, + ImpersonatedUser: nil, + }, + "", + ), + authenticator: nil, + wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", + wantHTTPStatus: http.StatusInternalServerError, + }, + { + name: "authenticated user with UID and bearer token and nested impersonation", + request: newRequest(t, map[string][]string{ + "User-Agent": {"test-user-agent"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Content-Length": {"some-length"}, + "Other-Header": {"test-header-value-1"}, + }, &user.DefaultInfo{ + UID: "-", // anything non-empty, rest of the fields get ignored in this code path + }, + &auditinternal.Event{ + User: authenticationv1.UserInfo{ + Username: "dude", + UID: "--1--", + Groups: []string{"--a--", "--b--"}, + Extra: map[string]authenticationv1.ExtraValue{ + "--c--": {"--d--"}, + "--e--": {"--f--"}, + }, + }, + ImpersonatedUser: &authenticationv1.UserInfo{}, + }, + "token-from-user-nested", + ), + authenticator: nil, + wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", + wantHTTPStatus: http.StatusInternalServerError, + }, + { + name: "authenticated user with 
UID and bearer token results in error", + request: newRequest(t, map[string][]string{ + "User-Agent": {"test-user-agent"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Content-Length": {"some-length"}, + "Other-Header": {"test-header-value-1"}, + }, &user.DefaultInfo{ + UID: "-", // anything non-empty, rest of the fields get ignored in this code path + }, + &auditinternal.Event{ + User: authenticationv1.UserInfo{ + Username: "dude", + UID: "--1--", + Groups: []string{"--a--", "--b--"}, + Extra: map[string]authenticationv1.ExtraValue{ + "--c--": {"--d--"}, + "--e--": {"--f--"}, + }, + }, + ImpersonatedUser: nil, + }, + "some-non-empty-token", + ), + authenticator: testTokenAuthenticator(t, "", nil, constable.Error("some err")), + wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", + wantHTTPStatus: http.StatusInternalServerError, + }, + { + name: "authenticated user with UID and bearer token does not authenticate", + request: newRequest(t, map[string][]string{ + "User-Agent": {"test-user-agent"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Content-Length": {"some-length"}, + "Other-Header": {"test-header-value-1"}, + }, &user.DefaultInfo{ + UID: "-", // anything non-empty, rest of the fields get ignored in this code path + }, + &auditinternal.Event{ + User: authenticationv1.UserInfo{ + Username: "dude", + UID: "--1--", + Groups: []string{"--a--", "--b--"}, + Extra: map[string]authenticationv1.ExtraValue{ + "--c--": {"--d--"}, + "--e--": {"--f--"}, + }, + }, + ImpersonatedUser: nil, + }, + "this-token-does-not-work", + ), + authenticator: testTokenAuthenticator(t, "some-other-token-works", nil, nil), 
+ wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", + wantHTTPStatus: http.StatusInternalServerError, + }, + { + name: "authenticated user with UID and bearer token authenticates as different user", + request: newRequest(t, map[string][]string{ + "User-Agent": {"test-user-agent"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Content-Length": {"some-length"}, + "Other-Header": {"test-header-value-1"}, + }, &user.DefaultInfo{ + UID: "-", // anything non-empty, rest of the fields get ignored in this code path + }, + &auditinternal.Event{ + User: authenticationv1.UserInfo{ + Username: "dude", + UID: "--1--", + Groups: []string{"--a--", "--b--"}, + Extra: map[string]authenticationv1.ExtraValue{ + "--c--": {"--d--"}, + "--e--": {"--f--"}, + }, + }, + ImpersonatedUser: nil, + }, + "this-token-does-work", + ), + authenticator: testTokenAuthenticator(t, "this-token-does-work", &user.DefaultInfo{Name: "someone-else"}, nil), wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", wantHTTPStatus: http.StatusInternalServerError, }, // happy path { name: "authenticated user", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -699,7 +845,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { Name: testUser, Groups: testGroups, Extra: testExtra, 
- }, nil), + }, nil, ""), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, "Impersonate-Extra-Extra-1": {"some", "extra", "stuff"}, @@ -717,9 +863,61 @@ func TestImpersonatorHTTPHandler(t *testing.T) { wantHTTPBody: "successful proxied response", wantHTTPStatus: http.StatusOK, }, + { + name: "authenticated user with UID and bearer token", + request: newRequest(t, map[string][]string{ + "User-Agent": {"test-user-agent"}, + "Accept": {"some-accepted-format"}, + "Accept-Encoding": {"some-accepted-encoding"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Content-Length": {"some-length"}, + "Other-Header": {"test-header-value-1"}, + }, &user.DefaultInfo{ + UID: "-", // anything non-empty, rest of the fields get ignored in this code path + }, + &auditinternal.Event{ + User: authenticationv1.UserInfo{ + Username: testUser, + UID: "fancy-uid", + Groups: testGroups, + Extra: map[string]authenticationv1.ExtraValue{ + "extra-1": {"some", "extra", "stuff"}, + "extra-2": {"some", "more", "extra", "stuff"}, + }, + }, + ImpersonatedUser: nil, + }, + "token-from-user", + ), + authenticator: testTokenAuthenticator( + t, + "token-from-user", + &user.DefaultInfo{ + Name: testUser, + UID: "fancy-uid", + Groups: testGroups, + Extra: testExtra, + }, + nil, + ), + wantKubeAPIServerRequestHeaders: map[string][]string{ + "Authorization": {"Bearer token-from-user"}, + "User-Agent": {"test-user-agent"}, + "Accept": {"some-accepted-format"}, + "Accept-Encoding": {"some-accepted-encoding"}, + "Connection": {"Upgrade"}, + "Upgrade": {"some-upgrade"}, + "Content-Type": {"some-type"}, + "Other-Header": {"test-header-value-1"}, + }, + wantHTTPBody: "successful proxied response", + wantHTTPStatus: http.StatusOK, + }, { name: "authenticated gke user", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": 
{"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -736,7 +934,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { "iam.gke.io/user-assertion": {"ABC"}, "user-assertion.cloud.google.com": {"XYZ"}, }, - }, nil), + }, nil, ""), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, "Impersonate-Extra-Iam.gke.io%2fuser-Assertion": {"ABC"}, @@ -756,7 +954,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "authenticated openshift/openstack user", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -781,7 +979,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { "alpha.kubernetes.io/identity/user/domain/id": {"domain-id"}, "alpha.kubernetes.io/identity/user/domain/name": {"domain-name"}, }, - }, nil), + }, nil, ""), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, "Impersonate-Extra-Scopes.authorization.openshift.io": {"user:info", "user:full"}, @@ -805,7 +1003,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "authenticated user with almost reserved key", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -820,7 +1018,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { Extra: map[string][]string{ "foo.iimpersonation-proxy.concierge.pinniped.dev": {"still-valid-value"}, }, - }, nil), + }, nil, ""), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, "Impersonate-Extra-Foo.iimpersonation-Proxy.concierge.pinniped.dev": {"still-valid-value"}, @@ -839,7 +1037,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: 
"authenticated user with almost reserved key and nested impersonation", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -866,6 +1064,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, ImpersonatedUser: &authenticationv1.UserInfo{}, }, + "", ), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, @@ -886,7 +1085,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "authenticated user with nested impersonation", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -912,6 +1111,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, ImpersonatedUser: &authenticationv1.UserInfo{}, }, + "", ), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, @@ -933,7 +1133,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "authenticated gke user with nested impersonation", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, "Accept-Encoding": {"some-accepted-encoding"}, @@ -959,6 +1159,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, ImpersonatedUser: &authenticationv1.UserInfo{}, }, + "", ), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, @@ -980,7 +1181,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "authenticated user with nested impersonation of gke user", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, "Accept": {"some-accepted-format"}, 
"Accept-Encoding": {"some-accepted-encoding"}, @@ -1010,6 +1211,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, ImpersonatedUser: &authenticationv1.UserInfo{}, }, + "", ), wantKubeAPIServerRequestHeaders: map[string][]string{ "Authorization": {"Bearer some-service-account-token"}, @@ -1031,13 +1233,13 @@ func TestImpersonatorHTTPHandler(t *testing.T) { }, { name: "user is authenticated but the kube API request returns an error", - request: newRequest(map[string][]string{ + request: newRequest(t, map[string][]string{ "User-Agent": {"test-user-agent"}, }, &user.DefaultInfo{ Name: testUser, Groups: testGroups, Extra: testExtra, - }, nil), + }, nil, ""), kubeAPIServerStatusCode: http.StatusNotFound, wantKubeAPIServerRequestHeaders: map[string][]string{ "Accept-Encoding": {"gzip"}, // because the rest client used in this test does not disable compression @@ -1095,6 +1297,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) { metav1.AddToGroupVersion(scheme, metav1.Unversioned) codecs := serializer.NewCodecFactory(scheme) serverConfig := genericapiserver.NewRecommendedConfig(codecs) + serverConfig.Authentication.Authenticator = tt.authenticator w := httptest.NewRecorder() @@ -1137,6 +1340,83 @@ func TestImpersonatorHTTPHandler(t *testing.T) { } } +func newRequest(t *testing.T, h http.Header, userInfo user.Info, event *auditinternal.Event, token string) *http.Request { + t.Helper() + + validURL, err := url.Parse("http://pinniped.dev/blah") + require.NoError(t, err) + + ctx := context.Background() + + if userInfo != nil { + ctx = request.WithUser(ctx, userInfo) + } + + ae := &auditinternal.Event{Level: auditinternal.LevelMetadata} + if event != nil { + ae = event + } + ctx = request.WithAuditEvent(ctx, ae) + + reqInfo := &request.RequestInfo{ + IsResourceRequest: false, + Path: validURL.Path, + Verb: "get", + } + ctx = request.WithRequestInfo(ctx, reqInfo) + + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"must-be-ignored"}) + + if len(token) != 0 
{ + ctx = context.WithValue(ctx, tokenKey, token) + } + + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour)) + t.Cleanup(cancel) + + r, err := http.NewRequestWithContext(ctx, http.MethodGet, validURL.String(), nil) + require.NoError(t, err) + + r.Header = h + + return r +} + +func testTokenAuthenticator(t *testing.T, token string, userInfo user.Info, err error) authenticator.Request { + t.Helper() + + return authenticator.RequestFunc(func(r *http.Request) (*authenticator.Response, bool, error) { + if auds, ok := authenticator.AudiencesFrom(r.Context()); ok || len(auds) != 0 { + t.Errorf("unexpected audiences on request: %v", auds) + } + + if ctxToken := tokenFrom(r.Context()); len(ctxToken) != 0 { + t.Errorf("unexpected token on request: %v", ctxToken) + } + + if _, ok := r.Context().Deadline(); !ok { + t.Error("request should always have deadline") + } + + if err != nil { + return nil, false, err + } + + var reqToken string + _, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) { + reqToken = token + return nil, false, nil + })).AuthenticateRequest(r) + + if reqToken != token { + return nil, false, nil + } + + return &authenticator.Response{User: userInfo}, true, nil + }) +} + type clientCert struct { certPEM, keyPEM []byte } @@ -1242,7 +1522,9 @@ func Test_deleteKnownImpersonationHeaders(t *testing.T) { inputReq := (&http.Request{Header: tt.headers}).WithContext(context.Background()) inputReqCopy := inputReq.Clone(inputReq.Context()) + var called bool delegate := http.HandlerFunc(func(w http.ResponseWriter, outputReq *http.Request) { + called = true require.Nil(t, w) // assert only headers mutated @@ -1259,6 +1541,85 @@ func Test_deleteKnownImpersonationHeaders(t *testing.T) { deleteKnownImpersonationHeaders(delegate).ServeHTTP(nil, inputReq) require.Equal(t, inputReqCopy, inputReq) // assert no mutation occurred + require.True(t, called) + }) 
+ } +} + +func Test_withBearerTokenPreservation(t *testing.T) { + tests := []struct { + name string + headers http.Header + want string + }{ + { + name: "has bearer token", + headers: map[string][]string{ + "Authorization": {"Bearer thingy"}, + }, + want: "thingy", + }, + { + name: "has bearer token but too many preceding spaces", + headers: map[string][]string{ + "Authorization": {"Bearer 1"}, + }, + want: "", + }, + { + name: "has bearer token with space, only keeps first part", + headers: map[string][]string{ + "Authorization": {"Bearer panda man"}, + }, + want: "panda", + }, + { + name: "has bearer token with surrounding whitespace", + headers: map[string][]string{ + "Authorization": {" Bearer cool beans "}, + }, + want: "cool", + }, + { + name: "has multiple bearer tokens", + headers: map[string][]string{ + "Authorization": {"Bearer this thing", "what does this mean?"}, + }, + want: "this", + }, + { + name: "no bearer token", + headers: map[string][]string{ + "Not-Authorization": {"Bearer not a token"}, + }, + want: "", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + inputReq := (&http.Request{Header: tt.headers}).WithContext(context.Background()) + inputReqCopy := inputReq.Clone(inputReq.Context()) + + var called bool + delegate := http.HandlerFunc(func(w http.ResponseWriter, outputReq *http.Request) { + called = true + require.Nil(t, w) + + // assert only context is mutated + outputReqCopy := outputReq.Clone(inputReq.Context()) + require.Equal(t, inputReqCopy, outputReqCopy) + + require.Equal(t, tt.want, tokenFrom(outputReq.Context())) + + if len(tt.want) == 0 { + require.True(t, inputReq == outputReq, "expect req to passed through when no token expected") + } + }) + + withBearerTokenPreservation(delegate).ServeHTTP(nil, inputReq) + require.Equal(t, inputReqCopy, inputReq) // assert no mutation occurred + require.True(t, called) }) } } diff --git a/internal/controller/authenticator/authncache/cache.go 
b/internal/controller/authenticator/authncache/cache.go index 5aa31004..14366c39 100644 --- a/internal/controller/authenticator/authncache/cache.go +++ b/internal/controller/authenticator/authncache/cache.go @@ -16,6 +16,7 @@ import ( loginapi "go.pinniped.dev/generated/latest/apis/concierge/login" "go.pinniped.dev/internal/constable" "go.pinniped.dev/internal/plog" + "go.pinniped.dev/internal/valuelesscontext" ) // ErrNoSuchAuthenticator is returned by Cache.AuthenticateTokenCredentialRequest() when the requested authenticator is not configured. @@ -101,7 +102,7 @@ func (c *Cache) AuthenticateTokenCredentialRequest(ctx context.Context, req *log // The incoming context could have an audience. Since we do not want to handle audiences right now, do not pass it // through directly to the authentication webhook. - ctx = valuelessContext{ctx} + ctx = valuelesscontext.New(ctx) // Call the selected authenticator. resp, authenticated, err := val.AuthenticateToken(ctx, req.Spec.Token) @@ -119,7 +120,3 @@ func (c *Cache) AuthenticateTokenCredentialRequest(ctx context.Context, req *log } return respUser, nil } - -type valuelessContext struct{ context.Context } - -func (valuelessContext) Value(interface{}) interface{} { return nil } diff --git a/internal/valuelesscontext/valuelesscontext.go b/internal/valuelesscontext/valuelesscontext.go new file mode 100644 index 00000000..93c90a5e --- /dev/null +++ b/internal/valuelesscontext/valuelesscontext.go @@ -0,0 +1,14 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package valuelesscontext + +import "context" + +func New(ctx context.Context) context.Context { + return valuelessContext{Context: ctx} +} + +type valuelessContext struct{ context.Context } + +func (valuelessContext) Value(interface{}) interface{} { return nil } diff --git a/test/integration/concierge_impersonation_proxy_test.go b/test/integration/concierge_impersonation_proxy_test.go index 97a328ac..390ae1f1 100644 --- a/test/integration/concierge_impersonation_proxy_test.go +++ b/test/integration/concierge_impersonation_proxy_test.go @@ -6,7 +6,11 @@ package integration import ( "bytes" "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "crypto/x509" + "crypto/x509/pkix" "encoding/base64" "encoding/json" "encoding/pem" @@ -28,6 +32,8 @@ import ( "golang.org/x/net/http2" authenticationv1 "k8s.io/api/authentication/v1" authorizationv1 "k8s.io/api/authorization/v1" + certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -42,6 +48,8 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/transport" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/certificate/csr" "k8s.io/client-go/util/keyutil" "sigs.k8s.io/yaml" @@ -780,32 +788,148 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl whoAmI, ) - // Test using a service account token. Authenticating as Service Accounts through the impersonation - // proxy is not supported, so it should fail. + // Test using a service account token. 
namespaceName := createTestNamespace(t, adminClient) - _, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName) + saName, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName) impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t, &loginv1alpha1.ClusterCredential{Token: saToken}, impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge - _, err = impersonationProxyServiceAccountPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests(). + whoAmI, err = impersonationProxyServiceAccountPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests(). Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{}) - require.EqualError(t, err, "Internal error occurred: unimplemented functionality - unable to act as current user") - require.True(t, k8serrors.IsInternalError(err), err) - require.Equal(t, &k8serrors.StatusError{ - ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusInternalServerError, - Reason: metav1.StatusReasonInternalError, - Details: &metav1.StatusDetails{ - Causes: []metav1.StatusCause{ - { - Message: "unimplemented functionality - unable to act as current user", - }, + require.NoError(t, err) + require.Equal(t, + expectedWhoAmIRequestResponse( + serviceaccount.MakeUsername(namespaceName, saName), + []string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"}, + nil, + ), + whoAmI, + ) + }) + + t.Run("WhoAmIRequests and SA token request", func(t *testing.T) { + namespaceName := createTestNamespace(t, adminClient) + kubeClient := adminClient.CoreV1() + saName, _, saUID := createServiceAccountToken(ctx, t, adminClient, namespaceName) + + _, tokenRequestProbeErr := kubeClient.ServiceAccounts(namespaceName).CreateToken(ctx, saName, &authenticationv1.TokenRequest{}, metav1.CreateOptions{}) + if k8serrors.IsNotFound(tokenRequestProbeErr) && tokenRequestProbeErr.Error() == "the 
server could not find the requested resource" { + return // stop test early since the token request API is not enabled on this cluster - other errors are caught below + } + + pod, err := kubeClient.Pods(namespaceName).Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-impersonation-proxy-", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ignored-but-required", + Image: "does-not-matter", }, }, - Message: "Internal error occurred: unimplemented functionality - unable to act as current user", + ServiceAccountName: saName, }, - }, err) + }, metav1.CreateOptions{}) + require.NoError(t, err) + + tokenRequestBadAudience, err := kubeClient.ServiceAccounts(namespaceName).CreateToken(ctx, saName, &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{"should-fail-because-wrong-audience"}, // anything that is not an API server audience + BoundObjectRef: &authenticationv1.BoundObjectReference{ + Kind: "Pod", + APIVersion: "", + Name: pod.Name, + UID: pod.UID, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err) + + impersonationProxySABadAudPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t, + &loginv1alpha1.ClusterCredential{Token: tokenRequestBadAudience.Status.Token}, + impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge + + _, badAudErr := impersonationProxySABadAudPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests(). 
+ Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{}) + require.True(t, k8serrors.IsUnauthorized(badAudErr), library.Sdump(badAudErr)) + + tokenRequest, err := kubeClient.ServiceAccounts(namespaceName).CreateToken(ctx, saName, &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{}, + BoundObjectRef: &authenticationv1.BoundObjectReference{ + Kind: "Pod", + APIVersion: "", + Name: pod.Name, + UID: pod.UID, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err) + + impersonationProxySAClient := newImpersonationProxyClientWithCredentials(t, + &loginv1alpha1.ClusterCredential{Token: tokenRequest.Status.Token}, + impersonationProxyURL, impersonationProxyCACertPEM, nil) + + whoAmITokenReq, err := impersonationProxySAClient.PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests(). + Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{}) + require.NoError(t, err) + + // new service account tokens include the pod info in the extra fields + require.Equal(t, + expectedWhoAmIRequestResponse( + serviceaccount.MakeUsername(namespaceName, saName), + []string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"}, + map[string]identityv1alpha1.ExtraValue{ + "authentication.kubernetes.io/pod-name": {pod.Name}, + "authentication.kubernetes.io/pod-uid": {string(pod.UID)}, + }, + ), + whoAmITokenReq, + ) + + // allow the test SA to create CSRs + library.CreateTestClusterRoleBinding(t, + rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: saName, Namespace: namespaceName}, + rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "system:node-bootstrapper"}, + ) + library.WaitForUserToHaveAccess(t, serviceaccount.MakeUsername(namespaceName, saName), []string{}, &authorizationv1.ResourceAttributes{ + Verb: "create", Group: certificatesv1.GroupName, Version: "*", Resource: "certificatesigningrequests", + }) + + privateKey, err := 
ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + csrPEM, err := cert.MakeCSR(privateKey, &pkix.Name{ + CommonName: "panda-man", + Organization: []string{"living-the-dream", "need-more-sleep"}, + }, nil, nil) + require.NoError(t, err) + + csrName, _, err := csr.RequestCertificate( + impersonationProxySAClient.Kubernetes, + csrPEM, + "", + certificatesv1.KubeAPIServerClientSignerName, + []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + privateKey, + ) + require.NoError(t, err) + + saCSR, err := impersonationProxySAClient.Kubernetes.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{}) + require.NoError(t, err) + + err = adminClient.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{}) + require.NoError(t, err) + + // make sure the user info that the CSR captured matches the SA, including the UID + require.Equal(t, serviceaccount.MakeUsername(namespaceName, saName), saCSR.Spec.Username) + require.Equal(t, string(saUID), saCSR.Spec.UID) + require.Equal(t, []string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"}, saCSR.Spec.Groups) + require.Equal(t, map[string]certificatesv1beta1.ExtraValue{ + "authentication.kubernetes.io/pod-name": {pod.Name}, + "authentication.kubernetes.io/pod-uid": {string(pod.UID)}, + }, saCSR.Spec.Extra) }) t.Run("kubectl as a client", func(t *testing.T) { From 44c7f8daf07270076c38b6f685375f9d0f5bd562 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Fri, 30 Apr 2021 09:45:34 -0400 Subject: [PATCH 15/32] valuelesscontext: add some unit tests Signed-off-by: Monis Khan --- .../valuelesscontext/valuelesscontext_test.go | 273 ++++++++++++++++++ 1 file changed, 273 insertions(+) create mode 100644 internal/valuelesscontext/valuelesscontext_test.go diff --git a/internal/valuelesscontext/valuelesscontext_test.go b/internal/valuelesscontext/valuelesscontext_test.go new file mode 100644 index 
00000000..f7f83abb --- /dev/null +++ b/internal/valuelesscontext/valuelesscontext_test.go @@ -0,0 +1,273 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package valuelesscontext + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +func TestNew(t *testing.T) { + t.Parallel() + + type contextKey int + + tests := []struct { + name string + f func(*testing.T, context.Context) context.Context + want func(*testing.T, context.Context) + }{ + { + name: "standard context", + f: func(t *testing.T, ctx context.Context) context.Context { + return ctx + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.False(t, ok) + require.Nil(t, auds) + + deadline, ok := ctx.Deadline() + require.False(t, ok) + require.Zero(t, deadline) + + require.Nil(t, ctx.Done()) + + require.NoError(t, ctx.Err()) + }, + }, + { + name: "standard context with audience", + f: func(t *testing.T, ctx context.Context) context.Context { + return authenticator.WithAudiences(ctx, authenticator.Audiences{"1", "2"}) + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.True(t, ok) + require.Equal(t, authenticator.Audiences{"1", "2"}, auds) + + deadline, ok := ctx.Deadline() + require.False(t, ok) + require.Zero(t, deadline) + + require.Nil(t, ctx.Done()) + + require.NoError(t, ctx.Err()) + }, + }, + { + name: "standard context with audience and past deadline", + f: func(t *testing.T, ctx context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) + t.Cleanup(cancel) + return ctx + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + 
require.True(t, ok) + require.Equal(t, authenticator.Audiences{"3", "4"}, auds) + + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.Before(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + case <-time.After(10 * time.Second): + t.Error("expected closed done channel") + } + + require.Equal(t, context.DeadlineExceeded, ctx.Err()) + }, + }, + { + name: "valueless context with audience and past deadline", + f: func(t *testing.T, ctx context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) + t.Cleanup(cancel) + return New(ctx) + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.False(t, ok) + require.Nil(t, auds) + + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.Before(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + case <-time.After(10 * time.Second): + t.Error("expected closed done channel") + } + + require.Equal(t, context.DeadlineExceeded, ctx.Err()) + }, + }, + { + name: "standard context with audience and custom value and past deadline", + f: func(t *testing.T, ctx context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) + t.Cleanup(cancel) + ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") + return ctx + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.True(t, ok) + require.Equal(t, authenticator.Audiences{"3", "4"}, auds) + + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.True(t, ok) + require.Equal(t, "mooo", val) 
+ + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.Before(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + case <-time.After(10 * time.Second): + t.Error("expected closed done channel") + } + + require.Equal(t, context.DeadlineExceeded, ctx.Err()) + }, + }, + { + name: "valueless context with audience and custom value and past deadline", + f: func(t *testing.T, ctx context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) + t.Cleanup(cancel) + ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") + return New(ctx) + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.False(t, ok) + require.Nil(t, auds) + + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.False(t, ok) + require.Zero(t, val) + + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.Before(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + case <-time.After(10 * time.Second): + t.Error("expected closed done channel") + } + + require.Equal(t, context.DeadlineExceeded, ctx.Err()) + }, + }, + { + name: "standard context with audience and custom value and future deadline", + f: func(t *testing.T, ctx context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour)) + t.Cleanup(cancel) + ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") + return ctx + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.True(t, ok) + require.Equal(t, authenticator.Audiences{"3", "4"}, auds) + + 
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.True(t, ok) + require.Equal(t, "mooo", val) + + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.After(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + t.Error("expected not closed done channel") + case <-time.After(3 * time.Second): + } + + require.NoError(t, ctx.Err()) + }, + }, + { + name: "valueless context with audience and custom value and future deadline", + f: func(t *testing.T, ctx context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour)) + t.Cleanup(cancel) + ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") + return New(ctx) + }, + want: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.False(t, ok) + require.Nil(t, auds) + + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.False(t, ok) + require.Zero(t, val) + + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.After(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + t.Error("expected not closed done channel") + case <-time.After(3 * time.Second): + } + + require.NoError(t, ctx.Err()) + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := tt.f(t, context.Background()) + tt.want(t, ctx) + }) + } +} From b5ffab63309ad1a3cf968bcc911f220e4a4b5fbb Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Fri, 30 Apr 2021 10:33:11 -0400 Subject: [PATCH 16/32] valuelesscontext: make unit tests more clear Signed-off-by: Monis Khan --- .../valuelesscontext/valuelesscontext_test.go | 233 ++++++++---------- 1 file changed, 101 insertions(+), 132 deletions(-) diff --git 
a/internal/valuelesscontext/valuelesscontext_test.go b/internal/valuelesscontext/valuelesscontext_test.go index f7f83abb..ba593dfe 100644 --- a/internal/valuelesscontext/valuelesscontext_test.go +++ b/internal/valuelesscontext/valuelesscontext_test.go @@ -18,20 +18,26 @@ func TestNew(t *testing.T) { type contextKey int tests := []struct { - name string - f func(*testing.T, context.Context) context.Context - want func(*testing.T, context.Context) + name string + f func(*testing.T, context.Context) context.Context + wantReg, wantNew, wantBoth func(*testing.T, context.Context) }{ { - name: "standard context", + name: "empty context", f: func(t *testing.T, ctx context.Context) context.Context { return ctx }, - want: func(t *testing.T, ctx context.Context) { + wantReg: func(t *testing.T, ctx context.Context) {}, + wantNew: func(t *testing.T, ctx context.Context) {}, + wantBoth: func(t *testing.T, ctx context.Context) { auds, ok := authenticator.AudiencesFrom(ctx) require.False(t, ok) require.Nil(t, auds) + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.False(t, ok) + require.Zero(t, val) + deadline, ok := ctx.Deadline() require.False(t, ok) require.Zero(t, deadline) @@ -42,14 +48,24 @@ func TestNew(t *testing.T) { }, }, { - name: "standard context with audience", + name: "context with audience", f: func(t *testing.T, ctx context.Context) context.Context { return authenticator.WithAudiences(ctx, authenticator.Audiences{"1", "2"}) }, - want: func(t *testing.T, ctx context.Context) { + wantReg: func(t *testing.T, ctx context.Context) { auds, ok := authenticator.AudiencesFrom(ctx) require.True(t, ok) require.Equal(t, authenticator.Audiences{"1", "2"}, auds) + }, + wantNew: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.False(t, ok) + require.Nil(t, auds) + }, + wantBoth: func(t *testing.T, ctx context.Context) { + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.False(t, ok) + 
require.Zero(t, val) deadline, ok := ctx.Deadline() require.False(t, ok) @@ -61,7 +77,7 @@ func TestNew(t *testing.T) { }, }, { - name: "standard context with audience and past deadline", + name: "context with audience and past deadline", f: func(t *testing.T, ctx context.Context) context.Context { ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) var cancel context.CancelFunc @@ -69,107 +85,17 @@ func TestNew(t *testing.T) { t.Cleanup(cancel) return ctx }, - want: func(t *testing.T, ctx context.Context) { + wantReg: func(t *testing.T, ctx context.Context) { auds, ok := authenticator.AudiencesFrom(ctx) require.True(t, ok) require.Equal(t, authenticator.Audiences{"3", "4"}, auds) - - deadline, ok := ctx.Deadline() - require.True(t, ok) - require.NotZero(t, deadline) - require.True(t, deadline.Before(time.Now())) - - ch := ctx.Done() - require.NotNil(t, ch) - select { - case <-ch: - case <-time.After(10 * time.Second): - t.Error("expected closed done channel") - } - - require.Equal(t, context.DeadlineExceeded, ctx.Err()) }, - }, - { - name: "valueless context with audience and past deadline", - f: func(t *testing.T, ctx context.Context) context.Context { - ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) - t.Cleanup(cancel) - return New(ctx) - }, - want: func(t *testing.T, ctx context.Context) { + wantNew: func(t *testing.T, ctx context.Context) { auds, ok := authenticator.AudiencesFrom(ctx) require.False(t, ok) require.Nil(t, auds) - - deadline, ok := ctx.Deadline() - require.True(t, ok) - require.NotZero(t, deadline) - require.True(t, deadline.Before(time.Now())) - - ch := ctx.Done() - require.NotNil(t, ch) - select { - case <-ch: - case <-time.After(10 * time.Second): - t.Error("expected closed done channel") - } - - require.Equal(t, context.DeadlineExceeded, ctx.Err()) }, - }, - { - name: "standard context with 
audience and custom value and past deadline", - f: func(t *testing.T, ctx context.Context) context.Context { - ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) - t.Cleanup(cancel) - ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") - return ctx - }, - want: func(t *testing.T, ctx context.Context) { - auds, ok := authenticator.AudiencesFrom(ctx) - require.True(t, ok) - require.Equal(t, authenticator.Audiences{"3", "4"}, auds) - - val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) - require.True(t, ok) - require.Equal(t, "mooo", val) - - deadline, ok := ctx.Deadline() - require.True(t, ok) - require.NotZero(t, deadline) - require.True(t, deadline.Before(time.Now())) - - ch := ctx.Done() - require.NotNil(t, ch) - select { - case <-ch: - case <-time.After(10 * time.Second): - t.Error("expected closed done channel") - } - - require.Equal(t, context.DeadlineExceeded, ctx.Err()) - }, - }, - { - name: "valueless context with audience and custom value and past deadline", - f: func(t *testing.T, ctx context.Context) context.Context { - ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) - t.Cleanup(cancel) - ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") - return New(ctx) - }, - want: func(t *testing.T, ctx context.Context) { - auds, ok := authenticator.AudiencesFrom(ctx) - require.False(t, ok) - require.Nil(t, auds) - + wantBoth: func(t *testing.T, ctx context.Context) { val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) require.False(t, ok) require.Zero(t, val) @@ -191,7 +117,52 @@ func TestNew(t *testing.T) { }, }, { - name: "standard context with audience and custom value and future deadline", + name: "context with audience and custom value and past deadline", + f: func(t *testing.T, ctx 
context.Context) context.Context { + ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour)) + t.Cleanup(cancel) + ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") + return ctx + }, + wantReg: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.True(t, ok) + require.Equal(t, authenticator.Audiences{"3", "4"}, auds) + + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.True(t, ok) + require.Equal(t, "mooo", val) + }, + wantNew: func(t *testing.T, ctx context.Context) { + auds, ok := authenticator.AudiencesFrom(ctx) + require.False(t, ok) + require.Nil(t, auds) + + val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) + require.False(t, ok) + require.Zero(t, val) + }, + wantBoth: func(t *testing.T, ctx context.Context) { + deadline, ok := ctx.Deadline() + require.True(t, ok) + require.NotZero(t, deadline) + require.True(t, deadline.Before(time.Now())) + + ch := ctx.Done() + require.NotNil(t, ch) + select { + case <-ch: + case <-time.After(10 * time.Second): + t.Error("expected closed done channel") + } + + require.Equal(t, context.DeadlineExceeded, ctx.Err()) + }, + }, + { + name: "context with audience and custom value and future deadline", f: func(t *testing.T, ctx context.Context) context.Context { ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) var cancel context.CancelFunc @@ -200,7 +171,7 @@ func TestNew(t *testing.T) { ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") return ctx }, - want: func(t *testing.T, ctx context.Context) { + wantReg: func(t *testing.T, ctx context.Context) { auds, ok := authenticator.AudiencesFrom(ctx) require.True(t, ok) require.Equal(t, authenticator.Audiences{"3", "4"}, auds) @@ -208,34 +179,8 @@ func TestNew(t *testing.T) { val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) require.True(t, ok) 
require.Equal(t, "mooo", val) - - deadline, ok := ctx.Deadline() - require.True(t, ok) - require.NotZero(t, deadline) - require.True(t, deadline.After(time.Now())) - - ch := ctx.Done() - require.NotNil(t, ch) - select { - case <-ch: - t.Error("expected not closed done channel") - case <-time.After(3 * time.Second): - } - - require.NoError(t, ctx.Err()) }, - }, - { - name: "valueless context with audience and custom value and future deadline", - f: func(t *testing.T, ctx context.Context) context.Context { - ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"}) - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour)) - t.Cleanup(cancel) - ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo") - return New(ctx) - }, - want: func(t *testing.T, ctx context.Context) { + wantNew: func(t *testing.T, ctx context.Context) { auds, ok := authenticator.AudiencesFrom(ctx) require.False(t, ok) require.Nil(t, auds) @@ -243,7 +188,8 @@ func TestNew(t *testing.T) { val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string) require.False(t, ok) require.Zero(t, val) - + }, + wantBoth: func(t *testing.T, ctx context.Context) { deadline, ok := ctx.Deadline() require.True(t, ok) require.NotZero(t, deadline) @@ -267,7 +213,30 @@ func TestNew(t *testing.T) { t.Parallel() ctx := tt.f(t, context.Background()) - tt.want(t, ctx) + + t.Run("reg", func(t *testing.T) { + t.Parallel() + + tt.wantReg(t, ctx) + }) + + t.Run("reg-both", func(t *testing.T) { + t.Parallel() + + tt.wantBoth(t, ctx) + }) + + t.Run("new", func(t *testing.T) { + t.Parallel() + + tt.wantNew(t, New(ctx)) + }) + + t.Run("new-both", func(t *testing.T) { + t.Parallel() + + tt.wantBoth(t, New(ctx)) + }) }) } } From ab94b97f4a43f80e3509368aaab57a631a48cb79 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Fri, 30 Apr 2021 12:10:04 -0700 Subject: [PATCH 17/32] Change login.go to use logr.logger --- cmd/pinniped/cmd/login_oidc.go | 2 +- pkg/oidcclient/login.go | 16 
++--- pkg/oidcclient/login_test.go | 109 ++++++++++++++++----------------- 3 files changed, 64 insertions(+), 63 deletions(-) diff --git a/cmd/pinniped/cmd/login_oidc.go b/cmd/pinniped/cmd/login_oidc.go index d52b71d8..4c1620be 100644 --- a/cmd/pinniped/cmd/login_oidc.go +++ b/cmd/pinniped/cmd/login_oidc.go @@ -134,7 +134,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin // Initialize the login handler. opts := []oidcclient.Option{ oidcclient.WithContext(cmd.Context()), - oidcclient.WithLogger(*pLogger), + oidcclient.WithLogger(klogr.New()), oidcclient.WithScopes(flags.scopes), oidcclient.WithSessionCache(sessionCache), } diff --git a/pkg/oidcclient/login.go b/pkg/oidcclient/login.go index d606dec7..142b6779 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -17,6 +17,7 @@ import ( "time" "github.com/coreos/go-oidc/v3/oidc" + "github.com/go-logr/logr" "github.com/pkg/browser" "golang.org/x/oauth2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +25,6 @@ import ( "go.pinniped.dev/internal/httputil/httperr" "go.pinniped.dev/internal/httputil/securityheader" "go.pinniped.dev/internal/oidc/provider" - "go.pinniped.dev/internal/plog" "go.pinniped.dev/internal/upstreamoidc" "go.pinniped.dev/pkg/oidcclient/nonce" "go.pinniped.dev/pkg/oidcclient/oidctypes" @@ -45,12 +45,14 @@ const ( // overallTimeout is the overall time that a login is allowed to take. This includes several user interactions, so // we set this to be relatively long. overallTimeout = 90 * time.Minute + + debugLogLevel = 4 ) type handlerState struct { // Basic parameters. ctx context.Context - logger plog.PLogger + logger logr.Logger issuer string clientID string scopes []string @@ -101,7 +103,7 @@ func WithContext(ctx context.Context) Option { // WithLogger specifies a PLogger to use with the login. // If not specified this will default to a new logger. 
-func WithLogger(logger plog.PLogger) Option { +func WithLogger(logger logr.Logger) Option { return func(h *handlerState) error { h.logger = logger return nil @@ -271,7 +273,7 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) { // If the ID token is still valid for a bit, return it immediately and skip the rest of the flow. cached := h.cache.GetToken(cacheKey) if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity { - h.logger.Debug("Found unexpired cached token.") + h.logger.V(debugLogLevel).Info("Pinniped: Found unexpired cached token.") return cached, nil } @@ -339,7 +341,7 @@ func (h *handlerState) initOIDCDiscovery() error { return nil } - h.logger.Debug("Performing OIDC discovery", "issuer", h.issuer) + h.logger.V(debugLogLevel).Info("Pinniped: Performing OIDC discovery", "issuer", h.issuer) var err error h.provider, err = oidc.NewProvider(h.ctx, h.issuer) if err != nil { @@ -356,7 +358,7 @@ func (h *handlerState) initOIDCDiscovery() error { } func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) { - h.logger.Debug("Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience) + h.logger.V(debugLogLevel).Info("Pinniped: Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience) // Perform OIDC discovery. This may have already been performed if there was not a cached base token. 
if err := h.initOIDCDiscovery(); err != nil { return nil, err @@ -427,7 +429,7 @@ func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidcty } func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) { - h.logger.Debug("Refreshing cached token.") + h.logger.V(debugLogLevel).Info("Pinniped: Refreshing cached token.") refreshSource := h.oauth2Config.TokenSource(ctx, &oauth2.Token{RefreshToken: refreshToken.Token}) refreshed, err := refreshSource.Token() diff --git a/pkg/oidcclient/login_test.go b/pkg/oidcclient/login_test.go index 89d25e84..c3ac6609 100644 --- a/pkg/oidcclient/login_test.go +++ b/pkg/oidcclient/login_test.go @@ -13,6 +13,8 @@ import ( "testing" "time" + "github.com/go-logr/stdr" + "github.com/coreos/go-oidc/v3/oidc" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -24,7 +26,6 @@ import ( "go.pinniped.dev/internal/httputil/httperr" "go.pinniped.dev/internal/mocks/mockupstreamoidcidentityprovider" "go.pinniped.dev/internal/oidc/provider" - "go.pinniped.dev/internal/plog" "go.pinniped.dev/internal/testutil" "go.pinniped.dev/internal/testutil/testlogger" "go.pinniped.dev/pkg/oidcclient/nonce" @@ -273,7 +274,7 @@ func TestLogin(t *testing.T) { return WithSessionCache(cache)(h) } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""}, wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`, }, { @@ -295,7 +296,7 @@ func TestLogin(t *testing.T) { return WithSessionCache(cache)(h) } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\""}, wantToken: 
&testToken, }, { @@ -304,7 +305,7 @@ func TestLogin(t *testing.T) { return func(h *handlerState) error { return nil } }, issuer: errorServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), }, { @@ -344,8 +345,8 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""}, wantToken: &testToken, }, { @@ -378,8 +379,8 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""}, wantErr: "some validation error", }, { @@ -406,8 +407,8 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=4 \"msg\"=\"Pinniped: 
Refreshing cached token.\""}, // Expect this to fall through to the authorization code flow, so it fails here. wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", }, @@ -420,7 +421,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", }, { @@ -431,7 +432,7 @@ func TestLogin(t *testing.T) { }) }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "could not open browser: some browser open error", }, { @@ -449,7 +450,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "timed out waiting for token callback: context canceled", }, { @@ -466,7 +467,7 @@ func TestLogin(t *testing.T) { } }, issuer: successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: "error handling callback: some callback error", }, { @@ -527,7 +528,7 @@ func TestLogin(t *testing.T) { } }, issuer: 
successServer.URL, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantToken: &testToken, }, { @@ -551,9 +552,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""}, wantErr: fmt.Sprintf("failed to exchange token: could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), }, { @@ -577,9 +578,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""}, wantErr: `failed to exchange token: could not build RFC8693 request: parse "%": 
invalid URL escape "%"`, }, { @@ -603,9 +604,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-http-response\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-http-response\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: fmt.Sprintf(`failed to exchange token: Post "%s/token": failed to parse Location header "%%": parse "%%": invalid URL escape "%%"`, successServer.URL), }, { @@ -629,9 +630,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-http-400\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-http-400\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: unexpected HTTP response status 400`, }, { @@ -655,9 +656,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 
\"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-content-type\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-content-type\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: failed to decode content-type header: mime: invalid media parameter`, }, { @@ -681,9 +682,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-wrong-content-type\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-wrong-content-type\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: unexpected HTTP response content type "invalid"`, }, { @@ -707,9 +708,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-json\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + 
"\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-json\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: failed to decode response: unexpected EOF`, }, { @@ -733,9 +734,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-tokentype\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-tokentype\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: got unexpected token_type "invalid"`, }, { @@ -759,9 +760,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-issuedtokentype\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-issuedtokentype\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC 
discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: got unexpected issued_token_type "invalid"`, }, { @@ -785,9 +786,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-jwt\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-jwt\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantErr: `failed to exchange token: received invalid JWT: oidc: malformed jwt: square/go-jose: compact JWS format must have three parts`, }, { @@ -817,9 +818,9 @@ func TestLogin(t *testing.T) { return nil } }, - wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Found unexpired cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, + wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""}, wantToken: &testExchangedToken, }, { @@ -868,9 +869,9 @@ func TestLogin(t *testing.T) { } }, wantLogs: []string{ - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", - 
"\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Refreshing cached token.\"", - "\"level\"=0 \"msg\"=\"Pinniped Test Prefix: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"", + "\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\"", + "\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"", }, wantToken: &testExchangedToken, }, @@ -878,18 +879,16 @@ func TestLogin(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug) - require.NoError(t, err) testLogger := testlogger.New(t) - pLogger := plog.New("Pinniped Test Prefix: ") klog.SetLogger(testLogger) + stdr.SetVerbosity(debugLogLevel) // set stdr's global log level to debug so the test logger will send output. tok, err := Login(tt.issuer, tt.clientID, WithContext(context.Background()), WithListenPort(0), WithScopes([]string{"test-scope"}), tt.opt(t), - WithLogger(pLogger), + WithLogger(testLogger), ) require.Equal(t, tt.wantLogs, testLogger.Lines()) if tt.wantErr != "" { From 71e38d232e70bce84a63ff2cf26fce6566edb6db Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 3 May 2021 09:13:18 -0700 Subject: [PATCH 18/32] login.go discards logs by default Signed-off-by: Margo Crawford --- pkg/oidcclient/login.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/oidcclient/login.go b/pkg/oidcclient/login.go index 142b6779..14ca72d3 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -196,6 +196,7 @@ func Login(issuer string, clientID string, opts ...Option) (*oidctypes.Token, er cache: &nopCache{}, callbackPath: "/callback", ctx: context.Background(), + logger: logr.Discard(), // discard logs unless a logger is specified callbacks: make(chan callbackResult), httpClient: http.DefaultClient, From 
b80cbb8cc55edc41a5d427f88a65144d25535d1e Mon Sep 17 00:00:00 2001 From: Matt Moyer Date: Mon, 3 May 2021 16:20:13 -0500 Subject: [PATCH 19/32] Run kube-cert-agent pod as Concierge ServiceAccount. Since 0dfb3e95c55b0f38d899b5efc40cd0643e1e0f86, we no longer directly create the kube-cert-agent Pod, so our "use" permission on PodSecurityPolicies no longer has the intended effect. Since the deployments controller is now the one creating pods for us, we need to get the permission on the PodSpec of the target pod instead, which we do somewhat simply by using the same service account as the main Concierge pods. We still set `automountServiceAccountToken: false`, so this should not actually give any useful permissions to the agent pod when running. Signed-off-by: Matt Moyer --- deploy/concierge/deployment.yaml | 1 + internal/config/concierge/config.go | 3 +++ internal/config/concierge/config_test.go | 15 ++++++++++++++- internal/config/concierge/types.go | 1 + .../controller/kubecertagent/kubecertagent.go | 4 ++++ .../kubecertagent/kubecertagent_test.go | 2 ++ internal/controllermanager/prepare_controllers.go | 1 + 7 files changed, 26 insertions(+), 1 deletion(-) diff --git a/deploy/concierge/deployment.yaml b/deploy/concierge/deployment.yaml index 223b7fa8..ad12e915 100644 --- a/deploy/concierge/deployment.yaml +++ b/deploy/concierge/deployment.yaml @@ -47,6 +47,7 @@ data: impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @) impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @) impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @) + serviceAccount: (@= defaultResourceName() @) labels: (@= json.encode(labels()).rstrip() @) kubeCertAgent: namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @) diff --git a/internal/config/concierge/config.go b/internal/config/concierge/config.go index 
b4d4c9a5..4c080a48 100644 --- a/internal/config/concierge/config.go +++ b/internal/config/concierge/config.go @@ -122,6 +122,9 @@ func validateNames(names *NamesConfigSpec) error { if names.ImpersonationSignerSecret == "" { missingNames = append(missingNames, "impersonationSignerSecret") } + if names.ServiceAccount == "" { + missingNames = append(missingNames, "serviceAccount") + } if len(missingNames) > 0 { return constable.Error("missing required names: " + strings.Join(missingNames, ", ")) } diff --git a/internal/config/concierge/config_test.go b/internal/config/concierge/config_test.go index 0034d06b..da1172e7 100644 --- a/internal/config/concierge/config_test.go +++ b/internal/config/concierge/config_test.go @@ -43,6 +43,7 @@ func TestFromPath(t *testing.T) { impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value labels: myLabelKey1: myLabelValue1 myLabelKey2: myLabelValue2 @@ -72,6 +73,7 @@ func TestFromPath(t *testing.T) { ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value", ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value", ImpersonationSignerSecret: "impersonationSignerSecret-value", + ServiceAccount: "serviceAccount-value", }, Labels: map[string]string{ "myLabelKey1": "myLabelValue1", @@ -98,6 +100,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantConfig: &Config{ DiscoveryInfo: DiscoveryInfoSpec{ @@ -119,6 +122,7 @@ func TestFromPath(t *testing.T) { ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value", ImpersonationCACertificateSecret: 
"impersonationCACertificateSecret-value", ImpersonationSignerSecret: "impersonationSignerSecret-value", + ServiceAccount: "serviceAccount-value", }, Labels: map[string]string{}, KubeCertAgentConfig: KubeCertAgentSpec{ @@ -133,7 +137,7 @@ func TestFromPath(t *testing.T) { wantError: "validate names: missing required names: servingCertificateSecret, credentialIssuer, " + "apiService, impersonationConfigMap, impersonationLoadBalancerService, " + "impersonationTLSCertificateSecret, impersonationCACertificateSecret, " + - "impersonationSignerSecret", + "impersonationSignerSecret, serviceAccount", }, { name: "Missing apiService name", @@ -147,6 +151,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: apiService", }, @@ -162,6 +167,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: credentialIssuer", }, @@ -177,6 +183,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: servingCertificateSecret", }, @@ -192,6 +199,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value 
impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: impersonationConfigMap", }, @@ -207,6 +215,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: impersonationLoadBalancerService", }, @@ -222,6 +231,7 @@ func TestFromPath(t *testing.T) { impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: impersonationTLSCertificateSecret", }, @@ -237,6 +247,7 @@ func TestFromPath(t *testing.T) { impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: impersonationCACertificateSecret", }, @@ -252,6 +263,7 @@ func TestFromPath(t *testing.T) { impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value + serviceAccount: serviceAccount-value `), wantError: "validate names: missing required names: impersonationSignerSecret", }, @@ -265,6 +277,7 @@ func TestFromPath(t *testing.T) { apiService: pinniped-api impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationSignerSecret: impersonationSignerSecret-value + serviceAccount: 
serviceAccount-value `), wantError: "validate names: missing required names: impersonationConfigMap, " + "impersonationTLSCertificateSecret, impersonationCACertificateSecret", diff --git a/internal/config/concierge/types.go b/internal/config/concierge/types.go index cfec621e..b2fed37b 100644 --- a/internal/config/concierge/types.go +++ b/internal/config/concierge/types.go @@ -41,6 +41,7 @@ type NamesConfigSpec struct { ImpersonationTLSCertificateSecret string `json:"impersonationTLSCertificateSecret"` ImpersonationCACertificateSecret string `json:"impersonationCACertificateSecret"` ImpersonationSignerSecret string `json:"impersonationSignerSecret"` + ServiceAccount string `json:"serviceAccount"` } // ServingCertificateConfigSpec contains the configuration knobs for the API's diff --git a/internal/controller/kubecertagent/kubecertagent.go b/internal/controller/kubecertagent/kubecertagent.go index 478d27fe..e45bf850 100644 --- a/internal/controller/kubecertagent/kubecertagent.go +++ b/internal/controller/kubecertagent/kubecertagent.go @@ -64,6 +64,9 @@ type AgentConfig struct { // NamePrefix will be prefixed to all agent pod names. NamePrefix string + // ServiceAccountName is the service account under which to run the agent pods. + ServiceAccountName string + // ContainerImagePullSecrets is a list of names of Kubernetes Secret objects that will be used as // ImagePullSecrets on the kube-cert-agent pods. 
ContainerImagePullSecrets []string @@ -472,6 +475,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) * RestartPolicy: corev1.RestartPolicyAlways, NodeSelector: controllerManagerPod.Spec.NodeSelector, AutomountServiceAccountToken: pointer.BoolPtr(false), + ServiceAccountName: c.cfg.ServiceAccountName, NodeName: controllerManagerPod.Spec.NodeName, Tolerations: controllerManagerPod.Spec.Tolerations, // We need to run the agent pod as root since the file permissions diff --git a/internal/controller/kubecertagent/kubecertagent_test.go b/internal/controller/kubecertagent/kubecertagent_test.go index 4a1a17f9..73d0a749 100644 --- a/internal/controller/kubecertagent/kubecertagent_test.go +++ b/internal/controller/kubecertagent/kubecertagent_test.go @@ -123,6 +123,7 @@ func TestAgentController(t *testing.T) { }}, RestartPolicy: corev1.RestartPolicyAlways, TerminationGracePeriodSeconds: pointer.Int64Ptr(0), + ServiceAccountName: "test-service-account-name", AutomountServiceAccountToken: pointer.BoolPtr(false), SecurityContext: &corev1.PodSecurityContext{ RunAsUser: pointer.Int64Ptr(0), @@ -672,6 +673,7 @@ func TestAgentController(t *testing.T) { AgentConfig{ Namespace: "concierge", ContainerImage: "pinniped-server-image", + ServiceAccountName: "test-service-account-name", NamePrefix: "pinniped-concierge-kube-cert-agent-", ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"}, CredentialIssuerName: "pinniped-concierge-config", diff --git a/internal/controllermanager/prepare_controllers.go b/internal/controllermanager/prepare_controllers.go index 8990a4ec..ed74c4ac 100644 --- a/internal/controllermanager/prepare_controllers.go +++ b/internal/controllermanager/prepare_controllers.go @@ -121,6 +121,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { agentConfig := kubecertagent.AgentConfig{ Namespace: c.ServerInstallationInfo.Namespace, + ServiceAccountName: c.NamesConfig.ServiceAccount, ContainerImage: 
*c.KubeCertAgentConfig.Image, NamePrefix: *c.KubeCertAgentConfig.NamePrefix, ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets, From 165bef78092c1002e13e58bb7354a175159f6400 Mon Sep 17 00:00:00 2001 From: Matt Moyer Date: Mon, 3 May 2021 16:31:48 -0500 Subject: [PATCH 20/32] Split out kube-cert-agent service account and bindings. Followup on the previous comment to split apart the ServiceAccount of the kube-cert-agent and the main concierge pod. This is a bit cleaner and ensures that in testing our main Concierge pod never requires any privileged permissions. Signed-off-by: Matt Moyer --- deploy/concierge/deployment.yaml | 9 +++++- deploy/concierge/rbac.yaml | 31 +++++++++++++++++-- internal/config/concierge/config.go | 4 +-- internal/config/concierge/config_test.go | 28 ++++++++--------- internal/config/concierge/types.go | 2 +- .../controllermanager/prepare_controllers.go | 2 +- 6 files changed, 54 insertions(+), 22 deletions(-) diff --git a/deploy/concierge/deployment.yaml b/deploy/concierge/deployment.yaml index ad12e915..525257a7 100644 --- a/deploy/concierge/deployment.yaml +++ b/deploy/concierge/deployment.yaml @@ -22,6 +22,13 @@ metadata: labels: #@ labels() --- apiVersion: v1 +kind: ServiceAccount +metadata: + name: #@ defaultResourceNameWithSuffix("kube-cert-agent") + namespace: #@ namespace() + labels: #@ labels() +--- +apiVersion: v1 kind: ConfigMap metadata: name: #@ defaultResourceNameWithSuffix("config") @@ -47,7 +54,7 @@ data: impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @) impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @) impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @) - serviceAccount: (@= defaultResourceName() @) + agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @) labels: (@= json.encode(labels()).rstrip() @) 
kubeCertAgent: namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @) diff --git a/deploy/concierge/rbac.yaml b/deploy/concierge/rbac.yaml index 74e5653e..12350714 100644 --- a/deploy/concierge/rbac.yaml +++ b/deploy/concierge/rbac.yaml @@ -24,9 +24,6 @@ rules: - apiGroups: [ flowcontrol.apiserver.k8s.io ] resources: [ flowschemas, prioritylevelconfigurations ] verbs: [ get, list, watch ] - - apiGroups: [ policy ] - resources: [ podsecuritypolicies ] - verbs: [ use ] - apiGroups: [ security.openshift.io ] resources: [ securitycontextconstraints ] verbs: [ use ] @@ -67,6 +64,34 @@ roleRef: name: #@ defaultResourceNameWithSuffix("aggregated-api-server") apiGroup: rbac.authorization.k8s.io +#! Give permission to the kube-cert-agent Pod to run privileged. +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: #@ defaultResourceNameWithSuffix("kube-cert-agent") + namespace: #@ namespace() + labels: #@ labels() +rules: + - apiGroups: [ policy ] + resources: [ podsecuritypolicies ] + verbs: [ use ] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: #@ defaultResourceNameWithSuffix("kube-cert-agent") + namespace: #@ namespace() + labels: #@ labels() +subjects: + - kind: ServiceAccount + name: #@ defaultResourceNameWithSuffix("kube-cert-agent") + namespace: #@ namespace() +roleRef: + kind: Role + name: #@ defaultResourceNameWithSuffix("kube-cert-agent") + apiGroup: rbac.authorization.k8s.io + #! 
Give permission to various objects within the app's own namespace --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/internal/config/concierge/config.go b/internal/config/concierge/config.go index 4c080a48..610caa7e 100644 --- a/internal/config/concierge/config.go +++ b/internal/config/concierge/config.go @@ -122,8 +122,8 @@ func validateNames(names *NamesConfigSpec) error { if names.ImpersonationSignerSecret == "" { missingNames = append(missingNames, "impersonationSignerSecret") } - if names.ServiceAccount == "" { - missingNames = append(missingNames, "serviceAccount") + if names.AgentServiceAccount == "" { + missingNames = append(missingNames, "agentServiceAccount") } if len(missingNames) > 0 { return constable.Error("missing required names: " + strings.Join(missingNames, ", ")) diff --git a/internal/config/concierge/config_test.go b/internal/config/concierge/config_test.go index da1172e7..1101d2d5 100644 --- a/internal/config/concierge/config_test.go +++ b/internal/config/concierge/config_test.go @@ -43,7 +43,7 @@ func TestFromPath(t *testing.T) { impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value labels: myLabelKey1: myLabelValue1 myLabelKey2: myLabelValue2 @@ -73,7 +73,7 @@ func TestFromPath(t *testing.T) { ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value", ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value", ImpersonationSignerSecret: "impersonationSignerSecret-value", - ServiceAccount: "serviceAccount-value", + AgentServiceAccount: "agentServiceAccount-value", }, Labels: map[string]string{ "myLabelKey1": "myLabelValue1", @@ -100,7 +100,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value 
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantConfig: &Config{ DiscoveryInfo: DiscoveryInfoSpec{ @@ -122,7 +122,7 @@ func TestFromPath(t *testing.T) { ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value", ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value", ImpersonationSignerSecret: "impersonationSignerSecret-value", - ServiceAccount: "serviceAccount-value", + AgentServiceAccount: "agentServiceAccount-value", }, Labels: map[string]string{}, KubeCertAgentConfig: KubeCertAgentSpec{ @@ -137,7 +137,7 @@ func TestFromPath(t *testing.T) { wantError: "validate names: missing required names: servingCertificateSecret, credentialIssuer, " + "apiService, impersonationConfigMap, impersonationLoadBalancerService, " + "impersonationTLSCertificateSecret, impersonationCACertificateSecret, " + - "impersonationSignerSecret, serviceAccount", + "impersonationSignerSecret, agentServiceAccount", }, { name: "Missing apiService name", @@ -151,7 +151,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: apiService", }, @@ -167,7 +167,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: credentialIssuer", 
}, @@ -183,7 +183,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: servingCertificateSecret", }, @@ -199,7 +199,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: impersonationConfigMap", }, @@ -215,7 +215,7 @@ func TestFromPath(t *testing.T) { impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: impersonationLoadBalancerService", }, @@ -231,7 +231,7 @@ func TestFromPath(t *testing.T) { impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: impersonationTLSCertificateSecret", }, @@ -247,7 +247,7 @@ func TestFromPath(t *testing.T) { impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationSignerSecret: 
impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: impersonationCACertificateSecret", }, @@ -263,7 +263,7 @@ func TestFromPath(t *testing.T) { impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: impersonationSignerSecret", }, @@ -277,7 +277,7 @@ func TestFromPath(t *testing.T) { apiService: pinniped-api impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationSignerSecret: impersonationSignerSecret-value - serviceAccount: serviceAccount-value + agentServiceAccount: agentServiceAccount-value `), wantError: "validate names: missing required names: impersonationConfigMap, " + "impersonationTLSCertificateSecret, impersonationCACertificateSecret", diff --git a/internal/config/concierge/types.go b/internal/config/concierge/types.go index b2fed37b..ecd36d0a 100644 --- a/internal/config/concierge/types.go +++ b/internal/config/concierge/types.go @@ -41,7 +41,7 @@ type NamesConfigSpec struct { ImpersonationTLSCertificateSecret string `json:"impersonationTLSCertificateSecret"` ImpersonationCACertificateSecret string `json:"impersonationCACertificateSecret"` ImpersonationSignerSecret string `json:"impersonationSignerSecret"` - ServiceAccount string `json:"serviceAccount"` + AgentServiceAccount string `json:"agentServiceAccount"` } // ServingCertificateConfigSpec contains the configuration knobs for the API's diff --git a/internal/controllermanager/prepare_controllers.go b/internal/controllermanager/prepare_controllers.go index ed74c4ac..acd5bee7 100644 --- a/internal/controllermanager/prepare_controllers.go +++ 
b/internal/controllermanager/prepare_controllers.go @@ -121,7 +121,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { agentConfig := kubecertagent.AgentConfig{ Namespace: c.ServerInstallationInfo.Namespace, - ServiceAccountName: c.NamesConfig.ServiceAccount, + ServiceAccountName: c.NamesConfig.AgentServiceAccount, ContainerImage: *c.KubeCertAgentConfig.Image, NamePrefix: *c.KubeCertAgentConfig.NamePrefix, ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets, From 4ce77c483711962bd95a0ea7e12b15d3871d4b67 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Tue, 4 May 2021 12:38:47 -0400 Subject: [PATCH 21/32] supervisor gc: use singleton queue The supervisor treats all events the same hence it must use a singleton queue. Updated the integration test to remove the data race caused by calling methods on testing.T outside of the main test go routine. Signed-off-by: Monis Khan --- .../supervisorstorage/garbage_collector.go | 12 ++++-- .../garbage_collector_test.go | 38 +++++++++++++++++-- ...ervisor_storage_garbage_collection_test.go | 38 ++++++++++++++++--- 3 files changed, 75 insertions(+), 13 deletions(-) diff --git a/internal/controller/supervisorstorage/garbage_collector.go b/internal/controller/supervisorstorage/garbage_collector.go index e2d35d30..6da85d3f 100644 --- a/internal/controller/supervisorstorage/garbage_collector.go +++ b/internal/controller/supervisorstorage/garbage_collector.go @@ -59,7 +59,7 @@ func GarbageCollectorController( return isSecretWithGCAnnotation(oldObj) || isSecretWithGCAnnotation(newObj) }, DeleteFunc: func(obj metav1.Object) bool { return false }, // ignore all deletes - ParentFunc: nil, + ParentFunc: pinnipedcontroller.SingletonQueue(), }, controllerlib.InformerOption{}, ), @@ -67,16 +67,20 @@ func GarbageCollectorController( } func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error { + // make sure we have a consistent, static meaning for the current time during the sync loop + 
frozenClock := clock.NewFakeClock(c.clock.Now()) + // The Sync method is triggered upon any change to any Secret, which would make this // controller too chatty, so it rate limits itself to a more reasonable interval. // Note that even during a period when no secrets are changing, it will still run // at the informer's full-resync interval (as long as there are some secrets). - if c.clock.Now().Sub(c.timeOfMostRecentSweep) < minimumRepeatInterval { + if since := frozenClock.Since(c.timeOfMostRecentSweep); since < minimumRepeatInterval { + ctx.Queue.AddAfter(ctx.Key, minimumRepeatInterval-since) return nil } plog.Info("starting storage garbage collection sweep") - c.timeOfMostRecentSweep = c.clock.Now() + c.timeOfMostRecentSweep = frozenClock.Now() listOfSecrets, err := c.secretInformer.Lister().List(labels.Everything()) if err != nil { @@ -97,7 +101,7 @@ func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error { continue } - if garbageCollectAfterTime.Before(c.clock.Now()) { + if garbageCollectAfterTime.Before(frozenClock.Now()) { err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{}) if err != nil { plog.WarningErr("failed to garbage collect resource", err, logKV(secret)) diff --git a/internal/controller/supervisorstorage/garbage_collector_test.go b/internal/controller/supervisorstorage/garbage_collector_test.go index a5606cf8..ac64acf3 100644 --- a/internal/controller/supervisorstorage/garbage_collector_test.go +++ b/internal/controller/supervisorstorage/garbage_collector_test.go @@ -66,6 +66,10 @@ func TestGarbageCollectorControllerInformerFilters(t *testing.T) { r.True(subject.Update(secretWithAnnotation, otherSecret)) r.True(subject.Update(otherSecret, secretWithAnnotation)) }) + + it("returns the same singleton key", func() { + r.Equal(controllerlib.Key{}, subject.Parent(secretWithAnnotation)) + }) }) when("any Secret with the required annotation is deleted", func() { @@ -136,9 +140,10 
@@ func TestGarbageCollectorControllerSync(t *testing.T) { Context: cancelContext, Name: subject.Name(), Key: controllerlib.Key{ - Namespace: "", - Name: "", + Namespace: "foo", + Name: "bar", }, + Queue: &testQueue{t: t}, } // Must start informers before calling TestRunSynchronously() @@ -262,16 +267,23 @@ func TestGarbageCollectorControllerSync(t *testing.T) { // Run sync once with the current time set to frozenTime. r.NoError(controllerlib.TestSync(t, subject, *syncContext)) require.Empty(t, kubeClient.Actions()) + r.False(syncContext.Queue.(*testQueue).called) // Run sync again when not enough time has passed since the most recent run, so no delete // operations should happen even though there is a expired secret now. fakeClock.Step(29 * time.Second) r.NoError(controllerlib.TestSync(t, subject, *syncContext)) require.Empty(t, kubeClient.Actions()) + r.True(syncContext.Queue.(*testQueue).called) + r.Equal(controllerlib.Key{Namespace: "foo", Name: "bar"}, syncContext.Queue.(*testQueue).key) // assert key is passed through + r.Equal(time.Second, syncContext.Queue.(*testQueue).duration) // assert that we get the exact requeue time + + syncContext.Queue = &testQueue{t: t} // reset the queue for the next sync // Step to the exact threshold and run Sync again. Now we are past the rate limiting period. - fakeClock.Step(1*time.Second + 1*time.Millisecond) + fakeClock.Step(time.Second) r.NoError(controllerlib.TestSync(t, subject, *syncContext)) + r.False(syncContext.Queue.(*testQueue).called) // It should have deleted the expired secret. 
r.ElementsMatch( @@ -381,3 +393,23 @@ func TestGarbageCollectorControllerSync(t *testing.T) { }) }, spec.Parallel(), spec.Report(report.Terminal{})) } + +type testQueue struct { + t *testing.T + + called bool + key controllerlib.Key + duration time.Duration + + controllerlib.Queue // panic if any other methods called +} + +func (q *testQueue) AddAfter(key controllerlib.Key, duration time.Duration) { + q.t.Helper() + + require.False(q.t, q.called, "AddAfter should only be called once") + + q.called = true + q.key = key + q.duration = duration +} diff --git a/test/integration/supervisor_storage_garbage_collection_test.go b/test/integration/supervisor_storage_garbage_collection_test.go index 84b82196..1394b6f4 100644 --- a/test/integration/supervisor_storage_garbage_collection_test.go +++ b/test/integration/supervisor_storage_garbage_collection_test.go @@ -48,10 +48,15 @@ func TestStorageGarbageCollection(t *testing.T) { // in the same namespace just to get the controller to respond faster. // This is just a performance optimization to make this test pass faster because otherwise // this test has to wait ~3 minutes for the controller's next full-resync. - stopCh := make(chan bool, 1) // It is important that this channel be buffered. 
- go updateSecretEveryTwoSeconds(t, stopCh, secrets, secretNotYetExpired) + stopCh := make(chan struct{}) + errCh := make(chan error) + go updateSecretEveryTwoSeconds(stopCh, errCh, secrets, secretNotYetExpired) t.Cleanup(func() { - stopCh <- true + close(stopCh) + + if updateErr := <-errCh; updateErr != nil { + panic(updateErr) + } }) // Wait long enough for the next periodic sweep of the GC controller for the secrets to be deleted, which @@ -69,10 +74,15 @@ func TestStorageGarbageCollection(t *testing.T) { require.NoError(t, err) } -func updateSecretEveryTwoSeconds(t *testing.T, stopCh chan bool, secrets corev1client.SecretInterface, secret *v1.Secret) { +func updateSecretEveryTwoSeconds(stopCh chan struct{}, errCh chan error, secrets corev1client.SecretInterface, secret *v1.Secret) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() + var updateErr error + defer func() { + errCh <- updateErr + }() + i := 0 for { select { @@ -87,9 +97,25 @@ func updateSecretEveryTwoSeconds(t *testing.T, stopCh chan bool, secrets corev1c i++ secret.Data["foo"] = []byte(fmt.Sprintf("bar-%d", i)) - var updateErr error secret, updateErr = secrets.Update(ctx, secret, metav1.UpdateOptions{}) - require.NoError(t, updateErr) + + switch { + case updateErr == nil: + // continue to next update + + case k8serrors.IsConflict(updateErr), k8serrors.IsNotFound(updateErr): + select { + case _, ok := <-stopCh: + if !ok { // stopCh is closed meaning that test is already finished so these errors are expected + updateErr = nil + } + default: + } + + return // even if the error is expected, we must stop + default: + return // unexpected error + } } } From 3e13b5f39d88b813dadd8dd2b153e471ad73e994 Mon Sep 17 00:00:00 2001 From: Matt Moyer Date: Tue, 4 May 2021 14:13:20 -0500 Subject: [PATCH 22/32] Do some minor copyediting on "configure-supervisor-with-gitlab.md". Some minor edits I came across while reviewing this: - Capitalize "GitLab" the way they do. 
- Use `{{< ref "xyz" >}}` references when linking internally. The advantage of these is that they're "type checked" by Hugo when the site is rendered, so we'll know if we ever break one. - Add links to the GitLab docs about creating an OAuth client. These also cover adding a group-level or instance-wide application. - Re-wrap the YAML lines to fit a bit more naturally. - Add a `namespace` to the YAML examples, so they're more likely to work without tweaks. - Use "gitlab" instead of "my-oidc-identity-provider" as the example name, for clarity. - Re-word a few small bits. These are 100% subjective but hopefully an improvement? Signed-off-by: Matt Moyer --- .../howto/configure-supervisor-with-gitlab.md | 114 +++++++++++------- 1 file changed, 68 insertions(+), 46 deletions(-) diff --git a/site/content/docs/howto/configure-supervisor-with-gitlab.md b/site/content/docs/howto/configure-supervisor-with-gitlab.md index bf928b17..8b9a5427 100644 --- a/site/content/docs/howto/configure-supervisor-with-gitlab.md +++ b/site/content/docs/howto/configure-supervisor-with-gitlab.md @@ -1,92 +1,114 @@ --- -title: Configure the Pinniped Supervisor to use Gitlab as an OIDC Provider -description: Set up the Pinniped Supervisor to use Gitlab login. +title: Configure the Pinniped Supervisor to use GitLab as an OIDC Provider +description: Set up the Pinniped Supervisor to use GitLab login. cascade: layout: docs menu: docs: - name: Configure Supervisor With Gitlab + name: Configure Supervisor With GitLab weight: 35 parent: howtos --- The Supervisor is an [OpenID Connect (OIDC)](https://openid.net/connect/) issuer that supports connecting a single "upstream" OIDC identity provider to many "downstream" cluster clients. This guide shows you how to configure the Supervisor so that users can authenticate to their Kubernetes -cluster using their Gitlab credentials. +cluster using their GitLab credentials. 
+ ## Prerequisites -This how-to guide assumes that you have already installed the Pinniped Supervisor with working ingress, -and that you have configured a `FederationDomain` to issue tokens for your downstream clusters, as -described [here](https://pinniped.dev/docs/howto/configure-supervisor/). +This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress, +and that you have [configured a `FederationDomain` to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}). -## Configuring your Gitlab Application -1. In Gitlab, navigate to User Settings > Applications +## Configuring your GitLab Application + +Follow the instructions for [using GitLab as an OAuth2 authentication service provider](https://docs.gitlab.com/ee/integration/oauth_provider.html) and create a user, group, or instance-wide application. + +For example, to create a user-owned application: + +1. In GitLab, navigate to [_User Settings_ > _Applications_](https://gitlab.com/-/profile/applications) 1. Create a new application: 1. Enter the name of your application. - 1. Enter the redirect URI. This is the `issuer` you configured in your `FederationDomain` appended with `/callback`. - 1. Check the box saying that the application is Confidential. + 1. Enter the redirect URI. This is the `spec.issuer` you configured in your `FederationDomain` appended with `/callback`. + 1. Check the box saying that the application is _Confidential_. 1. Select scope `openid`. Optionally select `profile` and `email`. - 1. Save the application and make note of the Application ID and Secret. + 1. Save the application and make note of the _Application ID_ and _Secret_. ## Configuring the Supervisor cluster + Create an [`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor. 
-For example, here is an `OIDCIdentityProvider` that works against https://gitlab.com, and uses the email claim as the username. + +For example, here is an `OIDCIdentityProvider` that works against [gitlab.com](https://gitlab.com) and uses the GitLab `email` claim as the Kubernetes username: + ```yaml apiVersion: idp.supervisor.pinniped.dev/v1alpha1 kind: OIDCIdentityProvider metadata: - name: my-oidc-provider + namespace: pinniped-supervisor + name: gitlab spec: - # The upstream issuer name. - # This should be something like https://gitlab.com or https://gitlab.your-company-name.example.com. - issuer: "https://gitlab.com" - # If needed, specify the CA bundle for the GitLab server as base64 encoded PEM data. - #tls: - # certificateAuthorityData: "" + + # Specify the upstream issuer name. This should be something like + # https://gitlab.com or https://gitlab.your-company.example.com. + issuer: https://gitlab.com + + # Specify the CA bundle for the GitLab server as base64-encoded PEM + # data. This will only be needed for self-managed GitLab. + # tls: + # certificateAuthorityData: "" + + # Request any scopes other than "openid" that you selected when + # creating your GitLab application. authorizationConfig: - # Any scopes other than "openid" that you selected when creating your GitLab application. additionalScopes: [ email, profile ] - # See here for a list of available claims: https://docs.gitlab.com/ee/integration/openid_connect_provider.html#shared-information + + # Specify how GitLab claims are mapped to Kubernetes identities. claims: - # The name of the claim in your GitLab token that will be mapped to the "username" claim in downstream - # tokens minted by the Supervisor. + + # Specify the name of the claim in your GitLab token that will be mapped + # to the "username" claim in downstream tokens minted by the Supervisor. # For example, "email" or "sub". - username: "email" - # The name of the claim in GitLab that represents the groups that the user belongs to. 
- # Note that GitLab's "groups" claim comes from their /userinfo endpoint, not the token. - groups: "groups" + # + # See here for a full list of available claims: + # https://docs.gitlab.com/ee/integration/openid_connect_provider.html + username: email + + # Specify the name of the claim in GitLab that represents the groups + # that the user belongs to. Note that GitLab's "groups" claim comes from + # their "/userinfo" endpoint, not the token. + groups: groups + + # Specify the name of the Kubernetes Secret that contains your GitLab + # application's client credentials (created below). client: - # The name of the kubernetes secret that contains your GitLab application's client ID and client secret. - secretName: my-oidc-provider-client-secret + secretName: gitlab-client-credentials ``` -Then, create a `Secret` containing the Client ID and Client Secret in the same namespace as the Supervisor. +Then, create a `Secret` containing the Client ID and Client Secret in the same namespace as the Supervisor: + ```yaml apiVersion: v1 kind: Secret metadata: - name: my-oidc-provider-client-secret + namespace: pinniped-supervisor + name: gitlab-client-credentials +type: secrets.pinniped.dev/oidc-client stringData: - # clientID should be the Application ID that you got from GitLab. - clientID: xxx - # clientSecret should be the Secret that you got from GitLab. - clientSecret: yyy -type: "secrets.pinniped.dev/oidc-client" + + # The "Application ID" that you got from GitLab. + clientID: "" + + # The "Secret" that you got from GitLab. + clientSecret: "" ``` -To validate your configuration, run +To validate your configuration, run: + ```shell -kubectl describe OIDCIdentityProvider my-oidc-identity-provider +kubectl describe OIDCIdentityProvider gitlab ``` Look at the `status` field. If it was configured correctly, you should see `phase: Ready`. 
## Next Steps -Now that you have configured the Supervisor to use GitLab, -you may want to check out [Configure Concierge JWT Authentication](https://pinniped.dev/docs/howto/configure-concierge-jwt/) -to learn how to configure the Concierge to use the JWTs that the Supervisor now issues. - - - - +Now that you have configured the Supervisor to use GitLab, you may want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-jwt" >}}). From 8136c787a7efc8212e364d9bca0d7061464c701e Mon Sep 17 00:00:00 2001 From: Matt Moyer Date: Tue, 4 May 2021 15:33:33 -0500 Subject: [PATCH 23/32] More adjustments to configure-supervisor-with-gitlab.md. - Use `nickname` claim as an example, which means we only need the `openid` scope. This is also more stable since emails can change over time. - Put the OIDCIdentityProvider and Secret into one YAML blob, since they will likely be copy-pasted together anyway. - Add a separate section for using alternate claims. - Add a separate section for using a private GitLab instance. Signed-off-by: Matt Moyer --- .../howto/configure-supervisor-with-gitlab.md | 87 ++++++++++++------- 1 file changed, 57 insertions(+), 30 deletions(-) diff --git a/site/content/docs/howto/configure-supervisor-with-gitlab.md b/site/content/docs/howto/configure-supervisor-with-gitlab.md index 8b9a5427..d958f9aa 100644 --- a/site/content/docs/howto/configure-supervisor-with-gitlab.md +++ b/site/content/docs/howto/configure-supervisor-with-gitlab.md @@ -19,7 +19,7 @@ cluster using their GitLab credentials. This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress, and that you have [configured a `FederationDomain` to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}). 
-## Configuring your GitLab Application +## Configure your GitLab Application Follow the instructions for [using GitLab as an OAuth2 authentication service provider](https://docs.gitlab.com/ee/integration/oauth_provider.html) and create a user, group, or instance-wide application. @@ -27,17 +27,17 @@ For example, to create a user-owned application: 1. In GitLab, navigate to [_User Settings_ > _Applications_](https://gitlab.com/-/profile/applications) 1. Create a new application: - 1. Enter the name of your application. + 1. Enter a name for your application, such as "My Kubernetes Clusters". 1. Enter the redirect URI. This is the `spec.issuer` you configured in your `FederationDomain` appended with `/callback`. 1. Check the box saying that the application is _Confidential_. - 1. Select scope `openid`. Optionally select `profile` and `email`. + 1. Select scope `openid`. This provides access to the `nickname` (GitLab username) and `groups` (GitLab groups) claims. 1. Save the application and make note of the _Application ID_ and _Secret_. -## Configuring the Supervisor cluster +## Configure the Supervisor cluster -Create an [`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor. +Create an [OIDCIdentityProvider](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor. -For example, here is an `OIDCIdentityProvider` that works against [gitlab.com](https://gitlab.com) and uses the GitLab `email` claim as the Kubernetes username: +For example, this OIDCIdentityProvider and corresponding Secret for [gitlab.com](https://gitlab.com) use the `nickname` claim (GitLab username) as the Kubernetes username: ```yaml apiVersion: idp.supervisor.pinniped.dev/v1alpha1 @@ -47,30 +47,15 @@ metadata: name: gitlab spec: - # Specify the upstream issuer name. 
This should be something like - # https://gitlab.com or https://gitlab.your-company.example.com. + # Specify the upstream issuer URL. issuer: https://gitlab.com - # Specify the CA bundle for the GitLab server as base64-encoded PEM - # data. This will only be needed for self-managed GitLab. - # tls: - # certificateAuthorityData: "" - - # Request any scopes other than "openid" that you selected when - # creating your GitLab application. - authorizationConfig: - additionalScopes: [ email, profile ] - # Specify how GitLab claims are mapped to Kubernetes identities. claims: # Specify the name of the claim in your GitLab token that will be mapped # to the "username" claim in downstream tokens minted by the Supervisor. - # For example, "email" or "sub". - # - # See here for a full list of available claims: - # https://docs.gitlab.com/ee/integration/openid_connect_provider.html - username: email + username: nickname # Specify the name of the claim in GitLab that represents the groups # that the user belongs to. Note that GitLab's "groups" claim comes from @@ -81,11 +66,7 @@ spec: # application's client credentials (created below). client: secretName: gitlab-client-credentials -``` - -Then, create a `Secret` containing the Client ID and Client Secret in the same namespace as the Supervisor: - -```yaml +--- apiVersion: v1 kind: Secret metadata: @@ -101,14 +82,60 @@ stringData: clientSecret: "" ``` -To validate your configuration, run: +Once your OIDCIdentityProvider has been created, you can validate your configuration by running: ```shell -kubectl describe OIDCIdentityProvider gitlab +kubectl describe OIDCIdentityProvider -n pinniped-supervisor gitlab ``` Look at the `status` field. If it was configured correctly, you should see `phase: Ready`. +### (Optional) Use a different GitLab claim for Kubernetes usernames + +You can also use other GitLab claims as the username. 
+To do this, make sure you have configured the appropriate scopes on your GitLab application, such as `email`. + +You must also adjust the `spec.authorizationConfig` to request those scopes at login and adjust `spec.claims` to use those claims in Kubernetes, for example: + +```yaml +# [...] +spec: + # Request any scopes other than "openid" that you selected when + # creating your GitLab application. The "openid" scope is always + # included. + # + # See here for a full list of available claims: + # https://docs.gitlab.com/ee/integration/openid_connect_provider.html + authorizationConfig: + additionalScopes: [ email ] + claims: + username: email + groups: groups +# [...] +``` + +### (Optional) Use a private GitLab instance + +To use privately hosted instance of GitLab, you can change the `spec.issuer` and `spec.tls.certificateAuthorityData` fields, for example: + +```yaml +apiVersion: idp.supervisor.pinniped.dev/v1alpha1 +kind: OIDCIdentityProvider +# [...] +spec: + # Specify your GitLab instance URL. + issuer: https://gitlab.your-company.example.com. + + # Specify the CA bundle for the GitLab server as base64-encoded PEM + # data. This will only be needed for self-managed GitLab. For example, + # the output of `cat my-ca-bundle.pem | base64`. + # + # This configuration is only necessary if your instance uses a custom CA. + tls: + certificateAuthorityData: "" +# [...] +``` + ## Next Steps Now that you have configured the Supervisor to use GitLab, you may want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-jwt" >}}). From f167a075ddc5ac7d9fa376bafce6c0af16e585da Mon Sep 17 00:00:00 2001 From: Matt Moyer Date: Tue, 4 May 2021 15:47:18 -0500 Subject: [PATCH 24/32] Clean up this language in configure-supervisor-with-gitlab.md a bit more. This was duplicitive. 
Signed-off-by: Matt Moyer --- site/content/docs/howto/configure-supervisor-with-gitlab.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/site/content/docs/howto/configure-supervisor-with-gitlab.md b/site/content/docs/howto/configure-supervisor-with-gitlab.md index d958f9aa..e81d9e59 100644 --- a/site/content/docs/howto/configure-supervisor-with-gitlab.md +++ b/site/content/docs/howto/configure-supervisor-with-gitlab.md @@ -127,10 +127,9 @@ spec: issuer: https://gitlab.your-company.example.com. # Specify the CA bundle for the GitLab server as base64-encoded PEM - # data. This will only be needed for self-managed GitLab. For example, - # the output of `cat my-ca-bundle.pem | base64`. + # data. For example, the output of `cat my-ca-bundle.pem | base64`. # - # This configuration is only necessary if your instance uses a custom CA. + # This is only necessary if your instance uses a custom CA. tls: certificateAuthorityData: "" # [...] From 5240f5e84ad3a984d3331adab3a6383ae09b7450 Mon Sep 17 00:00:00 2001 From: Margo Crawford Date: Thu, 6 May 2021 11:53:41 -0700 Subject: [PATCH 25/32] Change access token storage lifetime to be the same as the refresh token's to avoid garbage collection breaking the refresh flow Also changed the access token lifetime to be 2 minutes instead of 15 since we now have cert caching. --- internal/oidc/oidc.go | 15 +++++---------- internal/oidc/token/token_handler_test.go | 6 +++--- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/internal/oidc/oidc.go b/internal/oidc/oidc.go index da511515..d5f08e76 100644 --- a/internal/oidc/oidc.go +++ b/internal/oidc/oidc.go @@ -1,4 +1,4 @@ -// Copyright 2020 the Pinniped contributors. All Rights Reserved. +// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package oidc contains common OIDC functionality needed by Pinniped. 
@@ -164,13 +164,8 @@ type TimeoutsConfiguration struct { OIDCSessionStorageLifetime time.Duration // AccessTokenSessionStorageLifetime is the length of time after which an access token's session data is allowed - // to be garbage collected from storage. These must exist in storage for as long as the refresh token is valid. - // Therefore, this can be just slightly longer than the AccessTokenLifespan. Access tokens are handed back to - // the token endpoint for the token exchange use case. During a token exchange, if the access token is expired - // and still exists in storage, then the endpoint will be able to give a slightly more specific error message, - // rather than a more generic error that is returned when the token does not exist. If this is desirable, then - // the AccessTokenSessionStorageLifetime can be made to be significantly larger than AccessTokenLifespan, at the - // cost of slower cleanup. + // to be garbage collected from storage. These must exist in storage for as long as the refresh token is valid + // or else the refresh flow will not work properly. So this must be longer than RefreshTokenLifespan. AccessTokenSessionStorageLifetime time.Duration // RefreshTokenSessionStorageLifetime is the length of time after which a refresh token's session data is allowed @@ -186,7 +181,7 @@ type TimeoutsConfiguration struct { // Get the defaults for the Supervisor server. 
func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration { - accessTokenLifespan := 15 * time.Minute + accessTokenLifespan := 2 * time.Minute authorizationCodeLifespan := 10 * time.Minute refreshTokenLifespan := 9 * time.Hour @@ -199,7 +194,7 @@ func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration { AuthorizationCodeSessionStorageLifetime: authorizationCodeLifespan + refreshTokenLifespan, PKCESessionStorageLifetime: authorizationCodeLifespan + (1 * time.Minute), OIDCSessionStorageLifetime: authorizationCodeLifespan + (1 * time.Minute), - AccessTokenSessionStorageLifetime: accessTokenLifespan + (1 * time.Minute), + AccessTokenSessionStorageLifetime: refreshTokenLifespan + accessTokenLifespan, RefreshTokenSessionStorageLifetime: refreshTokenLifespan + accessTokenLifespan, } } diff --git a/internal/oidc/token/token_handler_test.go b/internal/oidc/token/token_handler_test.go index 6bc1ad63..3f47e987 100644 --- a/internal/oidc/token/token_handler_test.go +++ b/internal/oidc/token/token_handler_test.go @@ -1,4 +1,4 @@ -// Copyright 2020 the Pinniped contributors. All Rights Reserved. +// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 package token @@ -60,8 +60,8 @@ const ( hmacSecret = "this needs to be at least 32 characters to meet entropy requirements" authCodeExpirationSeconds = 10 * 60 // Current, we set our auth code expiration to 10 minutes - accessTokenExpirationSeconds = 15 * 60 // Currently, we set our access token expiration to 15 minutes - idTokenExpirationSeconds = 15 * 60 // Currently, we set our ID token expiration to 15 minutes + accessTokenExpirationSeconds = 2 * 60 // Currently, we set our access token expiration to 2 minutes + idTokenExpirationSeconds = 2 * 60 // Currently, we set our ID token expiration to 2 minutes timeComparisonFudgeSeconds = 15 ) From 2634c9f04a670d483b3cbf4ee6aa91ff31886878 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 May 2021 05:49:58 +0000 Subject: [PATCH 26/32] Bump golang from 1.16.3 to 1.16.4 Bumps golang from 1.16.3 to 1.16.4. Signed-off-by: dependabot[bot] --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 09b81b05..e4371745 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ # Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -FROM golang:1.16.3 as build-env +FROM golang:1.16.4 as build-env WORKDIR /work COPY . . 
From 7ece1968932d9ec8a6036fc59e9f0d988cee57f4 Mon Sep 17 00:00:00 2001 From: Mo Khan Date: Fri, 7 May 2021 15:59:04 -0400 Subject: [PATCH 27/32] upstreamwatcher: preserve oidc discovery error Signed-off-by: Mo Khan --- .../upstreamwatcher/upstreamwatcher.go | 19 ++++++++++++++++++- .../upstreamwatcher/upstreamwatcher_test.go | 10 ++++++---- test/integration/supervisor_upstream_test.go | 9 +++++---- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go index 345d2b5f..6d04e37d 100644 --- a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go +++ b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go @@ -269,11 +269,17 @@ func (c *controller) validateIssuer(ctx context.Context, upstream *v1alpha1.OIDC discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer) if err != nil { + const klogLevelTrace = 6 + c.log.V(klogLevelTrace).WithValues( + "namespace", upstream.Namespace, + "name", upstream.Name, + "issuer", upstream.Spec.Issuer, + ).Error(err, "failed to perform OIDC discovery") return &v1alpha1.Condition{ Type: typeOIDCDiscoverySucceeded, Status: v1alpha1.ConditionFalse, Reason: reasonUnreachable, - Message: fmt.Sprintf("failed to perform OIDC discovery against %q", upstream.Spec.Issuer), + Message: fmt.Sprintf("failed to perform OIDC discovery against %q:\n%s", upstream.Spec.Issuer, truncateErr(err)), } } @@ -419,3 +425,14 @@ func computeScopes(additionalScopes []string) []string { sort.Strings(scopes) return scopes } + +func truncateErr(err error) string { + const max = 100 + msg := err.Error() + + if len(msg) <= max { + return msg + } + + return msg[:max] + fmt.Sprintf(" [truncated %d chars]", len(msg)-max) +} diff --git a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go 
b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go index f7397352..1b1a2219 100644 --- a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go +++ b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go @@ -370,7 +370,7 @@ func TestController(t *testing.T) { inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{ ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName}, Spec: v1alpha1.OIDCIdentityProviderSpec{ - Issuer: "invalid-url", + Issuer: "invalid-url-that-is-really-really-long", Client: v1alpha1.OIDCClient{SecretName: testSecretName}, AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes}, }, @@ -382,9 +382,10 @@ func TestController(t *testing.T) { }}, wantErr: controllerlib.ErrSyntheticRequeue.Error(), wantLogs: []string{ + `upstream-observer "msg"="failed to perform OIDC discovery" "error"="Get \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol scheme \"\"" "issuer"="invalid-url-that-is-really-really-long" "name"="test-name" "namespace"="test-namespace"`, `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`, - `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"invalid-url\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`, - `upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"invalid-url\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`, + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" 
"namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"invalid-url-that-is-really-really-long\":\nGet \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol [truncated 9 chars]" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`, + `upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"invalid-url-that-is-really-really-long\":\nGet \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol [truncated 9 chars]" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`, }, wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{}, wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{ @@ -404,7 +405,8 @@ func TestController(t *testing.T) { Status: "False", LastTransitionTime: now, Reason: "Unreachable", - Message: `failed to perform OIDC discovery against "invalid-url"`, + Message: `failed to perform OIDC discovery against "invalid-url-that-is-really-really-long": +Get "invalid-url-that-is-really-really-long/.well-known/openid-configuration": unsupported protocol [truncated 9 chars]`, }, }, }, diff --git a/test/integration/supervisor_upstream_test.go b/test/integration/supervisor_upstream_test.go index b43735a6..fd7a1908 100644 --- a/test/integration/supervisor_upstream_test.go +++ b/test/integration/supervisor_upstream_test.go @@ -34,10 +34,11 @@ func TestSupervisorUpstreamOIDCDiscovery(t *testing.T) { Message: `secret "does-not-exist" not found`, }, { - Type: "OIDCDiscoverySucceeded", - Status: v1alpha1.ConditionFalse, - Reason: "Unreachable", - Message: `failed to perform OIDC discovery against "https://127.0.0.1:444444/issuer"`, + Type: "OIDCDiscoverySucceeded", + Status: v1alpha1.ConditionFalse, + Reason: "Unreachable", + Message: `failed to perform OIDC discovery against 
"https://127.0.0.1:444444/issuer": +Get "https://127.0.0.1:444444/issuer/.well-known/openid-configuration": dial tcp: address 444444: in [truncated 10 chars]`, }, }) }) From 47f5e822d073b4183df8fb3fc237ca5659ba4644 Mon Sep 17 00:00:00 2001 From: Matt Moyer Date: Fri, 7 May 2021 16:22:08 -0500 Subject: [PATCH 28/32] Fix TestImpersonationProxy on EKS. The admin kubeconfigs we have on EKS clusters are a bit different from others, because there is no certificate/key (EKS does not use certificate auth). This code didn't quite work correctly in that case. The fix is to allow the case where `tlsConfig.GetClientCertificate` is non-nil, but returns a value with no certificates. Signed-off-by: Matt Moyer --- .../concierge_impersonation_proxy_test.go | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/test/integration/concierge_impersonation_proxy_test.go b/test/integration/concierge_impersonation_proxy_test.go index 390ae1f1..4fa7329e 100644 --- a/test/integration/concierge_impersonation_proxy_test.go +++ b/test/integration/concierge_impersonation_proxy_test.go @@ -1705,17 +1705,18 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC if tlsConfig != nil && tlsConfig.GetClientCertificate != nil { cert, err := tlsConfig.GetClientCertificate(nil) require.NoError(t, err) - require.Len(t, cert.Certificate, 1) + if len(cert.Certificate) > 0 { + require.Len(t, cert.Certificate, 1) + publicKey := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Certificate[0], + }) + out.ClientCertificateData = string(publicKey) - publicKey := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Certificate[0], - }) - out.ClientCertificateData = string(publicKey) - - privateKey, err := keyutil.MarshalPrivateKeyToPEM(cert.PrivateKey) - require.NoError(t, err) - out.ClientKeyData = string(privateKey) + privateKey, err := keyutil.MarshalPrivateKeyToPEM(cert.PrivateKey) + require.NoError(t, err) + 
out.ClientKeyData = string(privateKey) + } } if *out == (loginv1alpha1.ClusterCredential{}) { From 56d316e8d31b5fe4246b0f7856ff3a183aefe713 Mon Sep 17 00:00:00 2001 From: Mo Khan Date: Mon, 10 May 2021 00:22:34 -0400 Subject: [PATCH 29/32] upstreamwatcher: do not truncate explicit oidc errors This change makes it easier to understand misconfigurations caused by issuers with extraneous trailing slashes. Signed-off-by: Mo Khan --- .../upstreamwatcher/upstreamwatcher.go | 7 +- .../upstreamwatcher/upstreamwatcher_test.go | 165 ++++++++++++++++++ test/integration/supervisor_upstream_test.go | 32 ++++ 3 files changed, 201 insertions(+), 3 deletions(-) diff --git a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go index 6d04e37d..d996492f 100644 --- a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go +++ b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher.go @@ -13,6 +13,7 @@ import ( "net/http" "net/url" "sort" + "strings" "time" "github.com/coreos/go-oidc/v3/oidc" @@ -279,7 +280,7 @@ func (c *controller) validateIssuer(ctx context.Context, upstream *v1alpha1.OIDC Type: typeOIDCDiscoverySucceeded, Status: v1alpha1.ConditionFalse, Reason: reasonUnreachable, - Message: fmt.Sprintf("failed to perform OIDC discovery against %q:\n%s", upstream.Spec.Issuer, truncateErr(err)), + Message: fmt.Sprintf("failed to perform OIDC discovery against %q:\n%s", upstream.Spec.Issuer, truncateNonOIDCErr(err)), } } @@ -426,11 +427,11 @@ func computeScopes(additionalScopes []string) []string { return scopes } -func truncateErr(err error) string { +func truncateNonOIDCErr(err error) string { const max = 100 msg := err.Error() - if len(msg) <= max { + if len(msg) <= max || strings.HasPrefix(msg, "oidc:") { return msg } diff --git a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go 
b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go index 1b1a2219..164c08ed 100644 --- a/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go +++ b/internal/controller/supervisorconfig/upstreamwatcher/upstreamwatcher_test.go @@ -602,6 +602,151 @@ Get "invalid-url-that-is-really-really-long/.well-known/openid-configuration": u }, }}, }, + { + name: "existing valid upstream with trailing slash", + inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234}, + Spec: v1alpha1.OIDCIdentityProviderSpec{ + Issuer: testIssuerURL + "/ends-with-slash/", + TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64}, + Client: v1alpha1.OIDCClient{SecretName: testSecretName}, + AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes}, + Claims: v1alpha1.OIDCClaims{Groups: testGroupsClaim, Username: testUsernameClaim}, + }, + Status: v1alpha1.OIDCIdentityProviderStatus{ + Phase: "Ready", + Conditions: []v1alpha1.Condition{ + {Type: "ClientCredentialsValid", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "loaded client credentials"}, + {Type: "OIDCDiscoverySucceeded", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "discovered issuer configuration"}, + }, + }, + }}, + inputSecrets: []runtime.Object{&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName}, + Type: "secrets.pinniped.dev/oidc-client", + Data: testValidSecretData, + }}, + wantLogs: []string{ + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`, + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="discovered issuer 
configuration" "reason"="Success" "status"="True" "type"="OIDCDiscoverySucceeded"`, + }, + wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{ + &oidctestutil.TestUpstreamOIDCIdentityProvider{ + Name: testName, + ClientID: testClientID, + AuthorizationURL: *testIssuerAuthorizeURL, + Scopes: testExpectedScopes, + UsernameClaim: testUsernameClaim, + GroupsClaim: testGroupsClaim, + }, + }, + wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234}, + Status: v1alpha1.OIDCIdentityProviderStatus{ + Phase: "Ready", + Conditions: []v1alpha1.Condition{ + {Type: "ClientCredentialsValid", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "loaded client credentials", ObservedGeneration: 1234}, + {Type: "OIDCDiscoverySucceeded", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "discovered issuer configuration", ObservedGeneration: 1234}, + }, + }, + }}, + }, + { + name: "issuer is invalid URL, missing trailing slash", + inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName}, + Spec: v1alpha1.OIDCIdentityProviderSpec{ + Issuer: testIssuerURL + "/ends-with-slash", + TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64}, + Client: v1alpha1.OIDCClient{SecretName: testSecretName}, + AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes}, + }, + }}, + inputSecrets: []runtime.Object{&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName}, + Type: "secrets.pinniped.dev/oidc-client", + Data: testValidSecretData, + }}, + wantErr: controllerlib.ErrSyntheticRequeue.Error(), + wantLogs: []string{ + `upstream-observer "msg"="failed to perform OIDC discovery" "error"="oidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + 
`/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "issuer"="` + testIssuerURL + `/ends-with-slash" "name"="test-name" "namespace"="test-namespace"`, + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`, + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/ends-with-slash\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`, + `upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/ends-with-slash\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`, + }, + wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{}, + wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName}, + Status: v1alpha1.OIDCIdentityProviderStatus{ + Phase: "Error", + Conditions: []v1alpha1.Condition{ + { + Type: "ClientCredentialsValid", + Status: "True", + LastTransitionTime: now, + Reason: "Success", + Message: "loaded client credentials", + }, + { + Type: "OIDCDiscoverySucceeded", + Status: "False", + LastTransitionTime: now, + Reason: "Unreachable", + Message: `failed to perform OIDC discovery against "` + testIssuerURL + `/ends-with-slash": +oidc: issuer did not match the issuer returned by 
provider, expected "` + testIssuerURL + `/ends-with-slash" got "` + testIssuerURL + `/ends-with-slash/"`, + }, + }, + }, + }}, + }, + { + name: "issuer is invalid URL, extra trailing slash", + inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName}, + Spec: v1alpha1.OIDCIdentityProviderSpec{ + Issuer: testIssuerURL + "/", + TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64}, + Client: v1alpha1.OIDCClient{SecretName: testSecretName}, + AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes}, + }, + }}, + inputSecrets: []runtime.Object{&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName}, + Type: "secrets.pinniped.dev/oidc-client", + Data: testValidSecretData, + }}, + wantErr: controllerlib.ErrSyntheticRequeue.Error(), + wantLogs: []string{ + `upstream-observer "msg"="failed to perform OIDC discovery" "error"="oidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "issuer"="` + testIssuerURL + `/" "name"="test-name" "namespace"="test-namespace"`, + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`, + `upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`, + `upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + 
`/\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`, + }, + wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{}, + wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{ + ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName}, + Status: v1alpha1.OIDCIdentityProviderStatus{ + Phase: "Error", + Conditions: []v1alpha1.Condition{ + { + Type: "ClientCredentialsValid", + Status: "True", + LastTransitionTime: now, + Reason: "Success", + Message: "loaded client credentials", + }, + { + Type: "OIDCDiscoverySucceeded", + Status: "False", + LastTransitionTime: now, + Reason: "Unreachable", + Message: `failed to perform OIDC discovery against "` + testIssuerURL + `/": +oidc: issuer did not match the issuer returned by provider, expected "` + testIssuerURL + `/" got "` + testIssuerURL + `"`, + }, + }, + }, + }}, + }, } for _, tt := range tests { tt := tt @@ -730,5 +875,25 @@ func newTestIssuer(t *testing.T) (string, string) { }) }) + // handle the four issuer with trailing slash configs + + // valid case in= out= + // handled above at the root of testURL + + // valid case in=/ out=/ + mux.HandleFunc("/ends-with-slash/.well-known/openid-configuration", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + _ = json.NewEncoder(w).Encode(&providerJSON{ + Issuer: testURL + "/ends-with-slash/", + AuthURL: "https://example.com/authorize", + }) + }) + + // invalid case in= out=/ + // can be tested using /ends-with-slash/ endpoint + + // invalid case in=/ out= + // can be tested using root endpoint + return caBundlePEM, testURL } diff --git a/test/integration/supervisor_upstream_test.go b/test/integration/supervisor_upstream_test.go index fd7a1908..6dfd5503 100644 --- a/test/integration/supervisor_upstream_test.go +++ 
b/test/integration/supervisor_upstream_test.go @@ -43,6 +43,38 @@ Get "https://127.0.0.1:444444/issuer/.well-known/openid-configuration": dial tcp }) }) + t.Run("invalid issuer with trailing slash", func(t *testing.T) { + t.Parallel() + spec := v1alpha1.OIDCIdentityProviderSpec{ + Issuer: env.SupervisorTestUpstream.Issuer + "/", + TLS: &v1alpha1.TLSSpec{ + CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorTestUpstream.CABundle)), + }, + AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{ + AdditionalScopes: []string{"email", "profile"}, + }, + Client: v1alpha1.OIDCClient{ + SecretName: library.CreateClientCredsSecret(t, "test-client-id", "test-client-secret").Name, + }, + } + upstream := library.CreateTestOIDCIdentityProvider(t, spec, v1alpha1.PhaseError) + expectUpstreamConditions(t, upstream, []v1alpha1.Condition{ + { + Type: "ClientCredentialsValid", + Status: v1alpha1.ConditionTrue, + Reason: "Success", + Message: "loaded client credentials", + }, + { + Type: "OIDCDiscoverySucceeded", + Status: v1alpha1.ConditionFalse, + Reason: "Unreachable", + Message: `failed to perform OIDC discovery against "` + env.SupervisorTestUpstream.Issuer + `/": +oidc: issuer did not match the issuer returned by provider, expected "` + env.SupervisorTestUpstream.Issuer + `/" got "` + env.SupervisorTestUpstream.Issuer + `"`, + }, + }) + }) + t.Run("valid", func(t *testing.T) { t.Parallel() spec := v1alpha1.OIDCIdentityProviderSpec{ From 0770682bf941594da21ac70c2f8406fe41f93943 Mon Sep 17 00:00:00 2001 From: Mo Khan Date: Mon, 10 May 2021 00:50:59 -0400 Subject: [PATCH 30/32] impersonation proxy test: handle admin users with UID such as on EKS Signed-off-by: Mo Khan --- .../concierge_impersonation_proxy_test.go | 42 ++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/test/integration/concierge_impersonation_proxy_test.go b/test/integration/concierge_impersonation_proxy_test.go index 4fa7329e..4206b57e 100644 --- 
a/test/integration/concierge_impersonation_proxy_test.go +++ b/test/integration/concierge_impersonation_proxy_test.go @@ -622,7 +622,12 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl expectedOriginalUserInfo := authenticationv1.UserInfo{ Username: whoAmIAdmin.Status.KubernetesUserInfo.User.Username, // The WhoAmI API is lossy so this will fail when the admin user actually does have a UID - UID: whoAmIAdmin.Status.KubernetesUserInfo.User.UID, + // Thus we fallback to the CSR API to grab the UID + UID: getUIDViaCSR(ctx, t, whoAmIAdmin.Status.KubernetesUserInfo.User.UID, + newImpersonationProxyClientWithCredentials(t, + clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil). + Kubernetes, + ), Groups: whoAmIAdmin.Status.KubernetesUserInfo.User.Groups, Extra: expectedExtra, } @@ -1725,3 +1730,38 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC return out } + +func getUIDViaCSR(ctx context.Context, t *testing.T, uid string, client kubernetes.Interface) string { + t.Helper() + + if len(uid) != 0 { + return uid // in the future this may not be empty on some clusters + } + + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + csrPEM, err := cert.MakeCSR(privateKey, &pkix.Name{ + CommonName: "panda-man", + Organization: []string{"living-the-dream", "need-more-sleep"}, + }, nil, nil) + require.NoError(t, err) + + csrName, _, err := csr.RequestCertificate( + client, + csrPEM, + "", + certificatesv1.KubeAPIServerClientSignerName, + []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + privateKey, + ) + require.NoError(t, err) + + csReq, err := client.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{}) + require.NoError(t, err) + + err = client.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{}) + require.NoError(t, err) + + return csReq.Spec.UID +} 
From 716659b74aa2f71e183073a5eeaa893f8c15f186 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Mon, 10 May 2021 13:22:51 -0400 Subject: [PATCH 31/32] impersonation proxy test: handle admin users with mixed case extra keys Signed-off-by: Monis Khan --- .../concierge_impersonation_proxy_test.go | 40 +++++++++++-------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/test/integration/concierge_impersonation_proxy_test.go b/test/integration/concierge_impersonation_proxy_test.go index 4206b57e..118bf324 100644 --- a/test/integration/concierge_impersonation_proxy_test.go +++ b/test/integration/concierge_impersonation_proxy_test.go @@ -615,21 +615,26 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{}) require.NoError(t, err) - expectedExtra := make(map[string]authenticationv1.ExtraValue, len(whoAmIAdmin.Status.KubernetesUserInfo.User.Extra)) - for k, v := range whoAmIAdmin.Status.KubernetesUserInfo.User.Extra { + // The WhoAmI API is lossy: + // - It drops UID + // - It lowercases all extra keys + // the admin user on EKS has both a UID set and an extra key with uppercase characters + // Thus we fallback to the CSR API to grab the UID and Extra to handle this scenario + uid, extra := getUIDAndExtraViaCSR(ctx, t, whoAmIAdmin.Status.KubernetesUserInfo.User.UID, + newImpersonationProxyClientWithCredentials(t, + clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil). 
+ Kubernetes, + ) + + expectedExtra := make(map[string]authenticationv1.ExtraValue, len(extra)) + for k, v := range extra { expectedExtra[k] = authenticationv1.ExtraValue(v) } expectedOriginalUserInfo := authenticationv1.UserInfo{ Username: whoAmIAdmin.Status.KubernetesUserInfo.User.Username, - // The WhoAmI API is lossy so this will fail when the admin user actually does have a UID - // Thus we fallback to the CSR API to grab the UID - UID: getUIDViaCSR(ctx, t, whoAmIAdmin.Status.KubernetesUserInfo.User.UID, - newImpersonationProxyClientWithCredentials(t, - clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil). - Kubernetes, - ), - Groups: whoAmIAdmin.Status.KubernetesUserInfo.User.Groups, - Extra: expectedExtra, + UID: uid, + Groups: whoAmIAdmin.Status.KubernetesUserInfo.User.Groups, + Extra: expectedExtra, } expectedOriginalUserInfoJSON, err := json.Marshal(expectedOriginalUserInfo) require.NoError(t, err) @@ -1731,13 +1736,9 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC return out } -func getUIDViaCSR(ctx context.Context, t *testing.T, uid string, client kubernetes.Interface) string { +func getUIDAndExtraViaCSR(ctx context.Context, t *testing.T, uid string, client kubernetes.Interface) (string, map[string]certificatesv1beta1.ExtraValue) { t.Helper() - if len(uid) != 0 { - return uid // in the future this may not be empty on some clusters - } - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(t, err) @@ -1763,5 +1764,10 @@ func getUIDViaCSR(ctx context.Context, t *testing.T, uid string, client kubernet err = client.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{}) require.NoError(t, err) - return csReq.Spec.UID + outUID := uid // in the future this may not be empty on some clusters + if len(outUID) == 0 { + outUID = csReq.Spec.UID + } + + return outUID, csReq.Spec.Extra } From dbde150c38247ad37da6c2e8f24747c5ca5dc803 
Mon Sep 17 00:00:00 2001 From: Pinny Date: Mon, 10 May 2021 22:01:16 +0000 Subject: [PATCH 32/32] Update CLI docs for v0.8.0 release --- site/content/docs/reference/cli.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/site/content/docs/reference/cli.md b/site/content/docs/reference/cli.md index e5786505..556d8ba8 100644 --- a/site/content/docs/reference/cli.md +++ b/site/content/docs/reference/cli.md @@ -29,6 +29,8 @@ pinniped get kubeconfig [flags] --concierge-endpoint string API base for the Concierge endpoint --concierge-mode mode Concierge mode of operation (default TokenCredentialRequestAPI) --concierge-skip-wait Skip waiting for any pending Concierge strategies to become ready (default: false) + --credential-cache string Path to cluster-specific credentials cache + --generated-name-suffix string Suffix to append to generated cluster, context, user kubeconfig entries (default "-pinniped") -h, --help help for kubeconfig --kubeconfig string Path to kubeconfig file --kubeconfig-context string Kubeconfig context name (default: current active context)