Merge branch 'main' into initial_ldap

Ryan Richard 2021-05-11 11:09:37 -07:00
commit 675bbb2aba
37 changed files with 1949 additions and 231 deletions

View File

@@ -3,7 +3,7 @@
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
-FROM golang:1.16.3 as build-env
+FROM golang:1.16.4 as build-env
WORKDIR /work
COPY . .

View File

@@ -20,10 +20,12 @@ import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/client-go/transport"
"k8s.io/klog/v2/klogr" "k8s.io/klog/v2/klogr"
"go.pinniped.dev/internal/execcredcache" "go.pinniped.dev/internal/execcredcache"
"go.pinniped.dev/internal/groupsuffix" "go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/plog"
"go.pinniped.dev/pkg/conciergeclient" "go.pinniped.dev/pkg/conciergeclient"
"go.pinniped.dev/pkg/oidcclient" "go.pinniped.dev/pkg/oidcclient"
"go.pinniped.dev/pkg/oidcclient/filesession" "go.pinniped.dev/pkg/oidcclient/filesession"
@ -36,12 +38,14 @@ func init() {
} }
type oidcLoginCommandDeps struct { type oidcLoginCommandDeps struct {
lookupEnv func(string) (string, bool)
login func(string, string, ...oidcclient.Option) (*oidctypes.Token, error)
exchangeToken func(context.Context, *conciergeclient.Client, string) (*clientauthv1beta1.ExecCredential, error)
}
func oidcLoginCommandRealDeps() oidcLoginCommandDeps {
return oidcLoginCommandDeps{
lookupEnv: os.LookupEnv,
login: oidcclient.Login,
exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) {
return client.ExchangeToken(ctx, token)
@@ -113,7 +117,12 @@ func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command {
return cmd
}
-func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error {
+func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error { //nolint:funlen
pLogger, err := SetLogLevel(deps.lookupEnv)
if err != nil {
plog.WarningErr("Received error while setting log level", err)
}
// Initialize the session cache.
var sessionOptions []filesession.Option
@@ -129,6 +138,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
// Initialize the login handler.
opts := []oidcclient.Option{
oidcclient.WithContext(cmd.Context()),
oidcclient.WithLogger(klogr.New()),
oidcclient.WithScopes(flags.scopes),
oidcclient.WithSessionCache(sessionCache),
}
@@ -187,7 +197,6 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
}
opts = append(opts, oidcclient.WithClient(client))
}
// Look up cached credentials based on a hash of all the CLI arguments and the cluster info.
cacheKey := struct {
Args []string `json:"args"`
@@ -200,10 +209,12 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
if flags.credentialCachePath != "" {
credCache = execcredcache.New(flags.credentialCachePath)
if cred := credCache.Get(cacheKey); cred != nil {
pLogger.Debug("using cached cluster credential.")
return json.NewEncoder(cmd.OutOrStdout()).Encode(cred)
}
}
pLogger.Debug("Performing OIDC login", "issuer", flags.issuer, "client id", flags.clientID)
// Do the basic login to get an OIDC token.
token, err := deps.login(flags.issuer, flags.clientID, opts...)
if err != nil {
@@ -213,6 +224,7 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
// If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential.
if concierge != nil {
pLogger.Debug("Exchanging token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@@ -220,10 +232,14 @@ func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLogin
if err != nil {
return fmt.Errorf("could not complete Concierge credential exchange: %w", err)
}
pLogger.Debug("Successfully exchanged token for cluster credential.")
} else {
pLogger.Debug("No concierge configured, skipping token credential exchange")
}
// If there was a credential cache, save the resulting credential for future use.
if credCache != nil {
pLogger.Debug("caching cluster credential for future use.")
credCache.Put(cacheKey, cred)
}
return json.NewEncoder(cmd.OutOrStdout()).Encode(cred)
@@ -245,7 +261,7 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er
}
pool.AppendCertsFromPEM(pem)
}
-return &http.Client{
+client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
@@ -253,7 +269,10 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er
MinVersion: tls.VersionTLS12,
},
},
-}, nil
+}
client.Transport = transport.DebugWrappers(client.Transport)
return client, nil
}
func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
@@ -272,6 +291,18 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
return &cred
}
func SetLogLevel(lookupEnv func(string) (string, bool)) (*plog.PLogger, error) {
debug, _ := lookupEnv("PINNIPED_DEBUG")
if debug == "true" {
err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug)
if err != nil {
return nil, err
}
}
logger := plog.New("Pinniped login: ")
return &logger, nil
}
// mustGetConfigDir returns a directory that follows the XDG base directory convention:
// $XDG_CONFIG_HOME defines the base directory relative to which user specific configuration files should
// be stored. If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used.

View File

@@ -16,10 +16,12 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/klog/v2"
"go.pinniped.dev/internal/certauthority" "go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/here" "go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/testutil" "go.pinniped.dev/internal/testutil"
"go.pinniped.dev/internal/testutil/testlogger"
"go.pinniped.dev/pkg/conciergeclient" "go.pinniped.dev/pkg/conciergeclient"
"go.pinniped.dev/pkg/oidcclient" "go.pinniped.dev/pkg/oidcclient"
"go.pinniped.dev/pkg/oidcclient/oidctypes" "go.pinniped.dev/pkg/oidcclient/oidctypes"
@ -41,10 +43,12 @@ func TestLoginOIDCCommand(t *testing.T) {
args []string args []string
loginErr error loginErr error
conciergeErr error conciergeErr error
env map[string]string
wantError bool
wantStdout string
wantStderr string
wantOptionsCount int
wantLogs []string
}{
{
name: "help flag passed",
@@ -155,7 +159,7 @@ func TestLoginOIDCCommand(t *testing.T) {
"--upstream-identity-provider-type", "oidc",
"--credential-cache", "", // must specify --credential-cache or else the cache file on disk causes test pollution
},
-wantOptionsCount: 3,
+wantOptionsCount: 4,
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
},
{
@@ -166,7 +170,7 @@ func TestLoginOIDCCommand(t *testing.T) {
"--upstream-identity-provider-type", "ldap",
"--credential-cache", "", // must specify --credential-cache or else the cache file on disk causes test pollution
},
-wantOptionsCount: 4,
+wantOptionsCount: 5,
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
},
{
@@ -177,7 +181,7 @@ func TestLoginOIDCCommand(t *testing.T) {
"--credential-cache", "", // must specify --credential-cache or else the cache file on disk causes test pollution
},
loginErr: fmt.Errorf("some login error"),
-wantOptionsCount: 3,
+wantOptionsCount: 4,
wantError: true,
wantStderr: here.Doc(`
Error: could not complete Pinniped login: some login error
@@ -195,7 +199,7 @@ func TestLoginOIDCCommand(t *testing.T) {
"--credential-cache", "", // must specify --credential-cache or else the cache file on disk causes test pollution
},
conciergeErr: fmt.Errorf("some concierge error"),
-wantOptionsCount: 3,
+wantOptionsCount: 4,
wantError: true,
wantStderr: here.Doc(`
Error: could not complete Concierge credential exchange: some concierge error
@@ -208,8 +212,13 @@ func TestLoginOIDCCommand(t *testing.T) {
"--issuer", "test-issuer",
"--credential-cache", "", // must specify --credential-cache or else the cache file on disk causes test pollution
},
-wantOptionsCount: 3,
+env: map[string]string{"PINNIPED_DEBUG": "true"},
wantOptionsCount: 4,
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
wantLogs: []string{
"\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"",
"\"level\"=0 \"msg\"=\"Pinniped login: No concierge configured, skipping token credential exchange\"",
},
},
{
name: "success with all options",
@@ -232,17 +241,30 @@ func TestLoginOIDCCommand(t *testing.T) {
"--upstream-identity-provider-name", "some-upstream-name",
"--upstream-identity-provider-type", "ldap",
},
-wantOptionsCount: 9,
+env: map[string]string{"PINNIPED_DEBUG": "true"},
wantOptionsCount: 10,
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"exchanged-token"}}` + "\n",
wantLogs: []string{
"\"level\"=0 \"msg\"=\"Pinniped login: Performing OIDC login\" \"client id\"=\"test-client-id\" \"issuer\"=\"test-issuer\"",
"\"level\"=0 \"msg\"=\"Pinniped login: Exchanging token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1:1234/\"",
"\"level\"=0 \"msg\"=\"Pinniped login: Successfully exchanged token for cluster credential.\"",
"\"level\"=0 \"msg\"=\"Pinniped login: caching cluster credential for future use.\"",
},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
testLogger := testlogger.New(t)
klog.SetLogger(testLogger)
var (
gotOptions []oidcclient.Option
)
cmd := oidcLoginCommand(oidcLoginCommandDeps{
lookupEnv: func(s string) (string, bool) {
v, ok := tt.env[s]
return v, ok
},
login: func(issuer string, clientID string, opts ...oidcclient.Option) (*oidctypes.Token, error) {
require.Equal(t, "test-issuer", issuer)
require.Equal(t, "test-client-id", clientID)
@@ -288,6 +310,8 @@ func TestLoginOIDCCommand(t *testing.T) {
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
require.Len(t, gotOptions, tt.wantOptionsCount)
require.Equal(t, tt.wantLogs, testLogger.Lines())
})
}
}

View File

@@ -17,6 +17,7 @@ import (
"go.pinniped.dev/internal/execcredcache"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/plog"
"go.pinniped.dev/pkg/conciergeclient" "go.pinniped.dev/pkg/conciergeclient"
"go.pinniped.dev/pkg/oidcclient/oidctypes" "go.pinniped.dev/pkg/oidcclient/oidctypes"
) )
@ -83,6 +84,11 @@ func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
} }
func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error { func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error {
pLogger, err := SetLogLevel(deps.lookupEnv)
if err != nil {
plog.WarningErr("Received error while setting log level", err)
}
if flags.staticToken == "" && flags.staticTokenEnvName == "" {
return fmt.Errorf("one of --token or --token-env must be set")
}
@@ -131,12 +137,14 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams
if flags.credentialCachePath != "" {
credCache = execcredcache.New(flags.credentialCachePath)
if cred := credCache.Get(cacheKey); cred != nil {
pLogger.Debug("using cached cluster credential.")
return json.NewEncoder(out).Encode(cred)
}
}
// If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential.
if concierge != nil {
pLogger.Debug("exchanging static token for cluster credential", "endpoint", flags.conciergeEndpoint, "authenticator type", flags.conciergeAuthenticatorType, "authenticator name", flags.conciergeAuthenticatorName)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@@ -145,6 +153,7 @@ func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams
if err != nil {
return fmt.Errorf("could not complete Concierge credential exchange: %w", err)
}
pLogger.Debug("exchanged static token for cluster credential")
}
// If there was a credential cache, save the resulting credential for future use. We only save to the cache if

View File

@@ -12,6 +12,10 @@ import (
"testing"
"time"
"k8s.io/klog/v2"
"go.pinniped.dev/internal/testutil/testlogger"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
@ -41,6 +45,7 @@ func TestLoginStaticCommand(t *testing.T) {
wantStdout string wantStdout string
wantStderr string wantStderr string
wantOptionsCount int wantOptionsCount int
wantLogs []string
}{
{
name: "help flag passed",
@@ -126,10 +131,12 @@ func TestLoginStaticCommand(t *testing.T) {
"--concierge-authenticator-name", "test-authenticator",
},
conciergeErr: fmt.Errorf("some concierge error"),
env: map[string]string{"PINNIPED_DEBUG": "true"},
wantError: true,
wantStderr: here.Doc(`
Error: could not complete Concierge credential exchange: some concierge error
`),
wantLogs: []string{"\"level\"=0 \"msg\"=\"Pinniped login: exchanging static token for cluster credential\" \"authenticator name\"=\"test-authenticator\" \"authenticator type\"=\"webhook\" \"endpoint\"=\"https://127.0.0.1/\""},
},
{
name: "invalid API group suffix",
@@ -151,12 +158,15 @@ func TestLoginStaticCommand(t *testing.T) {
args: []string{
"--token", "test-token",
},
env: map[string]string{"PINNIPED_DEBUG": "true"},
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"test-token"}}` + "\n",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
testLogger := testlogger.New(t)
klog.SetLogger(testLogger)
cmd := staticLoginCommand(staticLoginDeps{
lookupEnv: func(s string) (string, bool) {
v, ok := tt.env[s]
@@ -192,6 +202,8 @@ func TestLoginStaticCommand(t *testing.T) {
}
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
require.Equal(t, tt.wantLogs, testLogger.Lines())
})
}
}

View File

@@ -22,6 +22,13 @@ metadata:
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("config")
@@ -47,6 +54,7 @@ data:
impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @)
impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @)
impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @)
agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @)
labels: (@= json.encode(labels()).rstrip() @)
kubeCertAgent:
namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)

View File

@@ -24,9 +24,6 @@ rules:
- apiGroups: [ flowcontrol.apiserver.k8s.io ]
resources: [ flowschemas, prioritylevelconfigurations ]
verbs: [ get, list, watch ]
-- apiGroups: [ policy ]
-resources: [ podsecuritypolicies ]
-verbs: [ use ]
- apiGroups: [ security.openshift.io ]
resources: [ securitycontextconstraints ]
verbs: [ use ]
@@ -67,6 +64,34 @@ roleRef:
name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
apiGroup: rbac.authorization.k8s.io
#! Give permission to the kube-cert-agent Pod to run privileged.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
rules:
- apiGroups: [ policy ]
resources: [ podsecuritypolicies ]
verbs: [ use ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
subjects:
- kind: ServiceAccount
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
roleRef:
kind: Role
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
apiGroup: rbac.authorization.k8s.io
#! Give permission to various objects within the app's own namespace
---
apiVersion: rbac.authorization.k8s.io/v1

View File

@@ -35,6 +35,13 @@ only shares this information with the audit stack). To keep things simple,
we use the fake audit backend at the Metadata level for all requests. This
guarantees that we always have an audit event on every request.
One final wrinkle is that impersonation cannot impersonate UIDs (yet). This is
problematic because service account tokens always assert a UID. To handle this
case without losing authentication information, when we see an identity with a
UID that was asserted via a bearer token, we simply pass the request through
with the original bearer token and no impersonation headers set (as if the user
had made the request directly against the Kubernetes API server).
For all normal requests, we only use http/2.0 when proxying to the API server.
For upgrade requests, we only use http/1.1 since these always go from http/1.1
to either websockets or SPDY.
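
The token passthrough decision described above amounts to a small branch on whether the final identity carries a UID. Below is a minimal, hypothetical Go sketch of that branch; the helper and parameter names are illustrative only, while the real logic added by this change lives in getTransportForUser, canImpersonateFully, and tokenPassthroughRoundTripper further down in this diff, which additionally re-verifies the token against the Kubernetes API server's authenticator before passing it through.

package docsketch

import (
	"errors"
	"net/http"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/client-go/transport"
)

// chooseRoundTripper is a hypothetical helper (not part of this change) that sketches
// the decision above: identities without a UID are impersonated as usual, while
// identities whose UID was asserted by a bearer token have that original token
// forwarded on an anonymous transport, with no impersonation headers set.
func chooseRoundTripper(userInfo user.Info, bearerToken string, impersonating, anonymous http.RoundTripper) (http.RoundTripper, error) {
	if len(userInfo.GetUID()) == 0 {
		// No UID asserted, so standard Impersonate-* headers can fully represent this user.
		return impersonating, nil
	}
	if len(bearerToken) != 0 {
		// A UID was asserted and the original bearer token is available: pass that token
		// through directly instead of impersonating, preserving the UID-bearing identity.
		return transport.NewBearerAuthRoundTripper(bearerToken, anonymous), nil
	}
	// A UID without a token to pass through (e.g. asserted some other way): fail closed.
	return nil, errors.New("unable to act as a user whose identity asserts a UID")
}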

View File

@@ -15,6 +15,8 @@ import (
"strings"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@ -26,6 +28,8 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
auditinternal "k8s.io/apiserver/pkg/apis/audit" auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/audit/policy" "k8s.io/apiserver/pkg/audit/policy"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
"k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/filterlatency" "k8s.io/apiserver/pkg/endpoints/filterlatency"
@ -45,6 +49,7 @@ import (
"go.pinniped.dev/internal/httputil/securityheader" "go.pinniped.dev/internal/httputil/securityheader"
"go.pinniped.dev/internal/kubeclient" "go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/plog" "go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/valuelesscontext"
)
// FactoryFunc is a function which can create an impersonator server.
@@ -176,6 +181,11 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
// See the genericapiserver.DefaultBuildHandlerChain func for details.
handler = defaultBuildHandlerChainFunc(handler, c)
// we need to grab the bearer token before WithAuthentication deletes it.
handler = filterlatency.TrackCompleted(handler)
handler = withBearerTokenPreservation(handler)
handler = filterlatency.TrackStarted(handler, "bearertokenpreservation")
// Always set security headers so browsers do the right thing.
handler = filterlatency.TrackCompleted(handler)
handler = securityheader.Wrap(handler)
@@ -189,6 +199,9 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
serverConfig.AuditPolicyChecker = policy.FakeChecker(auditinternal.LevelMetadata, nil)
serverConfig.AuditBackend = &auditfake.Backend{}
// if we ever start unioning a TCR bearer token authenticator with serverConfig.Authenticator
// then we will need to update the related assumption in tokenPassthroughRoundTripper
delegatingAuthorizer := serverConfig.Authorization.Authorizer
nestedImpersonationAuthorizer := &comparableAuthorizer{
authorizerFunc: func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
@@ -290,6 +303,35 @@ func (f authorizerFunc) Authorize(ctx context.Context, a authorizer.Attributes)
return f(ctx, a)
}
func withBearerTokenPreservation(delegate http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// this looks a bit hacky but lets us avoid writing any logic for parsing out the bearer token
var reqToken string
_, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) {
reqToken = token
return nil, false, nil
})).AuthenticateRequest(r)
// smuggle the token through the context. this does mean that we need to avoid logging the context.
if len(reqToken) != 0 {
ctx := context.WithValue(r.Context(), tokenKey, reqToken)
r = r.WithContext(ctx)
}
delegate.ServeHTTP(w, r)
})
}
func tokenFrom(ctx context.Context) string {
token, _ := ctx.Value(tokenKey).(string)
return token
}
// contextKey type is unexported to prevent collisions.
type contextKey int
const tokenKey contextKey = iota
func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapiserver.Config) http.Handler, error) {
serverURL, err := url.Parse(restConfig.Host)
if err != nil {
@@ -300,11 +342,19 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
if err != nil {
return nil, fmt.Errorf("could not get http/1.1 round tripper: %w", err)
}
http1RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "http/1.1")
if err != nil {
return nil, fmt.Errorf("could not get http/1.1 anonymous round tripper: %w", err)
}
http2RoundTripper, err := getTransportForProtocol(restConfig, "h2")
if err != nil {
return nil, fmt.Errorf("could not get http/2.0 round tripper: %w", err)
}
http2RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "h2")
if err != nil {
return nil, fmt.Errorf("could not get http/2.0 anonymous round tripper: %w", err)
}
return func(c *genericapiserver.Config) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -347,15 +397,18 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
return
}
// grab the request's bearer token if present. this is optional and does not fail the request if missing.
token := tokenFrom(r.Context())
// KAS only supports upgrades via http/1.1 to websockets/SPDY (upgrades never use http/2.0)
// Thus we default to using http/2.0 when the request is not an upgrade, otherwise we use http/1.1
-baseRT := http2RoundTripper
+baseRT, baseRTAnonymous := http2RoundTripper, http2RoundTripperAnonymous
isUpgradeRequest := httpstream.IsUpgradeRequest(r)
if isUpgradeRequest {
-baseRT = http1RoundTripper
+baseRT, baseRTAnonymous = http1RoundTripper, http1RoundTripperAnonymous
}
-rt, err := getTransportForUser(userInfo, baseRT, ae)
+rt, err := getTransportForUser(r.Context(), userInfo, baseRT, baseRTAnonymous, ae, token, c.Authentication.Authenticator)
if err != nil {
plog.WarningErr("rejecting request as we cannot act as the current user", err,
"url", r.URL.String(),
@@ -413,8 +466,26 @@ func ensureNoImpersonationHeaders(r *http.Request) error {
return nil
}
-func getTransportForUser(userInfo user.Info, delegate http.RoundTripper, ae *auditinternal.Event) (http.RoundTripper, error) {
+func getTransportForUser(ctx context.Context, userInfo user.Info, delegate, delegateAnonymous http.RoundTripper, ae *auditinternal.Event, token string, authenticator authenticator.Request) (http.RoundTripper, error) {
if canImpersonateFully(userInfo) {
return standardImpersonationRoundTripper(userInfo, ae, delegate)
}
return tokenPassthroughRoundTripper(ctx, delegateAnonymous, ae, token, authenticator)
}
func canImpersonateFully(userInfo user.Info) bool {
// nolint: gosimple // this structure is on purpose because we plan to expand this function
if len(userInfo.GetUID()) == 0 {
return true
}
// once kube supports UID impersonation, add logic to detect if the KAS is
// new enough to have this functionality and return true in that case as well
return false
}
func standardImpersonationRoundTripper(userInfo user.Info, ae *auditinternal.Event, delegate http.RoundTripper) (http.RoundTripper, error) {
extra, err := buildExtra(userInfo.GetExtra(), ae)
if err != nil {
return nil, err
@@ -428,19 +499,84 @@ func getTransportForUser(userInfo user.Info, delegate http.RoundTripper, ae *aud
// transport.NewImpersonatingRoundTripper clones the request before setting headers
// thus it will not accidentally mutate the input request (see http.Handler docs)
return transport.NewImpersonatingRoundTripper(impersonateConfig, delegate), nil
}
func tokenPassthroughRoundTripper(ctx context.Context, delegateAnonymous http.RoundTripper, ae *auditinternal.Event, token string, authenticator authenticator.Request) (http.RoundTripper, error) {
// all code below assumes KAS does not support UID impersonation because that case is handled in the standard path
// it also assumes that the TCR API does not issue tokens - if this assumption changes, we will need
// some way to distinguish a token that is only valid against this impersonation proxy and not against KAS.
// this code will fail closed because said TCR token would not work against KAS and the request would fail.
// if we get here we know the final user info had a UID
// if the original user is also performing a nested impersonation, it means that said nested
// impersonation is trying to impersonate a UID since final user info == ae.ImpersonatedUser
// we know this KAS does not support UID impersonation so this request must be rejected
if ae.ImpersonatedUser != nil {
return nil, constable.Error("unable to impersonate uid")
} }
-// 0. in the case of a request that is not attempting to do nested impersonation
-// 1. if we make the assumption that the TCR API does not issue tokens (or pass the TCR API bearer token
-// authenticator into this func - we need to know the authentication cred is something KAS would honor)
-// 2. then if preserve the incoming authorization header into the request's context
-// 3. we could reauthenticate it here (it would be a free cache hit)
-// 4. confirm that it matches the passed in user info (i.e. it was actually the cred used to authenticate and not a client cert)
-// 5. then we could issue a reverse proxy request using an anonymous rest config and the bearer token
-// 6. thus instead of impersonating the user, we would just be passing their request through
-// 7. this would preserve the UID info and thus allow us to safely support all token based auth
-// 8. the above would be safe even if in the future Kube started supporting UIDs asserted by client certs
-return nil, constable.Error("unexpected uid")
+// see what KAS thinks this token translates into
+// this is important because certs have precedence over tokens and we want
+// to make sure that we do not get confused and pass along the wrong token
+tokenUser, err := tokenReview(ctx, token, authenticator)
+if err != nil {
+return nil, err
+}
+// we want to compare the result of the token authentication with the original user that made the request
+// if the user who made the request and the token do not match, we cannot go any further at this point
+if !apiequality.Semantic.DeepEqual(ae.User, tokenUser) {
// this info leak seems fine for trace level logs
plog.Trace("failed to passthrough token due to user mismatch",
"original-username", ae.User.Username,
"original-uid", ae.User.UID,
"token-username", tokenUser.Username,
"token-uid", tokenUser.UID,
)
return nil, constable.Error("token authenticated as a different user")
}
// now we know that if we send this token to KAS, it will authenticate correctly
return transport.NewBearerAuthRoundTripper(token, delegateAnonymous), nil
}
func tokenReview(ctx context.Context, token string, authenticator authenticator.Request) (authenticationv1.UserInfo, error) {
if len(token) == 0 {
return authenticationv1.UserInfo{}, constable.Error("no token on request")
}
// create a header that contains nothing but the token
// an astute observer may ask "but what about the token's audience?"
// in this case, we want to leave audiences unset per the token review docs:
// > If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.
// i.e. we want to make sure that the given token is valid against KAS
fakeReq := &http.Request{Header: http.Header{}}
fakeReq.Header.Set("Authorization", "Bearer "+token)
// propagate cancellation of parent context (without any values such as audience)
fakeReq = fakeReq.WithContext(valuelesscontext.New(ctx))
// this will almost always be a free call that hits our 10 second cache TTL
resp, ok, err := authenticator.AuthenticateRequest(fakeReq)
if err != nil {
return authenticationv1.UserInfo{}, err
}
if !ok {
return authenticationv1.UserInfo{}, constable.Error("token failed to authenticate")
}
tokenUser := authenticationv1.UserInfo{
Username: resp.User.GetName(),
UID: resp.User.GetUID(),
Groups: resp.User.GetGroups(),
Extra: make(map[string]authenticationv1.ExtraValue, len(resp.User.GetExtra())),
}
for k, v := range resp.User.GetExtra() {
tokenUser.Extra[k] = v
}
return tokenUser, nil
} }
func buildExtra(extra map[string][]string, ae *auditinternal.Event) (map[string][]string, error) {

View File

@@ -22,6 +22,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/httpstream"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/request/bearertoken"
"k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/features"
@ -33,6 +35,7 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"go.pinniped.dev/internal/certauthority" "go.pinniped.dev/internal/certauthority"
"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/dynamiccert" "go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/here" "go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/httputil/roundtripper" "go.pinniped.dev/internal/httputil/roundtripper"
@ -176,6 +179,26 @@ func TestImpersonator(t *testing.T) {
"X-Forwarded-For": {"127.0.0.1"}, "X-Forwarded-For": {"127.0.0.1"},
}, },
}, },
{
name: "when there is no client cert on request but it has basic auth, it is still an anonymous request",
clientCert: &clientCert{},
clientMutateHeaders: func(header http.Header) {
header.Set("Test", "val")
req := &http.Request{Header: header}
req.SetBasicAuth("foo", "bar")
},
kubeAPIServerClientBearerTokenFile: "required-to-be-set",
wantKubeAPIServerRequestHeaders: http.Header{
"Impersonate-User": {"system:anonymous"},
"Impersonate-Group": {"system:unauthenticated"},
"Authorization": {"Bearer some-service-account-token"},
"User-Agent": {"test-agent"},
"Accept": {"application/vnd.kubernetes.protobuf,application/json"},
"Accept-Encoding": {"gzip"},
"X-Forwarded-For": {"127.0.0.1"},
"Test": {"val"},
},
},
{
name: "failed client cert authentication",
clientCert: newClientCert(t, unrelatedCA, "test-username", []string{"test-group1"}),
@@ -499,39 +522,12 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
"extra-2": {"some", "more", "extra", "stuff"},
}
-validURL, _ := url.Parse("http://pinniped.dev/blah")
-newRequest := func(h http.Header, userInfo user.Info, event *auditinternal.Event) *http.Request {
-ctx := context.Background()
-if userInfo != nil {
-ctx = request.WithUser(ctx, userInfo)
-}
-ae := &auditinternal.Event{Level: auditinternal.LevelMetadata}
-if event != nil {
-ae = event
-}
-ctx = request.WithAuditEvent(ctx, ae)
-reqInfo := &request.RequestInfo{
-IsResourceRequest: false,
-Path: validURL.Path,
-Verb: "get",
-}
-ctx = request.WithRequestInfo(ctx, reqInfo)
-r, err := http.NewRequestWithContext(ctx, http.MethodGet, validURL.String(), nil)
-require.NoError(t, err)
-r.Header = h
-return r
-}
tests := []struct {
name string
restConfig *rest.Config
wantCreationErr string
request *http.Request
authenticator authenticator.Request
wantHTTPBody string
wantHTTPStatus int
wantKubeAPIServerRequestHeaders http.Header
@@ -563,50 +559,50 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
},
{
name: "Impersonate-User header already in request",
-request: newRequest(map[string][]string{"Impersonate-User": {"some-user"}}, nil, nil),
+request: newRequest(t, map[string][]string{"Impersonate-User": {"some-user"}}, nil, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "Impersonate-Group header already in request",
-request: newRequest(map[string][]string{"Impersonate-Group": {"some-group"}}, nil, nil),
+request: newRequest(t, map[string][]string{"Impersonate-Group": {"some-group"}}, nil, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "Impersonate-Extra header already in request",
-request: newRequest(map[string][]string{"Impersonate-Extra-something": {"something"}}, nil, nil),
+request: newRequest(t, map[string][]string{"Impersonate-Extra-something": {"something"}}, nil, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "Impersonate-* header already in request",
-request: newRequest(map[string][]string{"Impersonate-Something": {"some-newfangled-impersonate-header"}}, nil, nil),
+request: newRequest(t, map[string][]string{"Impersonate-Something": {"some-newfangled-impersonate-header"}}, nil, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "unexpected authorization header",
-request: newRequest(map[string][]string{"Authorization": {"panda"}}, nil, nil),
+request: newRequest(t, map[string][]string{"Authorization": {"panda"}}, nil, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid authorization header","reason":"InternalError","details":{"causes":[{"message":"invalid authorization header"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "missing user",
-request: newRequest(map[string][]string{}, nil, nil),
+request: newRequest(t, map[string][]string{}, nil, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid user","reason":"InternalError","details":{"causes":[{"message":"invalid user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "unexpected UID",
-request: newRequest(map[string][]string{}, &user.DefaultInfo{UID: "007"}, nil),
+request: newRequest(t, map[string][]string{}, &user.DefaultInfo{UID: "007"}, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user but missing audit event",
request: func() *http.Request {
-req := newRequest(map[string][]string{
+req := newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
@@ -615,7 +611,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
Name: testUser,
Groups: testGroups,
Extra: testExtra,
-}, nil)
+}, nil, "")
ctx := request.WithAuditEvent(req.Context(), nil)
req = req.WithContext(ctx)
return req
@@ -625,7 +621,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
},
{
name: "authenticated user with upper case extra",
-request: newRequest(map[string][]string{
+request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
@@ -639,13 +635,13 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
"valid-key": {"valid-value"},
"Invalid-key": {"still-valid-value"},
},
-}, nil),
+}, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with upper case extra across multiple lines",
-request: newRequest(map[string][]string{
+request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
@@ -659,13 +655,13 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
"valid-key": {"valid-value"},
"valid-data\nInvalid-key": {"still-valid-value"},
},
-}, nil),
+}, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with reserved extra key",
-request: newRequest(map[string][]string{
+request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
@@ -679,14 +675,164 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
"valid-key": {"valid-value"},
"foo.impersonation-proxy.concierge.pinniped.dev": {"still-valid-value"},
}, },
}, nil), }, nil, ""),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with UID but no bearer token",
request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Content-Length": {"some-length"},
"Other-Header": {"test-header-value-1"},
}, &user.DefaultInfo{
UID: "-", // anything non-empty, rest of the fields get ignored in this code path
},
&auditinternal.Event{
User: authenticationv1.UserInfo{
Username: testUser,
UID: "fancy-uid",
Groups: testGroups,
Extra: map[string]authenticationv1.ExtraValue{
"extra-1": {"some", "extra", "stuff"},
"extra-2": {"some", "more", "extra", "stuff"},
},
},
ImpersonatedUser: nil,
},
"",
),
authenticator: nil,
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with UID and bearer token and nested impersonation",
request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Content-Length": {"some-length"},
"Other-Header": {"test-header-value-1"},
}, &user.DefaultInfo{
UID: "-", // anything non-empty, rest of the fields get ignored in this code path
},
&auditinternal.Event{
User: authenticationv1.UserInfo{
Username: "dude",
UID: "--1--",
Groups: []string{"--a--", "--b--"},
Extra: map[string]authenticationv1.ExtraValue{
"--c--": {"--d--"},
"--e--": {"--f--"},
},
},
ImpersonatedUser: &authenticationv1.UserInfo{},
},
"token-from-user-nested",
),
authenticator: nil,
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with UID and bearer token results in error",
request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Content-Length": {"some-length"},
"Other-Header": {"test-header-value-1"},
}, &user.DefaultInfo{
				UID: "-", // anything non-empty; the rest of the fields are ignored in this code path
},
&auditinternal.Event{
User: authenticationv1.UserInfo{
Username: "dude",
UID: "--1--",
Groups: []string{"--a--", "--b--"},
Extra: map[string]authenticationv1.ExtraValue{
"--c--": {"--d--"},
"--e--": {"--f--"},
},
},
ImpersonatedUser: nil,
},
"some-non-empty-token",
),
authenticator: testTokenAuthenticator(t, "", nil, constable.Error("some err")),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with UID and bearer token does not authenticate",
request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Content-Length": {"some-length"},
"Other-Header": {"test-header-value-1"},
}, &user.DefaultInfo{
				UID: "-", // anything non-empty; the rest of the fields are ignored in this code path
},
&auditinternal.Event{
User: authenticationv1.UserInfo{
Username: "dude",
UID: "--1--",
Groups: []string{"--a--", "--b--"},
Extra: map[string]authenticationv1.ExtraValue{
"--c--": {"--d--"},
"--e--": {"--f--"},
},
},
ImpersonatedUser: nil,
},
"this-token-does-not-work",
),
authenticator: testTokenAuthenticator(t, "some-other-token-works", nil, nil),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError,
},
{
name: "authenticated user with UID and bearer token authenticates as different user",
request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Content-Length": {"some-length"},
"Other-Header": {"test-header-value-1"},
}, &user.DefaultInfo{
				UID: "-", // anything non-empty; the rest of the fields are ignored in this code path
},
&auditinternal.Event{
User: authenticationv1.UserInfo{
Username: "dude",
UID: "--1--",
Groups: []string{"--a--", "--b--"},
Extra: map[string]authenticationv1.ExtraValue{
"--c--": {"--d--"},
"--e--": {"--f--"},
},
},
ImpersonatedUser: nil,
},
"this-token-does-work",
),
authenticator: testTokenAuthenticator(t, "this-token-does-work", &user.DefaultInfo{Name: "someone-else"}, nil),
wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n", wantHTTPBody: `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
wantHTTPStatus: http.StatusInternalServerError, wantHTTPStatus: http.StatusInternalServerError,
}, },
// happy path // happy path
{ {
name: "authenticated user", name: "authenticated user",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -699,7 +845,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
Name: testUser, Name: testUser,
Groups: testGroups, Groups: testGroups,
Extra: testExtra, Extra: testExtra,
}, nil), }, nil, ""),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
"Impersonate-Extra-Extra-1": {"some", "extra", "stuff"}, "Impersonate-Extra-Extra-1": {"some", "extra", "stuff"},
@ -717,9 +863,61 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
wantHTTPBody: "successful proxied response", wantHTTPBody: "successful proxied response",
wantHTTPStatus: http.StatusOK, wantHTTPStatus: http.StatusOK,
}, },
{
name: "authenticated user with UID and bearer token",
request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Content-Length": {"some-length"},
"Other-Header": {"test-header-value-1"},
}, &user.DefaultInfo{
				UID: "-", // anything non-empty; the rest of the fields are ignored in this code path
},
&auditinternal.Event{
User: authenticationv1.UserInfo{
Username: testUser,
UID: "fancy-uid",
Groups: testGroups,
Extra: map[string]authenticationv1.ExtraValue{
"extra-1": {"some", "extra", "stuff"},
"extra-2": {"some", "more", "extra", "stuff"},
},
},
ImpersonatedUser: nil,
},
"token-from-user",
),
authenticator: testTokenAuthenticator(
t,
"token-from-user",
&user.DefaultInfo{
Name: testUser,
UID: "fancy-uid",
Groups: testGroups,
Extra: testExtra,
},
nil,
),
wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer token-from-user"},
"User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"},
"Connection": {"Upgrade"},
"Upgrade": {"some-upgrade"},
"Content-Type": {"some-type"},
"Other-Header": {"test-header-value-1"},
},
wantHTTPBody: "successful proxied response",
wantHTTPStatus: http.StatusOK,
},
{ {
name: "authenticated gke user", name: "authenticated gke user",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -736,7 +934,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
"iam.gke.io/user-assertion": {"ABC"}, "iam.gke.io/user-assertion": {"ABC"},
"user-assertion.cloud.google.com": {"XYZ"}, "user-assertion.cloud.google.com": {"XYZ"},
}, },
}, nil), }, nil, ""),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
"Impersonate-Extra-Iam.gke.io%2fuser-Assertion": {"ABC"}, "Impersonate-Extra-Iam.gke.io%2fuser-Assertion": {"ABC"},
@ -756,7 +954,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "authenticated openshift/openstack user", name: "authenticated openshift/openstack user",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -781,7 +979,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
"alpha.kubernetes.io/identity/user/domain/id": {"domain-id"}, "alpha.kubernetes.io/identity/user/domain/id": {"domain-id"},
"alpha.kubernetes.io/identity/user/domain/name": {"domain-name"}, "alpha.kubernetes.io/identity/user/domain/name": {"domain-name"},
}, },
}, nil), }, nil, ""),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
"Impersonate-Extra-Scopes.authorization.openshift.io": {"user:info", "user:full"}, "Impersonate-Extra-Scopes.authorization.openshift.io": {"user:info", "user:full"},
@ -805,7 +1003,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "authenticated user with almost reserved key", name: "authenticated user with almost reserved key",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -820,7 +1018,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
Extra: map[string][]string{ Extra: map[string][]string{
"foo.iimpersonation-proxy.concierge.pinniped.dev": {"still-valid-value"}, "foo.iimpersonation-proxy.concierge.pinniped.dev": {"still-valid-value"},
}, },
}, nil), }, nil, ""),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
"Impersonate-Extra-Foo.iimpersonation-Proxy.concierge.pinniped.dev": {"still-valid-value"}, "Impersonate-Extra-Foo.iimpersonation-Proxy.concierge.pinniped.dev": {"still-valid-value"},
@ -839,7 +1037,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "authenticated user with almost reserved key and nested impersonation", name: "authenticated user with almost reserved key and nested impersonation",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -866,6 +1064,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
ImpersonatedUser: &authenticationv1.UserInfo{}, ImpersonatedUser: &authenticationv1.UserInfo{},
}, },
"",
), ),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
@ -886,7 +1085,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "authenticated user with nested impersonation", name: "authenticated user with nested impersonation",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -912,6 +1111,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
ImpersonatedUser: &authenticationv1.UserInfo{}, ImpersonatedUser: &authenticationv1.UserInfo{},
}, },
"",
), ),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
@ -933,7 +1133,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "authenticated gke user with nested impersonation", name: "authenticated gke user with nested impersonation",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -959,6 +1159,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
ImpersonatedUser: &authenticationv1.UserInfo{}, ImpersonatedUser: &authenticationv1.UserInfo{},
}, },
"",
), ),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
@ -980,7 +1181,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "authenticated user with nested impersonation of gke user", name: "authenticated user with nested impersonation of gke user",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
"Accept": {"some-accepted-format"}, "Accept": {"some-accepted-format"},
"Accept-Encoding": {"some-accepted-encoding"}, "Accept-Encoding": {"some-accepted-encoding"},
@ -1010,6 +1211,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
ImpersonatedUser: &authenticationv1.UserInfo{}, ImpersonatedUser: &authenticationv1.UserInfo{},
}, },
"",
), ),
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Authorization": {"Bearer some-service-account-token"}, "Authorization": {"Bearer some-service-account-token"},
@ -1031,13 +1233,13 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
}, },
{ {
name: "user is authenticated but the kube API request returns an error", name: "user is authenticated but the kube API request returns an error",
request: newRequest(map[string][]string{ request: newRequest(t, map[string][]string{
"User-Agent": {"test-user-agent"}, "User-Agent": {"test-user-agent"},
}, &user.DefaultInfo{ }, &user.DefaultInfo{
Name: testUser, Name: testUser,
Groups: testGroups, Groups: testGroups,
Extra: testExtra, Extra: testExtra,
}, nil), }, nil, ""),
kubeAPIServerStatusCode: http.StatusNotFound, kubeAPIServerStatusCode: http.StatusNotFound,
wantKubeAPIServerRequestHeaders: map[string][]string{ wantKubeAPIServerRequestHeaders: map[string][]string{
"Accept-Encoding": {"gzip"}, // because the rest client used in this test does not disable compression "Accept-Encoding": {"gzip"}, // because the rest client used in this test does not disable compression
@ -1095,6 +1297,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
metav1.AddToGroupVersion(scheme, metav1.Unversioned) metav1.AddToGroupVersion(scheme, metav1.Unversioned)
codecs := serializer.NewCodecFactory(scheme) codecs := serializer.NewCodecFactory(scheme)
serverConfig := genericapiserver.NewRecommendedConfig(codecs) serverConfig := genericapiserver.NewRecommendedConfig(codecs)
serverConfig.Authentication.Authenticator = tt.authenticator
w := httptest.NewRecorder() w := httptest.NewRecorder()
@ -1137,6 +1340,83 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
} }
} }
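// newRequest builds a test request carrying the given headers plus a context that holds the
// supplied user info (when non-nil), an audit event (LevelMetadata by default), request info,
// a "must-be-ignored" audience, an optional bearer token, and a one-hour deadline.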
func newRequest(t *testing.T, h http.Header, userInfo user.Info, event *auditinternal.Event, token string) *http.Request {
t.Helper()
validURL, err := url.Parse("http://pinniped.dev/blah")
require.NoError(t, err)
ctx := context.Background()
if userInfo != nil {
ctx = request.WithUser(ctx, userInfo)
}
ae := &auditinternal.Event{Level: auditinternal.LevelMetadata}
if event != nil {
ae = event
}
ctx = request.WithAuditEvent(ctx, ae)
reqInfo := &request.RequestInfo{
IsResourceRequest: false,
Path: validURL.Path,
Verb: "get",
}
ctx = request.WithRequestInfo(ctx, reqInfo)
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"must-be-ignored"})
if len(token) != 0 {
ctx = context.WithValue(ctx, tokenKey, token)
}
var cancel context.CancelFunc
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour))
t.Cleanup(cancel)
r, err := http.NewRequestWithContext(ctx, http.MethodGet, validURL.String(), nil)
require.NoError(t, err)
r.Header = h
return r
}
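// testTokenAuthenticator returns a fake authenticator.Request that succeeds only when the request
// carries the given bearer token, and reports a test error if audiences or a context token reach it
// or if the request has no deadline.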
func testTokenAuthenticator(t *testing.T, token string, userInfo user.Info, err error) authenticator.Request {
t.Helper()
return authenticator.RequestFunc(func(r *http.Request) (*authenticator.Response, bool, error) {
if auds, ok := authenticator.AudiencesFrom(r.Context()); ok || len(auds) != 0 {
t.Errorf("unexpected audiences on request: %v", auds)
}
if ctxToken := tokenFrom(r.Context()); len(ctxToken) != 0 {
t.Errorf("unexpected token on request: %v", ctxToken)
}
if _, ok := r.Context().Deadline(); !ok {
t.Error("request should always have deadline")
}
if err != nil {
return nil, false, err
}
var reqToken string
_, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) {
reqToken = token
return nil, false, nil
})).AuthenticateRequest(r)
if reqToken != token {
return nil, false, nil
}
return &authenticator.Response{User: userInfo}, true, nil
})
}
type clientCert struct { type clientCert struct {
certPEM, keyPEM []byte certPEM, keyPEM []byte
} }
@ -1242,7 +1522,9 @@ func Test_deleteKnownImpersonationHeaders(t *testing.T) {
inputReq := (&http.Request{Header: tt.headers}).WithContext(context.Background()) inputReq := (&http.Request{Header: tt.headers}).WithContext(context.Background())
inputReqCopy := inputReq.Clone(inputReq.Context()) inputReqCopy := inputReq.Clone(inputReq.Context())
var called bool
delegate := http.HandlerFunc(func(w http.ResponseWriter, outputReq *http.Request) { delegate := http.HandlerFunc(func(w http.ResponseWriter, outputReq *http.Request) {
called = true
require.Nil(t, w) require.Nil(t, w)
// assert only headers mutated // assert only headers mutated
@ -1259,6 +1541,85 @@ func Test_deleteKnownImpersonationHeaders(t *testing.T) {
deleteKnownImpersonationHeaders(delegate).ServeHTTP(nil, inputReq) deleteKnownImpersonationHeaders(delegate).ServeHTTP(nil, inputReq)
require.Equal(t, inputReqCopy, inputReq) // assert no mutation occurred require.Equal(t, inputReqCopy, inputReq) // assert no mutation occurred
require.True(t, called)
})
}
}
func Test_withBearerTokenPreservation(t *testing.T) {
tests := []struct {
name string
headers http.Header
want string
}{
{
name: "has bearer token",
headers: map[string][]string{
"Authorization": {"Bearer thingy"},
},
want: "thingy",
},
{
name: "has bearer token but too many preceding spaces",
headers: map[string][]string{
"Authorization": {"Bearer 1"},
},
want: "",
},
{
name: "has bearer token with space, only keeps first part",
headers: map[string][]string{
"Authorization": {"Bearer panda man"},
},
want: "panda",
},
{
name: "has bearer token with surrounding whitespace",
headers: map[string][]string{
"Authorization": {" Bearer cool beans "},
},
want: "cool",
},
{
name: "has multiple bearer tokens",
headers: map[string][]string{
"Authorization": {"Bearer this thing", "what does this mean?"},
},
want: "this",
},
{
name: "no bearer token",
headers: map[string][]string{
"Not-Authorization": {"Bearer not a token"},
},
want: "",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
inputReq := (&http.Request{Header: tt.headers}).WithContext(context.Background())
inputReqCopy := inputReq.Clone(inputReq.Context())
var called bool
delegate := http.HandlerFunc(func(w http.ResponseWriter, outputReq *http.Request) {
called = true
require.Nil(t, w)
// assert only context is mutated
outputReqCopy := outputReq.Clone(inputReq.Context())
require.Equal(t, inputReqCopy, outputReqCopy)
require.Equal(t, tt.want, tokenFrom(outputReq.Context()))
if len(tt.want) == 0 {
				require.True(t, inputReq == outputReq, "expect req to be passed through when no token expected")
}
})
withBearerTokenPreservation(delegate).ServeHTTP(nil, inputReq)
require.Equal(t, inputReqCopy, inputReq) // assert no mutation occurred
require.True(t, called)
}) })
} }
} }
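The withBearerTokenPreservation implementation itself is not part of this diff, but the table above pins down the expected extraction behavior. The following standalone sketch is consistent with those cases; the bearerTokenFrom name and the exact parsing are assumptions for illustration, not the shipped code.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// bearerTokenFrom returns the first space-delimited word after "Bearer" in the
// Authorization header, or "" when no such token can be found. Extra spaces
// before the token (e.g. "Bearer  1") leave parts[1] empty, matching the
// "too many preceding spaces" case above.
func bearerTokenFrom(h http.Header) string {
	parts := strings.Split(strings.TrimSpace(h.Get("Authorization")), " ")
	if len(parts) < 2 || parts[0] != "Bearer" {
		return ""
	}
	return parts[1]
}

func main() {
	h := http.Header{"Authorization": {" Bearer cool beans "}}
	fmt.Println(bearerTokenFrom(h)) // prints "cool", like the test case above
}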

View File

@ -122,6 +122,9 @@ func validateNames(names *NamesConfigSpec) error {
if names.ImpersonationSignerSecret == "" { if names.ImpersonationSignerSecret == "" {
missingNames = append(missingNames, "impersonationSignerSecret") missingNames = append(missingNames, "impersonationSignerSecret")
} }
if names.AgentServiceAccount == "" {
missingNames = append(missingNames, "agentServiceAccount")
}
if len(missingNames) > 0 { if len(missingNames) > 0 {
return constable.Error("missing required names: " + strings.Join(missingNames, ", ")) return constable.Error("missing required names: " + strings.Join(missingNames, ", "))
} }

View File

@ -43,6 +43,7 @@ func TestFromPath(t *testing.T) {
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
labels: labels:
myLabelKey1: myLabelValue1 myLabelKey1: myLabelValue1
myLabelKey2: myLabelValue2 myLabelKey2: myLabelValue2
@ -72,6 +73,7 @@ func TestFromPath(t *testing.T) {
ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value", ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value",
ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value", ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value",
ImpersonationSignerSecret: "impersonationSignerSecret-value", ImpersonationSignerSecret: "impersonationSignerSecret-value",
AgentServiceAccount: "agentServiceAccount-value",
}, },
Labels: map[string]string{ Labels: map[string]string{
"myLabelKey1": "myLabelValue1", "myLabelKey1": "myLabelValue1",
@ -98,6 +100,7 @@ func TestFromPath(t *testing.T) {
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantConfig: &Config{ wantConfig: &Config{
DiscoveryInfo: DiscoveryInfoSpec{ DiscoveryInfo: DiscoveryInfoSpec{
@ -119,6 +122,7 @@ func TestFromPath(t *testing.T) {
ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value", ImpersonationTLSCertificateSecret: "impersonationTLSCertificateSecret-value",
ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value", ImpersonationCACertificateSecret: "impersonationCACertificateSecret-value",
ImpersonationSignerSecret: "impersonationSignerSecret-value", ImpersonationSignerSecret: "impersonationSignerSecret-value",
AgentServiceAccount: "agentServiceAccount-value",
}, },
Labels: map[string]string{}, Labels: map[string]string{},
KubeCertAgentConfig: KubeCertAgentSpec{ KubeCertAgentConfig: KubeCertAgentSpec{
@ -133,7 +137,7 @@ func TestFromPath(t *testing.T) {
wantError: "validate names: missing required names: servingCertificateSecret, credentialIssuer, " + wantError: "validate names: missing required names: servingCertificateSecret, credentialIssuer, " +
"apiService, impersonationConfigMap, impersonationLoadBalancerService, " + "apiService, impersonationConfigMap, impersonationLoadBalancerService, " +
"impersonationTLSCertificateSecret, impersonationCACertificateSecret, " + "impersonationTLSCertificateSecret, impersonationCACertificateSecret, " +
"impersonationSignerSecret", "impersonationSignerSecret, agentServiceAccount",
}, },
{ {
name: "Missing apiService name", name: "Missing apiService name",
@ -147,6 +151,7 @@ func TestFromPath(t *testing.T) {
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: apiService", wantError: "validate names: missing required names: apiService",
}, },
@ -162,6 +167,7 @@ func TestFromPath(t *testing.T) {
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: credentialIssuer", wantError: "validate names: missing required names: credentialIssuer",
}, },
@ -177,6 +183,7 @@ func TestFromPath(t *testing.T) {
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: servingCertificateSecret", wantError: "validate names: missing required names: servingCertificateSecret",
}, },
@ -192,6 +199,7 @@ func TestFromPath(t *testing.T) {
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: impersonationConfigMap", wantError: "validate names: missing required names: impersonationConfigMap",
}, },
@ -207,6 +215,7 @@ func TestFromPath(t *testing.T) {
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: impersonationLoadBalancerService", wantError: "validate names: missing required names: impersonationLoadBalancerService",
}, },
@ -222,6 +231,7 @@ func TestFromPath(t *testing.T) {
impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationLoadBalancerService: impersonationLoadBalancerService-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: impersonationTLSCertificateSecret", wantError: "validate names: missing required names: impersonationTLSCertificateSecret",
}, },
@ -237,6 +247,7 @@ func TestFromPath(t *testing.T) {
impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationLoadBalancerService: impersonationLoadBalancerService-value
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: impersonationCACertificateSecret", wantError: "validate names: missing required names: impersonationCACertificateSecret",
}, },
@ -252,6 +263,7 @@ func TestFromPath(t *testing.T) {
impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationLoadBalancerService: impersonationLoadBalancerService-value
impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value impersonationTLSCertificateSecret: impersonationTLSCertificateSecret-value
impersonationCACertificateSecret: impersonationCACertificateSecret-value impersonationCACertificateSecret: impersonationCACertificateSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: impersonationSignerSecret", wantError: "validate names: missing required names: impersonationSignerSecret",
}, },
@ -265,6 +277,7 @@ func TestFromPath(t *testing.T) {
apiService: pinniped-api apiService: pinniped-api
impersonationLoadBalancerService: impersonationLoadBalancerService-value impersonationLoadBalancerService: impersonationLoadBalancerService-value
impersonationSignerSecret: impersonationSignerSecret-value impersonationSignerSecret: impersonationSignerSecret-value
agentServiceAccount: agentServiceAccount-value
`), `),
wantError: "validate names: missing required names: impersonationConfigMap, " + wantError: "validate names: missing required names: impersonationConfigMap, " +
"impersonationTLSCertificateSecret, impersonationCACertificateSecret", "impersonationTLSCertificateSecret, impersonationCACertificateSecret",

View File

@ -41,6 +41,7 @@ type NamesConfigSpec struct {
ImpersonationTLSCertificateSecret string `json:"impersonationTLSCertificateSecret"` ImpersonationTLSCertificateSecret string `json:"impersonationTLSCertificateSecret"`
ImpersonationCACertificateSecret string `json:"impersonationCACertificateSecret"` ImpersonationCACertificateSecret string `json:"impersonationCACertificateSecret"`
ImpersonationSignerSecret string `json:"impersonationSignerSecret"` ImpersonationSignerSecret string `json:"impersonationSignerSecret"`
AgentServiceAccount string `json:"agentServiceAccount"`
} }
// ServingCertificateConfigSpec contains the configuration knobs for the API's // ServingCertificateConfigSpec contains the configuration knobs for the API's

View File

@ -5,7 +5,9 @@
package authenticator package authenticator
import ( import (
"crypto/x509"
"encoding/base64" "encoding/base64"
"fmt"
auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1" auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
) )
@ -22,8 +24,18 @@ type Closer interface {
// nil CA bundle will be returned. If the provided spec contains a CA bundle that is not properly // nil CA bundle will be returned. If the provided spec contains a CA bundle that is not properly
// encoded, an error will be returned. // encoded, an error will be returned.
func CABundle(spec *auth1alpha1.TLSSpec) ([]byte, error) { func CABundle(spec *auth1alpha1.TLSSpec) ([]byte, error) {
if spec == nil { if spec == nil || len(spec.CertificateAuthorityData) == 0 {
return nil, nil return nil, nil
} }
return base64.StdEncoding.DecodeString(spec.CertificateAuthorityData)
pem, err := base64.StdEncoding.DecodeString(spec.CertificateAuthorityData)
if err != nil {
return nil, err
}
if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok {
return nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
}
return pem, nil
} }
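The change above tightens CABundle to reject payloads that decode cleanly as base64 but are not PEM certificates. A minimal stdlib-only sketch of the same check, assuming an illustrative helper name rather than the Pinniped API:

package main

import (
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

// decodeAndValidateCA mirrors the new CABundle behavior: base64-decode the
// configured bundle and reject it unless it parses as at least one PEM cert.
func decodeAndValidateCA(certificateAuthorityData string) ([]byte, error) {
	if len(certificateAuthorityData) == 0 {
		return nil, nil // no CA bundle configured
	}
	pem, err := base64.StdEncoding.DecodeString(certificateAuthorityData)
	if err != nil {
		return nil, err
	}
	if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok {
		return nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
	}
	return pem, nil
}

func main() {
	// "bad data" decodes fine as base64 but is not PEM, so validation fails.
	_, err := decodeAndValidateCA(base64.StdEncoding.EncodeToString([]byte("bad data")))
	fmt.Println(err) // certificateAuthorityData is not valid PEM
}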

View File

@ -16,6 +16,7 @@ import (
loginapi "go.pinniped.dev/generated/latest/apis/concierge/login" loginapi "go.pinniped.dev/generated/latest/apis/concierge/login"
"go.pinniped.dev/internal/constable" "go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/plog" "go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/valuelesscontext"
) )
// ErrNoSuchAuthenticator is returned by Cache.AuthenticateTokenCredentialRequest() when the requested authenticator is not configured. // ErrNoSuchAuthenticator is returned by Cache.AuthenticateTokenCredentialRequest() when the requested authenticator is not configured.
@ -101,7 +102,7 @@ func (c *Cache) AuthenticateTokenCredentialRequest(ctx context.Context, req *log
// The incoming context could have an audience. Since we do not want to handle audiences right now, do not pass it // The incoming context could have an audience. Since we do not want to handle audiences right now, do not pass it
// through directly to the authentication webhook. // through directly to the authentication webhook.
ctx = valuelessContext{ctx} ctx = valuelesscontext.New(ctx)
// Call the selected authenticator. // Call the selected authenticator.
resp, authenticated, err := val.AuthenticateToken(ctx, req.Spec.Token) resp, authenticated, err := val.AuthenticateToken(ctx, req.Spec.Token)
@ -119,7 +120,3 @@ func (c *Cache) AuthenticateTokenCredentialRequest(ctx context.Context, req *log
} }
return respUser, nil return respUser, nil
} }
type valuelessContext struct{ context.Context }
func (valuelessContext) Value(interface{}) interface{} { return nil }

View File

@ -135,6 +135,15 @@ func TestNewWebhookAuthenticator(t *testing.T) {
require.EqualError(t, err, "invalid TLS configuration: illegal base64 data at input byte 7") require.EqualError(t, err, "invalid TLS configuration: illegal base64 data at input byte 7")
}) })
t.Run("invalid pem data", func(t *testing.T) {
res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{
Endpoint: "https://example.com",
TLS: &auth1alpha1.TLSSpec{CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte("bad data"))},
}, ioutil.TempFile, clientcmd.WriteToFile)
require.Nil(t, res)
require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM")
})
t.Run("valid config with no TLS spec", func(t *testing.T) { t.Run("valid config with no TLS spec", func(t *testing.T) {
res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{ res, err := newWebhookAuthenticator(&auth1alpha1.WebhookAuthenticatorSpec{
Endpoint: "https://example.com", Endpoint: "https://example.com",

View File

@ -64,6 +64,9 @@ type AgentConfig struct {
// NamePrefix will be prefixed to all agent pod names. // NamePrefix will be prefixed to all agent pod names.
NamePrefix string NamePrefix string
// ServiceAccountName is the service account under which to run the agent pods.
ServiceAccountName string
// ContainerImagePullSecrets is a list of names of Kubernetes Secret objects that will be used as // ContainerImagePullSecrets is a list of names of Kubernetes Secret objects that will be used as
// ImagePullSecrets on the kube-cert-agent pods. // ImagePullSecrets on the kube-cert-agent pods.
ContainerImagePullSecrets []string ContainerImagePullSecrets []string
@ -472,6 +475,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
RestartPolicy: corev1.RestartPolicyAlways, RestartPolicy: corev1.RestartPolicyAlways,
NodeSelector: controllerManagerPod.Spec.NodeSelector, NodeSelector: controllerManagerPod.Spec.NodeSelector,
AutomountServiceAccountToken: pointer.BoolPtr(false), AutomountServiceAccountToken: pointer.BoolPtr(false),
ServiceAccountName: c.cfg.ServiceAccountName,
NodeName: controllerManagerPod.Spec.NodeName, NodeName: controllerManagerPod.Spec.NodeName,
Tolerations: controllerManagerPod.Spec.Tolerations, Tolerations: controllerManagerPod.Spec.Tolerations,
// We need to run the agent pod as root since the file permissions // We need to run the agent pod as root since the file permissions

View File

@ -123,6 +123,7 @@ func TestAgentController(t *testing.T) {
}}, }},
RestartPolicy: corev1.RestartPolicyAlways, RestartPolicy: corev1.RestartPolicyAlways,
TerminationGracePeriodSeconds: pointer.Int64Ptr(0), TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
ServiceAccountName: "test-service-account-name",
AutomountServiceAccountToken: pointer.BoolPtr(false), AutomountServiceAccountToken: pointer.BoolPtr(false),
SecurityContext: &corev1.PodSecurityContext{ SecurityContext: &corev1.PodSecurityContext{
RunAsUser: pointer.Int64Ptr(0), RunAsUser: pointer.Int64Ptr(0),
@ -672,6 +673,7 @@ func TestAgentController(t *testing.T) {
AgentConfig{ AgentConfig{
Namespace: "concierge", Namespace: "concierge",
ContainerImage: "pinniped-server-image", ContainerImage: "pinniped-server-image",
ServiceAccountName: "test-service-account-name",
NamePrefix: "pinniped-concierge-kube-cert-agent-", NamePrefix: "pinniped-concierge-kube-cert-agent-",
ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"}, ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"},
CredentialIssuerName: "pinniped-concierge-config", CredentialIssuerName: "pinniped-concierge-config",

View File

@ -13,6 +13,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"sort" "sort"
"strings"
"time" "time"
"github.com/coreos/go-oidc/v3/oidc" "github.com/coreos/go-oidc/v3/oidc"
@ -269,11 +270,17 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1
discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer) discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer)
if err != nil { if err != nil {
const klogLevelTrace = 6
c.log.V(klogLevelTrace).WithValues(
"namespace", upstream.Namespace,
"name", upstream.Name,
"issuer", upstream.Spec.Issuer,
).Error(err, "failed to perform OIDC discovery")
return &v1alpha1.Condition{ return &v1alpha1.Condition{
Type: typeOIDCDiscoverySucceeded, Type: typeOIDCDiscoverySucceeded,
Status: v1alpha1.ConditionFalse, Status: v1alpha1.ConditionFalse,
Reason: reasonUnreachable, Reason: reasonUnreachable,
Message: fmt.Sprintf("failed to perform OIDC discovery against %q", upstream.Spec.Issuer), Message: fmt.Sprintf("failed to perform OIDC discovery against %q:\n%s", upstream.Spec.Issuer, truncateNonOIDCErr(err)),
} }
} }
@ -428,3 +435,14 @@ func (*oidcWatcherController) computeScopes(additionalScopes []string) []string
sort.Strings(scopes) sort.Strings(scopes)
return scopes return scopes
} }
func truncateNonOIDCErr(err error) string {
const max = 100
msg := err.Error()
if len(msg) <= max || strings.HasPrefix(msg, "oidc:") {
return msg
}
return msg[:max] + fmt.Sprintf(" [truncated %d chars]", len(msg)-max)
}
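For reference, a standalone sketch of the truncation rule added above: errors prefixed with "oidc:" (such as the issuer-mismatch errors seen in the tests below) pass through untouched, while arbitrary transport errors are capped at 100 characters. Names here are illustrative.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// truncate mirrors truncateNonOIDCErr: keep short or "oidc:"-prefixed messages
// as-is, otherwise cut to 100 characters and note how much was dropped.
func truncate(err error) string {
	const max = 100
	msg := err.Error()
	if len(msg) <= max || strings.HasPrefix(msg, "oidc:") {
		return msg
	}
	return msg[:max] + fmt.Sprintf(" [truncated %d chars]", len(msg)-max)
}

func main() {
	long := errors.New(strings.Repeat("x", 109))
	fmt.Println(truncate(long)) // 100 x's followed by " [truncated 9 chars]"
}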

View File

@ -370,7 +370,7 @@ func TestOIDCUpstreamWatcherControllerSync(t *testing.T) {
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{ inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName}, ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
Spec: v1alpha1.OIDCIdentityProviderSpec{ Spec: v1alpha1.OIDCIdentityProviderSpec{
Issuer: "invalid-url", Issuer: "invalid-url-that-is-really-really-long",
Client: v1alpha1.OIDCClient{SecretName: testSecretName}, Client: v1alpha1.OIDCClient{SecretName: testSecretName},
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes}, AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
}, },
@ -382,9 +382,10 @@ func TestOIDCUpstreamWatcherControllerSync(t *testing.T) {
}}, }},
wantErr: controllerlib.ErrSyntheticRequeue.Error(), wantErr: controllerlib.ErrSyntheticRequeue.Error(),
wantLogs: []string{ wantLogs: []string{
`oidc-upstream-observer "msg"="failed to perform OIDC discovery" "error"="Get \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol scheme \"\"" "issuer"="invalid-url-that-is-really-really-long" "name"="test-name" "namespace"="test-namespace"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`, `oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"invalid-url\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`, `oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"invalid-url-that-is-really-really-long\":\nGet \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol [truncated 9 chars]" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
`oidc-upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"invalid-url\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`, `oidc-upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"invalid-url-that-is-really-really-long\":\nGet \"invalid-url-that-is-really-really-long/.well-known/openid-configuration\": unsupported protocol [truncated 9 chars]" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
}, },
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{}, wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{},
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{ wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
@ -404,7 +405,8 @@ func TestOIDCUpstreamWatcherControllerSync(t *testing.T) {
Status: "False", Status: "False",
LastTransitionTime: now, LastTransitionTime: now,
Reason: "Unreachable", Reason: "Unreachable",
Message: `failed to perform OIDC discovery against "invalid-url"`, Message: `failed to perform OIDC discovery against "invalid-url-that-is-really-really-long":
Get "invalid-url-that-is-really-really-long/.well-known/openid-configuration": unsupported protocol [truncated 9 chars]`,
}, },
}, },
}, },
@ -600,6 +602,151 @@ func TestOIDCUpstreamWatcherControllerSync(t *testing.T) {
}, },
}}, }},
}, },
{
name: "existing valid upstream with trailing slash",
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234},
Spec: v1alpha1.OIDCIdentityProviderSpec{
Issuer: testIssuerURL + "/ends-with-slash/",
TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64},
Client: v1alpha1.OIDCClient{SecretName: testSecretName},
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
Claims: v1alpha1.OIDCClaims{Groups: testGroupsClaim, Username: testUsernameClaim},
},
Status: v1alpha1.OIDCIdentityProviderStatus{
Phase: "Ready",
Conditions: []v1alpha1.Condition{
{Type: "ClientCredentialsValid", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "loaded client credentials"},
{Type: "OIDCDiscoverySucceeded", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "discovered issuer configuration"},
},
},
}},
inputSecrets: []runtime.Object{&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName},
Type: "secrets.pinniped.dev/oidc-client",
Data: testValidSecretData,
}},
wantLogs: []string{
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="discovered issuer configuration" "reason"="Success" "status"="True" "type"="OIDCDiscoverySucceeded"`,
},
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{
&oidctestutil.TestUpstreamOIDCIdentityProvider{
Name: testName,
ClientID: testClientID,
AuthorizationURL: *testIssuerAuthorizeURL,
Scopes: testExpectedScopes,
UsernameClaim: testUsernameClaim,
GroupsClaim: testGroupsClaim,
},
},
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName, Generation: 1234},
Status: v1alpha1.OIDCIdentityProviderStatus{
Phase: "Ready",
Conditions: []v1alpha1.Condition{
{Type: "ClientCredentialsValid", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "loaded client credentials", ObservedGeneration: 1234},
{Type: "OIDCDiscoverySucceeded", Status: "True", LastTransitionTime: earlier, Reason: "Success", Message: "discovered issuer configuration", ObservedGeneration: 1234},
},
},
}},
},
{
name: "issuer is invalid URL, missing trailing slash",
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
Spec: v1alpha1.OIDCIdentityProviderSpec{
Issuer: testIssuerURL + "/ends-with-slash",
TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64},
Client: v1alpha1.OIDCClient{SecretName: testSecretName},
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
},
}},
inputSecrets: []runtime.Object{&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName},
Type: "secrets.pinniped.dev/oidc-client",
Data: testValidSecretData,
}},
wantErr: controllerlib.ErrSyntheticRequeue.Error(),
wantLogs: []string{
`oidc-upstream-observer "msg"="failed to perform OIDC discovery" "error"="oidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "issuer"="` + testIssuerURL + `/ends-with-slash" "name"="test-name" "namespace"="test-namespace"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/ends-with-slash\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
`oidc-upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/ends-with-slash\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/ends-with-slash\" got \"` + testIssuerURL + `/ends-with-slash/\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
},
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{},
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
Status: v1alpha1.OIDCIdentityProviderStatus{
Phase: "Error",
Conditions: []v1alpha1.Condition{
{
Type: "ClientCredentialsValid",
Status: "True",
LastTransitionTime: now,
Reason: "Success",
Message: "loaded client credentials",
},
{
Type: "OIDCDiscoverySucceeded",
Status: "False",
LastTransitionTime: now,
Reason: "Unreachable",
Message: `failed to perform OIDC discovery against "` + testIssuerURL + `/ends-with-slash":
oidc: issuer did not match the issuer returned by provider, expected "` + testIssuerURL + `/ends-with-slash" got "` + testIssuerURL + `/ends-with-slash/"`,
},
},
},
}},
},
{
name: "issuer is invalid URL, extra trailing slash",
inputUpstreams: []runtime.Object{&v1alpha1.OIDCIdentityProvider{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
Spec: v1alpha1.OIDCIdentityProviderSpec{
Issuer: testIssuerURL + "/",
TLS: &v1alpha1.TLSSpec{CertificateAuthorityData: testIssuerCABase64},
Client: v1alpha1.OIDCClient{SecretName: testSecretName},
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{AdditionalScopes: testAdditionalScopes},
},
}},
inputSecrets: []runtime.Object{&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testSecretName},
Type: "secrets.pinniped.dev/oidc-client",
Data: testValidSecretData,
}},
wantErr: controllerlib.ErrSyntheticRequeue.Error(),
wantLogs: []string{
`oidc-upstream-observer "msg"="failed to perform OIDC discovery" "error"="oidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "issuer"="` + testIssuerURL + `/" "name"="test-name" "namespace"="test-namespace"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="loaded client credentials" "reason"="Success" "status"="True" "type"="ClientCredentialsValid"`,
`oidc-upstream-observer "level"=0 "msg"="updated condition" "name"="test-name" "namespace"="test-namespace" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "reason"="Unreachable" "status"="False" "type"="OIDCDiscoverySucceeded"`,
`oidc-upstream-observer "msg"="found failing condition" "error"="OIDCIdentityProvider has a failing condition" "message"="failed to perform OIDC discovery against \"` + testIssuerURL + `/\":\noidc: issuer did not match the issuer returned by provider, expected \"` + testIssuerURL + `/\" got \"` + testIssuerURL + `\"" "name"="test-name" "namespace"="test-namespace" "reason"="Unreachable" "type"="OIDCDiscoverySucceeded"`,
},
wantResultingCache: []provider.UpstreamOIDCIdentityProviderI{},
wantResultingUpstreams: []v1alpha1.OIDCIdentityProvider{{
ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testName},
Status: v1alpha1.OIDCIdentityProviderStatus{
Phase: "Error",
Conditions: []v1alpha1.Condition{
{
Type: "ClientCredentialsValid",
Status: "True",
LastTransitionTime: now,
Reason: "Success",
Message: "loaded client credentials",
},
{
Type: "OIDCDiscoverySucceeded",
Status: "False",
LastTransitionTime: now,
Reason: "Unreachable",
Message: `failed to perform OIDC discovery against "` + testIssuerURL + `/":
oidc: issuer did not match the issuer returned by provider, expected "` + testIssuerURL + `/" got "` + testIssuerURL + `"`,
},
},
},
}},
},
} }
for _, tt := range tests { for _, tt := range tests {
tt := tt tt := tt
@ -728,5 +875,25 @@ func newTestIssuer(t *testing.T) (string, string) {
}) })
}) })
	// handle the four issuer trailing-slash configurations
// valid case in= out=
// handled above at the root of testURL
// valid case in=/ out=/
mux.HandleFunc("/ends-with-slash/.well-known/openid-configuration", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/json")
_ = json.NewEncoder(w).Encode(&providerJSON{
Issuer: testURL + "/ends-with-slash/",
AuthURL: "https://example.com/authorize",
})
})
// invalid case in= out=/
// can be tested using /ends-with-slash/ endpoint
// invalid case in=/ out=
// can be tested using root endpoint
return caBundlePEM, testURL return caBundlePEM, testURL
} }

View File

@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved. // Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package supervisorstorage package supervisorstorage
@ -59,7 +59,7 @@ func GarbageCollectorController(
return isSecretWithGCAnnotation(oldObj) || isSecretWithGCAnnotation(newObj) return isSecretWithGCAnnotation(oldObj) || isSecretWithGCAnnotation(newObj)
}, },
DeleteFunc: func(obj metav1.Object) bool { return false }, // ignore all deletes DeleteFunc: func(obj metav1.Object) bool { return false }, // ignore all deletes
ParentFunc: nil, ParentFunc: pinnipedcontroller.SingletonQueue(),
}, },
controllerlib.InformerOption{}, controllerlib.InformerOption{},
), ),
@ -67,16 +67,20 @@ func GarbageCollectorController(
} }
func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error { func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error {
// make sure we have a consistent, static meaning for the current time during the sync loop
frozenClock := clock.NewFakeClock(c.clock.Now())
// The Sync method is triggered upon any change to any Secret, which would make this // The Sync method is triggered upon any change to any Secret, which would make this
// controller too chatty, so it rate limits itself to a more reasonable interval. // controller too chatty, so it rate limits itself to a more reasonable interval.
// Note that even during a period when no secrets are changing, it will still run // Note that even during a period when no secrets are changing, it will still run
// at the informer's full-resync interval (as long as there are some secrets). // at the informer's full-resync interval (as long as there are some secrets).
if c.clock.Now().Sub(c.timeOfMostRecentSweep) < minimumRepeatInterval { if since := frozenClock.Since(c.timeOfMostRecentSweep); since < minimumRepeatInterval {
ctx.Queue.AddAfter(ctx.Key, minimumRepeatInterval-since)
return nil return nil
} }
plog.Info("starting storage garbage collection sweep") plog.Info("starting storage garbage collection sweep")
c.timeOfMostRecentSweep = c.clock.Now() c.timeOfMostRecentSweep = frozenClock.Now()
listOfSecrets, err := c.secretInformer.Lister().List(labels.Everything()) listOfSecrets, err := c.secretInformer.Lister().List(labels.Everything())
if err != nil { if err != nil {
@ -97,7 +101,7 @@ func (c *garbageCollectorController) Sync(ctx controllerlib.Context) error {
continue continue
} }
if garbageCollectAfterTime.Before(c.clock.Now()) { if garbageCollectAfterTime.Before(frozenClock.Now()) {
err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{}) err = c.kubeClient.CoreV1().Secrets(secret.Namespace).Delete(ctx.Context, secret.Name, metav1.DeleteOptions{})
if err != nil { if err != nil {
plog.WarningErr("failed to garbage collect resource", err, logKV(secret)) plog.WarningErr("failed to garbage collect resource", err, logKV(secret))

View File

@ -66,6 +66,10 @@ func TestGarbageCollectorControllerInformerFilters(t *testing.T) {
r.True(subject.Update(secretWithAnnotation, otherSecret)) r.True(subject.Update(secretWithAnnotation, otherSecret))
r.True(subject.Update(otherSecret, secretWithAnnotation)) r.True(subject.Update(otherSecret, secretWithAnnotation))
}) })
it("returns the same singleton key", func() {
r.Equal(controllerlib.Key{}, subject.Parent(secretWithAnnotation))
})
}) })
when("any Secret with the required annotation is deleted", func() { when("any Secret with the required annotation is deleted", func() {
@ -136,9 +140,10 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
Context: cancelContext, Context: cancelContext,
Name: subject.Name(), Name: subject.Name(),
Key: controllerlib.Key{ Key: controllerlib.Key{
Namespace: "", Namespace: "foo",
Name: "", Name: "bar",
}, },
Queue: &testQueue{t: t},
} }
// Must start informers before calling TestRunSynchronously() // Must start informers before calling TestRunSynchronously()
@ -262,16 +267,23 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
// Run sync once with the current time set to frozenTime. // Run sync once with the current time set to frozenTime.
r.NoError(controllerlib.TestSync(t, subject, *syncContext)) r.NoError(controllerlib.TestSync(t, subject, *syncContext))
require.Empty(t, kubeClient.Actions()) require.Empty(t, kubeClient.Actions())
r.False(syncContext.Queue.(*testQueue).called)
// Run sync again when not enough time has passed since the most recent run, so no delete // Run sync again when not enough time has passed since the most recent run, so no delete
// operations should happen even though there is a expired secret now. // operations should happen even though there is a expired secret now.
fakeClock.Step(29 * time.Second) fakeClock.Step(29 * time.Second)
r.NoError(controllerlib.TestSync(t, subject, *syncContext)) r.NoError(controllerlib.TestSync(t, subject, *syncContext))
require.Empty(t, kubeClient.Actions()) require.Empty(t, kubeClient.Actions())
r.True(syncContext.Queue.(*testQueue).called)
r.Equal(controllerlib.Key{Namespace: "foo", Name: "bar"}, syncContext.Queue.(*testQueue).key) // assert key is passed through
r.Equal(time.Second, syncContext.Queue.(*testQueue).duration) // assert that we get the exact requeue time
syncContext.Queue = &testQueue{t: t} // reset the queue for the next sync
// Step to the exact threshold and run Sync again. Now we are past the rate limiting period. // Step to the exact threshold and run Sync again. Now we are past the rate limiting period.
fakeClock.Step(1*time.Second + 1*time.Millisecond) fakeClock.Step(time.Second)
r.NoError(controllerlib.TestSync(t, subject, *syncContext)) r.NoError(controllerlib.TestSync(t, subject, *syncContext))
r.False(syncContext.Queue.(*testQueue).called)
// It should have deleted the expired secret. // It should have deleted the expired secret.
r.ElementsMatch( r.ElementsMatch(
@ -381,3 +393,23 @@ func TestGarbageCollectorControllerSync(t *testing.T) {
}) })
}, spec.Parallel(), spec.Report(report.Terminal{})) }, spec.Parallel(), spec.Report(report.Terminal{}))
} }
type testQueue struct {
t *testing.T
called bool
key controllerlib.Key
duration time.Duration
controllerlib.Queue // panics if any other method is called
}
func (q *testQueue) AddAfter(key controllerlib.Key, duration time.Duration) {
q.t.Helper()
require.False(q.t, q.called, "AddAfter should only be called once")
q.called = true
q.key = key
q.duration = duration
}


@ -121,6 +121,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
agentConfig := kubecertagent.AgentConfig{ agentConfig := kubecertagent.AgentConfig{
Namespace: c.ServerInstallationInfo.Namespace, Namespace: c.ServerInstallationInfo.Namespace,
ServiceAccountName: c.NamesConfig.AgentServiceAccount,
ContainerImage: *c.KubeCertAgentConfig.Image, ContainerImage: *c.KubeCertAgentConfig.Image,
NamePrefix: *c.KubeCertAgentConfig.NamePrefix, NamePrefix: *c.KubeCertAgentConfig.NamePrefix,
ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets, ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets,


@ -164,13 +164,8 @@ type TimeoutsConfiguration struct {
OIDCSessionStorageLifetime time.Duration OIDCSessionStorageLifetime time.Duration
// AccessTokenSessionStorageLifetime is the length of time after which an access token's session data is allowed // AccessTokenSessionStorageLifetime is the length of time after which an access token's session data is allowed
// to be garbage collected from storage. These must exist in storage for as long as the refresh token is valid. // to be garbage collected from storage. These must exist in storage for as long as the refresh token is valid
// Therefore, this can be just slightly longer than the AccessTokenLifespan. Access tokens are handed back to // or else the refresh flow will not work properly. So this must be longer than RefreshTokenLifespan.
// the token endpoint for the token exchange use case. During a token exchange, if the access token is expired
// and still exists in storage, then the endpoint will be able to give a slightly more specific error message,
// rather than a more generic error that is returned when the token does not exist. If this is desirable, then
// the AccessTokenSessionStorageLifetime can be made to be significantly larger than AccessTokenLifespan, at the
// cost of slower cleanup.
AccessTokenSessionStorageLifetime time.Duration AccessTokenSessionStorageLifetime time.Duration
// RefreshTokenSessionStorageLifetime is the length of time after which a refresh token's session data is allowed // RefreshTokenSessionStorageLifetime is the length of time after which a refresh token's session data is allowed
@ -186,7 +181,7 @@ type TimeoutsConfiguration struct {
// Get the defaults for the Supervisor server. // Get the defaults for the Supervisor server.
func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration { func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration {
accessTokenLifespan := 15 * time.Minute accessTokenLifespan := 2 * time.Minute
authorizationCodeLifespan := 10 * time.Minute authorizationCodeLifespan := 10 * time.Minute
refreshTokenLifespan := 9 * time.Hour refreshTokenLifespan := 9 * time.Hour
@ -199,7 +194,7 @@ func DefaultOIDCTimeoutsConfiguration() TimeoutsConfiguration {
AuthorizationCodeSessionStorageLifetime: authorizationCodeLifespan + refreshTokenLifespan, AuthorizationCodeSessionStorageLifetime: authorizationCodeLifespan + refreshTokenLifespan,
PKCESessionStorageLifetime: authorizationCodeLifespan + (1 * time.Minute), PKCESessionStorageLifetime: authorizationCodeLifespan + (1 * time.Minute),
OIDCSessionStorageLifetime: authorizationCodeLifespan + (1 * time.Minute), OIDCSessionStorageLifetime: authorizationCodeLifespan + (1 * time.Minute),
AccessTokenSessionStorageLifetime: accessTokenLifespan + (1 * time.Minute), AccessTokenSessionStorageLifetime: refreshTokenLifespan + accessTokenLifespan,
RefreshTokenSessionStorageLifetime: refreshTokenLifespan + accessTokenLifespan, RefreshTokenSessionStorageLifetime: refreshTokenLifespan + accessTokenLifespan,
} }
} }
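The rewritten comment and the new default encode one invariant: access-token session storage must now outlive the refresh token, otherwise a refresh could reference storage that has already been garbage collected. A tiny illustrative check of the new default values (not the real config package):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the defaults shown above.
	accessTokenLifespan := 2 * time.Minute
	refreshTokenLifespan := 9 * time.Hour

	// New default: storage lifetime is tied to the refresh token, not the access token.
	accessTokenSessionStorageLifetime := refreshTokenLifespan + accessTokenLifespan

	fmt.Println("access token session storage lifetime:", accessTokenSessionStorageLifetime)
	fmt.Println("outlives the refresh token:", accessTokenSessionStorageLifetime > refreshTokenLifespan)
}
```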


@ -61,8 +61,8 @@ const (
hmacSecret = "this needs to be at least 32 characters to meet entropy requirements" hmacSecret = "this needs to be at least 32 characters to meet entropy requirements"
authCodeExpirationSeconds = 10 * 60 // Currently, we set our auth code expiration to 10 minutes authCodeExpirationSeconds = 10 * 60 // Currently, we set our auth code expiration to 10 minutes
accessTokenExpirationSeconds = 15 * 60 // Currently, we set our access token expiration to 15 minutes accessTokenExpirationSeconds = 2 * 60 // Currently, we set our access token expiration to 2 minutes
idTokenExpirationSeconds = 15 * 60 // Currently, we set our ID token expiration to 15 minutes idTokenExpirationSeconds = 2 * 60 // Currently, we set our ID token expiration to 2 minutes
timeComparisonFudgeSeconds = 15 timeComparisonFudgeSeconds = 15
) )


@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved. // Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// Package plog implements a thin layer over klog to help enforce pinniped's logging convention. // Package plog implements a thin layer over klog to help enforce pinniped's logging convention.
@ -26,56 +26,157 @@
// act of desperation to determine why the system is broken. // act of desperation to determine why the system is broken.
package plog package plog
import "k8s.io/klog/v2" import (
"k8s.io/klog/v2"
)
const errorKey = "error" const errorKey = "error"
// Use Error to log an unexpected system error. type _ interface {
func Error(msg string, err error, keysAndValues ...interface{}) { Error(msg string, err error, keysAndValues ...interface{})
klog.ErrorS(err, msg, keysAndValues...) Warning(msg string, keysAndValues ...interface{})
WarningErr(msg string, err error, keysAndValues ...interface{})
Info(msg string, keysAndValues ...interface{})
InfoErr(msg string, err error, keysAndValues ...interface{})
Debug(msg string, keysAndValues ...interface{})
DebugErr(msg string, err error, keysAndValues ...interface{})
Trace(msg string, keysAndValues ...interface{})
TraceErr(msg string, err error, keysAndValues ...interface{})
All(msg string, keysAndValues ...interface{})
} }
func Warning(msg string, keysAndValues ...interface{}) { type PLogger struct {
prefix string
depth int
}
func New(prefix string) PLogger {
return PLogger{
depth: 0,
prefix: prefix,
}
}
func (p *PLogger) Error(msg string, err error, keysAndValues ...interface{}) {
klog.ErrorSDepth(p.depth+1, err, p.prefix+msg, keysAndValues...)
}
func (p *PLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) {
// klog's structured logging has no concept of a warning (i.e. no WarningS function) // klog's structured logging has no concept of a warning (i.e. no WarningS function)
// Thus we use info at log level zero as a proxy // Thus we use info at log level zero as a proxy
// klog's info logs have an I prefix and its warning logs have a W prefix // klog's info logs have an I prefix and its warning logs have a W prefix
// Since we lose the W prefix by using InfoS, just add a key to make these easier to find // Since we lose the W prefix by using InfoS, just add a key to make these easier to find
keysAndValues = append([]interface{}{"warning", "true"}, keysAndValues...) keysAndValues = append([]interface{}{"warning", "true"}, keysAndValues...)
klog.V(klogLevelWarning).InfoS(msg, keysAndValues...) if klog.V(klogLevelWarning).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
}
}
func (p *PLogger) Warning(msg string, keysAndValues ...interface{}) {
p.warningDepth(msg, p.depth+1, keysAndValues...)
}
// Use WarningErr to issue a Warning message with an error object as part of the message.
func (p *PLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) {
p.warningDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}
func (p *PLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) {
if klog.V(klogLevelInfo).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
}
}
func (p *PLogger) Info(msg string, keysAndValues ...interface{}) {
p.infoDepth(msg, p.depth+1, keysAndValues...)
}
// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
func (p *PLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) {
p.infoDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}
func (p *PLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) {
if klog.V(klogLevelDebug).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
}
}
func (p *PLogger) Debug(msg string, keysAndValues ...interface{}) {
p.debugDepth(msg, p.depth+1, keysAndValues...)
}
// Use DebugErr to issue a Debug message with an error object as part of the message.
func (p *PLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) {
p.debugDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}
func (p *PLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) {
if klog.V(klogLevelTrace).Enabled() {
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
}
}
func (p *PLogger) Trace(msg string, keysAndValues ...interface{}) {
p.traceDepth(msg, p.depth+1, keysAndValues...)
}
// Use TraceErr to issue a Trace message with an error object as part of the message.
func (p *PLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) {
p.traceDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
}
func (p *PLogger) All(msg string, keysAndValues ...interface{}) {
if klog.V(klogLevelAll).Enabled() {
klog.InfoSDepth(p.depth+1, p.prefix+msg, keysAndValues...)
}
}
var pLogger = PLogger{ //nolint:gochecknoglobals
depth: 1,
}
// Use Error to log an unexpected system error.
func Error(msg string, err error, keysAndValues ...interface{}) {
pLogger.Error(msg, err, keysAndValues...)
}
func Warning(msg string, keysAndValues ...interface{}) {
pLogger.Warning(msg, keysAndValues...)
} }
// Use WarningErr to issue a Warning message with an error object as part of the message. // Use WarningErr to issue a Warning message with an error object as part of the message.
func WarningErr(msg string, err error, keysAndValues ...interface{}) { func WarningErr(msg string, err error, keysAndValues ...interface{}) {
Warning(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) pLogger.WarningErr(msg, err, keysAndValues...)
} }
func Info(msg string, keysAndValues ...interface{}) { func Info(msg string, keysAndValues ...interface{}) {
klog.V(klogLevelInfo).InfoS(msg, keysAndValues...) pLogger.Info(msg, keysAndValues...)
} }
// Use InfoErr to log an expected error, e.g. validation failure of an http parameter. // Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
func InfoErr(msg string, err error, keysAndValues ...interface{}) { func InfoErr(msg string, err error, keysAndValues ...interface{}) {
Info(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) pLogger.InfoErr(msg, err, keysAndValues...)
} }
func Debug(msg string, keysAndValues ...interface{}) { func Debug(msg string, keysAndValues ...interface{}) {
klog.V(klogLevelDebug).InfoS(msg, keysAndValues...) pLogger.Debug(msg, keysAndValues...)
} }
// Use DebugErr to issue a Debug message with an error object as part of the message. // Use DebugErr to issue a Debug message with an error object as part of the message.
func DebugErr(msg string, err error, keysAndValues ...interface{}) { func DebugErr(msg string, err error, keysAndValues ...interface{}) {
Debug(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) pLogger.DebugErr(msg, err, keysAndValues...)
} }
func Trace(msg string, keysAndValues ...interface{}) { func Trace(msg string, keysAndValues ...interface{}) {
klog.V(klogLevelTrace).InfoS(msg, keysAndValues...) pLogger.Trace(msg, keysAndValues...)
} }
// Use TraceErr to issue a Trace message with an error object as part of the message. // Use TraceErr to issue a Trace message with an error object as part of the message.
func TraceErr(msg string, err error, keysAndValues ...interface{}) { func TraceErr(msg string, err error, keysAndValues ...interface{}) {
Trace(msg, append([]interface{}{errorKey, err}, keysAndValues...)...) pLogger.TraceErr(msg, err, keysAndValues...)
} }
func All(msg string, keysAndValues ...interface{}) { func All(msg string, keysAndValues ...interface{}) {
klog.V(klogLevelAll).InfoS(msg, keysAndValues...) pLogger.All(msg, keysAndValues...)
} }
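For callers inside the Pinniped module, the reworked API is intended to be a drop-in: the package-level helpers still exist and now delegate to a shared `PLogger`, while `New` adds an optional message prefix. A short usage sketch (the prefix and messages are made up for illustration):

```go
package main

import (
	"errors"

	"go.pinniped.dev/internal/plog"
)

func main() {
	// Package-level helpers delegate to a shared PLogger with depth 1, so klog
	// reports the caller's file and line rather than plog's own wrapper.
	plog.Info("starting storage garbage collection sweep", "interval", "30s")
	plog.WarningErr("sweep hit a problem", errors.New("boom"), "retries", 3)

	// A prefixed logger: the prefix is prepended to every message it emits.
	logger := plog.New("gc: ")
	logger.Debug("garbage collected resource", "name", "some-secret")
}
```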


@ -0,0 +1,14 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package valuelesscontext
import "context"
// New wraps ctx so that deadlines, cancellation, and errors still propagate, but every value
// stored on ctx (or its parents) is hidden from callers.
func New(ctx context.Context) context.Context {
return valuelessContext{Context: ctx}
}
type valuelessContext struct{ context.Context }
// Value always returns nil, hiding all values carried by the embedded Context.
func (valuelessContext) Value(interface{}) interface{} { return nil }
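Inside the Pinniped module, the new package can be exercised roughly as sketched below; it shows that the deadline passes through while values disappear (the `key` type is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.pinniped.dev/internal/valuelesscontext"
)

type key struct{}

func main() {
	parent, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	parent = context.WithValue(parent, key{}, "secret")

	stripped := valuelesscontext.New(parent)

	fmt.Println(parent.Value(key{}))   // "secret"
	fmt.Println(stripped.Value(key{})) // <nil>, values are hidden

	_, ok := stripped.Deadline() // true, the deadline still propagates
	fmt.Println(ok)
}
```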


@ -0,0 +1,242 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package valuelesscontext
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"k8s.io/apiserver/pkg/authentication/authenticator"
)
func TestNew(t *testing.T) {
t.Parallel()
type contextKey int
tests := []struct {
name string
f func(*testing.T, context.Context) context.Context
wantReg, wantNew, wantBoth func(*testing.T, context.Context)
}{
{
name: "empty context",
f: func(t *testing.T, ctx context.Context) context.Context {
return ctx
},
wantReg: func(t *testing.T, ctx context.Context) {},
wantNew: func(t *testing.T, ctx context.Context) {},
wantBoth: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.False(t, ok)
require.Nil(t, auds)
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.False(t, ok)
require.Zero(t, val)
deadline, ok := ctx.Deadline()
require.False(t, ok)
require.Zero(t, deadline)
require.Nil(t, ctx.Done())
require.NoError(t, ctx.Err())
},
},
{
name: "context with audience",
f: func(t *testing.T, ctx context.Context) context.Context {
return authenticator.WithAudiences(ctx, authenticator.Audiences{"1", "2"})
},
wantReg: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.True(t, ok)
require.Equal(t, authenticator.Audiences{"1", "2"}, auds)
},
wantNew: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.False(t, ok)
require.Nil(t, auds)
},
wantBoth: func(t *testing.T, ctx context.Context) {
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.False(t, ok)
require.Zero(t, val)
deadline, ok := ctx.Deadline()
require.False(t, ok)
require.Zero(t, deadline)
require.Nil(t, ctx.Done())
require.NoError(t, ctx.Err())
},
},
{
name: "context with audience and past deadline",
f: func(t *testing.T, ctx context.Context) context.Context {
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"})
var cancel context.CancelFunc
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour))
t.Cleanup(cancel)
return ctx
},
wantReg: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.True(t, ok)
require.Equal(t, authenticator.Audiences{"3", "4"}, auds)
},
wantNew: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.False(t, ok)
require.Nil(t, auds)
},
wantBoth: func(t *testing.T, ctx context.Context) {
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.False(t, ok)
require.Zero(t, val)
deadline, ok := ctx.Deadline()
require.True(t, ok)
require.NotZero(t, deadline)
require.True(t, deadline.Before(time.Now()))
ch := ctx.Done()
require.NotNil(t, ch)
select {
case <-ch:
case <-time.After(10 * time.Second):
t.Error("expected closed done channel")
}
require.Equal(t, context.DeadlineExceeded, ctx.Err())
},
},
{
name: "context with audience and custom value and past deadline",
f: func(t *testing.T, ctx context.Context) context.Context {
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"})
var cancel context.CancelFunc
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(-time.Hour))
t.Cleanup(cancel)
ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo")
return ctx
},
wantReg: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.True(t, ok)
require.Equal(t, authenticator.Audiences{"3", "4"}, auds)
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.True(t, ok)
require.Equal(t, "mooo", val)
},
wantNew: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.False(t, ok)
require.Nil(t, auds)
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.False(t, ok)
require.Zero(t, val)
},
wantBoth: func(t *testing.T, ctx context.Context) {
deadline, ok := ctx.Deadline()
require.True(t, ok)
require.NotZero(t, deadline)
require.True(t, deadline.Before(time.Now()))
ch := ctx.Done()
require.NotNil(t, ch)
select {
case <-ch:
case <-time.After(10 * time.Second):
t.Error("expected closed done channel")
}
require.Equal(t, context.DeadlineExceeded, ctx.Err())
},
},
{
name: "context with audience and custom value and future deadline",
f: func(t *testing.T, ctx context.Context) context.Context {
ctx = authenticator.WithAudiences(ctx, authenticator.Audiences{"3", "4"})
var cancel context.CancelFunc
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Hour))
t.Cleanup(cancel)
ctx = context.WithValue(ctx, contextKey(0xDEADBEEF), "mooo")
return ctx
},
wantReg: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.True(t, ok)
require.Equal(t, authenticator.Audiences{"3", "4"}, auds)
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.True(t, ok)
require.Equal(t, "mooo", val)
},
wantNew: func(t *testing.T, ctx context.Context) {
auds, ok := authenticator.AudiencesFrom(ctx)
require.False(t, ok)
require.Nil(t, auds)
val, ok := ctx.Value(contextKey(0xDEADBEEF)).(string)
require.False(t, ok)
require.Zero(t, val)
},
wantBoth: func(t *testing.T, ctx context.Context) {
deadline, ok := ctx.Deadline()
require.True(t, ok)
require.NotZero(t, deadline)
require.True(t, deadline.After(time.Now()))
ch := ctx.Done()
require.NotNil(t, ch)
select {
case <-ch:
t.Error("expected not closed done channel")
case <-time.After(3 * time.Second):
}
require.NoError(t, ctx.Err())
},
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
ctx := tt.f(t, context.Background())
t.Run("reg", func(t *testing.T) {
t.Parallel()
tt.wantReg(t, ctx)
})
t.Run("reg-both", func(t *testing.T) {
t.Parallel()
tt.wantBoth(t, ctx)
})
t.Run("new", func(t *testing.T) {
t.Parallel()
tt.wantNew(t, New(ctx))
})
t.Run("new-both", func(t *testing.T) {
t.Parallel()
tt.wantBoth(t, New(ctx))
})
})
}
}


@ -20,6 +20,7 @@ import (
"time" "time"
"github.com/coreos/go-oidc/v3/oidc" "github.com/coreos/go-oidc/v3/oidc"
"github.com/go-logr/logr"
"github.com/pkg/browser" "github.com/pkg/browser"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/term" "golang.org/x/term"
@ -58,11 +59,14 @@ const (
defaultLDAPPasswordPrompt = "Password: " defaultLDAPPasswordPrompt = "Password: "
httpLocationHeaderName = "Location" httpLocationHeaderName = "Location"
debugLogLevel = 4
) )
type handlerState struct { type handlerState struct {
// Basic parameters. // Basic parameters.
ctx context.Context ctx context.Context
logger logr.Logger
issuer string issuer string
clientID string clientID string
scopes []string scopes []string
@ -117,6 +121,15 @@ func WithContext(ctx context.Context) Option {
} }
} }
// WithLogger specifies a logr.Logger to use during the login flow.
// If not specified, log output is discarded by default.
func WithLogger(logger logr.Logger) Option {
return func(h *handlerState) error {
h.logger = logger
return nil
}
}
// WithListenPort specifies a TCP listen port on localhost, which will be used for the redirect_uri and to handle the // WithListenPort specifies a TCP listen port on localhost, which will be used for the redirect_uri and to handle the
// authorization code callback. By default, a random high port will be chosen which requires the authorization server // authorization code callback. By default, a random high port will be chosen which requires the authorization server
// to support wildcard port numbers as described by https://tools.ietf.org/html/rfc8252#section-7.3: // to support wildcard port numbers as described by https://tools.ietf.org/html/rfc8252#section-7.3:
@ -227,6 +240,7 @@ func Login(issuer string, clientID string, opts ...Option) (*oidctypes.Token, er
cache: &nopCache{}, cache: &nopCache{},
callbackPath: "/callback", callbackPath: "/callback",
ctx: context.Background(), ctx: context.Background(),
logger: logr.Discard(), // discard logs unless a logger is specified
callbacks: make(chan callbackResult), callbacks: make(chan callbackResult),
httpClient: http.DefaultClient, httpClient: http.DefaultClient,
@ -306,6 +320,7 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) {
// If the ID token is still valid for a bit, return it immediately and skip the rest of the flow. // If the ID token is still valid for a bit, return it immediately and skip the rest of the flow.
cached := h.cache.GetToken(cacheKey) cached := h.cache.GetToken(cacheKey)
if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity { if cached != nil && cached.IDToken != nil && time.Until(cached.IDToken.Expiry.Time) > minIDTokenValidity {
h.logger.V(debugLogLevel).Info("Pinniped: Found unexpired cached token.")
return cached, nil return cached, nil
} }
@ -538,6 +553,7 @@ func (h *handlerState) initOIDCDiscovery() error {
return nil return nil
} }
h.logger.V(debugLogLevel).Info("Pinniped: Performing OIDC discovery", "issuer", h.issuer)
var err error var err error
h.provider, err = oidc.NewProvider(h.ctx, h.issuer) h.provider, err = oidc.NewProvider(h.ctx, h.issuer)
if err != nil { if err != nil {
@ -554,6 +570,7 @@ func (h *handlerState) initOIDCDiscovery() error {
} }
func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) { func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidctypes.Token, error) {
h.logger.V(debugLogLevel).Info("Pinniped: Performing RFC8693 token exchange", "requestedAudience", h.requestedAudience)
// Perform OIDC discovery. This may have already been performed if there was not a cached base token. // Perform OIDC discovery. This may have already been performed if there was not a cached base token.
if err := h.initOIDCDiscovery(); err != nil { if err := h.initOIDCDiscovery(); err != nil {
return nil, err return nil, err
@ -624,6 +641,7 @@ func (h *handlerState) tokenExchangeRFC8693(baseToken *oidctypes.Token) (*oidcty
} }
func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) { func (h *handlerState) handleRefresh(ctx context.Context, refreshToken *oidctypes.RefreshToken) (*oidctypes.Token, error) {
h.logger.V(debugLogLevel).Info("Pinniped: Refreshing cached token.")
refreshSource := h.oauth2Config.TokenSource(ctx, &oauth2.Token{RefreshToken: refreshToken.Token}) refreshSource := h.oauth2Config.TokenSource(ctx, &oauth2.Token{RefreshToken: refreshToken.Token})
refreshed, err := refreshSource.Token() refreshed, err := refreshSource.Token()
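A caller can wire a logger into the login flow through the new option roughly as sketched below; the issuer, client ID, and scopes are placeholders, and the debug messages added above only appear when the logger's verbosity is at least 4.

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2/klogr"

	"go.pinniped.dev/pkg/oidcclient"
)

func main() {
	token, err := oidcclient.Login(
		"https://issuer.example.com", // placeholder issuer
		"example-client-id",          // placeholder client ID
		oidcclient.WithContext(context.Background()),
		oidcclient.WithLogger(klogr.New()), // without this option, log output is discarded
		oidcclient.WithScopes([]string{"openid", "offline_access"}), // placeholder scopes
	)
	if err != nil {
		fmt.Println("login failed:", err)
		return
	}
	if token.IDToken != nil {
		fmt.Println("ID token expires at", token.IDToken.Expiry.Time)
	}
}
```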


@ -16,18 +16,22 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-logr/stdr"
"github.com/coreos/go-oidc/v3/oidc" "github.com/coreos/go-oidc/v3/oidc"
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/oauth2" "golang.org/x/oauth2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
"go.pinniped.dev/internal/httputil/httperr" "go.pinniped.dev/internal/httputil/httperr"
"go.pinniped.dev/internal/httputil/roundtripper" "go.pinniped.dev/internal/httputil/roundtripper"
"go.pinniped.dev/internal/mocks/mockupstreamoidcidentityprovider" "go.pinniped.dev/internal/mocks/mockupstreamoidcidentityprovider"
"go.pinniped.dev/internal/oidc/provider" "go.pinniped.dev/internal/oidc/provider"
"go.pinniped.dev/internal/testutil" "go.pinniped.dev/internal/testutil"
"go.pinniped.dev/internal/testutil/testlogger"
"go.pinniped.dev/pkg/oidcclient/nonce" "go.pinniped.dev/pkg/oidcclient/nonce"
"go.pinniped.dev/pkg/oidcclient/oidctypes" "go.pinniped.dev/pkg/oidcclient/oidctypes"
"go.pinniped.dev/pkg/oidcclient/pkce" "go.pinniped.dev/pkg/oidcclient/pkce"
@ -254,6 +258,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
clientID string clientID string
wantErr string wantErr string
wantToken *oidctypes.Token wantToken *oidctypes.Token
wantLogs []string
}{ }{
{ {
name: "option error", name: "option error",
@ -318,6 +323,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return WithSessionCache(cache)(h) return WithSessionCache(cache)(h)
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"test-issuer\""},
wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`, wantErr: `could not perform OIDC discovery for "test-issuer": Get "test-issuer/.well-known/openid-configuration": unsupported protocol scheme ""`,
}, },
{ {
@ -339,6 +345,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return WithSessionCache(cache)(h) return WithSessionCache(cache)(h)
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\""},
wantToken: &testToken, wantToken: &testToken,
}, },
{ {
@ -347,6 +354,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return func(h *handlerState) error { return nil } return func(h *handlerState) error { return nil }
}, },
issuer: errorServer.URL, issuer: errorServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""},
wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), wantErr: fmt.Sprintf("could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL),
}, },
{ {
@ -386,6 +394,8 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""},
wantToken: &testToken, wantToken: &testToken,
}, },
{ {
@ -418,6 +428,8 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""},
wantErr: "some validation error", wantErr: "some validation error",
}, },
{ {
@ -444,6 +456,8 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\""},
// Expect this to fall through to the authorization code flow, so it fails here. // Expect this to fall through to the authorization code flow, so it fails here.
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address",
}, },
@ -456,6 +470,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address", wantErr: "could not open callback listener: listen tcp: address invalid-listen-address: missing port in address",
}, },
{ {
@ -466,6 +481,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
}) })
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "could not open browser: some browser open error", wantErr: "could not open browser: some browser open error",
}, },
{ {
@ -483,6 +499,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "timed out waiting for token callback: context canceled", wantErr: "timed out waiting for token callback: context canceled",
}, },
{ {
@ -499,6 +516,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "error handling callback: some callback error", wantErr: "error handling callback: some callback error",
}, },
{ {
@ -559,6 +577,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantToken: &testToken, wantToken: &testToken,
}, },
{ {
@ -622,6 +641,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantToken: &testToken, wantToken: &testToken,
}, },
{ {
@ -638,6 +658,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "error prompting for username: some prompt error", wantErr: "error prompting for username: some prompt error",
}, },
{ {
@ -651,6 +672,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "error prompting for password: some prompt error", wantErr: "error prompting for password: some prompt error",
}, },
{ {
@ -691,6 +713,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `could not build authorize request: parse "%?access_type=offline&client_id=test-client-id&code_challenge=VVaezYqum7reIhoavCHD1n2d-piN3r_mywoYj7fCR7g&code_challenge_method=S256&nonce=test-nonce&pinniped_idp_name=some-upstream-name&pinniped_idp_type=ldap&redirect_uri=http%3A%2F%2F127.0.0.1%3A0%2Fcallback&response_type=code&scope=test-scope&state=test-state": invalid URL escape "%"`, wantErr: `could not build authorize request: parse "%?access_type=offline&client_id=test-client-id&code_challenge=VVaezYqum7reIhoavCHD1n2d-piN3r_mywoYj7fCR7g&code_challenge_method=S256&nonce=test-nonce&pinniped_idp_name=some-upstream-name&pinniped_idp_type=ldap&redirect_uri=http%3A%2F%2F127.0.0.1%3A0%2Fcallback&response_type=code&scope=test-scope&state=test-state": invalid URL escape "%"`,
}, },
{ {
@ -702,6 +725,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `authorization response error: Get "http://` + successServer.Listener.Addr().String() + wantErr: `authorization response error: Get "http://` + successServer.Listener.Addr().String() +
`/authorize?access_type=offline&client_id=test-client-id&code_challenge=VVaezYqum7reIhoavCHD1n2d-piN3r_mywoYj7fCR7g&code_challenge_method=S256&nonce=test-nonce&pinniped_idp_name=some-upstream-name&pinniped_idp_type=ldap&redirect_uri=http%3A%2F%2F127.0.0.1%3A0%2Fcallback&response_type=code&scope=test-scope&state=test-state": some error fetching authorize endpoint`, `/authorize?access_type=offline&client_id=test-client-id&code_challenge=VVaezYqum7reIhoavCHD1n2d-piN3r_mywoYj7fCR7g&code_challenge_method=S256&nonce=test-nonce&pinniped_idp_name=some-upstream-name&pinniped_idp_type=ldap&redirect_uri=http%3A%2F%2F127.0.0.1%3A0%2Fcallback&response_type=code&scope=test-scope&state=test-state": some error fetching authorize endpoint`,
}, },
@ -714,6 +738,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `error getting authorization: expected to be redirected, but response status was 502 Bad Gateway`, wantErr: `error getting authorization: expected to be redirected, but response status was 502 Bad Gateway`,
}, },
{ {
@ -730,6 +755,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `login failed with code "access_denied": optional-error-description`, wantErr: `login failed with code "access_denied": optional-error-description`,
}, },
{ {
@ -746,6 +772,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `error getting authorization: redirected to the wrong location: http://other-server.example.com/callback?code=foo&state=test-state`, wantErr: `error getting authorization: redirected to the wrong location: http://other-server.example.com/callback?code=foo&state=test-state`,
}, },
{ {
@ -762,6 +789,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `login failed with code "access_denied"`, wantErr: `login failed with code "access_denied"`,
}, },
{ {
@ -776,6 +804,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `missing or invalid state parameter in authorization response: http://127.0.0.1:0/callback?code=foo&state=wrong-state`, wantErr: `missing or invalid state parameter in authorization response: http://127.0.0.1:0/callback?code=foo&state=wrong-state`,
}, },
{ {
@ -802,6 +831,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: "error during authorization code exchange: some authcode exchange or token validation error", wantErr: "error during authorization code exchange: some authcode exchange or token validation error",
}, },
{ {
@ -898,6 +928,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
} }
}, },
issuer: successServer.URL, issuer: successServer.URL,
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantToken: &testToken, wantToken: &testToken,
}, },
{ {
@ -921,6 +952,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + errorServer.URL + "\""},
wantErr: fmt.Sprintf("failed to exchange token: could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL), wantErr: fmt.Sprintf("failed to exchange token: could not perform OIDC discovery for %q: 500 Internal Server Error: some discovery error\n", errorServer.URL),
}, },
{ {
@ -944,6 +978,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"cluster-1234\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + brokenTokenURLServer.URL + "\""},
wantErr: `failed to exchange token: could not build RFC8693 request: parse "%": invalid URL escape "%"`, wantErr: `failed to exchange token: could not build RFC8693 request: parse "%": invalid URL escape "%"`,
}, },
{ {
@ -967,6 +1004,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-http-response\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: fmt.Sprintf(`failed to exchange token: Post "%s/token": failed to parse Location header "%%": parse "%%": invalid URL escape "%%"`, successServer.URL), wantErr: fmt.Sprintf(`failed to exchange token: Post "%s/token": failed to parse Location header "%%": parse "%%": invalid URL escape "%%"`, successServer.URL),
}, },
{ {
@ -990,6 +1030,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-http-400\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: unexpected HTTP response status 400`, wantErr: `failed to exchange token: unexpected HTTP response status 400`,
}, },
{ {
@ -1013,6 +1056,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-content-type\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: failed to decode content-type header: mime: invalid media parameter`, wantErr: `failed to exchange token: failed to decode content-type header: mime: invalid media parameter`,
}, },
{ {
@ -1036,6 +1082,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-wrong-content-type\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: unexpected HTTP response content type "invalid"`, wantErr: `failed to exchange token: unexpected HTTP response content type "invalid"`,
}, },
{ {
@ -1059,6 +1108,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-json\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: failed to decode response: unexpected EOF`, wantErr: `failed to exchange token: failed to decode response: unexpected EOF`,
}, },
{ {
@ -1082,6 +1134,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-tokentype\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: got unexpected token_type "invalid"`, wantErr: `failed to exchange token: got unexpected token_type "invalid"`,
}, },
{ {
@ -1105,6 +1160,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-issuedtokentype\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: got unexpected issued_token_type "invalid"`, wantErr: `failed to exchange token: got unexpected issued_token_type "invalid"`,
}, },
{ {
@ -1128,6 +1186,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience-produce-invalid-jwt\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantErr: `failed to exchange token: received invalid JWT: oidc: malformed jwt: square/go-jose: compact JWS format must have three parts`, wantErr: `failed to exchange token: received invalid JWT: oidc: malformed jwt: square/go-jose: compact JWS format must have three parts`,
}, },
{ {
@ -1157,6 +1218,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Found unexpired cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
wantToken: &testExchangedToken, wantToken: &testExchangedToken,
}, },
{ {
@ -1204,18 +1268,29 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
return nil return nil
} }
}, },
wantLogs: []string{
"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
"\"level\"=4 \"msg\"=\"Pinniped: Refreshing cached token.\"",
"\"level\"=4 \"msg\"=\"Pinniped: Performing RFC8693 token exchange\" \"requestedAudience\"=\"test-audience\"",
},
wantToken: &testExchangedToken, wantToken: &testExchangedToken,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
tt := tt tt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
testLogger := testlogger.New(t)
klog.SetLogger(testLogger)
stdr.SetVerbosity(debugLogLevel) // set stdr's global log level to debug so the test logger will send output.
tok, err := Login(tt.issuer, tt.clientID, tok, err := Login(tt.issuer, tt.clientID,
WithContext(context.Background()), WithContext(context.Background()),
WithListenPort(0), WithListenPort(0),
WithScopes([]string{"test-scope"}), WithScopes([]string{"test-scope"}),
tt.opt(t), tt.opt(t),
WithLogger(testLogger),
) )
require.Equal(t, tt.wantLogs, testLogger.Lines())
if tt.wantErr != "" { if tt.wantErr != "" {
require.EqualError(t, err, tt.wantErr) require.EqualError(t, err, tt.wantErr)
require.Nil(t, tok) require.Nil(t, tok)


@ -0,0 +1,140 @@
---
title: Configure the Pinniped Supervisor to use GitLab as an OIDC Provider
description: Set up the Pinniped Supervisor to use GitLab login.
cascade:
layout: docs
menu:
docs:
name: Configure Supervisor With GitLab
weight: 35
parent: howtos
---
The Supervisor is an [OpenID Connect (OIDC)](https://openid.net/connect/) issuer that supports connecting a single "upstream" OIDC identity provider to many "downstream" cluster clients.
This guide shows you how to configure the Supervisor so that users can authenticate to their Kubernetes
cluster using their GitLab credentials.
## Prerequisites
This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress,
and that you have [configured a `FederationDomain` to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).
## Configure your GitLab Application
Follow the instructions for [using GitLab as an OAuth2 authentication service provider](https://docs.gitlab.com/ee/integration/oauth_provider.html) and create a user, group, or instance-wide application.
For example, to create a user-owned application:
1. In GitLab, navigate to [_User Settings_ > _Applications_](https://gitlab.com/-/profile/applications)
1. Create a new application:
1. Enter a name for your application, such as "My Kubernetes Clusters".
1. Enter the redirect URI. This is the `spec.issuer` you configured in your `FederationDomain` appended with `/callback`.
1. Check the box saying that the application is _Confidential_.
1. Select scope `openid`. This provides access to the `nickname` (GitLab username) and `groups` (GitLab groups) claims.
1. Save the application and make note of the _Application ID_ and _Secret_.
## Configure the Supervisor cluster
Create an [OIDCIdentityProvider](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) in the same namespace as the Supervisor.
For example, this OIDCIdentityProvider and corresponding Secret for [gitlab.com](https://gitlab.com) use the `nickname` claim (GitLab username) as the Kubernetes username:
```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
namespace: pinniped-supervisor
name: gitlab
spec:
# Specify the upstream issuer URL.
issuer: https://gitlab.com
# Specify how GitLab claims are mapped to Kubernetes identities.
claims:
# Specify the name of the claim in your GitLab token that will be mapped
# to the "username" claim in downstream tokens minted by the Supervisor.
username: nickname
# Specify the name of the claim in GitLab that represents the groups
# that the user belongs to. Note that GitLab's "groups" claim comes from
# their "/userinfo" endpoint, not the token.
groups: groups
# Specify the name of the Kubernetes Secret that contains your GitLab
# application's client credentials (created below).
client:
secretName: gitlab-client-credentials
---
apiVersion: v1
kind: Secret
metadata:
namespace: pinniped-supervisor
name: gitlab-client-credentials
type: secrets.pinniped.dev/oidc-client
stringData:
# The "Application ID" that you got from GitLab.
clientID: "<your-client-id>"
# The "Secret" that you got from GitLab.
clientSecret: "<your-client-secret>"
```
Once your OIDCIdentityProvider has been created, you can validate your configuration by running:
```shell
kubectl describe OIDCIdentityProvider -n pinniped-supervisor gitlab
```
Look at the `status` field. If it was configured correctly, you should see `phase: Ready`.
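If you only need the phase, for example in a script, a `kubectl` JSONPath query along these lines should also work (assuming the same resource name and namespace as above):

```shell
kubectl get oidcidentityprovider gitlab -n pinniped-supervisor -o jsonpath='{.status.phase}'
```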
### (Optional) Use a different GitLab claim for Kubernetes usernames
You can also use other GitLab claims as the username.
To do this, make sure you have configured the appropriate scopes on your GitLab application, such as `email`.
You must also adjust the `spec.authorizationConfig` to request those scopes at login and adjust `spec.claims` to use those claims in Kubernetes, for example:
```yaml
# [...]
spec:
# Request any scopes other than "openid" that you selected when
# creating your GitLab application. The "openid" scope is always
# included.
#
# See here for a full list of available claims:
# https://docs.gitlab.com/ee/integration/openid_connect_provider.html
authorizationConfig:
additionalScopes: [ email ]
claims:
username: email
groups: groups
# [...]
```
### (Optional) Use a private GitLab instance
To use a privately hosted instance of GitLab, change the `spec.issuer` and `spec.tls.certificateAuthorityData` fields, for example:
```yaml
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
# [...]
spec:
# Specify your GitLab instance URL.
issuer: https://gitlab.your-company.example.com
# Specify the CA bundle for the GitLab server as base64-encoded PEM
# data. For example, the output of `cat my-ca-bundle.pem | base64`.
#
# This is only necessary if your instance uses a custom CA.
tls:
certificateAuthorityData: "<gitlab-ca-bundle>"
# [...]
```
## Next Steps
Now that you have configured the Supervisor to use GitLab, you may want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-jwt" >}}).


@ -29,6 +29,8 @@ pinniped get kubeconfig [flags]
--concierge-endpoint string API base for the Concierge endpoint --concierge-endpoint string API base for the Concierge endpoint
--concierge-mode mode Concierge mode of operation (default TokenCredentialRequestAPI) --concierge-mode mode Concierge mode of operation (default TokenCredentialRequestAPI)
--concierge-skip-wait Skip waiting for any pending Concierge strategies to become ready (default: false) --concierge-skip-wait Skip waiting for any pending Concierge strategies to become ready (default: false)
--credential-cache string Path to cluster-specific credentials cache
--generated-name-suffix string Suffix to append to generated cluster, context, user kubeconfig entries (default "-pinniped")
-h, --help help for kubeconfig -h, --help help for kubeconfig
--kubeconfig string Path to kubeconfig file --kubeconfig string Path to kubeconfig file
--kubeconfig-context string Kubeconfig context name (default: current active context) --kubeconfig-context string Kubeconfig context name (default: current active context)
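For example, the two new flags might be combined as follows; the cache path and suffix shown here are illustrative, not defaults:

```shell
pinniped get kubeconfig \
  --credential-cache ./pinniped-credentials.yaml \
  --generated-name-suffix "-dev" \
  > kubeconfig-dev.yaml
```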


@ -208,6 +208,8 @@ func TestCLILoginOIDC(t *testing.T) {
require.NoErrorf(t, json.Unmarshal(cmd2Output, &credOutput2), require.NoErrorf(t, json.Unmarshal(cmd2Output, &credOutput2),
"command returned something other than an ExecCredential:\n%s", string(cmd2Output)) "command returned something other than an ExecCredential:\n%s", string(cmd2Output))
require.Equal(t, credOutput, credOutput2) require.Equal(t, credOutput, credOutput2)
// the logs contain only the ExecCredential. There are 2 elements because the last one is "".
require.Len(t, strings.Split(string(cmd2Output), "\n"), 2)
// Overwrite the cache entry to remove the access and ID tokens. // Overwrite the cache entry to remove the access and ID tokens.
t.Logf("overwriting cache to remove valid ID token") t.Logf("overwriting cache to remove valid ID token")
@ -237,6 +239,26 @@ func TestCLILoginOIDC(t *testing.T) {
require.NoErrorf(t, json.Unmarshal(cmd3Output, &credOutput3), require.NoErrorf(t, json.Unmarshal(cmd3Output, &credOutput3),
"command returned something other than an ExecCredential:\n%s", string(cmd2Output)) "command returned something other than an ExecCredential:\n%s", string(cmd2Output))
require.NotEqual(t, credOutput2.Status.Token, credOutput3.Status.Token) require.NotEqual(t, credOutput2.Status.Token, credOutput3.Status.Token)
// the logs contain only the ExecCredential. There are 2 elements because the last one is "".
require.Len(t, strings.Split(string(cmd3Output), "\n"), 2)
t.Logf("starting fourth CLI subprocess to test debug logging")
err = os.Setenv("PINNIPED_DEBUG", "true")
require.NoError(t, err)
command := oidcLoginCommand(ctx, t, pinnipedExe, sessionCachePath)
cmd4CombinedOutput, err := command.CombinedOutput()
cmd4StringOutput := string(cmd4CombinedOutput)
require.NoError(t, err, cmd4StringOutput)
// the logs contain only the 4 debug lines plus the ExecCredential. There are 6 elements because the last one is "".
require.Len(t, strings.Split(cmd4StringOutput, "\n"), 6)
require.Contains(t, cmd4StringOutput, "Performing OIDC login")
require.Contains(t, cmd4StringOutput, "Found unexpired cached token")
require.Contains(t, cmd4StringOutput, "No concierge configured, skipping token credential exchange")
require.Contains(t, cmd4StringOutput, "caching cluster credential for future use.")
require.Contains(t, cmd4StringOutput, credOutput3.Status.Token)
err = os.Unsetenv("PINNIPED_DEBUG")
require.NoError(t, err)
} }
func runPinnipedLoginOIDC( func runPinnipedLoginOIDC(
@ -271,6 +293,7 @@ func runPinnipedLoginOIDC(
// Start a background goroutine to read stderr from the CLI and parse out the login URL. // Start a background goroutine to read stderr from the CLI and parse out the login URL.
loginURLChan := make(chan string) loginURLChan := make(chan string)
spawnTestGoroutine(t, func() (err error) { spawnTestGoroutine(t, func() (err error) {
t.Helper()
defer func() { defer func() {
closeErr := stderr.Close() closeErr := stderr.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
@ -282,16 +305,18 @@ func runPinnipedLoginOIDC(
}() }()
reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderr)) reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderr))
line, err := reader.ReadString('\n')
if err != nil { scanner := bufio.NewScanner(reader)
return fmt.Errorf("could not read login URL line from stderr: %w", err)
}
const prompt = "Please log in: " const prompt = "Please log in: "
if !strings.HasPrefix(line, prompt) { for scanner.Scan() {
return fmt.Errorf("expected %q to have prefix %q", line, prompt) line := scanner.Text()
} if strings.HasPrefix(line, prompt) {
loginURLChan <- strings.TrimPrefix(line, prompt) loginURLChan <- strings.TrimPrefix(line, prompt)
return readAndExpectEmpty(reader) return nil
}
}
return fmt.Errorf("expected stderr to contain %s", prompt)
}) })
// Start a background goroutine to read stdout from the CLI and parse out an ExecCredential. // Start a background goroutine to read stdout from the CLI and parse out an ExecCredential.


@ -6,7 +6,11 @@ package integration
import ( import (
"bytes" "bytes"
"context" "context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509" "crypto/x509"
"crypto/x509/pkix"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"encoding/pem" "encoding/pem"
@ -28,6 +32,8 @@ import (
"golang.org/x/net/http2" "golang.org/x/net/http2"
authenticationv1 "k8s.io/api/authentication/v1" authenticationv1 "k8s.io/api/authentication/v1"
authorizationv1 "k8s.io/api/authorization/v1" authorizationv1 "k8s.io/api/authorization/v1"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors"
@ -42,6 +48,8 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
"k8s.io/client-go/transport" "k8s.io/client-go/transport"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/certificate/csr"
"k8s.io/client-go/util/keyutil" "k8s.io/client-go/util/keyutil"
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
@ -607,14 +615,24 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{}) Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
require.NoError(t, err) require.NoError(t, err)
expectedExtra := make(map[string]authenticationv1.ExtraValue, len(whoAmIAdmin.Status.KubernetesUserInfo.User.Extra)) // The WhoAmI API is lossy:
for k, v := range whoAmIAdmin.Status.KubernetesUserInfo.User.Extra { // - It drops UID
// - It lowercases all extra keys
// The admin user on EKS has both a UID set and an extra key with uppercase characters.
// Thus we fall back to the CSR API to grab the UID and Extra to handle this scenario.
uid, extra := getUIDAndExtraViaCSR(ctx, t, whoAmIAdmin.Status.KubernetesUserInfo.User.UID,
newImpersonationProxyClientWithCredentials(t,
clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil).
Kubernetes,
)
expectedExtra := make(map[string]authenticationv1.ExtraValue, len(extra))
for k, v := range extra {
expectedExtra[k] = authenticationv1.ExtraValue(v) expectedExtra[k] = authenticationv1.ExtraValue(v)
} }
expectedOriginalUserInfo := authenticationv1.UserInfo{ expectedOriginalUserInfo := authenticationv1.UserInfo{
Username: whoAmIAdmin.Status.KubernetesUserInfo.User.Username, Username: whoAmIAdmin.Status.KubernetesUserInfo.User.Username,
// The WhoAmI API is lossy so this will fail when the admin user actually does have a UID UID: uid,
UID: whoAmIAdmin.Status.KubernetesUserInfo.User.UID,
Groups: whoAmIAdmin.Status.KubernetesUserInfo.User.Groups, Groups: whoAmIAdmin.Status.KubernetesUserInfo.User.Groups,
Extra: expectedExtra, Extra: expectedExtra,
} }
@ -780,32 +798,148 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
whoAmI, whoAmI,
) )
// Test using a service account token. Authenticating as Service Accounts through the impersonation // Test using a service account token.
// proxy is not supported, so it should fail.
namespaceName := createTestNamespace(t, adminClient) namespaceName := createTestNamespace(t, adminClient)
_, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName) saName, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName)
impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t, impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t,
&loginv1alpha1.ClusterCredential{Token: saToken}, &loginv1alpha1.ClusterCredential{Token: saToken},
impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge
_, err = impersonationProxyServiceAccountPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests(). whoAmI, err = impersonationProxyServiceAccountPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{}) Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
require.EqualError(t, err, "Internal error occurred: unimplemented functionality - unable to act as current user") require.NoError(t, err)
require.True(t, k8serrors.IsInternalError(err), err) require.Equal(t,
require.Equal(t, &k8serrors.StatusError{ expectedWhoAmIRequestResponse(
ErrStatus: metav1.Status{ serviceaccount.MakeUsername(namespaceName, saName),
Status: metav1.StatusFailure, []string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"},
Code: http.StatusInternalServerError, nil,
Reason: metav1.StatusReasonInternalError, ),
Details: &metav1.StatusDetails{ whoAmI,
Causes: []metav1.StatusCause{ )
})
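The expectations above are built with serviceaccount.MakeUsername, which produces the well-known "system:serviceaccount:<namespace>:<name>" form. A local stand-in (not the library function itself) to show exactly what string the test compares against:

package main

import "fmt"

// makeSAUsername mirrors the format that serviceaccount.MakeUsername produces.
func makeSAUsername(namespace, name string) string {
	return fmt.Sprintf("system:serviceaccount:%s:%s", namespace, name)
}

func main() {
	fmt.Println(makeSAUsername("test-ns", "test-sa"))
	// system:serviceaccount:test-ns:test-sa
}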
t.Run("WhoAmIRequests and SA token request", func(t *testing.T) {
namespaceName := createTestNamespace(t, adminClient)
kubeClient := adminClient.CoreV1()
saName, _, saUID := createServiceAccountToken(ctx, t, adminClient, namespaceName)
_, tokenRequestProbeErr := kubeClient.ServiceAccounts(namespaceName).CreateToken(ctx, saName, &authenticationv1.TokenRequest{}, metav1.CreateOptions{})
if k8serrors.IsNotFound(tokenRequestProbeErr) && tokenRequestProbeErr.Error() == "the server could not find the requested resource" {
return // stop test early since the token request API is not enabled on this cluster - other errors are caught below
}
pod, err := kubeClient.Pods(namespaceName).Create(ctx, &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-impersonation-proxy-",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{ {
Message: "unimplemented functionality - unable to act as current user", Name: "ignored-but-required",
Image: "does-not-matter",
}, },
}, },
ServiceAccountName: saName,
}, },
Message: "Internal error occurred: unimplemented functionality - unable to act as current user", }, metav1.CreateOptions{})
require.NoError(t, err)
tokenRequestBadAudience, err := kubeClient.ServiceAccounts(namespaceName).CreateToken(ctx, saName, &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"should-fail-because-wrong-audience"}, // anything that is not an API server audience
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "Pod",
APIVersion: "",
Name: pod.Name,
UID: pod.UID,
}, },
}, err) },
}, metav1.CreateOptions{})
require.NoError(t, err)
impersonationProxySABadAudPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t,
&loginv1alpha1.ClusterCredential{Token: tokenRequestBadAudience.Status.Token},
impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge
_, badAudErr := impersonationProxySABadAudPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
require.True(t, k8serrors.IsUnauthorized(badAudErr), library.Sdump(badAudErr))
tokenRequest, err := kubeClient.ServiceAccounts(namespaceName).CreateToken(ctx, saName, &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{},
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "Pod",
APIVersion: "",
Name: pod.Name,
UID: pod.UID,
},
},
}, metav1.CreateOptions{})
require.NoError(t, err)
impersonationProxySAClient := newImpersonationProxyClientWithCredentials(t,
&loginv1alpha1.ClusterCredential{Token: tokenRequest.Status.Token},
impersonationProxyURL, impersonationProxyCACertPEM, nil)
whoAmITokenReq, err := impersonationProxySAClient.PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
require.NoError(t, err)
// New service account tokens include the pod info in the extra fields.
require.Equal(t,
expectedWhoAmIRequestResponse(
serviceaccount.MakeUsername(namespaceName, saName),
[]string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"},
map[string]identityv1alpha1.ExtraValue{
"authentication.kubernetes.io/pod-name": {pod.Name},
"authentication.kubernetes.io/pod-uid": {string(pod.UID)},
},
),
whoAmITokenReq,
)
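The comment above notes that tokens minted through the TokenRequest API with a BoundObjectRef carry the pod's name and UID, which surface as the authentication.kubernetes.io/pod-name and pod-uid extra values. One quick way to see where those values come from is to base64url-decode the token's JWT payload. This is only a debugging sketch; the exact claim names and layout vary by Kubernetes version, so treat the "kubernetes.io" claim mentioned in the comments as an assumption.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeJWTPayload returns the raw JSON claims of a JWT without verifying its signature.
// For inspection only - never trust unverified claims.
func decodeJWTPayload(token string) (string, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return "", fmt.Errorf("expected a JWT with 3 segments, got %d", len(parts))
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return "", err
	}
	return string(payload), nil
}

func main() {
	// With a real bound service account token, the decoded claims typically include a
	// "kubernetes.io" section naming the pod that the token is bound to.
	claims, err := decodeJWTPayload("header.eyJzdWIiOiJkZW1vIn0.signature")
	fmt.Println(claims, err) // {"sub":"demo"} <nil>
}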
// allow the test SA to create CSRs
library.CreateTestClusterRoleBinding(t,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: saName, Namespace: namespaceName},
rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "system:node-bootstrapper"},
)
library.WaitForUserToHaveAccess(t, serviceaccount.MakeUsername(namespaceName, saName), []string{}, &authorizationv1.ResourceAttributes{
Verb: "create", Group: certificatesv1.GroupName, Version: "*", Resource: "certificatesigningrequests",
})
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
csrPEM, err := cert.MakeCSR(privateKey, &pkix.Name{
CommonName: "panda-man",
Organization: []string{"living-the-dream", "need-more-sleep"},
}, nil, nil)
require.NoError(t, err)
csrName, _, err := csr.RequestCertificate(
impersonationProxySAClient.Kubernetes,
csrPEM,
"",
certificatesv1.KubeAPIServerClientSignerName,
[]certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
privateKey,
)
require.NoError(t, err)
saCSR, err := impersonationProxySAClient.Kubernetes.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{})
require.NoError(t, err)
err = adminClient.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{})
require.NoError(t, err)
// make sure the user info that the CSR captured matches the SA, including the UID
require.Equal(t, serviceaccount.MakeUsername(namespaceName, saName), saCSR.Spec.Username)
require.Equal(t, string(saUID), saCSR.Spec.UID)
require.Equal(t, []string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"}, saCSR.Spec.Groups)
require.Equal(t, map[string]certificatesv1beta1.ExtraValue{
"authentication.kubernetes.io/pod-name": {pod.Name},
"authentication.kubernetes.io/pod-uid": {string(pod.UID)},
}, saCSR.Spec.Extra)
}) })
t.Run("kubectl as a client", func(t *testing.T) { t.Run("kubectl as a client", func(t *testing.T) {
@ -1581,8 +1715,8 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC
if tlsConfig != nil && tlsConfig.GetClientCertificate != nil { if tlsConfig != nil && tlsConfig.GetClientCertificate != nil {
cert, err := tlsConfig.GetClientCertificate(nil) cert, err := tlsConfig.GetClientCertificate(nil)
require.NoError(t, err) require.NoError(t, err)
if len(cert.Certificate) > 0 {
require.Len(t, cert.Certificate, 1) require.Len(t, cert.Certificate, 1)
publicKey := pem.EncodeToMemory(&pem.Block{ publicKey := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE", Type: "CERTIFICATE",
Bytes: cert.Certificate[0], Bytes: cert.Certificate[0],
@ -1593,6 +1727,7 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC
require.NoError(t, err) require.NoError(t, err)
out.ClientKeyData = string(privateKey) out.ClientKeyData = string(privateKey)
} }
}
if *out == (loginv1alpha1.ClusterCredential{}) { if *out == (loginv1alpha1.ClusterCredential{}) {
t.Fatal("failed to get creds for config") t.Fatal("failed to get creds for config")
@ -1600,3 +1735,39 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC
return out return out
} }
func getUIDAndExtraViaCSR(ctx context.Context, t *testing.T, uid string, client kubernetes.Interface) (string, map[string]certificatesv1beta1.ExtraValue) {
t.Helper()
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
csrPEM, err := cert.MakeCSR(privateKey, &pkix.Name{
CommonName: "panda-man",
Organization: []string{"living-the-dream", "need-more-sleep"},
}, nil, nil)
require.NoError(t, err)
csrName, _, err := csr.RequestCertificate(
client,
csrPEM,
"",
certificatesv1.KubeAPIServerClientSignerName,
[]certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
privateKey,
)
require.NoError(t, err)
csReq, err := client.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{})
require.NoError(t, err)
err = client.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{})
require.NoError(t, err)
outUID := uid // in the future this may not be empty on some clusters
if len(outUID) == 0 {
outUID = csReq.Spec.UID
}
return outUID, csReq.Spec.Extra
}
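The helper above builds its PEM-encoded certificate request with client-go's cert.MakeCSR. For readers who prefer the standard library, roughly the same PEM can be produced with crypto/x509 directly; this is a sketch of the equivalent construction, not a drop-in replacement for the helper:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
)

func main() {
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Build a CSR with the same subject used in the helper above.
	der, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName:   "panda-man",
			Organization: []string{"living-the-dream", "need-more-sleep"},
		},
	}, privateKey)
	if err != nil {
		panic(err)
	}
	csrPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})
	fmt.Printf("%s", csrPEM)
}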


@ -337,6 +337,6 @@ status:
pinnipedExe, pinnipedExe,
kubeconfigPath, kubeconfigPath,
env.SupervisorUpstreamOIDC.Username, env.SupervisorUpstreamOIDC.Username,
expectedGroupsPlusUnauthenticated, expectedGroupsPlusAuthenticated,
) )
} }


@ -48,10 +48,15 @@ func TestStorageGarbageCollection(t *testing.T) {
// in the same namespace just to get the controller to respond faster. // in the same namespace just to get the controller to respond faster.
// This is just a performance optimization to make this test pass faster because otherwise // This is just a performance optimization to make this test pass faster because otherwise
// this test has to wait ~3 minutes for the controller's next full-resync. // this test has to wait ~3 minutes for the controller's next full-resync.
stopCh := make(chan bool, 1) // It is important that this channel be buffered. stopCh := make(chan struct{})
go updateSecretEveryTwoSeconds(t, stopCh, secrets, secretNotYetExpired) errCh := make(chan error)
go updateSecretEveryTwoSeconds(stopCh, errCh, secrets, secretNotYetExpired)
t.Cleanup(func() { t.Cleanup(func() {
stopCh <- true close(stopCh)
if updateErr := <-errCh; updateErr != nil {
panic(updateErr)
}
}) })
// Wait long enough for the next periodic sweep of the GC controller for the secrets to be deleted, which // Wait long enough for the next periodic sweep of the GC controller for the secrets to be deleted, which
@ -69,10 +74,15 @@ func TestStorageGarbageCollection(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
func updateSecretEveryTwoSeconds(t *testing.T, stopCh chan bool, secrets corev1client.SecretInterface, secret *v1.Secret) { func updateSecretEveryTwoSeconds(stopCh chan struct{}, errCh chan error, secrets corev1client.SecretInterface, secret *v1.Secret) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel() defer cancel()
var updateErr error
defer func() {
errCh <- updateErr
}()
i := 0 i := 0
for { for {
select { select {
@ -87,9 +97,25 @@ func updateSecretEveryTwoSeconds(t *testing.T, stopCh chan bool, secrets corev1c
i++ i++
secret.Data["foo"] = []byte(fmt.Sprintf("bar-%d", i)) secret.Data["foo"] = []byte(fmt.Sprintf("bar-%d", i))
var updateErr error
secret, updateErr = secrets.Update(ctx, secret, metav1.UpdateOptions{}) secret, updateErr = secrets.Update(ctx, secret, metav1.UpdateOptions{})
require.NoError(t, updateErr)
switch {
case updateErr == nil:
// continue to next update
case k8serrors.IsConflict(updateErr), k8serrors.IsNotFound(updateErr):
select {
case _, ok := <-stopCh:
if !ok { // stopCh is closed, meaning the test is already finished, so these errors are expected
updateErr = nil
}
default:
}
return // even if the error is expected, we must stop
default:
return // unexpected error
}
} }
} }
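The rewrite above replaces the buffered bool channel with a closed struct{} channel plus an error channel, so the updater goroutine can report any unexpected failure back to the test during cleanup instead of asserting from a non-test goroutine. The coordination pattern in isolation, with illustrative names:

package main

import (
	"fmt"
	"time"
)

// work runs until stopCh is closed and always reports its final error
// (nil on clean shutdown) on errCh.
func work(stopCh <-chan struct{}, errCh chan<- error) {
	var err error
	defer func() { errCh <- err }() // report the outcome, even on early return
	for {
		select {
		case <-stopCh:
			return // cleanup closed stopCh; exit cleanly with err == nil
		case <-time.After(10 * time.Millisecond):
			// Do one unit of periodic work here. On an unexpected failure,
			// set err and return; the caller receives it from errCh.
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	errCh := make(chan error)
	go work(stopCh, errCh)

	time.Sleep(50 * time.Millisecond) // stand-in for the body of the test
	close(stopCh)                     // what t.Cleanup does in the test above
	fmt.Println(<-errCh)              // <nil>; a non-nil error would be surfaced by the cleanup
}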


@ -37,7 +37,40 @@ func TestSupervisorUpstreamOIDCDiscovery(t *testing.T) {
Type: "OIDCDiscoverySucceeded", Type: "OIDCDiscoverySucceeded",
Status: v1alpha1.ConditionFalse, Status: v1alpha1.ConditionFalse,
Reason: "Unreachable", Reason: "Unreachable",
Message: `failed to perform OIDC discovery against "https://127.0.0.1:444444/issuer"`, Message: `failed to perform OIDC discovery against "https://127.0.0.1:444444/issuer":
Get "https://127.0.0.1:444444/issuer/.well-known/openid-configuration": dial tcp: address 444444: in [truncated 10 chars]`,
},
})
})
t.Run("invalid issuer with trailing slash", func(t *testing.T) {
t.Parallel()
spec := v1alpha1.OIDCIdentityProviderSpec{
Issuer: env.SupervisorUpstreamOIDC.Issuer + "/",
TLS: &v1alpha1.TLSSpec{
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamOIDC.CABundle)),
},
AuthorizationConfig: v1alpha1.OIDCAuthorizationConfig{
AdditionalScopes: []string{"email", "profile"},
},
Client: v1alpha1.OIDCClient{
SecretName: library.CreateClientCredsSecret(t, "test-client-id", "test-client-secret").Name,
},
}
upstream := library.CreateTestOIDCIdentityProvider(t, spec, v1alpha1.PhaseError)
expectUpstreamConditions(t, upstream, []v1alpha1.Condition{
{
Type: "ClientCredentialsValid",
Status: v1alpha1.ConditionTrue,
Reason: "Success",
Message: "loaded client credentials",
},
{
Type: "OIDCDiscoverySucceeded",
Status: v1alpha1.ConditionFalse,
Reason: "Unreachable",
Message: `failed to perform OIDC discovery against "` + env.SupervisorUpstreamOIDC.Issuer + `/":
oidc: issuer did not match the issuer returned by provider, expected "` + env.SupervisorUpstreamOIDC.Issuer + `/" got "` + env.SupervisorUpstreamOIDC.Issuer + `"`,
}, },
}) })
}) })
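The new test case above exercises the trailing-slash failure mode: OIDC discovery succeeds at the HTTP level, but the issuer string returned in the discovery document does not match the configured issuer byte for byte, so the client rejects it. The error text matches the message produced by the coreos go-oidc library; assuming that library is what performs discovery, the check can be reproduced directly. The sketch below uses go-oidc v3 and a public issuer, needs network access, and may differ from the module version the code under test actually imports.

package main

import (
	"context"
	"fmt"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()

	// Issuer values must match the discovery document exactly;
	// a trailing slash is enough to make NewProvider fail.
	_, err := oidc.NewProvider(ctx, "https://accounts.google.com/")
	fmt.Println(err)
	// oidc: issuer did not match the issuer returned by provider,
	// expected "https://accounts.google.com/" got "https://accounts.google.com"
}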