Merge branch 'main' into username-and-subject-claims
This commit is contained in:
commit
40c6a67631
@ -8,7 +8,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid
|
||||
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid;SameIssuerHostMustUseSameSecret
|
||||
type OIDCProviderStatusCondition string
|
||||
|
||||
const (
|
||||
|
136
cmd/pinniped/cmd/deprecated.go
Normal file
136
cmd/pinniped/cmd/deprecated.go
Normal file
@ -0,0 +1,136 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
)
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
rootCmd.AddCommand(legacyGetKubeconfigCommand(kubeconfigRealDeps()))
|
||||
rootCmd.AddCommand(legacyExchangeTokenCommand(staticLoginRealDeps()))
|
||||
}
|
||||
|
||||
func legacyGetKubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
|
||||
var (
|
||||
cmd = &cobra.Command{
|
||||
Hidden: true,
|
||||
Deprecated: "Please use `pinniped get kubeconfig` instead.",
|
||||
|
||||
Args: cobra.NoArgs, // do not accept positional arguments for this command
|
||||
Use: "get-kubeconfig",
|
||||
Short: "Print a kubeconfig for authenticating into a cluster via Pinniped",
|
||||
Long: here.Doc(`
|
||||
Print a kubeconfig for authenticating into a cluster via Pinniped.
|
||||
Requires admin-like access to the cluster using the current
|
||||
kubeconfig context in order to access Pinniped's metadata.
|
||||
The current kubeconfig is found similar to how kubectl finds it:
|
||||
using the value of the --kubeconfig option, or if that is not
|
||||
specified then from the value of the KUBECONFIG environment
|
||||
variable, or if that is not specified then it defaults to
|
||||
.kube/config in your home directory.
|
||||
Prints a kubeconfig which is suitable to access the cluster using
|
||||
Pinniped as the authentication mechanism. This kubeconfig output
|
||||
can be saved to a file and used with future kubectl commands, e.g.:
|
||||
pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
|
||||
kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods
|
||||
`),
|
||||
}
|
||||
token string
|
||||
kubeconfig string
|
||||
contextOverride string
|
||||
namespace string
|
||||
authenticatorType string
|
||||
authenticatorName string
|
||||
)
|
||||
|
||||
cmd.Flags().StringVar(&token, "token", "", "Credential to include in the resulting kubeconfig output (Required)")
|
||||
cmd.Flags().StringVar(&kubeconfig, "kubeconfig", "", "Path to the kubeconfig file")
|
||||
cmd.Flags().StringVar(&contextOverride, "kubeconfig-context", "", "Kubeconfig context override")
|
||||
cmd.Flags().StringVar(&namespace, "pinniped-namespace", "pinniped-concierge", "Namespace in which Pinniped was installed")
|
||||
cmd.Flags().StringVar(&authenticatorType, "authenticator-type", "", "Authenticator type (e.g., 'webhook', 'jwt')")
|
||||
cmd.Flags().StringVar(&authenticatorName, "authenticator-name", "", "Authenticator name")
|
||||
mustMarkRequired(cmd, "token")
|
||||
plog.RemoveKlogGlobalFlags()
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
return runGetKubeconfig(cmd.OutOrStdout(), deps, getKubeconfigParams{
|
||||
kubeconfigPath: kubeconfig,
|
||||
kubeconfigContextOverride: contextOverride,
|
||||
staticToken: token,
|
||||
concierge: getKubeconfigConciergeParams{
|
||||
namespace: namespace,
|
||||
authenticatorName: authenticatorName,
|
||||
authenticatorType: authenticatorType,
|
||||
},
|
||||
})
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func legacyExchangeTokenCommand(deps staticLoginDeps) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Hidden: true,
|
||||
Deprecated: "Please use `pinniped login static` instead.",
|
||||
|
||||
Args: cobra.NoArgs, // do not accept positional arguments for this command
|
||||
Use: "exchange-credential",
|
||||
Short: "Exchange a credential for a cluster-specific access credential",
|
||||
Long: here.Doc(`
|
||||
Exchange a credential which proves your identity for a time-limited,
|
||||
cluster-specific access credential.
|
||||
Designed to be conveniently used as an credential plugin for kubectl.
|
||||
See the help message for 'pinniped get-kubeconfig' for more
|
||||
information about setting up a kubeconfig file using Pinniped.
|
||||
Requires all of the following environment variables, which are
|
||||
typically set in the kubeconfig:
|
||||
- PINNIPED_TOKEN: the token to send to Pinniped for exchange
|
||||
- PINNIPED_NAMESPACE: the namespace of the authenticator to authenticate
|
||||
against
|
||||
- PINNIPED_AUTHENTICATOR_TYPE: the type of authenticator to authenticate
|
||||
against (e.g., "webhook", "jwt")
|
||||
- PINNIPED_AUTHENTICATOR_NAME: the name of the authenticator to authenticate
|
||||
against
|
||||
- PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
|
||||
Pinniped's HTTPS endpoint
|
||||
- PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
|
||||
exchange API
|
||||
For more information about credential plugins in general, see
|
||||
https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
|
||||
`),
|
||||
}
|
||||
plog.RemoveKlogGlobalFlags()
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
// Make a little helper to grab OS environment variables and keep a list that were missing.
|
||||
var missing []string
|
||||
getEnv := func(name string) string {
|
||||
value, ok := os.LookupEnv(name)
|
||||
if !ok {
|
||||
missing = append(missing, name)
|
||||
}
|
||||
return value
|
||||
}
|
||||
flags := staticLoginParams{
|
||||
staticToken: getEnv("PINNIPED_TOKEN"),
|
||||
conciergeEnabled: true,
|
||||
conciergeNamespace: getEnv("PINNIPED_NAMESPACE"),
|
||||
conciergeAuthenticatorType: getEnv("PINNIPED_AUTHENTICATOR_TYPE"),
|
||||
conciergeAuthenticatorName: getEnv("PINNIPED_AUTHENTICATOR_NAME"),
|
||||
conciergeEndpoint: getEnv("PINNIPED_K8S_API_ENDPOINT"),
|
||||
conciergeCABundle: base64.StdEncoding.EncodeToString([]byte(getEnv("PINNIPED_CA_BUNDLE"))),
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
return fmt.Errorf("failed to get credential: required environment variable(s) not set: %v", missing)
|
||||
}
|
||||
return runStaticLogin(cmd.OutOrStdout(), deps, flags)
|
||||
}
|
||||
return cmd
|
||||
}
|
@ -1,173 +0,0 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
auth1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
"go.pinniped.dev/internal/client"
|
||||
"go.pinniped.dev/internal/constable"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
)
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
rootCmd.AddCommand(newExchangeCredentialCmd(os.Args, os.Stdout, os.Stderr).cmd)
|
||||
}
|
||||
|
||||
type exchangeCredentialCommand struct {
|
||||
// runFunc is called by the cobra.Command.Run hook. It is included here for
|
||||
// testability.
|
||||
runFunc func(stdout, stderr io.Writer)
|
||||
|
||||
// cmd is the cobra.Command for this CLI command. It is included here for
|
||||
// testability.
|
||||
cmd *cobra.Command
|
||||
}
|
||||
|
||||
func newExchangeCredentialCmd(args []string, stdout, stderr io.Writer) *exchangeCredentialCommand {
|
||||
c := &exchangeCredentialCommand{
|
||||
runFunc: runExchangeCredential,
|
||||
}
|
||||
|
||||
c.cmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
c.runFunc(stdout, stderr)
|
||||
},
|
||||
Args: cobra.NoArgs, // do not accept positional arguments for this command
|
||||
Use: "exchange-credential",
|
||||
Short: "Exchange a credential for a cluster-specific access credential",
|
||||
Long: here.Doc(`
|
||||
Exchange a credential which proves your identity for a time-limited,
|
||||
cluster-specific access credential.
|
||||
|
||||
Designed to be conveniently used as an credential plugin for kubectl.
|
||||
See the help message for 'pinniped get-kubeconfig' for more
|
||||
information about setting up a kubeconfig file using Pinniped.
|
||||
|
||||
Requires all of the following environment variables, which are
|
||||
typically set in the kubeconfig:
|
||||
- PINNIPED_TOKEN: the token to send to Pinniped for exchange
|
||||
- PINNIPED_NAMESPACE: the namespace of the authenticator to authenticate
|
||||
against
|
||||
- PINNIPED_AUTHENTICATOR_TYPE: the type of authenticator to authenticate
|
||||
against (e.g., "webhook", "jwt")
|
||||
- PINNIPED_AUTHENTICATOR_NAME: the name of the authenticator to authenticate
|
||||
against
|
||||
- PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
|
||||
Pinniped's HTTPS endpoint
|
||||
- PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
|
||||
exchange API
|
||||
|
||||
For more information about credential plugins in general, see
|
||||
https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
|
||||
`),
|
||||
}
|
||||
|
||||
c.cmd.SetArgs(args)
|
||||
c.cmd.SetOut(stdout)
|
||||
c.cmd.SetErr(stderr)
|
||||
|
||||
plog.RemoveKlogGlobalFlags()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
type envGetter func(string) (string, bool)
|
||||
type tokenExchanger func(
|
||||
ctx context.Context,
|
||||
namespace string,
|
||||
authenticator corev1.TypedLocalObjectReference,
|
||||
token string,
|
||||
caBundle string,
|
||||
apiEndpoint string,
|
||||
) (*clientauthenticationv1beta1.ExecCredential, error)
|
||||
|
||||
const (
|
||||
ErrMissingEnvVar = constable.Error("failed to get credential: environment variable not set")
|
||||
ErrInvalidAuthenticatorType = constable.Error("invalid authenticator type")
|
||||
)
|
||||
|
||||
func runExchangeCredential(stdout, _ io.Writer) {
|
||||
err := exchangeCredential(os.LookupEnv, client.ExchangeToken, stdout, 30*time.Second)
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "%s\n", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func exchangeCredential(envGetter envGetter, tokenExchanger tokenExchanger, outputWriter io.Writer, timeout time.Duration) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
namespace, varExists := envGetter("PINNIPED_NAMESPACE")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_NAMESPACE")
|
||||
}
|
||||
|
||||
authenticatorType, varExists := envGetter("PINNIPED_AUTHENTICATOR_TYPE")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_AUTHENTICATOR_TYPE")
|
||||
}
|
||||
|
||||
authenticatorName, varExists := envGetter("PINNIPED_AUTHENTICATOR_NAME")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_AUTHENTICATOR_NAME")
|
||||
}
|
||||
|
||||
token, varExists := envGetter("PINNIPED_TOKEN")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_TOKEN")
|
||||
}
|
||||
|
||||
caBundle, varExists := envGetter("PINNIPED_CA_BUNDLE")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_CA_BUNDLE")
|
||||
}
|
||||
|
||||
apiEndpoint, varExists := envGetter("PINNIPED_K8S_API_ENDPOINT")
|
||||
if !varExists {
|
||||
return envVarNotSetError("PINNIPED_K8S_API_ENDPOINT")
|
||||
}
|
||||
|
||||
authenticator := corev1.TypedLocalObjectReference{Name: authenticatorName}
|
||||
switch strings.ToLower(authenticatorType) {
|
||||
case "webhook":
|
||||
authenticator.APIGroup = &auth1alpha1.SchemeGroupVersion.Group
|
||||
authenticator.Kind = "WebhookAuthenticator"
|
||||
case "jwt":
|
||||
authenticator.APIGroup = &auth1alpha1.SchemeGroupVersion.Group
|
||||
authenticator.Kind = "JWTAuthenticator"
|
||||
default:
|
||||
return fmt.Errorf(`%w: %q, supported values are "webhook" and "jwt"`, ErrInvalidAuthenticatorType, authenticatorType)
|
||||
}
|
||||
|
||||
cred, err := tokenExchanger(ctx, namespace, authenticator, token, caBundle, apiEndpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get credential: %w", err)
|
||||
}
|
||||
|
||||
err = json.NewEncoder(outputWriter).Encode(cred)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal response to stdout: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func envVarNotSetError(varName string) error {
|
||||
return fmt.Errorf("%w: %s", ErrMissingEnvVar, varName)
|
||||
}
|
@ -1,342 +0,0 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sclevine/spec"
|
||||
"github.com/sclevine/spec/report"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
auth1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
knownGoodUsageForExchangeCredential = here.Doc(`
|
||||
Usage:
|
||||
exchange-credential [flags]
|
||||
|
||||
Flags:
|
||||
-h, --help help for exchange-credential
|
||||
|
||||
`)
|
||||
|
||||
knownGoodHelpForExchangeCredential = here.Doc(`
|
||||
Exchange a credential which proves your identity for a time-limited,
|
||||
cluster-specific access credential.
|
||||
|
||||
Designed to be conveniently used as an credential plugin for kubectl.
|
||||
See the help message for 'pinniped get-kubeconfig' for more
|
||||
information about setting up a kubeconfig file using Pinniped.
|
||||
|
||||
Requires all of the following environment variables, which are
|
||||
typically set in the kubeconfig:
|
||||
- PINNIPED_TOKEN: the token to send to Pinniped for exchange
|
||||
- PINNIPED_NAMESPACE: the namespace of the authenticator to authenticate
|
||||
against
|
||||
- PINNIPED_AUTHENTICATOR_TYPE: the type of authenticator to authenticate
|
||||
against (e.g., "webhook", "jwt")
|
||||
- PINNIPED_AUTHENTICATOR_NAME: the name of the authenticator to authenticate
|
||||
against
|
||||
- PINNIPED_CA_BUNDLE: the CA bundle to trust when calling
|
||||
Pinniped's HTTPS endpoint
|
||||
- PINNIPED_K8S_API_ENDPOINT: the URL for the Pinniped credential
|
||||
exchange API
|
||||
|
||||
For more information about credential plugins in general, see
|
||||
https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
|
||||
|
||||
Usage:
|
||||
exchange-credential [flags]
|
||||
|
||||
Flags:
|
||||
-h, --help help for exchange-credential
|
||||
`)
|
||||
)
|
||||
|
||||
func TestNewCredentialExchangeCmd(t *testing.T) {
|
||||
spec.Run(t, "newCredentialExchangeCmd", func(t *testing.T, when spec.G, it spec.S) {
|
||||
var r *require.Assertions
|
||||
var stdout, stderr *bytes.Buffer
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
|
||||
stdout, stderr = bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})
|
||||
})
|
||||
|
||||
it("calls runFunc and does not print usage or help when correct arguments and flags are used", func() {
|
||||
c := newExchangeCredentialCmd([]string{}, stdout, stderr)
|
||||
|
||||
runFuncCalled := false
|
||||
c.runFunc = func(out, err io.Writer) {
|
||||
runFuncCalled = true
|
||||
}
|
||||
|
||||
r.NoError(c.cmd.Execute())
|
||||
r.True(runFuncCalled)
|
||||
r.Empty(stdout.String())
|
||||
r.Empty(stderr.String())
|
||||
})
|
||||
|
||||
it("fails when args are passed", func() {
|
||||
c := newExchangeCredentialCmd([]string{"some-arg"}, stdout, stderr)
|
||||
|
||||
runFuncCalled := false
|
||||
c.runFunc = func(out, err io.Writer) {
|
||||
runFuncCalled = true
|
||||
}
|
||||
|
||||
errorMessage := `unknown command "some-arg" for "exchange-credential"`
|
||||
r.EqualError(c.cmd.Execute(), errorMessage)
|
||||
r.False(runFuncCalled)
|
||||
|
||||
output := "Error: " + errorMessage + "\n" + knownGoodUsageForExchangeCredential
|
||||
r.Equal(output, stdout.String())
|
||||
r.Empty(stderr.String())
|
||||
})
|
||||
|
||||
it("prints a nice help message", func() {
|
||||
c := newExchangeCredentialCmd([]string{"--help"}, stdout, stderr)
|
||||
|
||||
runFuncCalled := false
|
||||
c.runFunc = func(out, err io.Writer) {
|
||||
runFuncCalled = true
|
||||
}
|
||||
|
||||
r.NoError(c.cmd.Execute())
|
||||
r.False(runFuncCalled)
|
||||
r.Equal(knownGoodHelpForExchangeCredential, stdout.String())
|
||||
r.Empty(stderr.String())
|
||||
})
|
||||
}, spec.Sequential(), spec.Report(report.Terminal{}))
|
||||
}
|
||||
|
||||
func TestExchangeCredential(t *testing.T) {
|
||||
spec.Run(t, "cmd.exchangeCredential", func(t *testing.T, when spec.G, it spec.S) {
|
||||
var r *require.Assertions
|
||||
var buffer *bytes.Buffer
|
||||
var tokenExchanger tokenExchanger
|
||||
var fakeEnv map[string]string
|
||||
|
||||
var envGetter envGetter = func(envVarName string) (string, bool) {
|
||||
value, present := fakeEnv[envVarName]
|
||||
if !present {
|
||||
return "", false
|
||||
}
|
||||
return value, true
|
||||
}
|
||||
|
||||
it.Before(func() {
|
||||
r = require.New(t)
|
||||
buffer = new(bytes.Buffer)
|
||||
fakeEnv = map[string]string{
|
||||
"PINNIPED_NAMESPACE": "namespace from env",
|
||||
"PINNIPED_AUTHENTICATOR_TYPE": "Webhook",
|
||||
"PINNIPED_AUTHENTICATOR_NAME": "webhook name from env",
|
||||
"PINNIPED_TOKEN": "token from env",
|
||||
"PINNIPED_CA_BUNDLE": "ca bundle from env",
|
||||
"PINNIPED_K8S_API_ENDPOINT": "k8s api from env",
|
||||
}
|
||||
})
|
||||
|
||||
when("env vars are missing", func() {
|
||||
it("returns an error when PINNIPED_NAMESPACE is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_NAMESPACE")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_NAMESPACE")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_AUTHENTICATOR_TYPE is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_AUTHENTICATOR_TYPE")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_AUTHENTICATOR_TYPE")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_AUTHENTICATOR_NAME is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_AUTHENTICATOR_NAME")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_AUTHENTICATOR_NAME")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_TOKEN is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_TOKEN")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_TOKEN")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_CA_BUNDLE is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_CA_BUNDLE")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_CA_BUNDLE")
|
||||
})
|
||||
|
||||
it("returns an error when PINNIPED_K8S_API_ENDPOINT is missing", func() {
|
||||
delete(fakeEnv, "PINNIPED_K8S_API_ENDPOINT")
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: environment variable not set: PINNIPED_K8S_API_ENDPOINT")
|
||||
})
|
||||
})
|
||||
|
||||
when("env vars are invalid", func() {
|
||||
it("returns an error when PINNIPED_AUTHENTICATOR_TYPE is missing", func() {
|
||||
fakeEnv["PINNIPED_AUTHENTICATOR_TYPE"] = "invalid"
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, `invalid authenticator type: "invalid", supported values are "webhook" and "jwt"`)
|
||||
})
|
||||
})
|
||||
|
||||
when("the token exchange fails", func() {
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
}
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.EqualError(err, "failed to get credential: some error")
|
||||
})
|
||||
})
|
||||
|
||||
when("the JSON encoder fails", func() {
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "some token",
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, &testutil.ErrorWriter{ReturnError: fmt.Errorf("some IO error")}, 30*time.Second)
|
||||
r.EqualError(err, "failed to marshal response to stdout: some IO error")
|
||||
})
|
||||
})
|
||||
|
||||
when("the token exchange times out", func() {
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
Token: "some token",
|
||||
},
|
||||
}, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
it("returns an error", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 1*time.Millisecond)
|
||||
r.EqualError(err, "failed to get credential: context deadline exceeded")
|
||||
})
|
||||
})
|
||||
|
||||
when("the token exchange succeeds", func() {
|
||||
var actualNamespace, actualToken, actualCaBundle, actualAPIEndpoint string
|
||||
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
actualNamespace, actualToken, actualCaBundle, actualAPIEndpoint = namespace, token, caBundle, apiEndpoint
|
||||
now := metav1.NewTime(time.Date(2020, 7, 29, 1, 2, 3, 0, time.UTC))
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &now,
|
||||
ClientCertificateData: "some certificate",
|
||||
ClientKeyData: "some key",
|
||||
Token: "some token",
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
})
|
||||
|
||||
it("writes the execCredential to the given writer", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.NoError(err)
|
||||
r.Equal(fakeEnv["PINNIPED_NAMESPACE"], actualNamespace)
|
||||
r.Equal(fakeEnv["PINNIPED_TOKEN"], actualToken)
|
||||
r.Equal(fakeEnv["PINNIPED_CA_BUNDLE"], actualCaBundle)
|
||||
r.Equal(fakeEnv["PINNIPED_K8S_API_ENDPOINT"], actualAPIEndpoint)
|
||||
expected := `{
|
||||
"kind": "ExecCredential",
|
||||
"apiVersion": "client.authentication.k8s.io/v1beta1",
|
||||
"spec": {},
|
||||
"status": {
|
||||
"expirationTimestamp":"2020-07-29T01:02:03Z",
|
||||
"clientCertificateData": "some certificate",
|
||||
"clientKeyData":"some key",
|
||||
"token": "some token"
|
||||
}
|
||||
}`
|
||||
r.JSONEq(expected, buffer.String())
|
||||
})
|
||||
})
|
||||
|
||||
when("the authenticator info is passed", func() {
|
||||
var actualAuthenticator corev1.TypedLocalObjectReference
|
||||
|
||||
it.Before(func() {
|
||||
tokenExchanger = func(ctx context.Context, namespace string, authenticator corev1.TypedLocalObjectReference, token, caBundle, apiEndpoint string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
actualAuthenticator = authenticator
|
||||
return nil, nil
|
||||
}
|
||||
})
|
||||
|
||||
when("the authenticator is of type webhook", func() {
|
||||
it.Before(func() {
|
||||
fakeEnv["PINNIPED_AUTHENTICATOR_TYPE"] = "webhook"
|
||||
fakeEnv["PINNIPED_AUTHENTICATOR_NAME"] = "some-webhook-name"
|
||||
})
|
||||
|
||||
it("passes the correct authenticator type to the token exchanger", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.NoError(err)
|
||||
require.Equal(t, corev1.TypedLocalObjectReference{
|
||||
APIGroup: &auth1alpha1.SchemeGroupVersion.Group,
|
||||
Kind: "WebhookAuthenticator",
|
||||
Name: "some-webhook-name",
|
||||
}, actualAuthenticator)
|
||||
})
|
||||
})
|
||||
|
||||
when("the authenticator is of type jwt", func() {
|
||||
it.Before(func() {
|
||||
fakeEnv["PINNIPED_AUTHENTICATOR_TYPE"] = "jwt"
|
||||
fakeEnv["PINNIPED_AUTHENTICATOR_NAME"] = "some-jwt-authenticator-name"
|
||||
})
|
||||
|
||||
it("passes the correct authenticator type to the token exchanger", func() {
|
||||
err := exchangeCredential(envGetter, tokenExchanger, buffer, 30*time.Second)
|
||||
r.NoError(err)
|
||||
require.Equal(t, corev1.TypedLocalObjectReference{
|
||||
APIGroup: &auth1alpha1.SchemeGroupVersion.Group,
|
||||
Kind: "JWTAuthenticator",
|
||||
Name: "some-jwt-authenticator-name",
|
||||
}, actualAuthenticator)
|
||||
})
|
||||
})
|
||||
})
|
||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
||||
}
|
20
cmd/pinniped/cmd/get.go
Normal file
20
cmd/pinniped/cmd/get.go
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
//nolint: gochecknoglobals
|
||||
var getCmd = &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "get",
|
||||
SilenceUsage: true, // do not print usage message when commands fail
|
||||
}
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
rootCmd.AddCommand(getCmd)
|
||||
}
|
@ -1,346 +0,0 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/config/v1alpha1"
|
||||
pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
|
||||
"go.pinniped.dev/internal/constable"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/plog"
|
||||
)
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
rootCmd.AddCommand(newGetKubeConfigCommand().Command())
|
||||
}
|
||||
|
||||
type getKubeConfigFlags struct {
|
||||
token string
|
||||
kubeconfig string
|
||||
contextOverride string
|
||||
namespace string
|
||||
authenticatorName string
|
||||
authenticatorType string
|
||||
}
|
||||
|
||||
type getKubeConfigCommand struct {
|
||||
flags getKubeConfigFlags
|
||||
// Test mocking points
|
||||
getPathToSelf func() (string, error)
|
||||
kubeClientCreator func(restConfig *rest.Config) (pinnipedclientset.Interface, error)
|
||||
}
|
||||
|
||||
func newGetKubeConfigCommand() *getKubeConfigCommand {
|
||||
return &getKubeConfigCommand{
|
||||
flags: getKubeConfigFlags{
|
||||
namespace: "pinniped-concierge",
|
||||
},
|
||||
getPathToSelf: os.Executable,
|
||||
kubeClientCreator: func(restConfig *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedclientset.NewForConfig(restConfig)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *getKubeConfigCommand) Command() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
RunE: c.run,
|
||||
Args: cobra.NoArgs, // do not accept positional arguments for this command
|
||||
Use: "get-kubeconfig",
|
||||
Short: "Print a kubeconfig for authenticating into a cluster via Pinniped",
|
||||
Long: here.Doc(`
|
||||
Print a kubeconfig for authenticating into a cluster via Pinniped.
|
||||
|
||||
Requires admin-like access to the cluster using the current
|
||||
kubeconfig context in order to access Pinniped's metadata.
|
||||
The current kubeconfig is found similar to how kubectl finds it:
|
||||
using the value of the --kubeconfig option, or if that is not
|
||||
specified then from the value of the KUBECONFIG environment
|
||||
variable, or if that is not specified then it defaults to
|
||||
.kube/config in your home directory.
|
||||
|
||||
Prints a kubeconfig which is suitable to access the cluster using
|
||||
Pinniped as the authentication mechanism. This kubeconfig output
|
||||
can be saved to a file and used with future kubectl commands, e.g.:
|
||||
pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
|
||||
kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods
|
||||
`),
|
||||
}
|
||||
cmd.Flags().StringVar(&c.flags.token, "token", "", "Credential to include in the resulting kubeconfig output (Required)")
|
||||
cmd.Flags().StringVar(&c.flags.kubeconfig, "kubeconfig", c.flags.kubeconfig, "Path to the kubeconfig file")
|
||||
cmd.Flags().StringVar(&c.flags.contextOverride, "kubeconfig-context", c.flags.contextOverride, "Kubeconfig context override")
|
||||
cmd.Flags().StringVar(&c.flags.namespace, "pinniped-namespace", c.flags.namespace, "Namespace in which Pinniped was installed")
|
||||
cmd.Flags().StringVar(&c.flags.authenticatorType, "authenticator-type", c.flags.authenticatorType, "Authenticator type (e.g., 'webhook', 'jwt')")
|
||||
cmd.Flags().StringVar(&c.flags.authenticatorName, "authenticator-name", c.flags.authenticatorType, "Authenticator name")
|
||||
mustMarkRequired(cmd, "token")
|
||||
plog.RemoveKlogGlobalFlags()
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (c *getKubeConfigCommand) run(cmd *cobra.Command, args []string) error {
|
||||
fullPathToSelf, err := c.getPathToSelf()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find path to self: %w", err)
|
||||
}
|
||||
|
||||
clientConfig := newClientConfig(c.flags.kubeconfig, c.flags.contextOverride)
|
||||
|
||||
currentKubeConfig, err := clientConfig.RawConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clientset, err := c.kubeClientCreator(restConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
authenticatorType, authenticatorName := c.flags.authenticatorType, c.flags.authenticatorName
|
||||
if authenticatorType == "" || authenticatorName == "" {
|
||||
authenticatorType, authenticatorName, err = getDefaultAuthenticator(clientset, c.flags.namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
credentialIssuer, err := fetchPinnipedCredentialIssuer(clientset, c.flags.namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if credentialIssuer.Status.KubeConfigInfo == nil {
|
||||
return constable.Error(`CredentialIssuer "pinniped-config" was missing KubeConfigInfo`)
|
||||
}
|
||||
|
||||
v1Cluster, err := copyCurrentClusterFromExistingKubeConfig(currentKubeConfig, c.flags.contextOverride)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = issueWarningForNonMatchingServerOrCA(v1Cluster, credentialIssuer, cmd.ErrOrStderr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := newPinnipedKubeconfig(v1Cluster, fullPathToSelf, c.flags.token, c.flags.namespace, authenticatorType, authenticatorName)
|
||||
|
||||
err = writeConfigAsYAML(cmd.OutOrStdout(), config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func issueWarningForNonMatchingServerOrCA(v1Cluster v1.Cluster, credentialIssuer *configv1alpha1.CredentialIssuer, warningsWriter io.Writer) error {
|
||||
credentialIssuerCA, err := base64.StdEncoding.DecodeString(credentialIssuer.Status.KubeConfigInfo.CertificateAuthorityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v1Cluster.Server != credentialIssuer.Status.KubeConfigInfo.Server ||
|
||||
!bytes.Equal(v1Cluster.CertificateAuthorityData, credentialIssuerCA) {
|
||||
_, err := warningsWriter.Write([]byte("WARNING: Server and certificate authority did not match between local kubeconfig and Pinniped's CredentialIssuer on the cluster. Using local kubeconfig values.\n"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("output write error: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type noAuthenticatorError struct{ Namespace string }
|
||||
|
||||
func (e noAuthenticatorError) Error() string {
|
||||
return fmt.Sprintf(`no authenticators were found in namespace %q`, e.Namespace)
|
||||
}
|
||||
|
||||
type indeterminateAuthenticatorError struct{ Namespace string }
|
||||
|
||||
func (e indeterminateAuthenticatorError) Error() string {
|
||||
return fmt.Sprintf(
|
||||
`multiple authenticators were found in namespace %q, so --authenticator-name/--authenticator-type must be specified`,
|
||||
e.Namespace,
|
||||
)
|
||||
}
|
||||
|
||||
func getDefaultAuthenticator(clientset pinnipedclientset.Interface, namespace string) (string, string, error) {
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
|
||||
defer cancelFunc()
|
||||
|
||||
webhooks, err := clientset.AuthenticationV1alpha1().WebhookAuthenticators(namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
type ref struct{ authenticatorType, authenticatorName string }
|
||||
authenticators := make([]ref, 0, len(webhooks.Items))
|
||||
for _, webhook := range webhooks.Items {
|
||||
authenticators = append(authenticators, ref{authenticatorType: "webhook", authenticatorName: webhook.Name})
|
||||
}
|
||||
|
||||
if len(authenticators) == 0 {
|
||||
return "", "", noAuthenticatorError{namespace}
|
||||
}
|
||||
if len(authenticators) > 1 {
|
||||
return "", "", indeterminateAuthenticatorError{namespace}
|
||||
}
|
||||
return authenticators[0].authenticatorType, authenticators[0].authenticatorName, nil
|
||||
}
|
||||
|
||||
func fetchPinnipedCredentialIssuer(clientset pinnipedclientset.Interface, pinnipedInstallationNamespace string) (*configv1alpha1.CredentialIssuer, error) {
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
|
||||
defer cancelFunc()
|
||||
|
||||
credentialIssuers, err := clientset.ConfigV1alpha1().CredentialIssuers(pinnipedInstallationNamespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(credentialIssuers.Items) == 0 {
|
||||
return nil, constable.Error(fmt.Sprintf(
|
||||
`No CredentialIssuer was found in namespace "%s". Is Pinniped installed on this cluster in namespace "%s"?`,
|
||||
pinnipedInstallationNamespace,
|
||||
pinnipedInstallationNamespace,
|
||||
))
|
||||
}
|
||||
|
||||
if len(credentialIssuers.Items) > 1 {
|
||||
return nil, constable.Error(fmt.Sprintf(
|
||||
`More than one CredentialIssuer was found in namespace "%s"`,
|
||||
pinnipedInstallationNamespace,
|
||||
))
|
||||
}
|
||||
|
||||
return &credentialIssuers.Items[0], nil
|
||||
}
|
||||
|
||||
func newClientConfig(kubeconfigPathOverride string, currentContextName string) clientcmd.ClientConfig {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
loadingRules.ExplicitPath = kubeconfigPathOverride
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{
|
||||
CurrentContext: currentContextName,
|
||||
})
|
||||
return clientConfig
|
||||
}
|
||||
|
||||
func writeConfigAsYAML(outputWriter io.Writer, config v1.Config) error {
|
||||
output, err := yaml.Marshal(&config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("YAML serialization error: %w", err)
|
||||
}
|
||||
|
||||
_, err = outputWriter.Write(output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("output write error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyCurrentClusterFromExistingKubeConfig(currentKubeConfig clientcmdapi.Config, currentContextNameOverride string) (v1.Cluster, error) {
|
||||
v1Cluster := v1.Cluster{}
|
||||
|
||||
contextName := currentKubeConfig.CurrentContext
|
||||
if currentContextNameOverride != "" {
|
||||
contextName = currentContextNameOverride
|
||||
}
|
||||
|
||||
err := v1.Convert_api_Cluster_To_v1_Cluster(
|
||||
currentKubeConfig.Clusters[currentKubeConfig.Contexts[contextName].Cluster],
|
||||
&v1Cluster,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return v1.Cluster{}, err
|
||||
}
|
||||
|
||||
return v1Cluster, nil
|
||||
}
|
||||
|
||||
func newPinnipedKubeconfig(v1Cluster v1.Cluster, fullPathToSelf string, token string, namespace string, authenticatorType string, authenticatorName string) v1.Config {
|
||||
clusterName := "pinniped-cluster"
|
||||
userName := "pinniped-user"
|
||||
|
||||
return v1.Config{
|
||||
Kind: "Config",
|
||||
APIVersion: v1.SchemeGroupVersion.Version,
|
||||
Preferences: v1.Preferences{},
|
||||
Clusters: []v1.NamedCluster{
|
||||
{
|
||||
Name: clusterName,
|
||||
Cluster: v1Cluster,
|
||||
},
|
||||
},
|
||||
Contexts: []v1.NamedContext{
|
||||
{
|
||||
Name: clusterName,
|
||||
Context: v1.Context{
|
||||
Cluster: clusterName,
|
||||
AuthInfo: userName,
|
||||
},
|
||||
},
|
||||
},
|
||||
AuthInfos: []v1.NamedAuthInfo{
|
||||
{
|
||||
Name: userName,
|
||||
AuthInfo: v1.AuthInfo{
|
||||
Exec: &v1.ExecConfig{
|
||||
Command: fullPathToSelf,
|
||||
Args: []string{"exchange-credential"},
|
||||
Env: []v1.ExecEnvVar{
|
||||
{
|
||||
Name: "PINNIPED_K8S_API_ENDPOINT",
|
||||
Value: v1Cluster.Server,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_CA_BUNDLE",
|
||||
Value: string(v1Cluster.CertificateAuthorityData)},
|
||||
{
|
||||
Name: "PINNIPED_NAMESPACE",
|
||||
Value: namespace,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_TOKEN",
|
||||
Value: token,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_AUTHENTICATOR_TYPE",
|
||||
Value: authenticatorType,
|
||||
},
|
||||
{
|
||||
Name: "PINNIPED_AUTHENTICATOR_NAME",
|
||||
Value: authenticatorName,
|
||||
},
|
||||
},
|
||||
APIVersion: clientauthenticationv1beta1.SchemeGroupVersion.String(),
|
||||
InstallHint: "The Pinniped CLI is required to authenticate to the current cluster.\n" +
|
||||
"For more information, please visit https://pinniped.dev",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentContext: clusterName,
|
||||
}
|
||||
}
|
@ -1,399 +0,0 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
coretesting "k8s.io/client-go/testing"
|
||||
|
||||
authv1alpha "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/config/v1alpha1"
|
||||
pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
|
||||
pinnipedfake "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned/fake"
|
||||
"go.pinniped.dev/internal/here"
|
||||
)
|
||||
|
||||
var (
|
||||
knownGoodUsageForGetKubeConfig = here.Doc(`
|
||||
Usage:
|
||||
get-kubeconfig [flags]
|
||||
|
||||
Flags:
|
||||
--authenticator-name string Authenticator name
|
||||
--authenticator-type string Authenticator type (e.g., 'webhook', 'jwt')
|
||||
-h, --help help for get-kubeconfig
|
||||
--kubeconfig string Path to the kubeconfig file
|
||||
--kubeconfig-context string Kubeconfig context override
|
||||
--pinniped-namespace string Namespace in which Pinniped was installed (default "pinniped-concierge")
|
||||
--token string Credential to include in the resulting kubeconfig output (Required)
|
||||
|
||||
`)
|
||||
|
||||
knownGoodHelpForGetKubeConfig = here.Doc(`
|
||||
Print a kubeconfig for authenticating into a cluster via Pinniped.
|
||||
|
||||
Requires admin-like access to the cluster using the current
|
||||
kubeconfig context in order to access Pinniped's metadata.
|
||||
The current kubeconfig is found similar to how kubectl finds it:
|
||||
using the value of the --kubeconfig option, or if that is not
|
||||
specified then from the value of the KUBECONFIG environment
|
||||
variable, or if that is not specified then it defaults to
|
||||
.kube/config in your home directory.
|
||||
|
||||
Prints a kubeconfig which is suitable to access the cluster using
|
||||
Pinniped as the authentication mechanism. This kubeconfig output
|
||||
can be saved to a file and used with future kubectl commands, e.g.:
|
||||
pinniped get-kubeconfig --token $MY_TOKEN > $HOME/mycluster-kubeconfig
|
||||
kubectl --kubeconfig $HOME/mycluster-kubeconfig get pods
|
||||
|
||||
Usage:
|
||||
get-kubeconfig [flags]
|
||||
|
||||
Flags:
|
||||
--authenticator-name string Authenticator name
|
||||
--authenticator-type string Authenticator type (e.g., 'webhook', 'jwt')
|
||||
-h, --help help for get-kubeconfig
|
||||
--kubeconfig string Path to the kubeconfig file
|
||||
--kubeconfig-context string Kubeconfig context override
|
||||
--pinniped-namespace string Namespace in which Pinniped was installed (default "pinniped-concierge")
|
||||
--token string Credential to include in the resulting kubeconfig output (Required)
|
||||
`)
|
||||
)
|
||||
|
||||
func TestNewGetKubeConfigCmd(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
wantError bool
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
}{
|
||||
{
|
||||
name: "help flag passed",
|
||||
args: []string{"--help"},
|
||||
wantStdout: knownGoodHelpForGetKubeConfig,
|
||||
},
|
||||
{
|
||||
name: "missing required flag",
|
||||
args: []string{},
|
||||
wantError: true,
|
||||
wantStdout: `Error: required flag(s) "token" not set` + "\n" + knownGoodUsageForGetKubeConfig,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cmd := newGetKubeConfigCommand().Command()
|
||||
require.NotNil(t, cmd)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.SetOut(&stdout)
|
||||
cmd.SetErr(&stderr)
|
||||
cmd.SetArgs(tt.args)
|
||||
err := cmd.Execute()
|
||||
if tt.wantError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
|
||||
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type expectedKubeconfigYAML struct {
|
||||
clusterCAData string
|
||||
clusterServer string
|
||||
command string
|
||||
token string
|
||||
pinnipedEndpoint string
|
||||
pinnipedCABundle string
|
||||
namespace string
|
||||
authenticatorType string
|
||||
authenticatorName string
|
||||
}
|
||||
|
||||
func (e expectedKubeconfigYAML) String() string {
|
||||
return here.Docf(`
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: %s
|
||||
server: %s
|
||||
name: pinniped-cluster
|
||||
contexts:
|
||||
- context:
|
||||
cluster: pinniped-cluster
|
||||
user: pinniped-user
|
||||
name: pinniped-cluster
|
||||
current-context: pinniped-cluster
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: pinniped-user
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1beta1
|
||||
args:
|
||||
- exchange-credential
|
||||
command: %s
|
||||
env:
|
||||
- name: PINNIPED_K8S_API_ENDPOINT
|
||||
value: %s
|
||||
- name: PINNIPED_CA_BUNDLE
|
||||
value: %s
|
||||
- name: PINNIPED_NAMESPACE
|
||||
value: %s
|
||||
- name: PINNIPED_TOKEN
|
||||
value: %s
|
||||
- name: PINNIPED_AUTHENTICATOR_TYPE
|
||||
value: %s
|
||||
- name: PINNIPED_AUTHENTICATOR_NAME
|
||||
value: %s
|
||||
installHint: |-
|
||||
The Pinniped CLI is required to authenticate to the current cluster.
|
||||
For more information, please visit https://pinniped.dev
|
||||
`, e.clusterCAData, e.clusterServer, e.command, e.pinnipedEndpoint, e.pinnipedCABundle, e.namespace, e.token, e.authenticatorType, e.authenticatorName)
|
||||
}
|
||||
|
||||
func newCredentialIssuer(name, namespace, server, certificateAuthorityData string) *configv1alpha1.CredentialIssuer {
|
||||
return &configv1alpha1.CredentialIssuer{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "CredentialIssuer",
|
||||
APIVersion: configv1alpha1.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Status: configv1alpha1.CredentialIssuerStatus{
|
||||
KubeConfigInfo: &configv1alpha1.CredentialIssuerKubeConfigInfo{
|
||||
Server: server,
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(certificateAuthorityData)),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
mocks func(*getKubeConfigCommand)
|
||||
wantError string
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
}{
|
||||
{
|
||||
name: "failure to get path to self",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.getPathToSelf = func() (string, error) {
|
||||
return "", fmt.Errorf("some error getting path to self")
|
||||
}
|
||||
},
|
||||
wantError: "could not find path to self: some error getting path to self",
|
||||
},
|
||||
{
|
||||
name: "kubeconfig does not exist",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.kubeconfig = "./testdata/does-not-exist.yaml"
|
||||
},
|
||||
wantError: "stat ./testdata/does-not-exist.yaml: no such file or directory",
|
||||
},
|
||||
{
|
||||
name: "fail to get client",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return nil, fmt.Errorf("some error configuring clientset")
|
||||
}
|
||||
},
|
||||
wantError: "some error configuring clientset",
|
||||
},
|
||||
{
|
||||
name: "fail to get authenticators",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.authenticatorName = ""
|
||||
cmd.flags.authenticatorType = ""
|
||||
clientset := pinnipedfake.NewSimpleClientset()
|
||||
clientset.PrependReactor("*", "*", func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("some error getting authenticators")
|
||||
})
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return clientset, nil
|
||||
}
|
||||
},
|
||||
wantError: "some error getting authenticators",
|
||||
},
|
||||
{
|
||||
name: "zero authenticators",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.authenticatorName = ""
|
||||
cmd.flags.authenticatorType = ""
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(), nil
|
||||
}
|
||||
},
|
||||
wantError: `no authenticators were found in namespace "test-namespace"`,
|
||||
},
|
||||
{
|
||||
name: "multiple authenticators",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.authenticatorName = ""
|
||||
cmd.flags.authenticatorType = ""
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
&authv1alpha.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "webhook-one"}},
|
||||
&authv1alpha.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "webhook-two"}},
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantError: `multiple authenticators were found in namespace "test-namespace", so --authenticator-name/--authenticator-type must be specified`,
|
||||
},
|
||||
{
|
||||
name: "fail to get CredentialIssuers",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
clientset := pinnipedfake.NewSimpleClientset()
|
||||
clientset.PrependReactor("*", "*", func(_ coretesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("some error getting CredentialIssuers")
|
||||
})
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return clientset, nil
|
||||
}
|
||||
},
|
||||
wantError: "some error getting CredentialIssuers",
|
||||
},
|
||||
{
|
||||
name: "zero CredentialIssuers found",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
newCredentialIssuer("pinniped-config-1", "not-the-test-namespace", "", ""),
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantError: `No CredentialIssuer was found in namespace "test-namespace". Is Pinniped installed on this cluster in namespace "test-namespace"?`,
|
||||
},
|
||||
{
|
||||
name: "multiple CredentialIssuers found",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
newCredentialIssuer("pinniped-config-1", "test-namespace", "", ""),
|
||||
newCredentialIssuer("pinniped-config-2", "test-namespace", "", ""),
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantError: `More than one CredentialIssuer was found in namespace "test-namespace"`,
|
||||
},
|
||||
{
|
||||
name: "CredentialIssuer missing KubeConfigInfo",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
ci := newCredentialIssuer("pinniped-config", "test-namespace", "", "")
|
||||
ci.Status.KubeConfigInfo = nil
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(ci), nil
|
||||
}
|
||||
},
|
||||
wantError: `CredentialIssuer "pinniped-config" was missing KubeConfigInfo`,
|
||||
},
|
||||
{
|
||||
name: "KubeConfigInfo has invalid base64",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
ci := newCredentialIssuer("pinniped-config", "test-namespace", "https://example.com", "")
|
||||
ci.Status.KubeConfigInfo.CertificateAuthorityData = "invalid-base64-test-ca"
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(ci), nil
|
||||
}
|
||||
},
|
||||
wantError: `illegal base64 data at input byte 7`,
|
||||
},
|
||||
{
|
||||
name: "success using remote CA data",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
ci := newCredentialIssuer("pinniped-config", "test-namespace", "https://fake-server-url-value", "fake-certificate-authority-data-value")
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(ci), nil
|
||||
}
|
||||
},
|
||||
wantStdout: expectedKubeconfigYAML{
|
||||
clusterCAData: "ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==",
|
||||
clusterServer: "https://fake-server-url-value",
|
||||
command: "/path/to/pinniped",
|
||||
token: "test-token",
|
||||
pinnipedEndpoint: "https://fake-server-url-value",
|
||||
pinnipedCABundle: "fake-certificate-authority-data-value",
|
||||
namespace: "test-namespace",
|
||||
authenticatorType: "test-authenticator-type",
|
||||
authenticatorName: "test-authenticator-name",
|
||||
}.String(),
|
||||
},
|
||||
{
|
||||
name: "success using local CA data and discovered authenticator",
|
||||
mocks: func(cmd *getKubeConfigCommand) {
|
||||
cmd.flags.authenticatorName = ""
|
||||
cmd.flags.authenticatorType = ""
|
||||
|
||||
cmd.kubeClientCreator = func(_ *rest.Config) (pinnipedclientset.Interface, error) {
|
||||
return pinnipedfake.NewSimpleClientset(
|
||||
&authv1alpha.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Namespace: "test-namespace", Name: "discovered-authenticator"}},
|
||||
newCredentialIssuer("pinniped-config", "test-namespace", "https://example.com", "test-ca"),
|
||||
), nil
|
||||
}
|
||||
},
|
||||
wantStderr: `WARNING: Server and certificate authority did not match between local kubeconfig and Pinniped's CredentialIssuer on the cluster. Using local kubeconfig values.`,
|
||||
wantStdout: expectedKubeconfigYAML{
|
||||
clusterCAData: "ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==",
|
||||
clusterServer: "https://fake-server-url-value",
|
||||
command: "/path/to/pinniped",
|
||||
token: "test-token",
|
||||
pinnipedEndpoint: "https://fake-server-url-value",
|
||||
pinnipedCABundle: "fake-certificate-authority-data-value",
|
||||
namespace: "test-namespace",
|
||||
authenticatorType: "webhook",
|
||||
authenticatorName: "discovered-authenticator",
|
||||
}.String(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Start with a default getKubeConfigCommand, set some defaults, then apply any mocks.
|
||||
c := newGetKubeConfigCommand()
|
||||
c.flags.token = "test-token"
|
||||
c.flags.namespace = "test-namespace"
|
||||
c.flags.authenticatorName = "test-authenticator-name"
|
||||
c.flags.authenticatorType = "test-authenticator-type"
|
||||
c.getPathToSelf = func() (string, error) { return "/path/to/pinniped", nil }
|
||||
c.flags.kubeconfig = "./testdata/kubeconfig.yaml"
|
||||
tt.mocks(c)
|
||||
|
||||
cmd := &cobra.Command{}
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.SetOut(&stdout)
|
||||
cmd.SetErr(&stderr)
|
||||
cmd.SetArgs([]string{})
|
||||
err := c.run(cmd, []string{})
|
||||
if tt.wantError != "" {
|
||||
require.EqualError(t, err, tt.wantError)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, strings.TrimSpace(tt.wantStdout), strings.TrimSpace(stdout.String()), "unexpected stdout")
|
||||
require.Equal(t, strings.TrimSpace(tt.wantStderr), strings.TrimSpace(stderr.String()), "unexpected stderr")
|
||||
})
|
||||
}
|
||||
}
|
364
cmd/pinniped/cmd/kubeconfig.go
Normal file
364
cmd/pinniped/cmd/kubeconfig.go
Normal file
@ -0,0 +1,364 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth" // Adds handlers for various dynamic auth plugins in client-go
|
||||
|
||||
conciergev1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
conciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
|
||||
)
|
||||
|
||||
type kubeconfigDeps struct {
|
||||
getPathToSelf func() (string, error)
|
||||
getClientset func(clientcmd.ClientConfig) (conciergeclientset.Interface, error)
|
||||
}
|
||||
|
||||
func kubeconfigRealDeps() kubeconfigDeps {
|
||||
return kubeconfigDeps{
|
||||
getPathToSelf: os.Executable,
|
||||
getClientset: func(clientConfig clientcmd.ClientConfig) (conciergeclientset.Interface, error) {
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conciergeclientset.NewForConfig(restConfig)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
getCmd.AddCommand(kubeconfigCommand(kubeconfigRealDeps()))
|
||||
}
|
||||
|
||||
type getKubeconfigOIDCParams struct {
|
||||
issuer string
|
||||
clientID string
|
||||
listenPort uint16
|
||||
scopes []string
|
||||
skipBrowser bool
|
||||
sessionCachePath string
|
||||
debugSessionCache bool
|
||||
caBundlePaths []string
|
||||
requestAudience string
|
||||
}
|
||||
|
||||
type getKubeconfigConciergeParams struct {
|
||||
disabled bool
|
||||
namespace string
|
||||
authenticatorName string
|
||||
authenticatorType string
|
||||
}
|
||||
|
||||
type getKubeconfigParams struct {
|
||||
kubeconfigPath string
|
||||
kubeconfigContextOverride string
|
||||
staticToken string
|
||||
staticTokenEnvName string
|
||||
oidc getKubeconfigOIDCParams
|
||||
concierge getKubeconfigConciergeParams
|
||||
}
|
||||
|
||||
func kubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
|
||||
var (
|
||||
cmd = cobra.Command{
|
||||
Args: cobra.NoArgs,
|
||||
Use: "kubeconfig",
|
||||
Short: "Generate a Pinniped-based kubeconfig for a cluster",
|
||||
SilenceUsage: true,
|
||||
}
|
||||
flags getKubeconfigParams
|
||||
)
|
||||
|
||||
f := cmd.Flags()
|
||||
f.StringVar(&flags.staticToken, "static-token", "", "Instead of doing an OIDC-based login, specify a static token")
|
||||
f.StringVar(&flags.staticTokenEnvName, "static-token-env", "", "Instead of doing an OIDC-based login, read a static token from the environment")
|
||||
|
||||
f.BoolVar(&flags.concierge.disabled, "no-concierge", false, "Generate a configuration which does not use the concierge, but sends the credential to the cluster directly")
|
||||
f.StringVar(&flags.concierge.namespace, "concierge-namespace", "pinniped-concierge", "Namespace in which the concierge was installed")
|
||||
f.StringVar(&flags.concierge.authenticatorType, "concierge-authenticator-type", "", "Concierge authenticator type (e.g., 'webhook', 'jwt') (default: autodiscover)")
|
||||
f.StringVar(&flags.concierge.authenticatorName, "concierge-authenticator-name", "", "Concierge authenticator name (default: autodiscover)")
|
||||
|
||||
f.StringVar(&flags.oidc.issuer, "oidc-issuer", "", "OpenID Connect issuer URL (default: autodiscover)")
|
||||
f.StringVar(&flags.oidc.clientID, "oidc-client-id", "pinniped-cli", "OpenID Connect client ID (default: autodiscover)")
|
||||
f.Uint16Var(&flags.oidc.listenPort, "oidc-listen-port", 0, "TCP port for localhost listener (authorization code flow only)")
|
||||
f.StringSliceVar(&flags.oidc.scopes, "oidc-scopes", []string{oidc.ScopeOfflineAccess, oidc.ScopeOpenID, "pinniped.sts.unrestricted"}, "OpenID Connect scopes to request during login")
|
||||
f.BoolVar(&flags.oidc.skipBrowser, "oidc-skip-browser", false, "During OpenID Connect login, skip opening the browser (just print the URL)")
|
||||
f.StringVar(&flags.oidc.sessionCachePath, "oidc-session-cache", "", "Path to OpenID Connect session cache file")
|
||||
f.StringSliceVar(&flags.oidc.caBundlePaths, "oidc-ca-bundle", nil, "Path to TLS certificate authority bundle (PEM format, optional, can be repeated)")
|
||||
f.BoolVar(&flags.oidc.debugSessionCache, "oidc-debug-session-cache", false, "Print debug logs related to the OpenID Connect session cache")
|
||||
f.StringVar(&flags.oidc.requestAudience, "oidc-request-audience", "", "Request a token with an alternate audience using RFC 8693 token exchange")
|
||||
f.StringVar(&flags.kubeconfigPath, "kubeconfig", os.Getenv("KUBECONFIG"), "Path to kubeconfig file")
|
||||
f.StringVar(&flags.kubeconfigContextOverride, "kubeconfig-context", "", "Kubeconfig context name (default: current active context)")
|
||||
|
||||
mustMarkHidden(&cmd, "oidc-debug-session-cache")
|
||||
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error { return runGetKubeconfig(cmd.OutOrStdout(), deps, flags) }
|
||||
return &cmd
|
||||
}
|
||||
|
||||
func runGetKubeconfig(out io.Writer, deps kubeconfigDeps, flags getKubeconfigParams) error {
|
||||
execConfig := clientcmdapi.ExecConfig{
|
||||
APIVersion: clientauthenticationv1beta1.SchemeGroupVersion.String(),
|
||||
Args: []string{},
|
||||
Env: []clientcmdapi.ExecEnvVar{},
|
||||
}
|
||||
|
||||
var err error
|
||||
execConfig.Command, err = deps.getPathToSelf()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not determine the Pinniped executable path: %w", err)
|
||||
}
|
||||
|
||||
oidcCABundle, err := loadCABundlePaths(flags.oidc.caBundlePaths)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not read --oidc-ca-bundle: %w", err)
|
||||
}
|
||||
|
||||
clientConfig := newClientConfig(flags.kubeconfigPath, flags.kubeconfigContextOverride)
|
||||
currentKubeConfig, err := clientConfig.RawConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not load --kubeconfig: %w", err)
|
||||
}
|
||||
cluster, err := copyCurrentClusterFromExistingKubeConfig(currentKubeConfig, flags.kubeconfigContextOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not load --kubeconfig/--kubeconfig-context: %w", err)
|
||||
}
|
||||
clientset, err := deps.getClientset(clientConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not configure Kubernetes client: %w", err)
|
||||
}
|
||||
|
||||
if !flags.concierge.disabled {
|
||||
authenticator, err := lookupAuthenticator(
|
||||
clientset,
|
||||
flags.concierge.namespace,
|
||||
flags.concierge.authenticatorType,
|
||||
flags.concierge.authenticatorName,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := configureConcierge(authenticator, &flags, cluster, &oidcCABundle, &execConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If one of the --static-* flags was passed, output a config that runs `pinniped login static`.
|
||||
if flags.staticToken != "" || flags.staticTokenEnvName != "" {
|
||||
if flags.staticToken != "" && flags.staticTokenEnvName != "" {
|
||||
return fmt.Errorf("only one of --static-token and --static-token-env can be specified")
|
||||
}
|
||||
execConfig.Args = append([]string{"login", "static"}, execConfig.Args...)
|
||||
if flags.staticToken != "" {
|
||||
execConfig.Args = append(execConfig.Args, "--token="+flags.staticToken)
|
||||
}
|
||||
if flags.staticTokenEnvName != "" {
|
||||
execConfig.Args = append(execConfig.Args, "--token-env="+flags.staticTokenEnvName)
|
||||
}
|
||||
return writeConfigAsYAML(out, newExecKubeconfig(cluster, &execConfig))
|
||||
}
|
||||
|
||||
// Otherwise continue to parse the OIDC-related flags and output a config that runs `pinniped login oidc`.
|
||||
execConfig.Args = append([]string{"login", "oidc"}, execConfig.Args...)
|
||||
if flags.oidc.issuer == "" {
|
||||
return fmt.Errorf("could not autodiscover --oidc-issuer, and none was provided")
|
||||
}
|
||||
execConfig.Args = append(execConfig.Args,
|
||||
"--issuer="+flags.oidc.issuer,
|
||||
"--client-id="+flags.oidc.clientID,
|
||||
"--scopes="+strings.Join(flags.oidc.scopes, ","),
|
||||
)
|
||||
if flags.oidc.skipBrowser {
|
||||
execConfig.Args = append(execConfig.Args, "--skip-browser")
|
||||
}
|
||||
if flags.oidc.listenPort != 0 {
|
||||
execConfig.Args = append(execConfig.Args, "--listen-port="+strconv.Itoa(int(flags.oidc.listenPort)))
|
||||
}
|
||||
if oidcCABundle != "" {
|
||||
execConfig.Args = append(execConfig.Args, "--ca-bundle-data="+base64.StdEncoding.EncodeToString([]byte(oidcCABundle)))
|
||||
}
|
||||
if flags.oidc.sessionCachePath != "" {
|
||||
execConfig.Args = append(execConfig.Args, "--session-cache="+flags.oidc.sessionCachePath)
|
||||
}
|
||||
if flags.oidc.debugSessionCache {
|
||||
execConfig.Args = append(execConfig.Args, "--debug-session-cache")
|
||||
}
|
||||
if flags.oidc.requestAudience != "" {
|
||||
execConfig.Args = append(execConfig.Args, "--request-audience="+flags.oidc.requestAudience)
|
||||
}
|
||||
return writeConfigAsYAML(out, newExecKubeconfig(cluster, &execConfig))
|
||||
}
|
||||
|
||||
func configureConcierge(authenticator metav1.Object, flags *getKubeconfigParams, v1Cluster *clientcmdapi.Cluster, oidcCABundle *string, execConfig *clientcmdapi.ExecConfig) error {
|
||||
switch auth := authenticator.(type) {
|
||||
case *conciergev1alpha1.WebhookAuthenticator:
|
||||
// If the --concierge-authenticator-type/--concierge-authenticator-name flags were not set explicitly, set
|
||||
// them to point at the discovered WebhookAuthenticator.
|
||||
if flags.concierge.authenticatorType == "" && flags.concierge.authenticatorName == "" {
|
||||
flags.concierge.authenticatorType = "webhook"
|
||||
flags.concierge.authenticatorName = auth.Name
|
||||
}
|
||||
case *conciergev1alpha1.JWTAuthenticator:
|
||||
// If the --concierge-authenticator-type/--concierge-authenticator-name flags were not set explicitly, set
|
||||
// them to point at the discovered JWTAuthenticator.
|
||||
if flags.concierge.authenticatorType == "" && flags.concierge.authenticatorName == "" {
|
||||
flags.concierge.authenticatorType = "jwt"
|
||||
flags.concierge.authenticatorName = auth.Name
|
||||
}
|
||||
|
||||
// If the --oidc-issuer flag was not set explicitly, default it to the spec.issuer field of the JWTAuthenticator.
|
||||
if flags.oidc.issuer == "" {
|
||||
flags.oidc.issuer = auth.Spec.Issuer
|
||||
}
|
||||
|
||||
// If the --oidc-request-audience flag was not set explicitly, default it to the spec.audience field of the JWTAuthenticator.
|
||||
if flags.oidc.requestAudience == "" {
|
||||
flags.oidc.requestAudience = auth.Spec.Audience
|
||||
}
|
||||
|
||||
// If the --oidc-ca-bundle flags was not set explicitly, default it to the
|
||||
// spec.tls.certificateAuthorityData field of the JWTAuthenticator.
|
||||
if *oidcCABundle == "" && auth.Spec.TLS != nil && auth.Spec.TLS.CertificateAuthorityData != "" {
|
||||
decoded, err := base64.StdEncoding.DecodeString(auth.Spec.TLS.CertificateAuthorityData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("tried to autodiscover --oidc-ca-bundle, but JWTAuthenticator %s/%s has invalid spec.tls.certificateAuthorityData: %w", auth.Namespace, auth.Name, err)
|
||||
}
|
||||
*oidcCABundle = string(decoded)
|
||||
}
|
||||
}
|
||||
|
||||
// Append the flags to configure the Concierge credential exchange at runtime.
|
||||
execConfig.Args = append(execConfig.Args,
|
||||
"--enable-concierge",
|
||||
"--concierge-namespace="+flags.concierge.namespace,
|
||||
"--concierge-authenticator-name="+flags.concierge.authenticatorName,
|
||||
"--concierge-authenticator-type="+flags.concierge.authenticatorType,
|
||||
"--concierge-endpoint="+v1Cluster.Server,
|
||||
"--concierge-ca-bundle-data="+base64.StdEncoding.EncodeToString(v1Cluster.CertificateAuthorityData),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadCABundlePaths(paths []string) (string, error) {
|
||||
if len(paths) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
blobs := make([][]byte, 0, len(paths))
|
||||
for _, p := range paths {
|
||||
pem, err := ioutil.ReadFile(p)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
blobs = append(blobs, pem)
|
||||
}
|
||||
return string(bytes.Join(blobs, []byte("\n"))), nil
|
||||
}
|
||||
|
||||
func newExecKubeconfig(cluster *clientcmdapi.Cluster, execConfig *clientcmdapi.ExecConfig) clientcmdapi.Config {
|
||||
const name = "pinniped"
|
||||
return clientcmdapi.Config{
|
||||
Kind: "Config",
|
||||
APIVersion: clientcmdapi.SchemeGroupVersion.Version,
|
||||
Clusters: map[string]*clientcmdapi.Cluster{name: cluster},
|
||||
AuthInfos: map[string]*clientcmdapi.AuthInfo{name: {Exec: execConfig}},
|
||||
Contexts: map[string]*clientcmdapi.Context{name: {Cluster: name, AuthInfo: name}},
|
||||
CurrentContext: name,
|
||||
}
|
||||
}
|
||||
|
||||
func lookupAuthenticator(clientset conciergeclientset.Interface, namespace, authType, authName string) (metav1.Object, error) {
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
|
||||
defer cancelFunc()
|
||||
|
||||
// If one was specified, look it up or error.
|
||||
if authName != "" && authType != "" {
|
||||
switch strings.ToLower(authType) {
|
||||
case "webhook":
|
||||
return clientset.AuthenticationV1alpha1().WebhookAuthenticators(namespace).Get(ctx, authName, metav1.GetOptions{})
|
||||
case "jwt":
|
||||
return clientset.AuthenticationV1alpha1().JWTAuthenticators(namespace).Get(ctx, authName, metav1.GetOptions{})
|
||||
default:
|
||||
return nil, fmt.Errorf(`invalid authenticator type %q, supported values are "webhook" and "jwt"`, authType)
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise list all the available authenticators and hope there's just a single one.
|
||||
|
||||
jwtAuths, err := clientset.AuthenticationV1alpha1().JWTAuthenticators(namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list JWTAuthenticator objects for autodiscovery: %w", err)
|
||||
}
|
||||
webhooks, err := clientset.AuthenticationV1alpha1().WebhookAuthenticators(namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list WebhookAuthenticator objects for autodiscovery: %w", err)
|
||||
}
|
||||
|
||||
results := make([]metav1.Object, 0, len(jwtAuths.Items)+len(webhooks.Items))
|
||||
for i := range jwtAuths.Items {
|
||||
results = append(results, &jwtAuths.Items[i])
|
||||
}
|
||||
for i := range webhooks.Items {
|
||||
results = append(results, &webhooks.Items[i])
|
||||
}
|
||||
if len(results) == 0 {
|
||||
return nil, fmt.Errorf("no authenticators were found in namespace %q (try setting --concierge-namespace)", namespace)
|
||||
}
|
||||
if len(results) > 1 {
|
||||
return nil, fmt.Errorf("multiple authenticators were found in namespace %q, so the --concierge-authenticator-type/--concierge-authenticator-name flags must be specified", namespace)
|
||||
}
|
||||
return results[0], nil
|
||||
}
|
||||
|
||||
func newClientConfig(kubeconfigPathOverride string, currentContextName string) clientcmd.ClientConfig {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
loadingRules.ExplicitPath = kubeconfigPathOverride
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{
|
||||
CurrentContext: currentContextName,
|
||||
})
|
||||
return clientConfig
|
||||
}
|
||||
|
||||
func writeConfigAsYAML(out io.Writer, config clientcmdapi.Config) error {
|
||||
output, err := clientcmd.Write(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = out.Write(output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not write output: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyCurrentClusterFromExistingKubeConfig(currentKubeConfig clientcmdapi.Config, currentContextNameOverride string) (*clientcmdapi.Cluster, error) {
|
||||
contextName := currentKubeConfig.CurrentContext
|
||||
if currentContextNameOverride != "" {
|
||||
contextName = currentContextNameOverride
|
||||
}
|
||||
context := currentKubeConfig.Contexts[contextName]
|
||||
if context == nil {
|
||||
return nil, fmt.Errorf("no such context %q", contextName)
|
||||
}
|
||||
return currentKubeConfig.Clusters[context.Cluster], nil
|
||||
}
|
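Editor's note (not part of the change above): a minimal sketch of how the new kubeconfig command could be driven in-process from the same package. All flag values are illustrative; with the concierge enabled it would additionally need cluster access to autodiscover an authenticator, so the sketch passes --no-concierge.

// Hypothetical driver, assumed to live in package cmd alongside the code above
// (it reuses the unexported kubeconfigCommand and kubeconfigRealDeps).
func exampleGenerateKubeconfig() (string, error) {
	var out bytes.Buffer
	cmd := kubeconfigCommand(kubeconfigRealDeps())
	cmd.SetOut(&out)
	cmd.SetErr(&out)
	// Illustrative flags only: skip the concierge so no cluster access is needed,
	// and embed a static token in the generated exec credential stanza.
	cmd.SetArgs([]string{
		"--kubeconfig", "./testdata/kubeconfig.yaml",
		"--no-concierge",
		"--static-token", "my-token",
	})
	if err := cmd.Execute(); err != nil {
		return "", err
	}
	return out.String(), nil // the kubeconfig YAML written by writeConfigAsYAML
}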
524
cmd/pinniped/cmd/kubeconfig_test.go
Normal file
@@ -0,0 +1,524 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubetesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
conciergev1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
conciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
|
||||
fakeconciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned/fake"
|
||||
"go.pinniped.dev/internal/certauthority"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
func TestGetKubeconfig(t *testing.T) {
|
||||
testCA, err := certauthority.New(pkix.Name{CommonName: "Test CA"}, 1*time.Hour)
|
||||
require.NoError(t, err)
|
||||
tmpdir := testutil.TempDir(t)
|
||||
testCABundlePath := filepath.Join(tmpdir, "testca.pem")
|
||||
require.NoError(t, ioutil.WriteFile(testCABundlePath, testCA.Bundle(), 0600))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
env map[string]string
|
||||
getPathToSelfErr error
|
||||
getClientsetErr error
|
||||
conciergeObjects []runtime.Object
|
||||
conciergeReactions []kubetesting.Reactor
|
||||
wantError bool
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
wantOptionsCount int
|
||||
}{
|
||||
{
|
||||
name: "help flag passed",
|
||||
args: []string{"--help"},
|
||||
wantStdout: here.Doc(`
|
||||
Generate a Pinniped-based kubeconfig for a cluster
|
||||
|
||||
Usage:
|
||||
kubeconfig [flags]
|
||||
|
||||
Flags:
|
||||
--concierge-authenticator-name string Concierge authenticator name (default: autodiscover)
|
||||
--concierge-authenticator-type string Concierge authenticator type (e.g., 'webhook', 'jwt') (default: autodiscover)
|
||||
--concierge-namespace string Namespace in which the concierge was installed (default "pinniped-concierge")
|
||||
-h, --help help for kubeconfig
|
||||
--kubeconfig string Path to kubeconfig file
|
||||
--kubeconfig-context string Kubeconfig context name (default: current active context)
|
||||
--no-concierge Generate a configuration which does not use the concierge, but sends the credential to the cluster directly
|
||||
--oidc-ca-bundle strings Path to TLS certificate authority bundle (PEM format, optional, can be repeated)
|
||||
--oidc-client-id string OpenID Connect client ID (default: autodiscover) (default "pinniped-cli")
|
||||
--oidc-issuer string OpenID Connect issuer URL (default: autodiscover)
|
||||
--oidc-listen-port uint16 TCP port for localhost listener (authorization code flow only)
|
||||
--oidc-request-audience string            Request a token with an alternate audience using RFC 8693 token exchange
|
||||
--oidc-scopes strings OpenID Connect scopes to request during login (default [offline_access,openid,pinniped.sts.unrestricted])
|
||||
--oidc-session-cache string Path to OpenID Connect session cache file
|
||||
--oidc-skip-browser During OpenID Connect login, skip opening the browser (just print the URL)
|
||||
--static-token string Instead of doing an OIDC-based login, specify a static token
|
||||
--static-token-env string Instead of doing an OIDC-based login, read a static token from the environment
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "fail to get self-path",
|
||||
args: []string{},
|
||||
getPathToSelfErr: fmt.Errorf("some OS error"),
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not determine the Pinniped executable path: some OS error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid CA bundle paths",
|
||||
args: []string{
|
||||
"--oidc-ca-bundle", "./does/not/exist",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not read --oidc-ca-bundle: open ./does/not/exist: no such file or directory
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid kubeconfig path",
|
||||
args: []string{
|
||||
"--kubeconfig", "./does/not/exist",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not load --kubeconfig: stat ./does/not/exist: no such file or directory
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid kubeconfig context",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--kubeconfig-context", "invalid",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not load --kubeconfig/--kubeconfig-context: no such context "invalid"
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "clientset creation failure",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
},
|
||||
getClientsetErr: fmt.Errorf("some kube error"),
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not configure Kubernetes client: some kube error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "webhook authenticator not found",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-authenticator-type", "webhook",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: webhookauthenticators.authentication.concierge.pinniped.dev "test-authenticator" not found
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "JWT authenticator not found",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-authenticator-type", "jwt",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: jwtauthenticators.authentication.concierge.pinniped.dev "test-authenticator" not found
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid authenticator type",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-authenticator-type", "invalid",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: invalid authenticator type "invalid", supported values are "webhook" and "jwt"
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "fail to autodetect authenticator, listing jwtauthenticators fails",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
},
|
||||
conciergeReactions: []kubetesting.Reactor{
|
||||
&kubetesting.SimpleReactor{
|
||||
Verb: "*",
|
||||
Resource: "jwtauthenticators",
|
||||
Reaction: func(kubetesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("some list error")
|
||||
},
|
||||
},
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: failed to list JWTAuthenticator objects for autodiscovery: some list error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "fail to autodetect authenticator, listing webhookauthenticators fails",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
},
|
||||
conciergeReactions: []kubetesting.Reactor{
|
||||
&kubetesting.SimpleReactor{
|
||||
Verb: "*",
|
||||
Resource: "webhookauthenticators",
|
||||
Reaction: func(kubetesting.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("some list error")
|
||||
},
|
||||
},
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: failed to list WebhookAuthenticator objects for autodiscovery: some list error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "fail to autodetect authenticator, none found",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: no authenticators were found in namespace "pinniped-concierge" (try setting --concierge-namespace)
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "fail to autodetect authenticator, multiple found",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.JWTAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator-1", Namespace: "test-namespace"}},
|
||||
&conciergev1alpha1.JWTAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator-2", Namespace: "test-namespace"}},
|
||||
&conciergev1alpha1.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator-3", Namespace: "test-namespace"}},
|
||||
&conciergev1alpha1.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator-4", Namespace: "test-namespace"}},
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: multiple authenticators were found in namespace "test-namespace", so the --concierge-authenticator-type/--concierge-authenticator-name flags must be specified
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "autodetect webhook authenticator, missing --oidc-issuer",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "test-namespace"}},
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not autodiscover --oidc-issuer, and none was provided
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "autodetect JWT authenticator, invalid TLS bundle",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.JWTAuthenticator{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "test-namespace"},
|
||||
Spec: conciergev1alpha1.JWTAuthenticatorSpec{
|
||||
TLS: &conciergev1alpha1.TLSSpec{
|
||||
CertificateAuthorityData: "invalid-base64",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: tried to autodiscover --oidc-ca-bundle, but JWTAuthenticator test-namespace/test-authenticator has invalid spec.tls.certificateAuthorityData: illegal base64 data at input byte 7
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid static token flags",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
"--static-token", "test-token",
|
||||
"--static-token-env", "TEST_TOKEN",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "test-namespace"}},
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: only one of --static-token and --static-token-env can be specified
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "valid static token",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
"--static-token", "test-token",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "test-namespace"}},
|
||||
},
|
||||
wantStdout: here.Doc(`
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
server: https://fake-server-url-value
|
||||
name: pinniped
|
||||
contexts:
|
||||
- context:
|
||||
cluster: pinniped
|
||||
user: pinniped
|
||||
name: pinniped
|
||||
current-context: pinniped
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: pinniped
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1beta1
|
||||
args:
|
||||
- login
|
||||
- static
|
||||
- --enable-concierge
|
||||
- --concierge-namespace=test-namespace
|
||||
- --concierge-authenticator-name=test-authenticator
|
||||
- --concierge-authenticator-type=webhook
|
||||
- --concierge-endpoint=https://fake-server-url-value
|
||||
- --concierge-ca-bundle-data=ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
- --token=test-token
|
||||
command: '.../path/to/pinniped'
|
||||
env: []
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "valid static token from env var",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
"--static-token-env", "TEST_TOKEN",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.WebhookAuthenticator{ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "test-namespace"}},
|
||||
},
|
||||
wantStdout: here.Doc(`
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
server: https://fake-server-url-value
|
||||
name: pinniped
|
||||
contexts:
|
||||
- context:
|
||||
cluster: pinniped
|
||||
user: pinniped
|
||||
name: pinniped
|
||||
current-context: pinniped
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: pinniped
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1beta1
|
||||
args:
|
||||
- login
|
||||
- static
|
||||
- --enable-concierge
|
||||
- --concierge-namespace=test-namespace
|
||||
- --concierge-authenticator-name=test-authenticator
|
||||
- --concierge-authenticator-type=webhook
|
||||
- --concierge-endpoint=https://fake-server-url-value
|
||||
- --concierge-ca-bundle-data=ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
- --token-env=TEST_TOKEN
|
||||
command: '.../path/to/pinniped'
|
||||
env: []
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "autodetect JWT authenticator",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.JWTAuthenticator{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "pinniped-concierge"},
|
||||
Spec: conciergev1alpha1.JWTAuthenticatorSpec{
|
||||
Issuer: "https://example.com/issuer",
|
||||
Audience: "test-audience",
|
||||
TLS: &conciergev1alpha1.TLSSpec{
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString(testCA.Bundle()),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantStdout: here.Docf(`
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
server: https://fake-server-url-value
|
||||
name: pinniped
|
||||
contexts:
|
||||
- context:
|
||||
cluster: pinniped
|
||||
user: pinniped
|
||||
name: pinniped
|
||||
current-context: pinniped
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: pinniped
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1beta1
|
||||
args:
|
||||
- login
|
||||
- oidc
|
||||
- --enable-concierge
|
||||
- --concierge-namespace=pinniped-concierge
|
||||
- --concierge-authenticator-name=test-authenticator
|
||||
- --concierge-authenticator-type=jwt
|
||||
- --concierge-endpoint=https://fake-server-url-value
|
||||
- --concierge-ca-bundle-data=ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
- --issuer=https://example.com/issuer
|
||||
- --client-id=pinniped-cli
|
||||
- --scopes=offline_access,openid,pinniped.sts.unrestricted
|
||||
- --ca-bundle-data=%s
|
||||
- --request-audience=test-audience
|
||||
command: '.../path/to/pinniped'
|
||||
env: []
|
||||
`, base64.StdEncoding.EncodeToString(testCA.Bundle())),
|
||||
},
|
||||
{
|
||||
name: "autodetect nothing, set a bunch of options",
|
||||
args: []string{
|
||||
"--kubeconfig", "./testdata/kubeconfig.yaml",
|
||||
"--concierge-authenticator-type", "webhook",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
"--oidc-issuer", "https://example.com/issuer",
|
||||
"--oidc-skip-browser",
|
||||
"--oidc-listen-port", "1234",
|
||||
"--oidc-ca-bundle", testCABundlePath,
|
||||
"--oidc-session-cache", "/path/to/cache/dir/sessions.yaml",
|
||||
"--oidc-debug-session-cache",
|
||||
"--oidc-request-audience", "test-audience",
|
||||
},
|
||||
conciergeObjects: []runtime.Object{
|
||||
&conciergev1alpha1.WebhookAuthenticator{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-authenticator", Namespace: "pinniped-concierge"},
|
||||
},
|
||||
},
|
||||
wantStdout: here.Docf(`
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
server: https://fake-server-url-value
|
||||
name: pinniped
|
||||
contexts:
|
||||
- context:
|
||||
cluster: pinniped
|
||||
user: pinniped
|
||||
name: pinniped
|
||||
current-context: pinniped
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: pinniped
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1beta1
|
||||
args:
|
||||
- login
|
||||
- oidc
|
||||
- --enable-concierge
|
||||
- --concierge-namespace=pinniped-concierge
|
||||
- --concierge-authenticator-name=test-authenticator
|
||||
- --concierge-authenticator-type=webhook
|
||||
- --concierge-endpoint=https://fake-server-url-value
|
||||
- --concierge-ca-bundle-data=ZmFrZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YS12YWx1ZQ==
|
||||
- --issuer=https://example.com/issuer
|
||||
- --client-id=pinniped-cli
|
||||
- --scopes=offline_access,openid,pinniped.sts.unrestricted
|
||||
- --skip-browser
|
||||
- --listen-port=1234
|
||||
- --ca-bundle-data=%s
|
||||
- --session-cache=/path/to/cache/dir/sessions.yaml
|
||||
- --debug-session-cache
|
||||
- --request-audience=test-audience
|
||||
command: '.../path/to/pinniped'
|
||||
env: []
|
||||
`, base64.StdEncoding.EncodeToString(testCA.Bundle())),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cmd := kubeconfigCommand(kubeconfigDeps{
|
||||
getPathToSelf: func() (string, error) {
|
||||
if tt.getPathToSelfErr != nil {
|
||||
return "", tt.getPathToSelfErr
|
||||
}
|
||||
return ".../path/to/pinniped", nil
|
||||
},
|
||||
getClientset: func(clientConfig clientcmd.ClientConfig) (conciergeclientset.Interface, error) {
|
||||
if tt.getClientsetErr != nil {
|
||||
return nil, tt.getClientsetErr
|
||||
}
|
||||
fake := fakeconciergeclientset.NewSimpleClientset(tt.conciergeObjects...)
|
||||
if len(tt.conciergeReactions) > 0 {
|
||||
fake.ReactionChain = tt.conciergeReactions
|
||||
}
|
||||
return fake, nil
|
||||
},
|
||||
})
|
||||
require.NotNil(t, cmd)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.SetOut(&stdout)
|
||||
cmd.SetErr(&stderr)
|
||||
cmd.SetArgs(tt.args)
|
||||
err := cmd.Execute()
|
||||
if tt.wantError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
|
||||
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
|
||||
})
|
||||
}
|
||||
}
|
@@ -12,7 +12,8 @@ var loginCmd = &cobra.Command{
|
||||
Use: "login",
|
||||
Short: "login",
|
||||
Long: "Login to a Pinniped server",
|
||||
SilenceUsage: true, // do not print usage message when commands fail
|
||||
SilenceUsage: true, // Do not print usage message when commands fail.
|
||||
Hidden: true, // These commands are not really meant to be used directly by users, so it's confusing to have them discoverable.
|
||||
}
|
||||
|
||||
//nolint: gochecknoinits
|
||||
|
@@ -4,21 +4,25 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
"k8s.io/klog/v2/klogr"
|
||||
|
||||
"go.pinniped.dev/pkg/conciergeclient"
|
||||
"go.pinniped.dev/pkg/oidcclient"
|
||||
"go.pinniped.dev/pkg/oidcclient/filesession"
|
||||
"go.pinniped.dev/pkg/oidcclient/oidctypes"
|
||||
@@ -26,17 +30,24 @@ import (
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
loginCmd.AddCommand(oidcLoginCommand(oidcclient.Login))
|
||||
loginCmd.AddCommand(oidcLoginCommand(oidcLoginCommandRealDeps()))
|
||||
}
|
||||
|
||||
func oidcLoginCommand(loginFunc func(issuer string, clientID string, opts ...oidcclient.Option) (*oidctypes.Token, error)) *cobra.Command {
|
||||
var (
|
||||
cmd = cobra.Command{
|
||||
Args: cobra.NoArgs,
|
||||
Use: "oidc --issuer ISSUER --client-id CLIENT_ID",
|
||||
Short: "Login using an OpenID Connect provider",
|
||||
SilenceUsage: true,
|
||||
type oidcLoginCommandDeps struct {
|
||||
login func(string, string, ...oidcclient.Option) (*oidctypes.Token, error)
|
||||
exchangeToken func(context.Context, *conciergeclient.Client, string) (*clientauthv1beta1.ExecCredential, error)
|
||||
}
|
||||
|
||||
func oidcLoginCommandRealDeps() oidcLoginCommandDeps {
|
||||
return oidcLoginCommandDeps{
|
||||
login: oidcclient.Login,
|
||||
exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) {
|
||||
return client.ExchangeToken(ctx, token)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type oidcLoginFlags struct {
|
||||
issuer string
|
||||
clientID string
|
||||
listenPort uint16
|
||||
@@ -44,96 +55,168 @@ func oidcLoginCommand(loginFunc func(issuer string, clientID string, opts ...oid
|
||||
skipBrowser bool
|
||||
sessionCachePath string
|
||||
caBundlePaths []string
|
||||
caBundleData []string
|
||||
debugSessionCache bool
|
||||
requestAudience string
|
||||
conciergeEnabled bool
|
||||
conciergeNamespace string
|
||||
conciergeAuthenticatorType string
|
||||
conciergeAuthenticatorName string
|
||||
conciergeEndpoint string
|
||||
conciergeCABundle string
|
||||
}
|
||||
|
||||
func oidcLoginCommand(deps oidcLoginCommandDeps) *cobra.Command {
|
||||
var (
|
||||
cmd = cobra.Command{
|
||||
Args: cobra.NoArgs,
|
||||
Use: "oidc --issuer ISSUER",
|
||||
Short: "Login using an OpenID Connect provider",
|
||||
SilenceUsage: true,
|
||||
}
|
||||
flags oidcLoginFlags
|
||||
)
|
||||
cmd.Flags().StringVar(&issuer, "issuer", "", "OpenID Connect issuer URL.")
|
||||
cmd.Flags().StringVar(&clientID, "client-id", "pinniped-cli", "OpenID Connect client ID.")
|
||||
cmd.Flags().Uint16Var(&listenPort, "listen-port", 0, "TCP port for localhost listener (authorization code flow only).")
|
||||
cmd.Flags().StringSliceVar(&scopes, "scopes", []string{oidc.ScopeOfflineAccess, oidc.ScopeOpenID, "pinniped.sts.unrestricted"}, "OIDC scopes to request during login.")
|
||||
cmd.Flags().BoolVar(&skipBrowser, "skip-browser", false, "Skip opening the browser (just print the URL).")
|
||||
cmd.Flags().StringVar(&sessionCachePath, "session-cache", filepath.Join(mustGetConfigDir(), "sessions.yaml"), "Path to session cache file.")
|
||||
cmd.Flags().StringSliceVar(&caBundlePaths, "ca-bundle", nil, "Path to TLS certificate authority bundle (PEM format, optional, can be repeated).")
|
||||
cmd.Flags().BoolVar(&debugSessionCache, "debug-session-cache", false, "Print debug logs related to the session cache.")
|
||||
cmd.Flags().StringVar(&requestAudience, "request-audience", "", "Request a token with an alternate audience using RF8693 token exchange.")
|
||||
cmd.Flags().StringVar(&flags.issuer, "issuer", "", "OpenID Connect issuer URL")
|
||||
cmd.Flags().StringVar(&flags.clientID, "client-id", "pinniped-cli", "OpenID Connect client ID")
|
||||
cmd.Flags().Uint16Var(&flags.listenPort, "listen-port", 0, "TCP port for localhost listener (authorization code flow only)")
|
||||
cmd.Flags().StringSliceVar(&flags.scopes, "scopes", []string{oidc.ScopeOfflineAccess, oidc.ScopeOpenID, "pinniped.sts.unrestricted"}, "OIDC scopes to request during login")
|
||||
cmd.Flags().BoolVar(&flags.skipBrowser, "skip-browser", false, "Skip opening the browser (just print the URL)")
|
||||
cmd.Flags().StringVar(&flags.sessionCachePath, "session-cache", filepath.Join(mustGetConfigDir(), "sessions.yaml"), "Path to session cache file")
|
||||
cmd.Flags().StringSliceVar(&flags.caBundlePaths, "ca-bundle", nil, "Path to TLS certificate authority bundle (PEM format, optional, can be repeated)")
|
||||
cmd.Flags().StringSliceVar(&flags.caBundleData, "ca-bundle-data", nil, "Base64 encoded TLS certificate authority bundle (base64 encoded PEM format, optional, can be repeated)")
|
||||
cmd.Flags().BoolVar(&flags.debugSessionCache, "debug-session-cache", false, "Print debug logs related to the session cache")
|
||||
cmd.Flags().StringVar(&flags.requestAudience, "request-audience", "", "Request a token with an alternate audience using RFC 8693 token exchange")
|
||||
cmd.Flags().BoolVar(&flags.conciergeEnabled, "enable-concierge", false, "Exchange the OIDC ID token with the Pinniped concierge during login")
|
||||
cmd.Flags().StringVar(&flags.conciergeNamespace, "concierge-namespace", "pinniped-concierge", "Namespace in which the concierge was installed")
|
||||
cmd.Flags().StringVar(&flags.conciergeAuthenticatorType, "concierge-authenticator-type", "", "Concierge authenticator type (e.g., 'webhook', 'jwt')")
|
||||
cmd.Flags().StringVar(&flags.conciergeAuthenticatorName, "concierge-authenticator-name", "", "Concierge authenticator name")
|
||||
cmd.Flags().StringVar(&flags.conciergeEndpoint, "concierge-endpoint", "", "API base for the Pinniped concierge endpoint")
|
||||
cmd.Flags().StringVar(&flags.conciergeCABundle, "concierge-ca-bundle-data", "", "CA bundle to use when connecting to the concierge")
|
||||
|
||||
mustMarkHidden(&cmd, "debug-session-cache")
|
||||
mustMarkRequired(&cmd, "issuer")
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error { return runOIDCLogin(cmd, deps, flags) }
|
||||
return &cmd
|
||||
}
|
||||
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
func runOIDCLogin(cmd *cobra.Command, deps oidcLoginCommandDeps, flags oidcLoginFlags) error {
|
||||
// Initialize the session cache.
|
||||
var sessionOptions []filesession.Option
|
||||
|
||||
// If the hidden --debug-session-cache option is passed, log all the errors from the session cache with klog.
|
||||
if debugSessionCache {
|
||||
if flags.debugSessionCache {
|
||||
logger := klogr.New().WithName("session")
|
||||
sessionOptions = append(sessionOptions, filesession.WithErrorReporter(func(err error) {
|
||||
logger.Error(err, "error during session cache operation")
|
||||
}))
|
||||
}
|
||||
sessionCache := filesession.New(sessionCachePath, sessionOptions...)
|
||||
sessionCache := filesession.New(flags.sessionCachePath, sessionOptions...)
|
||||
|
||||
// Initialize the login handler.
|
||||
opts := []oidcclient.Option{
|
||||
oidcclient.WithContext(cmd.Context()),
|
||||
oidcclient.WithScopes(scopes),
|
||||
oidcclient.WithScopes(flags.scopes),
|
||||
oidcclient.WithSessionCache(sessionCache),
|
||||
}
|
||||
|
||||
if listenPort != 0 {
|
||||
opts = append(opts, oidcclient.WithListenPort(listenPort))
|
||||
if flags.listenPort != 0 {
|
||||
opts = append(opts, oidcclient.WithListenPort(flags.listenPort))
|
||||
}
|
||||
|
||||
if requestAudience != "" {
|
||||
opts = append(opts, oidcclient.WithRequestAudience(requestAudience))
|
||||
if flags.requestAudience != "" {
|
||||
opts = append(opts, oidcclient.WithRequestAudience(flags.requestAudience))
|
||||
}
|
||||
|
||||
var concierge *conciergeclient.Client
|
||||
if flags.conciergeEnabled {
|
||||
var err error
|
||||
concierge, err = conciergeclient.New(
|
||||
conciergeclient.WithNamespace(flags.conciergeNamespace),
|
||||
conciergeclient.WithEndpoint(flags.conciergeEndpoint),
|
||||
conciergeclient.WithBase64CABundle(flags.conciergeCABundle),
|
||||
conciergeclient.WithAuthenticator(flags.conciergeAuthenticatorType, flags.conciergeAuthenticatorName),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid concierge parameters: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --skip-browser replaces the default "browser open" function with one that prints to stderr.
|
||||
if skipBrowser {
|
||||
if flags.skipBrowser {
|
||||
opts = append(opts, oidcclient.WithBrowserOpen(func(url string) error {
|
||||
cmd.PrintErr("Please log in: ", url, "\n")
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
if len(caBundlePaths) > 0 {
|
||||
if len(flags.caBundlePaths) > 0 || len(flags.caBundleData) > 0 {
|
||||
client, err := makeClient(flags.caBundlePaths, flags.caBundleData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts = append(opts, oidcclient.WithClient(client))
|
||||
}
|
||||
|
||||
// Do the basic login to get an OIDC token.
|
||||
token, err := deps.login(flags.issuer, flags.clientID, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not complete Pinniped login: %w", err)
|
||||
}
|
||||
cred := tokenCredential(token)
|
||||
|
||||
// If the concierge was configured, exchange the credential for a separate short-lived, cluster-specific credential.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if concierge != nil {
|
||||
cred, err = deps.exchangeToken(ctx, concierge, token.IDToken.Token)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not complete concierge credential exchange: %w", err)
|
||||
}
|
||||
}
|
||||
return json.NewEncoder(cmd.OutOrStdout()).Encode(cred)
|
||||
}
|
||||
func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, error) {
|
||||
pool := x509.NewCertPool()
|
||||
for _, p := range caBundlePaths {
|
||||
pem, err := ioutil.ReadFile(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not read --ca-bundle: %w", err)
|
||||
return nil, fmt.Errorf("could not read --ca-bundle: %w", err)
|
||||
}
|
||||
pool.AppendCertsFromPEM(pem)
|
||||
}
|
||||
tlsConfig := tls.Config{
|
||||
RootCAs: pool,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
for _, d := range caBundleData {
|
||||
pem, err := base64.StdEncoding.DecodeString(d)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read --ca-bundle-data: %w", err)
|
||||
}
|
||||
opts = append(opts, oidcclient.WithClient(&http.Client{
|
||||
pool.AppendCertsFromPEM(pem)
|
||||
}
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: &tlsConfig,
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: pool,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
},
|
||||
}))
|
||||
}
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
tok, err := loginFunc(issuer, clientID, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert the token out to Kubernetes ExecCredential JSON format for output.
|
||||
return json.NewEncoder(cmd.OutOrStdout()).Encode(&clientauthenticationv1beta1.ExecCredential{
|
||||
func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
|
||||
cred := clientauthv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &tok.IDToken.Expiry,
|
||||
Token: tok.IDToken.Token,
|
||||
Status: &clientauthv1beta1.ExecCredentialStatus{
|
||||
Token: token.IDToken.Token,
|
||||
},
|
||||
})
|
||||
}
|
||||
return &cmd
|
||||
if !token.IDToken.Expiry.IsZero() {
|
||||
cred.Status.ExpirationTimestamp = &token.IDToken.Expiry
|
||||
}
|
||||
return &cred
|
||||
}
|
||||
|
||||
// mustGetConfigDir returns a directory that follows the XDG base directory convention:
|
||||
|
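Editor's note (a sketch, not part of the diff): the ExecCredential built by tokenCredential above is what the CLI writes to stdout for kubectl's exec credential protocol. Assuming the same package and imports, it could be serialized like this; the token value and expiry are made up.

// Hypothetical illustration, same package assumed; values are placeholders.
func exampleExecCredentialJSON() error {
	token := &oidctypes.Token{
		IDToken: &oidctypes.IDToken{
			Token:  "example-id-token",
			Expiry: metav1.NewTime(time.Now().Add(5 * time.Minute)),
		},
	}
	// Produces JSON of the shape:
	// {"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1",
	//  "spec":{},"status":{"expirationTimestamp":"...","token":"example-id-token"}}
	return json.NewEncoder(os.Stdout).Encode(tokenCredential(token))
}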
@@ -5,13 +5,23 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
"go.pinniped.dev/internal/certauthority"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
"go.pinniped.dev/pkg/conciergeclient"
|
||||
"go.pinniped.dev/pkg/oidcclient"
|
||||
"go.pinniped.dev/pkg/oidcclient/oidctypes"
|
||||
)
|
||||
@@ -19,16 +29,22 @@
|
||||
func TestLoginOIDCCommand(t *testing.T) {
|
||||
cfgDir := mustGetConfigDir()
|
||||
|
||||
testCA, err := certauthority.New(pkix.Name{CommonName: "Test CA"}, 1*time.Hour)
|
||||
require.NoError(t, err)
|
||||
tmpdir := testutil.TempDir(t)
|
||||
testCABundlePath := filepath.Join(tmpdir, "testca.pem")
|
||||
require.NoError(t, ioutil.WriteFile(testCABundlePath, testCA.Bundle(), 0600))
|
||||
|
||||
time1 := time.Date(3020, 10, 12, 13, 14, 15, 16, time.UTC)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
loginErr error
|
||||
conciergeErr error
|
||||
wantError bool
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
wantIssuer string
|
||||
wantClientID string
|
||||
wantOptionsCount int
|
||||
}{
|
||||
{
|
||||
@@ -38,18 +54,25 @@ func TestLoginOIDCCommand(t *testing.T) {
|
||||
Login using an OpenID Connect provider
|
||||
|
||||
Usage:
|
||||
oidc --issuer ISSUER --client-id CLIENT_ID [flags]
|
||||
oidc --issuer ISSUER [flags]
|
||||
|
||||
Flags:
|
||||
--ca-bundle strings Path to TLS certificate authority bundle (PEM format, optional, can be repeated).
|
||||
--client-id string OpenID Connect client ID. (default "pinniped-cli")
|
||||
--ca-bundle strings Path to TLS certificate authority bundle (PEM format, optional, can be repeated)
|
||||
--ca-bundle-data strings                     Base64 encoded TLS certificate authority bundle (base64 encoded PEM format, optional, can be repeated)
|
||||
--client-id string OpenID Connect client ID (default "pinniped-cli")
|
||||
--concierge-authenticator-name string Concierge authenticator name
|
||||
--concierge-authenticator-type string Concierge authenticator type (e.g., 'webhook', 'jwt')
|
||||
--concierge-ca-bundle-data string CA bundle to use when connecting to the concierge
|
||||
--concierge-endpoint string API base for the Pinniped concierge endpoint
|
||||
--concierge-namespace string Namespace in which the concierge was installed (default "pinniped-concierge")
|
||||
--enable-concierge Exchange the OIDC ID token with the Pinniped concierge during login
|
||||
-h, --help help for oidc
|
||||
--issuer string OpenID Connect issuer URL.
|
||||
--listen-port uint16 TCP port for localhost listener (authorization code flow only).
|
||||
--request-audience string Request a token with an alternate audience using RF8693 token exchange.
|
||||
--scopes strings OIDC scopes to request during login. (default [offline_access,openid,pinniped.sts.unrestricted])
|
||||
--session-cache string Path to session cache file. (default "` + cfgDir + `/sessions.yaml")
|
||||
--skip-browser Skip opening the browser (just print the URL).
|
||||
--issuer string OpenID Connect issuer URL
|
||||
--listen-port uint16 TCP port for localhost listener (authorization code flow only)
|
||||
--request-audience string                    Request a token with an alternate audience using RFC 8693 token exchange
|
||||
--scopes strings OIDC scopes to request during login (default [offline_access,openid,pinniped.sts.unrestricted])
|
||||
--session-cache string Path to session cache file (default "` + cfgDir + `/sessions.yaml")
|
||||
--skip-browser Skip opening the browser (just print the URL)
|
||||
`),
|
||||
},
|
||||
{
|
||||
@@ -60,14 +83,78 @@ func TestLoginOIDCCommand(t *testing.T) {
|
||||
Error: required flag(s) "issuer" not set
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "missing concierge flags",
|
||||
args: []string{
|
||||
"--client-id", "test-client-id",
|
||||
"--issuer", "test-issuer",
|
||||
"--enable-concierge",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: invalid concierge parameters: endpoint must not be empty
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid CA bundle path",
|
||||
args: []string{
|
||||
"--client-id", "test-client-id",
|
||||
"--issuer", "test-issuer",
|
||||
"--ca-bundle", "./does/not/exist",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not read --ca-bundle: open ./does/not/exist: no such file or directory
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "invalid CA bundle data",
|
||||
args: []string{
|
||||
"--client-id", "test-client-id",
|
||||
"--issuer", "test-issuer",
|
||||
"--ca-bundle-data", "invalid-base64",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not read --ca-bundle-data: illegal base64 data at input byte 7
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "login error",
|
||||
args: []string{
|
||||
"--client-id", "test-client-id",
|
||||
"--issuer", "test-issuer",
|
||||
},
|
||||
loginErr: fmt.Errorf("some login error"),
|
||||
wantOptionsCount: 3,
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not complete Pinniped login: some login error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "concierge token exchange error",
|
||||
args: []string{
|
||||
"--client-id", "test-client-id",
|
||||
"--issuer", "test-issuer",
|
||||
"--enable-concierge",
|
||||
"--concierge-authenticator-type", "jwt",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
"--concierge-endpoint", "https://127.0.0.1:1234/",
|
||||
},
|
||||
conciergeErr: fmt.Errorf("some concierge error"),
|
||||
wantOptionsCount: 3,
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not complete concierge credential exchange: some concierge error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "success with minimal options",
|
||||
args: []string{
|
||||
"--client-id", "test-client-id",
|
||||
"--issuer", "test-issuer",
|
||||
},
|
||||
wantIssuer: "test-issuer",
|
||||
wantClientID: "test-client-id",
|
||||
wantOptionsCount: 3,
|
||||
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
|
||||
},
|
||||
@@ -79,31 +166,56 @@ func TestLoginOIDCCommand(t *testing.T) {
|
||||
"--skip-browser",
|
||||
"--listen-port", "1234",
|
||||
"--debug-session-cache",
|
||||
"--request-audience", "cluster-1234",
|
||||
"--ca-bundle-data", base64.StdEncoding.EncodeToString(testCA.Bundle()),
|
||||
"--ca-bundle", testCABundlePath,
|
||||
"--enable-concierge",
|
||||
"--concierge-namespace", "test-namespace",
|
||||
"--concierge-authenticator-type", "webhook",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
"--concierge-endpoint", "https://127.0.0.1:1234/",
|
||||
"--concierge-ca-bundle-data", base64.StdEncoding.EncodeToString(testCA.Bundle()),
|
||||
},
|
||||
wantIssuer: "test-issuer",
|
||||
wantClientID: "test-client-id",
|
||||
wantOptionsCount: 5,
|
||||
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"expirationTimestamp":"3020-10-12T13:14:15Z","token":"test-id-token"}}` + "\n",
|
||||
wantOptionsCount: 7,
|
||||
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"exchanged-token"}}` + "\n",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
var (
gotIssuer string
gotClientID string
gotOptions []oidcclient.Option
)
cmd := oidcLoginCommand(func(issuer string, clientID string, opts ...oidcclient.Option) (*oidctypes.Token, error) {
gotIssuer = issuer
gotClientID = clientID
cmd := oidcLoginCommand(oidcLoginCommandDeps{
login: func(issuer string, clientID string, opts ...oidcclient.Option) (*oidctypes.Token, error) {
require.Equal(t, "test-issuer", issuer)
require.Equal(t, "test-client-id", clientID)
gotOptions = opts
if tt.loginErr != nil {
return nil, tt.loginErr
}
return &oidctypes.Token{
IDToken: &oidctypes.IDToken{
Token: "test-id-token",
Expiry: metav1.NewTime(time1),
},
}, nil
},
exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) {
require.Equal(t, token, "test-id-token")
if tt.conciergeErr != nil {
return nil, tt.conciergeErr
}
return &clientauthv1beta1.ExecCredential{
TypeMeta: metav1.TypeMeta{
Kind: "ExecCredential",
APIVersion: "client.authentication.k8s.io/v1beta1",
},
Status: &clientauthv1beta1.ExecCredentialStatus{
Token: "exchanged-token",
},
}, nil
},
})
require.NotNil(t, cmd)

@ -119,8 +231,6 @@ func TestLoginOIDCCommand(t *testing.T) {
}
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
require.Equal(t, tt.wantIssuer, gotIssuer, "unexpected issuer")
require.Equal(t, tt.wantClientID, gotClientID, "unexpected client ID")
require.Len(t, gotOptions, tt.wantOptionsCount)
})
}
120
cmd/pinniped/cmd/login_static.go
Normal file
@ -0,0 +1,120 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
"go.pinniped.dev/pkg/conciergeclient"
|
||||
"go.pinniped.dev/pkg/oidcclient/oidctypes"
|
||||
)
|
||||
|
||||
//nolint: gochecknoinits
|
||||
func init() {
|
||||
loginCmd.AddCommand(staticLoginCommand(staticLoginRealDeps()))
|
||||
}
|
||||
|
||||
type staticLoginDeps struct {
|
||||
lookupEnv func(string) (string, bool)
|
||||
exchangeToken func(context.Context, *conciergeclient.Client, string) (*clientauthv1beta1.ExecCredential, error)
|
||||
}
|
||||
|
||||
func staticLoginRealDeps() staticLoginDeps {
|
||||
return staticLoginDeps{
|
||||
lookupEnv: os.LookupEnv,
|
||||
exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) {
|
||||
return client.ExchangeToken(ctx, token)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type staticLoginParams struct {
|
||||
staticToken string
|
||||
staticTokenEnvName string
|
||||
conciergeEnabled bool
|
||||
conciergeNamespace string
|
||||
conciergeAuthenticatorType string
|
||||
conciergeAuthenticatorName string
|
||||
conciergeEndpoint string
|
||||
conciergeCABundle string
|
||||
}
|
||||
|
||||
func staticLoginCommand(deps staticLoginDeps) *cobra.Command {
|
||||
var (
|
||||
cmd = cobra.Command{
|
||||
Args: cobra.NoArgs,
|
||||
Use: "static [--token TOKEN] [--token-env TOKEN_NAME]",
|
||||
Short: "Login using a static token",
|
||||
SilenceUsage: true,
|
||||
}
|
||||
flags staticLoginParams
|
||||
)
|
||||
cmd.Flags().StringVar(&flags.staticToken, "token", "", "Static token to present during login")
|
||||
cmd.Flags().StringVar(&flags.staticTokenEnvName, "token-env", "", "Environment variable containing a static token")
|
||||
cmd.Flags().BoolVar(&flags.conciergeEnabled, "enable-concierge", false, "Exchange the token with the Pinniped concierge during login")
|
||||
cmd.Flags().StringVar(&flags.conciergeNamespace, "concierge-namespace", "pinniped-concierge", "Namespace in which the concierge was installed")
|
||||
cmd.Flags().StringVar(&flags.conciergeAuthenticatorType, "concierge-authenticator-type", "", "Concierge authenticator type (e.g., 'webhook', 'jwt')")
|
||||
cmd.Flags().StringVar(&flags.conciergeAuthenticatorName, "concierge-authenticator-name", "", "Concierge authenticator name")
|
||||
cmd.Flags().StringVar(&flags.conciergeEndpoint, "concierge-endpoint", "", "API base for the Pinniped concierge endpoint")
|
||||
cmd.Flags().StringVar(&flags.conciergeCABundle, "concierge-ca-bundle-data", "", "CA bundle to use when connecting to the concierge")
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error { return runStaticLogin(cmd.OutOrStdout(), deps, flags) }
|
||||
return &cmd
|
||||
}
|
||||
|
||||
func runStaticLogin(out io.Writer, deps staticLoginDeps, flags staticLoginParams) error {
|
||||
if flags.staticToken == "" && flags.staticTokenEnvName == "" {
|
||||
return fmt.Errorf("one of --token or --token-env must be set")
|
||||
}
|
||||
|
||||
var concierge *conciergeclient.Client
|
||||
if flags.conciergeEnabled {
|
||||
var err error
|
||||
concierge, err = conciergeclient.New(
|
||||
conciergeclient.WithNamespace(flags.conciergeNamespace),
|
||||
conciergeclient.WithEndpoint(flags.conciergeEndpoint),
|
||||
conciergeclient.WithBase64CABundle(flags.conciergeCABundle),
|
||||
conciergeclient.WithAuthenticator(flags.conciergeAuthenticatorType, flags.conciergeAuthenticatorName),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid concierge parameters: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var token string
|
||||
if flags.staticToken != "" {
|
||||
token = flags.staticToken
|
||||
}
|
||||
if flags.staticTokenEnvName != "" {
|
||||
var ok bool
|
||||
token, ok = deps.lookupEnv(flags.staticTokenEnvName)
|
||||
if !ok {
|
||||
return fmt.Errorf("--token-env variable %q is not set", flags.staticTokenEnvName)
|
||||
}
|
||||
if token == "" {
|
||||
return fmt.Errorf("--token-env variable %q is empty", flags.staticTokenEnvName)
|
||||
}
|
||||
}
|
||||
cred := tokenCredential(&oidctypes.Token{IDToken: &oidctypes.IDToken{Token: token}})
|
||||
|
||||
// Exchange that token with the concierge, if configured.
|
||||
if concierge != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var err error
|
||||
cred, err = deps.exchangeToken(ctx, concierge, token)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not complete concierge credential exchange: %w", err)
|
||||
}
|
||||
}
|
||||
return json.NewEncoder(out).Encode(cred)
|
||||
}
|
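For orientation only, here is a hedged sketch (not part of this commit) of how the "login static" command above could be driven in-process. It assumes the file compiles inside the same cmd package so it can reach the unexported staticLoginCommand and staticLoginRealDeps helpers shown in this diff; the token value is hypothetical.

package cmd // sketch only: must live in the same package to reach the unexported helpers

import (
	"bytes"
	"fmt"
)

// exampleStaticLogin drives the "login static" command in-process, the same way
// the unit tests below do, and returns the ExecCredential JSON that it prints.
func exampleStaticLogin() (string, error) {
	var out bytes.Buffer
	cmd := staticLoginCommand(staticLoginRealDeps())
	cmd.SetOut(&out)
	cmd.SetArgs([]string{"--token", "test-token"}) // hypothetical static token
	if err := cmd.Execute(); err != nil {
		return "", fmt.Errorf("login failed: %w", err)
	}
	// out now holds a JSON-encoded client.authentication.k8s.io/v1beta1 ExecCredential.
	return out.String(), nil
}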
180
cmd/pinniped/cmd/login_static_test.go
Normal file
@ -0,0 +1,180 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509/pkix"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
"go.pinniped.dev/internal/certauthority"
|
||||
"go.pinniped.dev/internal/here"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
"go.pinniped.dev/pkg/conciergeclient"
|
||||
)
|
||||
|
||||
func TestLoginStaticCommand(t *testing.T) {
|
||||
testCA, err := certauthority.New(pkix.Name{CommonName: "Test CA"}, 1*time.Hour)
|
||||
require.NoError(t, err)
|
||||
tmpdir := testutil.TempDir(t)
|
||||
testCABundlePath := filepath.Join(tmpdir, "testca.pem")
|
||||
require.NoError(t, ioutil.WriteFile(testCABundlePath, testCA.Bundle(), 0600))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
env map[string]string
|
||||
loginErr error
|
||||
conciergeErr error
|
||||
wantError bool
|
||||
wantStdout string
|
||||
wantStderr string
|
||||
wantOptionsCount int
|
||||
}{
|
||||
{
|
||||
name: "help flag passed",
|
||||
args: []string{"--help"},
|
||||
wantStdout: here.Doc(`
|
||||
Login using a static token
|
||||
|
||||
Usage:
|
||||
static [--token TOKEN] [--token-env TOKEN_NAME] [flags]
|
||||
|
||||
Flags:
|
||||
--concierge-authenticator-name string Concierge authenticator name
|
||||
--concierge-authenticator-type string Concierge authenticator type (e.g., 'webhook', 'jwt')
|
||||
--concierge-ca-bundle-data string CA bundle to use when connecting to the concierge
|
||||
--concierge-endpoint string API base for the Pinniped concierge endpoint
|
||||
--concierge-namespace string Namespace in which the concierge was installed (default "pinniped-concierge")
|
||||
--enable-concierge Exchange the token with the Pinniped concierge during login
|
||||
-h, --help help for static
|
||||
--token string Static token to present during login
|
||||
--token-env string Environment variable containing a static token
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "missing required flags",
|
||||
args: []string{},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: one of --token or --token-env must be set
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "missing concierge flags",
|
||||
args: []string{
|
||||
"--token", "test-token",
|
||||
"--enable-concierge",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: invalid concierge parameters: endpoint must not be empty
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "missing env var",
|
||||
args: []string{
|
||||
"--token-env", "TEST_TOKEN_ENV",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: --token-env variable "TEST_TOKEN_ENV" is not set
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "empty env var",
|
||||
args: []string{
|
||||
"--token-env", "TEST_TOKEN_ENV",
|
||||
},
|
||||
env: map[string]string{
|
||||
"TEST_TOKEN_ENV": "",
|
||||
},
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: --token-env variable "TEST_TOKEN_ENV" is empty
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "env var token success",
|
||||
args: []string{
|
||||
"--token-env", "TEST_TOKEN_ENV",
|
||||
},
|
||||
env: map[string]string{
|
||||
"TEST_TOKEN_ENV": "test-token",
|
||||
},
|
||||
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"test-token"}}` + "\n",
|
||||
},
|
||||
{
|
||||
name: "concierge failure",
|
||||
args: []string{
|
||||
"--token", "test-token",
|
||||
"--enable-concierge",
|
||||
"--concierge-endpoint", "https://127.0.0.1/",
|
||||
"--concierge-authenticator-type", "webhook",
|
||||
"--concierge-authenticator-name", "test-authenticator",
|
||||
},
|
||||
conciergeErr: fmt.Errorf("some concierge error"),
|
||||
wantError: true,
|
||||
wantStdout: here.Doc(`
|
||||
Error: could not complete concierge credential exchange: some concierge error
|
||||
`),
|
||||
},
|
||||
{
|
||||
name: "static token success",
|
||||
args: []string{
|
||||
"--token", "test-token",
|
||||
},
|
||||
wantStdout: `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{},"status":{"token":"test-token"}}` + "\n",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cmd := staticLoginCommand(staticLoginDeps{
|
||||
lookupEnv: func(s string) (string, bool) {
|
||||
v, ok := tt.env[s]
|
||||
return v, ok
|
||||
},
|
||||
exchangeToken: func(ctx context.Context, client *conciergeclient.Client, token string) (*clientauthv1beta1.ExecCredential, error) {
|
||||
require.Equal(t, token, "test-token")
|
||||
if tt.conciergeErr != nil {
|
||||
return nil, tt.conciergeErr
|
||||
}
|
||||
return &clientauthv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthv1beta1.ExecCredentialStatus{
|
||||
Token: "exchanged-token",
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
require.NotNil(t, cmd)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.SetOut(&stdout)
|
||||
cmd.SetErr(&stderr)
|
||||
cmd.SetArgs(tt.args)
|
||||
err := cmd.Execute()
|
||||
if tt.wantError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, tt.wantStdout, stdout.String(), "unexpected stdout")
|
||||
require.Equal(t, tt.wantStderr, stderr.String(), "unexpected stderr")
|
||||
})
|
||||
}
|
||||
}
|
@ -108,6 +108,7 @@ spec:
- Success
- Duplicate
- Invalid
- SameIssuerHostMustUseSameSecret
type: string
type: object
required:

@ -8,7 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:validation:Enum=Success;Duplicate;Invalid
// +kubebuilder:validation:Enum=Success;Duplicate;Invalid;SameIssuerHostMustUseSameSecret
type OIDCProviderStatusCondition string

const (
1
go.mod
@ -7,7 +7,6 @@ require (
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/davecgh/go-spew v1.1.1
github.com/ghodss/yaml v1.0.0
github.com/go-logr/logr v0.2.1
github.com/go-logr/stdr v0.2.0
github.com/gofrs/flock v0.8.0
@ -9,12 +9,22 @@ import "net/http"
// Wrap the provided http.Handler so it sets appropriate security-related response headers.
func Wrap(wrapped http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
wrapped.ServeHTTP(w, r)
h := w.Header()
h.Set("Content-Security-Policy", "default-src 'none'; frame-ancestors 'none'")
h.Set("X-Frame-Options", "DENY")
h.Set("X-XSS-Protection", "1; mode=block")
h.Set("X-Content-Type-Options", "nosniff")
h.Set("Referrer-Policy", "no-referrer")
wrapped.ServeHTTP(w, r)
h.Set("X-DNS-Prefetch-Control", "off")

// first overwrite existing Cache-Control header with Set, then append more headers with Add
h.Set("Cache-Control", "no-cache")
h.Add("Cache-Control", "no-store")
h.Add("Cache-Control", "max-age=0")
h.Add("Cache-Control", "must-revalidate")

h.Set("Pragma", "no-cache")
h.Set("Expires", "0")
})
}
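As a quick illustration (not from this commit), the wrapper above is meant to be composed around any handler before it is handed to a server. A minimal sketch, assuming it builds inside the pinniped module so the internal securityheader import path is reachable; the /healthz handler and listen address are hypothetical.

package main // sketch only

import (
	"log"
	"net/http"

	"go.pinniped.dev/internal/httputil/securityheader"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	// Every response served through the wrapper carries the CSP, cache-control,
	// and other headers set by securityheader.Wrap above.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", securityheader.Wrap(mux)))
}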
@ -26,5 +26,9 @@ func TestWrap(t *testing.T) {
"X-Content-Type-Options": []string{"nosniff"},
"X-Frame-Options": []string{"DENY"},
"X-Xss-Protection": []string{"1; mode=block"},
"X-Dns-Prefetch-Control": []string{"off"},
"Cache-Control": []string{"no-cache", "no-store", "max-age=0", "must-revalidate"},
"Pragma": []string{"no-cache"},
"Expires": []string{"0"},
}, rec.Header())
}
@ -16,6 +16,7 @@ import (
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
"go.pinniped.dev/internal/httputil/httperr"
|
||||
"go.pinniped.dev/internal/httputil/securityheader"
|
||||
"go.pinniped.dev/internal/oidc"
|
||||
"go.pinniped.dev/internal/oidc/csrftoken"
|
||||
"go.pinniped.dev/internal/oidc/provider"
|
||||
@ -34,7 +35,7 @@ func NewHandler(
|
||||
upstreamStateEncoder oidc.Encoder,
|
||||
cookieCodec oidc.Codec,
|
||||
) http.Handler {
|
||||
return httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
return securityheader.Wrap(httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
if r.Method != http.MethodPost && r.Method != http.MethodGet {
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
|
||||
// Authorization Servers MUST support the use of the HTTP GET and POST methods defined in
|
||||
@ -142,7 +143,7 @@ func NewHandler(
|
||||
)
|
||||
|
||||
return nil
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
func readCSRFCookie(r *http.Request, codec oidc.Decoder) csrftoken.CSRFToken {
|
||||
@ -172,6 +173,13 @@ func chooseUpstreamIDP(idpListGetter oidc.IDPListGetter) (provider.UpstreamOIDCI
|
||||
"No upstream providers are configured",
|
||||
)
|
||||
} else if len(allUpstreamIDPs) > 1 {
|
||||
var upstreamIDPNames []string
|
||||
for _, idp := range allUpstreamIDPs {
|
||||
upstreamIDPNames = append(upstreamIDPNames, idp.GetName())
|
||||
}
|
||||
|
||||
plog.Warning("Too many upstream providers are configured (found: %s)", upstreamIDPNames)
|
||||
|
||||
return nil, httperr.New(
|
||||
http.StatusUnprocessableEntity,
|
||||
"Too many upstream providers are configured (support for multiple upstreams is not yet implemented)",
|
||||
|
@ -773,6 +773,7 @@ func TestAuthorizationEndpoint(t *testing.T) {
|
||||
|
||||
require.Equal(t, test.wantStatus, rsp.Code)
|
||||
testutil.RequireEqualContentType(t, rsp.Header().Get("Content-Type"), test.wantContentType)
|
||||
testutil.RequireSecurityHeaders(t, rsp)
|
||||
|
||||
actualLocation := rsp.Header().Get("Location")
|
||||
if test.wantLocationHeader != "" {
|
||||
|
@ -17,6 +17,7 @@ import (
|
||||
"github.com/ory/fosite/token/jwt"
|
||||
|
||||
"go.pinniped.dev/internal/httputil/httperr"
|
||||
"go.pinniped.dev/internal/httputil/securityheader"
|
||||
"go.pinniped.dev/internal/oidc"
|
||||
"go.pinniped.dev/internal/oidc/csrftoken"
|
||||
"go.pinniped.dev/internal/oidc/provider"
|
||||
@ -29,7 +30,7 @@ func NewHandler(
|
||||
stateDecoder, cookieDecoder oidc.Decoder,
|
||||
redirectURI string,
|
||||
) http.Handler {
|
||||
return httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
return securityheader.Wrap(httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
state, err := validateRequest(r, stateDecoder, cookieDecoder)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -92,7 +93,7 @@ func NewHandler(
|
||||
oauthHelper.WriteAuthorizeResponse(w, authorizeRequester, authorizeResponder)
|
||||
|
||||
return nil
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
func authcode(r *http.Request) string {
|
||||
|
@ -502,6 +502,8 @@ func TestCallbackEndpoint(t *testing.T) {
|
||||
t.Logf("response: %#v", rsp)
|
||||
t.Logf("response body: %q", rsp.Body.String())
|
||||
|
||||
testutil.RequireSecurityHeaders(t, rsp)
|
||||
|
||||
if test.wantExchangeAndValidateTokensCall != nil {
|
||||
require.Equal(t, 1, test.idp.ExchangeAuthcodeAndValidateTokensCallCount())
|
||||
test.wantExchangeAndValidateTokensCall.Ctx = req.Context()
|
||||
|
@ -6,6 +6,7 @@ package testutil
import (
"context"
"mime"
"net/http/httptest"
"testing"
"time"

@ -52,3 +53,15 @@ func RequireNumberOfSecretsMatchingLabelSelector(t *testing.T, secrets v1.Secret
require.NoError(t, err)
require.Len(t, storedAuthcodeSecrets.Items, expectedNumberOfSecrets)
}

func RequireSecurityHeaders(t *testing.T, response *httptest.ResponseRecorder) {
require.Equal(t, "default-src 'none'; frame-ancestors 'none'", response.Header().Get("Content-Security-Policy"))
require.Equal(t, "DENY", response.Header().Get("X-Frame-Options"))
require.Equal(t, "1; mode=block", response.Header().Get("X-XSS-Protection"))
require.Equal(t, "nosniff", response.Header().Get("X-Content-Type-Options"))
require.Equal(t, "no-referrer", response.Header().Get("Referrer-Policy"))
require.Equal(t, "off", response.Header().Get("X-DNS-Prefetch-Control"))
require.ElementsMatch(t, []string{"no-cache", "no-store", "max-age=0", "must-revalidate"}, response.Header().Values("Cache-Control"))
require.Equal(t, "no-cache", response.Header().Get("Pragma"))
require.Equal(t, "0", response.Header().Get("Expires"))
}
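A hedged sketch of how a handler test might combine this new helper with securityheader.Wrap (hypothetical test file, assuming it compiles inside the pinniped module alongside the securityheader package; the handler and path are made up).

package securityheader_test // sketch only

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"go.pinniped.dev/internal/httputil/securityheader"
	"go.pinniped.dev/internal/testutil"
)

func TestSecurityHeadersSketch(t *testing.T) {
	// Wrap a trivial handler, the same way the supervisor endpoints in this commit do.
	handler := securityheader.Wrap(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/some/path", nil))

	// Asserts every header that RequireSecurityHeaders above checks for.
	testutil.RequireSecurityHeaders(t, rec)
}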
193
pkg/conciergeclient/conciergeclient.go
Normal file
@ -0,0 +1,193 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package conciergeclient provides login helpers for the Pinniped concierge.
|
||||
package conciergeclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
auth1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/login/v1alpha1"
|
||||
conciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned"
|
||||
"go.pinniped.dev/internal/constable"
|
||||
)
|
||||
|
||||
// ErrLoginFailed is returned by Client.ExchangeToken when the concierge server rejects the login request for any reason.
|
||||
var ErrLoginFailed = constable.Error("login failed")
|
||||
|
||||
// Option is an optional configuration for New().
|
||||
type Option func(*Client) error
|
||||
|
||||
// Client is a configuration for talking to the Pinniped concierge.
|
||||
type Client struct {
|
||||
namespace string
|
||||
authenticator *corev1.TypedLocalObjectReference
|
||||
caBundle string
|
||||
endpoint *url.URL
|
||||
}
|
||||
|
||||
// WithNamespace configures the namespace where the TokenCredentialRequest is to be sent.
|
||||
func WithNamespace(namespace string) Option {
|
||||
return func(c *Client) error {
|
||||
c.namespace = namespace
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthenticator configures the authenticator reference (spec.authenticator) of the TokenCredentialRequests.
|
||||
func WithAuthenticator(authType, authName string) Option {
|
||||
return func(c *Client) error {
|
||||
if authName == "" {
|
||||
return fmt.Errorf("authenticator name must not be empty")
|
||||
}
|
||||
authenticator := corev1.TypedLocalObjectReference{Name: authName}
|
||||
switch strings.ToLower(authType) {
|
||||
case "webhook":
|
||||
authenticator.APIGroup = &auth1alpha1.SchemeGroupVersion.Group
|
||||
authenticator.Kind = "WebhookAuthenticator"
|
||||
case "jwt":
|
||||
authenticator.APIGroup = &auth1alpha1.SchemeGroupVersion.Group
|
||||
authenticator.Kind = "JWTAuthenticator"
|
||||
default:
|
||||
return fmt.Errorf(`invalid authenticator type: %q, supported values are "webhook" and "jwt"`, authType)
|
||||
}
|
||||
c.authenticator = &authenticator
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithCABundle configures the PEM-formatted TLS certificate authority to trust when connecting to the concierge.
|
||||
func WithCABundle(caBundle string) Option {
|
||||
return func(c *Client) error {
|
||||
if caBundle == "" {
|
||||
return nil
|
||||
}
|
||||
if p := x509.NewCertPool(); !p.AppendCertsFromPEM([]byte(caBundle)) {
|
||||
return fmt.Errorf("invalid CA bundle data: no certificates found")
|
||||
}
|
||||
c.caBundle = caBundle
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBase64CABundle configures the base64-encoded, PEM-formatted TLS certificate authority to trust when connecting to the concierge.
|
||||
func WithBase64CABundle(caBundleBase64 string) Option {
|
||||
return func(c *Client) error {
|
||||
caBundle, err := base64.StdEncoding.DecodeString(caBundleBase64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid CA bundle data: %w", err)
|
||||
}
|
||||
return WithCABundle(string(caBundle))(c)
|
||||
}
|
||||
}
|
||||
|
||||
// WithEndpoint configures the base API endpoint URL of the concierge service (same as Kubernetes API server).
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return func(c *Client) error {
|
||||
if endpoint == "" {
|
||||
return fmt.Errorf("endpoint must not be empty")
|
||||
}
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid endpoint URL: %w", err)
|
||||
}
|
||||
if u.Scheme != "https" {
|
||||
return fmt.Errorf(`invalid endpoint scheme %q (must be "https")`, u.Scheme)
|
||||
}
|
||||
c.endpoint = u
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// New validates the specified options and returns a newly initialized *Client.
|
||||
func New(opts ...Option) (*Client, error) {
|
||||
c := Client{namespace: "pinniped-concierge"}
|
||||
for _, opt := range opts {
|
||||
if err := opt(&c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if c.authenticator == nil {
|
||||
return nil, fmt.Errorf("WithAuthenticator must be specified")
|
||||
}
|
||||
if c.endpoint == nil {
|
||||
return nil, fmt.Errorf("WithEndpoint must be specified")
|
||||
}
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// clientset returns an anonymous client for the concierge API.
|
||||
func (c *Client) clientset() (conciergeclientset.Interface, error) {
|
||||
cfg, err := clientcmd.NewNonInteractiveClientConfig(clientcmdapi.Config{
|
||||
Clusters: map[string]*clientcmdapi.Cluster{
|
||||
"cluster": {
|
||||
Server: c.endpoint.String(),
|
||||
CertificateAuthorityData: []byte(c.caBundle),
|
||||
},
|
||||
},
|
||||
Contexts: map[string]*clientcmdapi.Context{
|
||||
"current": {
|
||||
Cluster: "cluster",
|
||||
AuthInfo: "client",
|
||||
},
|
||||
},
|
||||
AuthInfos: map[string]*clientcmdapi.AuthInfo{
|
||||
"client": {},
|
||||
},
|
||||
}, "current", &clientcmd.ConfigOverrides{}, nil).ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conciergeclientset.NewForConfig(cfg)
|
||||
}
|
||||
|
||||
// ExchangeToken performs a TokenCredentialRequest against the Pinniped concierge and returns the result as an ExecCredential.
|
||||
func (c *Client) ExchangeToken(ctx context.Context, token string) (*clientauthenticationv1beta1.ExecCredential, error) {
|
||||
clientset, err := c.clientset()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := clientset.LoginV1alpha1().TokenCredentialRequests(c.namespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: c.namespace,
|
||||
},
|
||||
Spec: loginv1alpha1.TokenCredentialRequestSpec{
|
||||
Token: token,
|
||||
Authenticator: *c.authenticator,
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not login: %w", err)
|
||||
}
|
||||
if resp.Status.Credential == nil || resp.Status.Message != nil {
|
||||
if resp.Status.Message != nil {
|
||||
return nil, fmt.Errorf("%w: %s", ErrLoginFailed, *resp.Status.Message)
|
||||
}
|
||||
return nil, fmt.Errorf("%w: unknown cause", ErrLoginFailed)
|
||||
}
|
||||
|
||||
return &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ExpirationTimestamp: &resp.Status.Credential.ExpirationTimestamp,
|
||||
ClientCertificateData: resp.Status.Credential.ClientCertificateData,
|
||||
ClientKeyData: resp.Status.Credential.ClientKeyData,
|
||||
Token: resp.Status.Credential.Token,
|
||||
},
|
||||
}, nil
|
||||
}
|
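To make the option-based API above concrete, here is a minimal usage sketch (not part of this diff). The endpoint, CA bundle, authenticator name, and token are hypothetical inputs; the function signatures come from the file above.

package example // sketch only

import (
	"context"
	"fmt"

	clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"

	"go.pinniped.dev/pkg/conciergeclient"
)

// exchangeWithConcierge shows the intended call pattern for the package above.
func exchangeWithConcierge(ctx context.Context, caBundlePEM, upstreamToken string) (*clientauthenticationv1beta1.ExecCredential, error) {
	client, err := conciergeclient.New(
		conciergeclient.WithNamespace("pinniped-concierge"),
		conciergeclient.WithEndpoint("https://my-cluster.example.com"), // hypothetical API server URL
		conciergeclient.WithCABundle(caBundlePEM),
		conciergeclient.WithAuthenticator("jwt", "my-jwt-authenticator"), // hypothetical authenticator name
	)
	if err != nil {
		return nil, fmt.Errorf("invalid concierge parameters: %w", err)
	}
	// Trade the upstream token for cluster credentials via a TokenCredentialRequest.
	return client.ExchangeToken(ctx, upstreamToken)
}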
263
pkg/conciergeclient/conciergeclient_test.go
Normal file
@ -0,0 +1,263 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package conciergeclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
|
||||
|
||||
loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/login/v1alpha1"
|
||||
"go.pinniped.dev/internal/certauthority"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
testCA, err := certauthority.New(pkix.Name{}, 1*time.Hour)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
opts []Option
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "some option error",
|
||||
opts: []Option{
|
||||
func(client *Client) error { return fmt.Errorf("some error") },
|
||||
},
|
||||
wantErr: "some error",
|
||||
},
|
||||
{
|
||||
name: "with invalid authenticator",
|
||||
opts: []Option{
|
||||
WithAuthenticator("invalid-type", "test-authenticator"),
|
||||
},
|
||||
wantErr: `invalid authenticator type: "invalid-type", supported values are "webhook" and "jwt"`,
|
||||
},
|
||||
{
|
||||
name: "with empty authenticator name",
|
||||
opts: []Option{
|
||||
WithAuthenticator("webhook", ""),
|
||||
},
|
||||
wantErr: `authenticator name must not be empty`,
|
||||
},
|
||||
{
|
||||
name: "invalid CA bundle",
|
||||
opts: []Option{
|
||||
WithCABundle("invalid-base64"),
|
||||
},
|
||||
wantErr: "invalid CA bundle data: no certificates found",
|
||||
},
|
||||
{
|
||||
name: "invalid base64 CA bundle",
|
||||
opts: []Option{
|
||||
WithBase64CABundle("invalid-base64"),
|
||||
},
|
||||
wantErr: "invalid CA bundle data: illegal base64 data at input byte 7",
|
||||
},
|
||||
{
|
||||
name: "empty endpoint",
|
||||
opts: []Option{
|
||||
WithEndpoint(""),
|
||||
},
|
||||
wantErr: `endpoint must not be empty`,
|
||||
},
|
||||
{
|
||||
name: "invalid endpoint",
|
||||
opts: []Option{
|
||||
WithEndpoint("%"),
|
||||
},
|
||||
wantErr: `invalid endpoint URL: parse "%": invalid URL escape "%"`,
|
||||
},
|
||||
{
|
||||
name: "non-https endpoint",
|
||||
opts: []Option{
|
||||
WithEndpoint("http://example.com"),
|
||||
},
|
||||
wantErr: `invalid endpoint scheme "http" (must be "https")`,
|
||||
},
|
||||
{
|
||||
name: "missing authenticator",
|
||||
opts: []Option{
|
||||
WithEndpoint("https://example.com"),
|
||||
},
|
||||
wantErr: "WithAuthenticator must be specified",
|
||||
},
|
||||
{
|
||||
name: "missing endpoint",
|
||||
opts: []Option{
|
||||
WithAuthenticator("jwt", "test-authenticator"),
|
||||
},
|
||||
wantErr: "WithEndpoint must be specified",
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
opts: []Option{
|
||||
WithNamespace("test-namespace"),
|
||||
WithEndpoint("https://example.com"),
|
||||
WithCABundle(""),
|
||||
WithCABundle(string(testCA.Bundle())),
|
||||
WithBase64CABundle(base64.StdEncoding.EncodeToString(testCA.Bundle())),
|
||||
WithAuthenticator("jwt", "test-authenticator"),
|
||||
WithAuthenticator("webhook", "test-authenticator"),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got, err := New(tt.opts...)
|
||||
if tt.wantErr != "" {
|
||||
require.EqualError(t, err, tt.wantErr)
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExchangeToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("clientset failure", func(t *testing.T) {
|
||||
c := Client{endpoint: &url.URL{}}
|
||||
_, err := c.ExchangeToken(ctx, "")
|
||||
require.EqualError(t, err, "invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable")
|
||||
})
|
||||
|
||||
t.Run("server error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Start a test server that returns only 500 errors.
|
||||
caBundle, endpoint := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
_, _ = w.Write([]byte("some server error"))
|
||||
})
|
||||
|
||||
client, err := New(WithEndpoint(endpoint), WithCABundle(caBundle), WithAuthenticator("jwt", "test-authenticator"))
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := client.ExchangeToken(ctx, "test-token")
|
||||
require.EqualError(t, err, `could not login: an error on the server ("some server error") has prevented the request from succeeding (post tokencredentialrequests.login.concierge.pinniped.dev)`)
|
||||
require.Nil(t, got)
|
||||
})
|
||||
|
||||
t.Run("login failure", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Start a test server that returns success but with an error message
|
||||
errorMessage := "some login failure"
|
||||
caBundle, endpoint := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(&loginv1alpha1.TokenCredentialRequest{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "login.concierge.pinniped.dev/v1alpha1", Kind: "TokenCredentialRequest"},
|
||||
Status: loginv1alpha1.TokenCredentialRequestStatus{Message: &errorMessage},
|
||||
})
|
||||
})
|
||||
|
||||
client, err := New(WithEndpoint(endpoint), WithCABundle(caBundle), WithAuthenticator("jwt", "test-authenticator"))
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := client.ExchangeToken(ctx, "test-token")
|
||||
require.EqualError(t, err, `login failed: some login failure`)
|
||||
require.Nil(t, got)
|
||||
})
|
||||
|
||||
t.Run("login failure unknown error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Start a test server that returns without any error message but also without valid credentials
|
||||
caBundle, endpoint := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(&loginv1alpha1.TokenCredentialRequest{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "login.concierge.pinniped.dev/v1alpha1", Kind: "TokenCredentialRequest"},
|
||||
})
|
||||
})
|
||||
|
||||
client, err := New(WithEndpoint(endpoint), WithCABundle(caBundle), WithAuthenticator("jwt", "test-authenticator"))
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := client.ExchangeToken(ctx, "test-token")
|
||||
require.EqualError(t, err, `login failed: unknown cause`)
|
||||
require.Nil(t, got)
|
||||
})
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
expires := metav1.NewTime(time.Now().Truncate(time.Second))
|
||||
|
||||
// Start a test server that returns successfully and asserts various properties of the request.
|
||||
caBundle, endpoint := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {
|
||||
require.Equal(t, http.MethodPost, r.Method)
|
||||
require.Equal(t, "/apis/login.concierge.pinniped.dev/v1alpha1/namespaces/test-namespace/tokencredentialrequests", r.URL.Path)
|
||||
require.Equal(t, "application/json", r.Header.Get("content-type"))
|
||||
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
require.JSONEq(t,
|
||||
`{
|
||||
"kind": "TokenCredentialRequest",
|
||||
"apiVersion": "login.concierge.pinniped.dev/v1alpha1",
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"namespace": "test-namespace"
|
||||
},
|
||||
"spec": {
|
||||
"token": "test-token",
|
||||
"authenticator": {
|
||||
"apiGroup": "authentication.concierge.pinniped.dev",
|
||||
"kind": "WebhookAuthenticator",
|
||||
"name": "test-webhook"
|
||||
}
|
||||
},
|
||||
"status": {}
|
||||
}`,
|
||||
string(body),
|
||||
)
|
||||
|
||||
w.Header().Set("content-type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(&loginv1alpha1.TokenCredentialRequest{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "login.concierge.pinniped.dev/v1alpha1", Kind: "TokenCredentialRequest"},
|
||||
Status: loginv1alpha1.TokenCredentialRequestStatus{
|
||||
Credential: &loginv1alpha1.ClusterCredential{
|
||||
ExpirationTimestamp: expires,
|
||||
ClientCertificateData: "test-certificate",
|
||||
ClientKeyData: "test-key",
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
client, err := New(WithNamespace("test-namespace"), WithEndpoint(endpoint), WithCABundle(caBundle), WithAuthenticator("webhook", "test-webhook"))
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := client.ExchangeToken(ctx, "test-token")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &clientauthenticationv1beta1.ExecCredential{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ExecCredential",
|
||||
APIVersion: "client.authentication.k8s.io/v1beta1",
|
||||
},
|
||||
Status: &clientauthenticationv1beta1.ExecCredentialStatus{
|
||||
ClientCertificateData: "test-certificate",
|
||||
ClientKeyData: "test-key",
|
||||
ExpirationTimestamp: &expires,
|
||||
},
|
||||
}, got)
|
||||
})
|
||||
}
|
@ -32,7 +32,7 @@ import (
|
||||
"go.pinniped.dev/test/library/browsertest"
|
||||
)
|
||||
|
||||
func TestCLIGetKubeconfig(t *testing.T) {
|
||||
func TestCLIGetKubeconfigStaticToken(t *testing.T) {
|
||||
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
|
||||
|
||||
// Create a test webhook configuration to use with the CLI.
|
||||
@ -42,28 +42,62 @@ func TestCLIGetKubeconfig(t *testing.T) {
|
||||
authenticator := library.CreateTestWebhookAuthenticator(ctx, t)
|
||||
|
||||
// Build pinniped CLI.
|
||||
pinnipedExe := buildPinnipedCLI(t)
|
||||
pinnipedExe := library.PinnipedCLIPath(t)
|
||||
|
||||
// Run pinniped CLI to get kubeconfig.
|
||||
kubeConfigYAML := runPinnipedCLIGetKubeconfig(t, pinnipedExe, env.TestUser.Token, env.ConciergeNamespace, "webhook", authenticator.Name)
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
args []string
|
||||
expectStderr string
|
||||
}{
|
||||
{
|
||||
name: "deprecated command",
|
||||
args: []string{
|
||||
"get-kubeconfig",
|
||||
"--token", env.TestUser.Token,
|
||||
"--pinniped-namespace", env.ConciergeNamespace,
|
||||
"--authenticator-type", "webhook",
|
||||
"--authenticator-name", authenticator.Name,
|
||||
},
|
||||
expectStderr: "Command \"get-kubeconfig\" is deprecated, Please use `pinniped get kubeconfig` instead.\n",
|
||||
},
|
||||
{
|
||||
name: "newer command, but still using static parameters",
|
||||
args: []string{
|
||||
"get", "kubeconfig",
|
||||
"--static-token", env.TestUser.Token,
|
||||
"--concierge-namespace", env.ConciergeNamespace,
|
||||
"--concierge-authenticator-type", "webhook",
|
||||
"--concierge-authenticator-name", authenticator.Name,
|
||||
},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
stdout, stderr := runPinnipedCLI(t, pinnipedExe, tt.args...)
|
||||
require.Equal(t, tt.expectStderr, stderr)
|
||||
|
||||
// Even the deprecated command should now generate a kubeconfig with the new "pinniped login static" command.
|
||||
restConfig := library.NewRestConfigFromKubeconfig(t, stdout)
|
||||
require.NotNil(t, restConfig.ExecProvider)
|
||||
require.Equal(t, []string{"login", "static"}, restConfig.ExecProvider.Args[:2])
|
||||
|
||||
// In addition to the client-go based testing below, also try the kubeconfig
|
||||
// with kubectl to validate that it works.
|
||||
adminClient := library.NewClientset(t)
|
||||
t.Run(
|
||||
"access as user with kubectl",
|
||||
library.AccessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
|
||||
library.AccessAsUserWithKubectlTest(ctx, adminClient, stdout, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
|
||||
)
|
||||
for _, group := range env.TestUser.ExpectedGroups {
|
||||
group := group
|
||||
t.Run(
|
||||
"access as group "+group+" with kubectl",
|
||||
library.AccessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, env.ConciergeNamespace),
|
||||
library.AccessAsGroupWithKubectlTest(ctx, adminClient, stdout, group, env.ConciergeNamespace),
|
||||
)
|
||||
}
|
||||
|
||||
// Create Kubernetes client with kubeconfig from pinniped CLI.
|
||||
kubeClient := library.NewClientsetForKubeConfig(t, kubeConfigYAML)
|
||||
kubeClient := library.NewClientsetForKubeConfig(t, stdout)
|
||||
|
||||
// Validate that we can auth to the API via our user.
|
||||
t.Run("access as user with client-go", library.AccessAsUserTest(ctx, adminClient, env.TestUser.ExpectedUsername, kubeClient))
|
||||
@ -71,41 +105,18 @@ func TestCLIGetKubeconfig(t *testing.T) {
|
||||
group := group
|
||||
t.Run("access as group "+group+" with client-go", library.AccessAsGroupTest(ctx, adminClient, group, kubeClient))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func buildPinnipedCLI(t *testing.T) string {
|
||||
func runPinnipedCLI(t *testing.T, pinnipedExe string, args ...string) (string, string) {
|
||||
t.Helper()
|
||||
|
||||
pinnipedExeDir, err := ioutil.TempDir("", "pinniped-cli-test-*")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { require.NoError(t, os.RemoveAll(pinnipedExeDir)) })
|
||||
|
||||
pinnipedExe := filepath.Join(pinnipedExeDir, "pinniped")
|
||||
output, err := exec.Command(
|
||||
"go",
|
||||
"build",
|
||||
"-o",
|
||||
pinnipedExe,
|
||||
"go.pinniped.dev/cmd/pinniped",
|
||||
).CombinedOutput()
|
||||
require.NoError(t, err, string(output))
|
||||
return pinnipedExe
|
||||
}
|
||||
|
||||
func runPinnipedCLIGetKubeconfig(t *testing.T, pinnipedExe, token, namespaceName, authenticatorType, authenticatorName string) string {
|
||||
t.Helper()
|
||||
|
||||
output, err := exec.Command(
|
||||
pinnipedExe,
|
||||
"get-kubeconfig",
|
||||
"--token", token,
|
||||
"--pinniped-namespace", namespaceName,
|
||||
"--authenticator-type", authenticatorType,
|
||||
"--authenticator-name", authenticatorName,
|
||||
).CombinedOutput()
|
||||
require.NoError(t, err, string(output))
|
||||
|
||||
return string(output)
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd := exec.Command(pinnipedExe, args...)
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
require.NoErrorf(t, cmd.Run(), "stderr:\n%s\n\nstdout:\n%s\n\n", stderr.String(), stdout.String())
|
||||
return stdout.String(), stderr.String()
|
||||
}
|
||||
|
||||
func TestCLILoginOIDC(t *testing.T) {
|
||||
@ -115,11 +126,10 @@ func TestCLILoginOIDC(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
// Build pinniped CLI.
|
||||
t.Logf("building CLI binary")
|
||||
pinnipedExe := buildPinnipedCLI(t)
|
||||
pinnipedExe := library.PinnipedCLIPath(t)
|
||||
|
||||
// Run "pinniped login oidc" to get an ExecCredential struct with an OIDC ID token.
|
||||
credOutput, sessionCachePath := runPinniedLoginOIDC(ctx, t, pinnipedExe)
|
||||
credOutput, sessionCachePath := runPinnipedLoginOIDC(ctx, t, pinnipedExe)
|
||||
|
||||
// Assert some properties of the ExecCredential.
|
||||
t.Logf("validating ExecCredential")
|
||||
@ -185,7 +195,7 @@ func TestCLILoginOIDC(t *testing.T) {
|
||||
require.NotEqual(t, credOutput2.Status.Token, credOutput3.Status.Token)
|
||||
}
|
||||
|
||||
func runPinniedLoginOIDC(
|
||||
func runPinnipedLoginOIDC(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
pinnipedExe string,
|
||||
@ -244,7 +254,7 @@ func runPinniedLoginOIDC(
|
||||
credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential)
|
||||
spawnTestGoroutine(t, func() (err error) {
|
||||
defer func() {
|
||||
closeErr := stderr.Close()
|
||||
closeErr := stdout.Close()
|
||||
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
@ -342,12 +352,6 @@ func oidcLoginCommand(ctx context.Context, t *testing.T, pinnipedExe string, ses
|
||||
}
|
||||
|
||||
// If there is a custom proxy, set it using standard environment variables.
|
||||
if env.Proxy != "" {
|
||||
cmd.Env = append(os.Environ(),
|
||||
"http_proxy="+env.Proxy,
|
||||
"https_proxy="+env.Proxy,
|
||||
"no_proxy=127.0.0.1",
|
||||
)
|
||||
}
|
||||
cmd.Env = append(os.Environ(), env.ProxyEnv()...)
|
||||
return cmd
|
||||
}
|
||||
|
@ -47,32 +47,29 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
authenticator func(t *testing.T) corev1.TypedLocalObjectReference
|
||||
authenticator func(context.Context, *testing.T) corev1.TypedLocalObjectReference
|
||||
token func(t *testing.T) (token string, username string, groups []string)
|
||||
}{
|
||||
{
|
||||
name: "webhook",
|
||||
authenticator: func(t *testing.T) corev1.TypedLocalObjectReference {
|
||||
return library.CreateTestWebhookAuthenticator(ctx, t)
|
||||
},
|
||||
authenticator: library.CreateTestWebhookAuthenticator,
|
||||
token: func(t *testing.T) (string, string, []string) {
|
||||
return library.IntegrationEnv(t).TestUser.Token, env.TestUser.ExpectedUsername, env.TestUser.ExpectedGroups
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "jwt authenticator",
|
||||
authenticator: func(t *testing.T) corev1.TypedLocalObjectReference {
|
||||
return library.CreateTestJWTAuthenticator(ctx, t, "email")
|
||||
},
|
||||
authenticator: library.CreateTestJWTAuthenticatorForCLIUpstream,
|
||||
token: func(t *testing.T) (string, string, []string) {
|
||||
pinnipedExe := buildPinnipedCLI(t)
|
||||
credOutput, _ := runPinniedLoginOIDC(ctx, t, pinnipedExe)
|
||||
pinnipedExe := library.PinnipedCLIPath(t)
|
||||
credOutput, _ := runPinnipedLoginOIDC(ctx, t, pinnipedExe)
|
||||
token := credOutput.Status.Token
|
||||
|
||||
// By default, the JWTAuthenticator expects the username to be in the "username" claim and the
|
||||
// groups to be in the "groups" claim.
|
||||
// We are configuring pinniped to set the username to be the "email" claim from the token.
|
||||
username, groups := getJWTEmailAndGroupsClaims(t, token)
|
||||
// However, we are configuring Pinniped in the `CreateTestJWTAuthenticatorForCLIUpstream` method above
|
||||
// to read the username from the "sub" claim of the token instead.
|
||||
username, groups := getJWTSubAndGroupsClaims(t, token)
|
||||
|
||||
return token, username, groups
|
||||
},
|
||||
@ -81,7 +78,7 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
authenticator := test.authenticator(t)
|
||||
authenticator := test.authenticator(ctx, t)
|
||||
token, username, groups := test.token(t)
|
||||
|
||||
var response *loginv1alpha1.TokenCredentialRequest
|
||||
@ -234,18 +231,18 @@ func safeDerefStringPtr(s *string) string {
|
||||
return *s
|
||||
}
|
||||
|
||||
func getJWTEmailAndGroupsClaims(t *testing.T, jwt string) (string, []string) {
|
||||
func getJWTSubAndGroupsClaims(t *testing.T, jwt string) (string, []string) {
|
||||
t.Helper()
|
||||
|
||||
token, err := jwtpkg.ParseSigned(jwt)
|
||||
require.NoError(t, err)
|
||||
|
||||
var claims struct {
|
||||
Email string `json:"email"`
|
||||
Sub string `json:"sub"`
|
||||
Groups []string `json:"groups"`
|
||||
}
|
||||
err = token.UnsafeClaimsWithoutVerification(&claims)
|
||||
require.NoError(t, err)
|
||||
|
||||
return claims.Email, claims.Groups
|
||||
return claims.Sub, claims.Groups
|
||||
}
|
||||
|
258
test/integration/e2e_test.go
Normal file
@ -0,0 +1,258 @@
|
||||
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
|
||||
authv1alpha "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1"
|
||||
configv1alpha1 "go.pinniped.dev/generated/1.19/apis/supervisor/config/v1alpha1"
|
||||
idpv1alpha1 "go.pinniped.dev/generated/1.19/apis/supervisor/idp/v1alpha1"
|
||||
"go.pinniped.dev/internal/certauthority"
|
||||
"go.pinniped.dev/internal/testutil"
|
||||
"go.pinniped.dev/test/library"
|
||||
"go.pinniped.dev/test/library/browsertest"
|
||||
)
|
||||
|
||||
// TestE2EFullIntegration tests a full integration scenario that combines the supervisor, concierge, and CLI.
|
||||
func TestE2EFullIntegration(t *testing.T) {
|
||||
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancelFunc()
|
||||
|
||||
// Build pinniped CLI.
|
||||
pinnipedExe := library.PinnipedCLIPath(t)
|
||||
tempDir := testutil.TempDir(t)
|
||||
|
||||
// Start the browser driver.
|
||||
page := browsertest.Open(t)
|
||||
|
||||
// Infer the downstream issuer URL from the callback associated with the upstream test client registration.
|
||||
issuerURL, err := url.Parse(env.SupervisorTestUpstream.CallbackURL)
|
||||
require.NoError(t, err)
|
||||
require.True(t, strings.HasSuffix(issuerURL.Path, "/callback"))
|
||||
issuerURL.Path = strings.TrimSuffix(issuerURL.Path, "/callback")
|
||||
t.Logf("testing with downstream issuer URL %s", issuerURL.String())
|
||||
|
||||
// Generate a CA bundle with which to serve this provider.
|
||||
t.Logf("generating test CA")
|
||||
ca, err := certauthority.New(pkix.Name{CommonName: "Downstream Test CA"}, 1*time.Hour)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save that bundle plus the one that signs the upstream issuer, for test purposes.
|
||||
testCABundlePath := filepath.Join(tempDir, "test-ca.pem")
|
||||
testCABundlePEM := []byte(string(ca.Bundle()) + "\n" + env.SupervisorTestUpstream.CABundle)
|
||||
testCABundleBase64 := base64.StdEncoding.EncodeToString(testCABundlePEM)
|
||||
require.NoError(t, ioutil.WriteFile(testCABundlePath, testCABundlePEM, 0600))
|
||||
|
||||
// Use the CA to issue a TLS server cert.
|
||||
t.Logf("issuing test certificate")
|
||||
tlsCert, err := ca.Issue(
|
||||
pkix.Name{CommonName: issuerURL.Hostname()},
|
||||
[]string{issuerURL.Hostname()},
|
||||
nil,
|
||||
1*time.Hour,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
certPEM, keyPEM, err := certauthority.ToPEM(tlsCert)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write the serving cert to a secret.
|
||||
certSecret := library.CreateTestSecret(t,
|
||||
env.SupervisorNamespace,
|
||||
"oidc-provider-tls",
|
||||
"kubernetes.io/tls",
|
||||
map[string]string{"tls.crt": string(certPEM), "tls.key": string(keyPEM)},
|
||||
)
|
||||
|
||||
// Create the downstream OIDCProvider and expect it to go into the success status condition.
|
||||
downstream := library.CreateTestOIDCProvider(ctx, t,
|
||||
issuerURL.String(),
|
||||
certSecret.Name,
|
||||
configv1alpha1.SuccessOIDCProviderStatusCondition,
|
||||
)
|
||||
|
||||
// Create upstream OIDC provider and wait for it to become ready.
|
||||
library.CreateTestUpstreamOIDCProvider(t, idpv1alpha1.UpstreamOIDCProviderSpec{
|
||||
Issuer: env.SupervisorTestUpstream.Issuer,
|
||||
TLS: &idpv1alpha1.TLSSpec{
|
||||
CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorTestUpstream.CABundle)),
|
||||
},
|
||||
AuthorizationConfig: idpv1alpha1.OIDCAuthorizationConfig{
|
||||
AdditionalScopes: []string{"email"},
|
||||
},
|
||||
Claims: idpv1alpha1.OIDCClaims{
|
||||
Username: "email",
|
||||
},
|
||||
Client: idpv1alpha1.OIDCClient{
|
||||
SecretName: library.CreateClientCredsSecret(t, env.SupervisorTestUpstream.ClientID, env.SupervisorTestUpstream.ClientSecret).Name,
|
||||
},
|
||||
}, idpv1alpha1.PhaseReady)
|
||||
|
||||
// Create a JWTAuthenticator that will validate the tokens from the downstream issuer.
|
||||
clusterAudience := "test-cluster-" + library.RandHex(t, 8)
|
||||
authenticator := library.CreateTestJWTAuthenticator(ctx, t, authv1alpha.JWTAuthenticatorSpec{
|
||||
Issuer: downstream.Spec.Issuer,
|
||||
Audience: clusterAudience,
|
||||
TLS: &authv1alpha.TLSSpec{CertificateAuthorityData: testCABundleBase64},
|
||||
})
|
||||
|
||||
// Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster.
|
||||
library.CreateTestClusterRoleBinding(t,
|
||||
rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: env.SupervisorTestUpstream.Username},
|
||||
rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
|
||||
)
|
||||
|
||||
// Use a specific session cache for this test.
|
||||
sessionCachePath := tempDir + "/sessions.yaml"
|
||||
|
||||
// Run "pinniped get kubeconfig" to get a kubeconfig YAML.
|
||||
	kubeconfigYAML, stderr := runPinnipedCLI(t, pinnipedExe, "get", "kubeconfig",
		"--concierge-namespace", env.ConciergeNamespace,
		"--concierge-authenticator-type", "jwt",
		"--concierge-authenticator-name", authenticator.Name,
		"--oidc-skip-browser",
		"--oidc-ca-bundle", testCABundlePath,
		"--oidc-session-cache", sessionCachePath,
	)
	require.Equal(t, "", stderr)

	restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML)
	require.NotNil(t, restConfig.ExecProvider)
	require.Equal(t, []string{"login", "oidc"}, restConfig.ExecProvider.Args[:2])
	kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml")
	require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))

	// Wait 10 seconds for the JWTAuthenticator to become initialized.
	// TODO: remove this sleep once we have fixed the initialization problem.
	t.Log("sleeping 10s to wait for JWTAuthenticator to become initialized")
	time.Sleep(10 * time.Second)

	// Run "kubectl get namespaces" which should trigger a browser login via the plugin.
	start := time.Now()
	kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
	kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
	stderrPipe, err := kubectlCmd.StderrPipe()
	require.NoError(t, err)
	stdoutPipe, err := kubectlCmd.StdoutPipe()
	require.NoError(t, err)

	t.Logf("starting kubectl subprocess")
	require.NoError(t, kubectlCmd.Start())
	t.Cleanup(func() {
		err := kubectlCmd.Wait()
		t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode())
		require.NoErrorf(t, err, "kubectl process did not exit cleanly")
	})

	// Start a background goroutine to read stderr from the CLI and parse out the login URL.
	loginURLChan := make(chan string)
	spawnTestGoroutine(t, func() (err error) {
		defer func() {
			closeErr := stderrPipe.Close()
			if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
				return
			}
			if err == nil {
				err = fmt.Errorf("stderr stream closed with error: %w", closeErr)
			}
		}()

		reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderrPipe))
		line, err := reader.ReadString('\n')
		if err != nil {
			return fmt.Errorf("could not read login URL line from stderr: %w", err)
		}
		const prompt = "Please log in: "
		if !strings.HasPrefix(line, prompt) {
			return fmt.Errorf("expected %q to have prefix %q", line, prompt)
		}
		loginURLChan <- strings.TrimPrefix(line, prompt)
		return readAndExpectEmpty(reader)
	})

	// Start a background goroutine to read stdout from kubectl and return the result as a string.
	kubectlOutputChan := make(chan string)
	spawnTestGoroutine(t, func() (err error) {
		defer func() {
			closeErr := stdoutPipe.Close()
			if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
				return
			}
			if err == nil {
				err = fmt.Errorf("stdout stream closed with error: %w", closeErr)
			}
		}()
		output, err := ioutil.ReadAll(stdoutPipe)
		if err != nil {
			return err
		}
		t.Logf("kubectl output:\n%s\n", output)
		kubectlOutputChan <- string(output)
		return nil
	})

	// Wait for the CLI to print out the login URL and open the browser to it.
	t.Logf("waiting for CLI to output login URL")
	var loginURL string
	select {
	case <-time.After(1 * time.Minute):
		require.Fail(t, "timed out waiting for login URL")
	case loginURL = <-loginURLChan:
	}
	t.Logf("navigating to login page")
	require.NoError(t, page.Navigate(loginURL))

	// Expect to be redirected to the upstream provider and log in.
	browsertest.LoginToUpstream(t, page, env.SupervisorTestUpstream)

	// Expect to be redirected to the localhost callback.
	t.Logf("waiting for redirect to callback")
	browsertest.WaitForURL(t, page, regexp.MustCompile(`\Ahttp://127\.0\.0\.1:[0-9]+/callback\?.+\z`))

	// Wait for the "pre" element that gets rendered for a `text/plain` page, and
	// assert that it contains the success message.
	t.Logf("verifying success page")
	browsertest.WaitForVisibleElements(t, page, "pre")
	msg, err := page.First("pre").Text()
	require.NoError(t, err)
	require.Equal(t, "you have been logged in and may now close this tab", msg)

	// Expect the CLI to output a list of namespaces in JSON format.
	t.Logf("waiting for kubectl to output namespace list JSON")
	var kubectlOutput string
	select {
	case <-time.After(10 * time.Second):
		require.Fail(t, "timed out waiting for kubectl output")
	case kubectlOutput = <-kubectlOutputChan:
	}
	require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned")
	t.Logf("first kubectl command took %s", time.Since(start).String())

	// Run kubectl again, which should work with no browser interaction.
	kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
	kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...)
	start = time.Now()
	kubectlOutput2, err := kubectlCmd2.CombinedOutput()
	require.NoError(t, err)
	require.Greaterf(t, len(bytes.Split(kubectlOutput2, []byte("\n"))), 2, "expected some namespaces to be returned again")
	t.Logf("second kubectl command took %s", time.Since(start).String())
}
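Note: spawnTestGoroutine and readAndExpectEmpty are test helpers referenced above but not shown in this diff. The following is only an illustrative sketch of what such helpers could look like, not the repository's actual implementation; the names with the "Sketch" suffix are assumptions.

// Hypothetical sketch of the helpers used above; the real ones may differ.
package library_sketch

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"testing"
)

// spawnTestGoroutineSketch runs f in the background and reports its error when the
// test cleans up, so background stderr/stdout readers can fail the test.
// Cleanup blocks until f returns, which the test arranges by closing the pipes.
func spawnTestGoroutineSketch(t *testing.T, f func() error) {
	t.Helper()
	errCh := make(chan error, 1)
	go func() { errCh <- f() }()
	t.Cleanup(func() {
		if err := <-errCh; err != nil {
			t.Errorf("background goroutine failed: %v", err)
		}
	})
}

// readAndExpectEmptySketch asserts that no further output appears on the reader.
func readAndExpectEmptySketch(r *bufio.Reader) error {
	rest, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	if len(rest) != 0 {
		return fmt.Errorf("expected no more output, got %q", string(rest))
	}
	return nil
}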
@ -165,8 +165,12 @@ func TestSupervisorLogin(t *testing.T) {
	authcode := callback.URL.Query().Get("code")
	require.NotEmpty(t, authcode)

	// Call the token endpoint to get tokens.
	tokenResponse, err := downstreamOAuth2Config.Exchange(oidcHTTPClientContext, authcode, pkceParam.Verifier())
	// Call the token endpoint to get tokens. Give the Supervisor a couple of seconds to wire up its signing key.
	var tokenResponse *oauth2.Token
	assert.Eventually(t, func() bool {
		tokenResponse, err = downstreamOAuth2Config.Exchange(oidcHTTPClientContext, authcode, pkceParam.Verifier())
		return err == nil
	}, time.Second*5, time.Second*1)
	require.NoError(t, err)

	expectedIDTokenClaims := []string{"iss", "exp", "sub", "aud", "auth_time", "iat", "jti", "nonce", "rat", "username"}
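Note: the expectedIDTokenClaims list above is compared against the claims actually present in the downstream ID token. A minimal, hedged sketch of how such a check could be written (illustrative only; the test's real assertion code may differ):

package library_sketch

import (
	"encoding/base64"
	"encoding/json"
	"sort"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// requireClaimKeys decodes the payload segment of a compact JWT (without verifying
// its signature) and asserts that its claim names exactly match the expected list.
func requireClaimKeys(t *testing.T, idToken string, expected []string) {
	t.Helper()
	parts := strings.Split(idToken, ".")
	require.Len(t, parts, 3, "expected a compact JWS with three segments")

	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	require.NoError(t, err)

	var claims map[string]interface{}
	require.NoError(t, json.Unmarshal(payload, &claims))

	actual := make([]string, 0, len(claims))
	for k := range claims {
		actual = append(actual, k)
	}
	sort.Strings(actual)

	expectedSorted := append([]string(nil), expected...)
	sort.Strings(expectedSorted)
	require.Equal(t, expectedSorted, actual)
}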
45
test/library/cli.go
Normal file
45
test/library/cli.go
Normal file
@ -0,0 +1,45 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package library

import (
	"io/ioutil"
	"os/exec"
	"path/filepath"
	"sync"
	"testing"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/internal/testutil"
)

//nolint: gochecknoglobals
var pinnipedCLIBinaryCache struct {
	buf   []byte
	mutex sync.Mutex
}

// PinnipedCLIPath returns the path to the Pinniped CLI binary, built on demand and cached between tests.
func PinnipedCLIPath(t *testing.T) string {
	t.Helper()
	pinnipedCLIBinaryCache.mutex.Lock()
	defer pinnipedCLIBinaryCache.mutex.Unlock()
	path := filepath.Join(testutil.TempDir(t), "pinniped")
	if pinnipedCLIBinaryCache.buf != nil {
		t.Log("using previously built pinniped CLI binary")
		require.NoError(t, ioutil.WriteFile(path, pinnipedCLIBinaryCache.buf, 0500))
		return path
	}

	t.Log("building pinniped CLI binary")
	output, err := exec.Command("go", "build", "-o", path, "go.pinniped.dev/cmd/pinniped").CombinedOutput()
	require.NoError(t, err, string(output))

	// Fill our cache so we don't have to do this again.
	pinnipedCLIBinaryCache.buf, err = ioutil.ReadFile(path)
	require.NoError(t, err, string(output))

	return path
}
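Note: a short usage sketch for PinnipedCLIPath. The runPinnipedCLI helper used in the test above is not shown in this diff, so this wrapper is hypothetical and only illustrates one way a test could invoke the cached binary.

package library_sketch

import (
	"bytes"
	"os/exec"
	"testing"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/test/library"
)

// runCLI builds (or reuses) the pinniped CLI binary and runs it with the given
// arguments, returning captured stdout and stderr.
func runCLI(t *testing.T, args ...string) (string, string) {
	t.Helper()
	cliPath := library.PinnipedCLIPath(t)

	var stdout, stderr bytes.Buffer
	cmd := exec.Command(cliPath, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	require.NoError(t, cmd.Run(), stderr.String())
	return stdout.String(), stderr.String()
}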
@ -15,8 +15,10 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
@ -49,7 +51,10 @@ func NewClientset(t *testing.T) kubernetes.Interface {

func NewClientsetForKubeConfig(t *testing.T, kubeConfig string) kubernetes.Interface {
	t.Helper()
	return newClientsetWithConfig(t, NewRestConfigFromKubeconfig(t, kubeConfig))
}

func NewRestConfigFromKubeconfig(t *testing.T, kubeConfig string) *rest.Config {
	kubeConfigFile, err := ioutil.TempFile("", "pinniped-cli-test-*")
	require.NoError(t, err)
	defer os.Remove(kubeConfigFile.Name())
@ -59,8 +64,7 @@ func NewClientsetForKubeConfig(t *testing.T, kubeConfig string) kubernetes.Inter

	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigFile.Name())
	require.NoError(t, err)

	return newClientsetWithConfig(t, restConfig)
	return restConfig
}

func NewClientsetWithCertAndKey(t *testing.T, clientCertificateData, clientKeyData string) kubernetes.Interface {
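Note: a hedged sketch of why NewRestConfigFromKubeconfig was split out of NewClientsetForKubeConfig: callers can now inspect the rest.Config itself (for example, its exec credential plugin settings) or keep building a clientset, from the same kubeconfig. The function name below is hypothetical.

package library_sketch

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"go.pinniped.dev/test/library"
)

func inspectKubeconfigSketch(t *testing.T, kubeconfigYAML string) {
	t.Helper()

	// Inspect the rest.Config directly, e.g. to assert on the exec credential plugin.
	restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML)
	require.NotNil(t, restConfig.ExecProvider)

	// Or build a clientset from the same kubeconfig, as callers did before the split.
	client := library.NewClientsetForKubeConfig(t, kubeconfigYAML)
	_, err := client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
	require.NoError(t, err)
}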
@ -164,13 +168,38 @@ func CreateTestWebhookAuthenticator(ctx context.Context, t *testing.T) corev1.Ty
	}
}

// CreateTestJWTAuthenticator creates and returns a test JWTAuthenticator in
// CreateTestJWTAuthenticatorForCLIUpstream creates and returns a test JWTAuthenticator in
// $PINNIPED_TEST_CONCIERGE_NAMESPACE, which will be automatically deleted at the end of the current
// test's lifetime. It returns a corev1.TypedLocalObjectReference which describes the test JWT
// authenticator within the test namespace.
//
// CreateTestJWTAuthenticator gets the OIDC issuer info from IntegrationEnv().CLITestUpstream.
func CreateTestJWTAuthenticator(ctx context.Context, t *testing.T, usernameClaim string) corev1.TypedLocalObjectReference {
// CreateTestJWTAuthenticatorForCLIUpstream gets the OIDC issuer info from IntegrationEnv().CLITestUpstream.
func CreateTestJWTAuthenticatorForCLIUpstream(ctx context.Context, t *testing.T) corev1.TypedLocalObjectReference {
	t.Helper()
	testEnv := IntegrationEnv(t)
	spec := auth1alpha1.JWTAuthenticatorSpec{
		Issuer:   testEnv.CLITestUpstream.Issuer,
		Audience: testEnv.CLITestUpstream.ClientID,
		// The default UsernameClaim is "username" but the upstreams that we use for
		// integration tests won't necessarily have that claim, so use "sub" here.
		UsernameClaim: "sub",
	}
	// If the test upstream does not have a CA bundle specified, then don't configure one in the
	// JWTAuthenticator. Leaving TLSSpec set to nil will result in OIDC discovery using the OS's root
	// CA store.
	if testEnv.CLITestUpstream.CABundle != "" {
		spec.TLS = &auth1alpha1.TLSSpec{
			CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(testEnv.CLITestUpstream.CABundle)),
		}
	}
	return CreateTestJWTAuthenticator(ctx, t, spec)
}

// CreateTestJWTAuthenticator creates and returns a test JWTAuthenticator in
// $PINNIPED_TEST_CONCIERGE_NAMESPACE, which will be automatically deleted at the end of the current
// test's lifetime. It returns a corev1.TypedLocalObjectReference which describes the test JWT
// authenticator within the test namespace.
func CreateTestJWTAuthenticator(ctx context.Context, t *testing.T, spec auth1alpha1.JWTAuthenticatorSpec) corev1.TypedLocalObjectReference {
	t.Helper()
	testEnv := IntegrationEnv(t)

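Note: with the spec now supplied by the caller, a test that needs a non-default JWTAuthenticator (for example, a different username claim, which is the focus of this branch) can build its own spec. A hedged fragment, assuming the same auth1alpha1 import alias and testEnv value used in this file; "email" is just an example claim, not taken from this commit.

	// Hypothetical fragment inside an integration test in this package.
	spec := auth1alpha1.JWTAuthenticatorSpec{
		Issuer:        testEnv.CLITestUpstream.Issuer,
		Audience:      testEnv.CLITestUpstream.ClientID,
		UsernameClaim: "email", // exercise a non-default username claim
	}
	authenticatorRef := CreateTestJWTAuthenticator(ctx, t, spec)
	// e.g., pass authenticatorRef.Name to `pinniped get kubeconfig --concierge-authenticator-name`.
	_ = authenticatorRef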
@ -180,24 +209,9 @@ func CreateTestJWTAuthenticator(ctx context.Context, t *testing.T, usernameClaim
	createContext, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// If the test upstream does not have a CA bundle specified, then don't configure one in the
	// JWTAuthenticator. Leaving TLSSpec set to nil will result in OIDC discovery using the OS's root
	// CA store.
	tlsSpec := &auth1alpha1.TLSSpec{
		CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(testEnv.CLITestUpstream.CABundle)),
	}
	if testEnv.CLITestUpstream.CABundle == "" {
		tlsSpec = nil
	}

	jwtAuthenticator, err := jwtAuthenticators.Create(createContext, &auth1alpha1.JWTAuthenticator{
		ObjectMeta: testObjectMeta(t, "jwt-authenticator"),
		Spec: auth1alpha1.JWTAuthenticatorSpec{
			Issuer:        testEnv.CLITestUpstream.Issuer,
			Audience:      testEnv.CLITestUpstream.ClientID,
			TLS:           tlsSpec,
			UsernameClaim: usernameClaim,
		},
		Spec: spec,
	}, metav1.CreateOptions{})
	require.NoError(t, err, "could not create test JWTAuthenticator")
	t.Logf("created test JWTAuthenticator %s/%s", jwtAuthenticator.Namespace, jwtAuthenticator.Name)
@ -232,7 +246,7 @@ func CreateTestOIDCProvider(ctx context.Context, t *testing.T, issuer string, ce
	defer cancel()

	if issuer == "" {
		issuer = randomIssuer(t)
		issuer = fmt.Sprintf("http://test-issuer-%s.pinniped.dev", RandHex(t, 8))
	}

	opcs := NewSupervisorClientset(t).ConfigV1alpha1().OIDCProviders(testEnv.SupervisorNamespace)
@ -266,21 +280,22 @@ func CreateTestOIDCProvider(ctx context.Context, t *testing.T, issuer string, ce

	// Wait for the OIDCProvider to enter the expected phase (or time out).
	var result *configv1alpha1.OIDCProvider
	require.Eventuallyf(t, func() bool {
	assert.Eventuallyf(t, func() bool {
		var err error
		result, err = opcs.Get(ctx, opc.Name, metav1.GetOptions{})
		require.NoError(t, err)
		return result.Status.Status == expectStatus
	}, 60*time.Second, 1*time.Second, "expected the UpstreamOIDCProvider to go into phase %s", expectStatus)
	}, 60*time.Second, 1*time.Second, "expected the OIDCProvider to have status %q", expectStatus)
	require.Equal(t, expectStatus, result.Status.Status)

	return opc
}

func randomIssuer(t *testing.T) string {
	var buf [8]byte
	_, err := io.ReadFull(rand.Reader, buf[:])
func RandHex(t *testing.T, numBytes int) string {
	buf := make([]byte, numBytes)
	_, err := io.ReadFull(rand.Reader, buf)
	require.NoError(t, err)
	return fmt.Sprintf("http://test-issuer-%s.pinniped.dev", hex.EncodeToString(buf[:]))
	return hex.EncodeToString(buf)
}

func CreateTestSecret(t *testing.T, namespace string, baseName string, secretType string, stringData map[string]string) *corev1.Secret {
@ -297,6 +312,7 @@ func CreateTestSecret(t *testing.T, namespace string, baseName string, secretTyp
	require.NoError(t, err)

	t.Cleanup(func() {
		t.Logf("cleaning up test Secret %s/%s", created.Namespace, created.Name)
		err := client.CoreV1().Secrets(namespace).Delete(context.Background(), created.Name, metav1.DeleteOptions{})
		require.NoError(t, err)
	})
@ -309,7 +325,7 @@ func CreateClientCredsSecret(t *testing.T, clientID string, clientSecret string)
	env := IntegrationEnv(t)
	return CreateTestSecret(t,
		env.SupervisorNamespace,
		"test-client-creds",
		"client-creds",
		"secrets.pinniped.dev/oidc-client",
		map[string]string{
			"clientID": clientID,
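Note: a hedged usage sketch for CreateClientCredsSecret, asserting only what the diff above shows (the secret type string and the clientID data key). The function name and the assertion on the returned Secret's Type field are assumptions.

package library_sketch

import (
	"testing"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"

	"go.pinniped.dev/test/library"
)

func checkClientCredsSecretSketch(t *testing.T) {
	secret := library.CreateClientCredsSecret(t, "test-client-id", "test-client-secret")
	require.Equal(t, corev1.SecretType("secrets.pinniped.dev/oidc-client"), secret.Type)
	require.Equal(t, "test-client-id", string(secret.Data["clientID"]))
}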
@ -336,6 +352,7 @@ func CreateTestUpstreamOIDCProvider(t *testing.T, spec idpv1alpha1.UpstreamOIDCP

	// Always clean this up after this point.
	t.Cleanup(func() {
		t.Logf("cleaning up test UpstreamOIDCProvider %s/%s", created.Namespace, created.Name)
		err := upstreams.Delete(context.Background(), created.Name, metav1.DeleteOptions{})
		require.NoError(t, err)
	})
@ -352,6 +369,31 @@ func CreateTestUpstreamOIDCProvider(t *testing.T, spec idpv1alpha1.UpstreamOIDCP
	return result
}

func CreateTestClusterRoleBinding(t *testing.T, subject rbacv1.Subject, roleRef rbacv1.RoleRef) *rbacv1.ClusterRoleBinding {
	t.Helper()
	client := NewClientset(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Create the ClusterRoleBinding using GenerateName to get a random name.
	clusterRoles := client.RbacV1().ClusterRoleBindings()

	created, err := clusterRoles.Create(ctx, &rbacv1.ClusterRoleBinding{
		ObjectMeta: testObjectMeta(t, "cluster-role"),
		Subjects:   []rbacv1.Subject{subject},
		RoleRef:    roleRef,
	}, metav1.CreateOptions{})
	require.NoError(t, err)
	t.Logf("created test ClusterRoleBinding %s", created.Name)

	t.Cleanup(func() {
		t.Logf("cleaning up test ClusterRoleBinding %s", created.Name)
		err := clusterRoles.Delete(context.Background(), created.Name, metav1.DeleteOptions{})
		require.NoError(t, err)
	})
	return created
}

func testObjectMeta(t *testing.T, baseName string) metav1.ObjectMeta {
	return metav1.ObjectMeta{
		GenerateName: fmt.Sprintf("test-%s-", baseName),
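Note: a hedged usage sketch for CreateTestClusterRoleBinding, e.g. granting the username expected from the authenticator enough access for the kubectl calls in the test above. The wrapper function name and the choice of the "view" ClusterRole are assumptions; the rbacv1 constants are from k8s.io/api/rbac/v1.

package library_sketch

import (
	"testing"

	rbacv1 "k8s.io/api/rbac/v1"

	"go.pinniped.dev/test/library"
)

func grantViewAccessSketch(t *testing.T, username string) {
	library.CreateTestClusterRoleBinding(t,
		rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: username},
		rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
	)
}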
@ -59,6 +59,14 @@ type TestOIDCUpstream struct {
	Password string `json:"password"`
}

// ProxyEnv returns a set of environment variable strings (e.g., to combine with os.Environ()) which set up the configured test HTTP proxy.
func (e *TestEnv) ProxyEnv() []string {
	if e.Proxy == "" {
		return nil
	}
	return []string{"http_proxy=" + e.Proxy, "https_proxy=" + e.Proxy, "no_proxy=127.0.0.1"}
}

// IntegrationEnv gets the integration test environment from OS environment variables. This
// method also implies SkipUnlessIntegration().
func IntegrationEnv(t *testing.T) *TestEnv {