Merge remote-tracking branch 'upstream/main' into impersonation-proxy

Signed-off-by: Andrew Keesler <akeesler@vmware.com>
Andrew Keesler 2021-03-18 10:36:28 -04:00
commit 05a188d4cd
No known key found for this signature in database
GPG Key ID: 27CE0444346F9413
19 changed files with 895 additions and 100 deletions


@@ -0,0 +1,43 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"k8s.io/client-go/tools/clientcmd"
conciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/kubeclient"
)
// getConciergeClientsetFunc is a function that can return a clientset for the Concierge API given a
// clientConfig and the apiGroupSuffix with which the API is running.
type getConciergeClientsetFunc func(clientConfig clientcmd.ClientConfig, apiGroupSuffix string) (conciergeclientset.Interface, error)
// getRealConciergeClientset returns a real implementation of a conciergeclientset.Interface.
func getRealConciergeClientset(clientConfig clientcmd.ClientConfig, apiGroupSuffix string) (conciergeclientset.Interface, error) {
restConfig, err := clientConfig.ClientConfig()
if err != nil {
return nil, err
}
client, err := kubeclient.New(
kubeclient.WithConfig(restConfig),
kubeclient.WithMiddleware(groupsuffix.New(apiGroupSuffix)),
)
if err != nil {
return nil, err
}
return client.PinnipedConcierge, nil
}
// newClientConfig returns a clientcmd.ClientConfig given an optional kubeconfig path override and
// an optional context override.
func newClientConfig(kubeconfigPathOverride string, currentContextName string) clientcmd.ClientConfig {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.ExplicitPath = kubeconfigPathOverride
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{
CurrentContext: currentContextName,
})
return clientConfig
}
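
Aside: a minimal sketch (not part of this diff; the helper name below is hypothetical) of why the clientset getter is pulled out as the named type getConciergeClientsetFunc — commands take the getter as a parameter, so tests can inject a canned clientset instead of building one from a real kubeconfig, while production wiring passes getRealConciergeClientset.

// fakeConciergeClientsetGetter is a hypothetical test helper that satisfies
// getConciergeClientsetFunc without reading any kubeconfig.
func fakeConciergeClientsetGetter(fake conciergeclientset.Interface) getConciergeClientsetFunc {
	return func(_ clientcmd.ClientConfig, _ string) (conciergeclientset.Interface, error) {
		// ignore the client config entirely; the fake needs no REST config
		return fake, nil
	}
}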


@@ -31,33 +31,19 @@ import (
configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
conciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/kubeclient"
)
type kubeconfigDeps struct {
getPathToSelf func() (string, error)
getClientset func(clientConfig clientcmd.ClientConfig, apiGroupSuffix string) (conciergeclientset.Interface, error)
getClientset getConciergeClientsetFunc
log logr.Logger
}
func kubeconfigRealDeps() kubeconfigDeps {
return kubeconfigDeps{
getPathToSelf: os.Executable,
getClientset: func(clientConfig clientcmd.ClientConfig, apiGroupSuffix string) (conciergeclientset.Interface, error) {
restConfig, err := clientConfig.ClientConfig()
if err != nil {
return nil, err
}
client, err := kubeclient.New(
kubeclient.WithConfig(restConfig),
kubeclient.WithMiddleware(groupsuffix.New(apiGroupSuffix)),
)
if err != nil {
return nil, err
}
return client.PinnipedConcierge, nil
},
log: stdr.New(log.New(os.Stderr, "", 0)),
getClientset: getRealConciergeClientset,
log: stdr.New(log.New(os.Stderr, "", 0)),
}
}
@@ -562,15 +548,6 @@ func lookupAuthenticator(clientset conciergeclientset.Interface, authType, authN
return results[0], nil
}
func newClientConfig(kubeconfigPathOverride string, currentContextName string) clientcmd.ClientConfig {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.ExplicitPath = kubeconfigPathOverride
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{
CurrentContext: currentContextName,
})
return clientConfig
}
func writeConfigAsYAML(out io.Writer, config clientcmdapi.Config) error {
output, err := clientcmd.Write(config)
if err != nil {

cmd/pinniped/cmd/whoami.go (new file, 298 additions)

@@ -0,0 +1,298 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"context"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/clientcmd"
identityapi "go.pinniped.dev/generated/latest/apis/concierge/identity"
identityv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/identity/v1alpha1"
loginapi "go.pinniped.dev/generated/latest/apis/concierge/login"
loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/plog"
)
//nolint: gochecknoinits
func init() {
rootCmd.AddCommand(newWhoamiCommand(getRealConciergeClientset))
}
type whoamiFlags struct {
outputFormat string // e.g., yaml, json, text
kubeconfigPath string
kubeconfigContextOverride string
apiGroupSuffix string
}
type clusterInfo struct {
name string
url string
}
func newWhoamiCommand(getClientset getConciergeClientsetFunc) *cobra.Command {
cmd := &cobra.Command{
Args: cobra.NoArgs, // do not accept positional arguments for this command
Use: "whoami",
Short: "Print information about the current user",
SilenceUsage: true,
}
flags := &whoamiFlags{}
// flags
f := cmd.Flags()
f.StringVarP(&flags.outputFormat, "output", "o", "text", "Output format (e.g., 'yaml', 'json', 'text')")
f.StringVar(&flags.kubeconfigPath, "kubeconfig", os.Getenv("KUBECONFIG"), "Path to kubeconfig file")
f.StringVar(&flags.kubeconfigContextOverride, "kubeconfig-context", "", "Kubeconfig context name (default: current active context)")
f.StringVar(&flags.apiGroupSuffix, "api-group-suffix", groupsuffix.PinnipedDefaultSuffix, "Concierge API group suffix")
cmd.RunE = func(cmd *cobra.Command, _ []string) error {
return runWhoami(cmd.OutOrStdout(), getClientset, flags)
}
return cmd
}
func runWhoami(output io.Writer, getClientset getConciergeClientsetFunc, flags *whoamiFlags) error {
clientConfig := newClientConfig(flags.kubeconfigPath, flags.kubeconfigContextOverride)
clientset, err := getClientset(clientConfig, flags.apiGroupSuffix)
if err != nil {
return fmt.Errorf("could not configure Kubernetes client: %w", err)
}
clusterInfo, err := getCurrentCluster(clientConfig)
if err != nil {
return fmt.Errorf("could not get current cluster info: %w", err)
}
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
defer cancelFunc()
whoAmI, err := clientset.IdentityV1alpha1().WhoAmIRequests().Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
if err != nil {
hint := ""
if errors.IsNotFound(err) {
hint = " (is the Pinniped WhoAmI API running and healthy?)"
}
return fmt.Errorf("could not complete WhoAmIRequest%s: %w", hint, err)
}
if err := writeWhoamiOutput(output, flags, clusterInfo, whoAmI); err != nil {
return fmt.Errorf("could not write output: %w", err)
}
return nil
}
func getCurrentCluster(clientConfig clientcmd.ClientConfig) (*clusterInfo, error) {
currentKubeconfig, err := clientConfig.RawConfig()
if err != nil {
return nil, err
}
unknownClusterInfo := &clusterInfo{name: "???", url: "???"}
context, ok := currentKubeconfig.Contexts[currentKubeconfig.CurrentContext]
if !ok {
return unknownClusterInfo, nil
}
cluster, ok := currentKubeconfig.Clusters[context.Cluster]
if !ok {
return unknownClusterInfo, nil
}
return &clusterInfo{name: context.Cluster, url: cluster.Server}, nil
}
func writeWhoamiOutput(output io.Writer, flags *whoamiFlags, cInfo *clusterInfo, whoAmI *identityv1alpha1.WhoAmIRequest) error {
switch flags.outputFormat {
case "text":
return writeWhoamiOutputText(output, cInfo, whoAmI)
case "json":
return writeWhoamiOutputJSON(output, flags.apiGroupSuffix, whoAmI)
case "yaml":
return writeWhoamiOutputYAML(output, flags.apiGroupSuffix, whoAmI)
default:
return fmt.Errorf("unknown output format: %q", flags.outputFormat)
}
}
func writeWhoamiOutputText(output io.Writer, clusterInfo *clusterInfo, whoAmI *identityv1alpha1.WhoAmIRequest) error {
fmt.Fprint(output, here.Docf(`
Current cluster info:
Name: %s
URL: %s
Current user info:
Username: %s
Groups: %s
`, clusterInfo.name, clusterInfo.url, whoAmI.Status.KubernetesUserInfo.User.Username, prettyStrings(whoAmI.Status.KubernetesUserInfo.User.Groups)))
return nil
}
func writeWhoamiOutputJSON(output io.Writer, apiGroupSuffix string, whoAmI *identityv1alpha1.WhoAmIRequest) error {
return serialize(output, apiGroupSuffix, whoAmI, runtime.ContentTypeJSON)
}
func writeWhoamiOutputYAML(output io.Writer, apiGroupSuffix string, whoAmI *identityv1alpha1.WhoAmIRequest) error {
return serialize(output, apiGroupSuffix, whoAmI, runtime.ContentTypeYAML)
}
func serialize(output io.Writer, apiGroupSuffix string, whoAmI *identityv1alpha1.WhoAmIRequest, contentType string) error {
scheme, _, identityGV := conciergeschemeNew(apiGroupSuffix)
codecs := serializer.NewCodecFactory(scheme)
respInfo, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), contentType)
if !ok {
return fmt.Errorf("unknown content type: %q", contentType)
}
// I have seen the pretty serializer be nil before, so this will hopefully protect against that
// corner case.
serializer := respInfo.PrettySerializer
if serializer == nil {
serializer = respInfo.Serializer
}
// Ensure that these fields are set so that the JSON/YAML output tells the full story.
whoAmI.APIVersion = identityGV.String()
whoAmI.Kind = "WhoAmIRequest"
return serializer.Encode(whoAmI, output)
}
func prettyStrings(ss []string) string {
b := &strings.Builder{}
for i, s := range ss {
if i != 0 {
b.WriteString(", ")
}
b.WriteString(s)
}
return b.String()
}
// conciergeschemeNew is a temporary private function to stand in place for
// "go.pinniped.dev/internal/concierge/scheme".New until the latter function is merged to main.
func conciergeschemeNew(apiGroupSuffix string) (_ *runtime.Scheme, login, identity schema.GroupVersion) {
// standard setup of the server-side scheme
scheme := runtime.NewScheme()
// add the options to empty v1
metav1.AddToGroupVersion(scheme, metav1.Unversioned)
// nothing fancy is required if using the standard group suffix
if apiGroupSuffix == groupsuffix.PinnipedDefaultSuffix {
schemeBuilder := runtime.NewSchemeBuilder(
loginv1alpha1.AddToScheme,
loginapi.AddToScheme,
identityv1alpha1.AddToScheme,
identityapi.AddToScheme,
)
utilruntime.Must(schemeBuilder.AddToScheme(scheme))
return scheme, loginv1alpha1.SchemeGroupVersion, identityv1alpha1.SchemeGroupVersion
}
loginConciergeGroupData, identityConciergeGroupData := groupsuffix.ConciergeAggregatedGroups(apiGroupSuffix)
addToSchemeAtNewGroup(scheme, loginv1alpha1.GroupName, loginConciergeGroupData.Group, loginv1alpha1.AddToScheme, loginapi.AddToScheme)
addToSchemeAtNewGroup(scheme, identityv1alpha1.GroupName, identityConciergeGroupData.Group, identityv1alpha1.AddToScheme, identityapi.AddToScheme)
// manually register conversions and defaulting into the correct scheme since we cannot directly call AddToScheme
schemeBuilder := runtime.NewSchemeBuilder(
loginv1alpha1.RegisterConversions,
loginv1alpha1.RegisterDefaults,
identityv1alpha1.RegisterConversions,
identityv1alpha1.RegisterDefaults,
)
utilruntime.Must(schemeBuilder.AddToScheme(scheme))
// we do not want to return errors from the scheme and instead would prefer to defer
// to the REST storage layer for consistency. The simplest way to do this is to force
// a cache miss from the authenticator cache. Kube API groups are validated via the
// IsDNS1123Subdomain func, so we can easily create a group that is guaranteed never
// to be in the authenticator cache. Add a timestamp just to be extra sure.
const authenticatorCacheMissPrefix = "_INVALID_API_GROUP_"
authenticatorCacheMiss := authenticatorCacheMissPrefix + time.Now().UTC().String()
// we do not have any defaulting functions for *loginv1alpha1.TokenCredentialRequest
// today, but we may have some in the future. Calling AddTypeDefaultingFunc overwrites
// any previously registered defaulting function. Thus to make sure that we catch
// a situation where we add a defaulting func, we attempt to call it here with a nil
// *loginv1alpha1.TokenCredentialRequest. This will do nothing when there is no
// defaulting func registered, but it will almost certainly panic if one is added.
scheme.Default((*loginv1alpha1.TokenCredentialRequest)(nil))
// on incoming requests, restore the authenticator API group to the standard group
// note that we are responsible for duplicating this logic for every external API version
scheme.AddTypeDefaultingFunc(&loginv1alpha1.TokenCredentialRequest{}, func(obj interface{}) {
credentialRequest := obj.(*loginv1alpha1.TokenCredentialRequest)
if credentialRequest.Spec.Authenticator.APIGroup == nil {
// force a cache miss because this is an invalid request
plog.Debug("invalid token credential request, nil group", "authenticator", credentialRequest.Spec.Authenticator)
credentialRequest.Spec.Authenticator.APIGroup = &authenticatorCacheMiss
return
}
restoredGroup, ok := groupsuffix.Unreplace(*credentialRequest.Spec.Authenticator.APIGroup, apiGroupSuffix)
if !ok {
// force a cache miss because this is an invalid request
plog.Debug("invalid token credential request, wrong group", "authenticator", credentialRequest.Spec.Authenticator)
credentialRequest.Spec.Authenticator.APIGroup = &authenticatorCacheMiss
return
}
credentialRequest.Spec.Authenticator.APIGroup = &restoredGroup
})
return scheme, schema.GroupVersion(loginConciergeGroupData), schema.GroupVersion(identityConciergeGroupData)
}
func addToSchemeAtNewGroup(scheme *runtime.Scheme, oldGroup, newGroup string, funcs ...func(*runtime.Scheme) error) {
// we need a temporary place to register our types to avoid double registering them
tmpScheme := runtime.NewScheme()
schemeBuilder := runtime.NewSchemeBuilder(funcs...)
utilruntime.Must(schemeBuilder.AddToScheme(tmpScheme))
for gvk := range tmpScheme.AllKnownTypes() {
if gvk.GroupVersion() == metav1.Unversioned {
continue // metav1.AddToGroupVersion registers types outside of our aggregated API group that we need to ignore
}
if gvk.Group != oldGroup {
panic(fmt.Errorf("tmp scheme has type not in the old aggregated API group %s: %s", oldGroup, gvk)) // programmer error
}
obj, err := tmpScheme.New(gvk)
if err != nil {
panic(err) // programmer error, scheme internal code is broken
}
newGVK := schema.GroupVersionKind{
Group: newGroup,
Version: gvk.Version,
Kind: gvk.Kind,
}
// register the existing type but with the new group in the correct scheme
scheme.AddKnownTypeWithName(newGVK, obj)
}
}
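
Aside: an illustrative sketch (the function name is hypothetical and "tuna.io" is a made-up suffix; it reuses only imports already present in this file) of the effect of building the scheme with a custom API group suffix — mirroring serialize above, the encoded WhoAmIRequest reports the renamed apiVersion.

// exampleEncodeWithCustomSuffix is an illustrative sketch, not part of this commit.
func exampleEncodeWithCustomSuffix() error {
	scheme, _, identityGV := conciergeschemeNew("tuna.io")
	codecs := serializer.NewCodecFactory(scheme)
	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		return fmt.Errorf("no JSON serializer registered for the scheme")
	}
	whoAmI := &identityv1alpha1.WhoAmIRequest{}
	// with the custom suffix, this is "identity.concierge.tuna.io/v1alpha1"
	whoAmI.APIVersion = identityGV.String()
	whoAmI.Kind = "WhoAmIRequest"
	return info.Serializer.Encode(whoAmI, os.Stdout)
}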


@@ -0,0 +1,273 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cmd
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
kubetesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/clientcmd"
identityv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/identity/v1alpha1"
conciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
fakeconciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/here"
)
func TestWhoami(t *testing.T) {
tests := []struct {
name string
args []string
groupsOverride []string
gettingClientsetErr error
callingAPIErr error
wantError bool
wantStdout, wantStderr string
}{
{
name: "help flag",
args: []string{"--help"},
wantStdout: here.Doc(`
Print information about the current user
Usage:
whoami [flags]
Flags:
--api-group-suffix string Concierge API group suffix (default "pinniped.dev")
-h, --help help for whoami
--kubeconfig string Path to kubeconfig file
--kubeconfig-context string Kubeconfig context name (default: current active context)
-o, --output string Output format (e.g., 'yaml', 'json', 'text') (default "text")
`),
},
{
name: "text output",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml"},
wantStdout: here.Doc(`
Current cluster info:
Name: kind-kind
URL: https://fake-server-url-value
Current user info:
Username: some-username
Groups: some-group-0, some-group-1
`),
},
{
name: "text output with long output flag",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "--output", "text"},
wantStdout: here.Doc(`
Current cluster info:
Name: kind-kind
URL: https://fake-server-url-value
Current user info:
Username: some-username
Groups: some-group-0, some-group-1
`),
},
{
name: "text output with 1 group",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "--output", "text"},
groupsOverride: []string{"some-group-0"},
wantStdout: here.Doc(`
Current cluster info:
Name: kind-kind
URL: https://fake-server-url-value
Current user info:
Username: some-username
Groups: some-group-0
`),
},
{
name: "text output with no groups",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "--output", "text"},
groupsOverride: []string{},
wantStdout: here.Doc(`
Current cluster info:
Name: kind-kind
URL: https://fake-server-url-value
Current user info:
Username: some-username
Groups:
`),
},
{
name: "json output",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "-o", "json"},
wantStdout: here.Doc(`
{
"kind": "WhoAmIRequest",
"apiVersion": "identity.concierge.pinniped.dev/v1alpha1",
"metadata": {
"creationTimestamp": null
},
"spec": {},
"status": {
"kubernetesUserInfo": {
"user": {
"username": "some-username",
"groups": [
"some-group-0",
"some-group-1"
]
}
}
}
}`),
},
{
name: "json output with api group suffix flag",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "-o", "json", "--api-group-suffix", "tuna.io"},
wantStdout: here.Doc(`
{
"kind": "WhoAmIRequest",
"apiVersion": "identity.concierge.tuna.io/v1alpha1",
"metadata": {
"creationTimestamp": null
},
"spec": {},
"status": {
"kubernetesUserInfo": {
"user": {
"username": "some-username",
"groups": [
"some-group-0",
"some-group-1"
]
}
}
}
}`),
},
{
name: "yaml output",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "-o", "yaml"},
wantStdout: here.Doc(`
apiVersion: identity.concierge.pinniped.dev/v1alpha1
kind: WhoAmIRequest
metadata:
creationTimestamp: null
spec: {}
status:
kubernetesUserInfo:
user:
groups:
- some-group-0
- some-group-1
username: some-username
`),
},
{
name: "yaml output with api group suffix",
args: []string{"--kubeconfig", "testdata/kubeconfig.yaml", "-o", "yaml", "--api-group-suffix", "tuna.io"},
wantStdout: here.Doc(`
apiVersion: identity.concierge.tuna.io/v1alpha1
kind: WhoAmIRequest
metadata:
creationTimestamp: null
spec: {}
status:
kubernetesUserInfo:
user:
groups:
- some-group-0
- some-group-1
username: some-username
`),
},
{
name: "extra args",
args: []string{"extra-arg"},
wantError: true,
wantStderr: "Error: unknown command \"extra-arg\" for \"whoami\"\n",
},
{
name: "cannot get cluster info",
args: []string{"--kubeconfig", "this-file-does-not-exist"},
wantError: true,
wantStderr: "Error: could not get current cluster info: stat this-file-does-not-exist: no such file or directory\n",
},
{
name: "getting clientset fails",
gettingClientsetErr: constable.Error("some get clientset error"),
wantError: true,
wantStderr: "Error: could not configure Kubernetes client: some get clientset error\n",
},
{
name: "calling API fails",
callingAPIErr: constable.Error("some API error"),
wantError: true,
wantStderr: "Error: could not complete WhoAmIRequest: some API error\n",
},
{
name: "calling API fails because WhoAmI API is not installed",
callingAPIErr: errors.NewNotFound(identityv1alpha1.SchemeGroupVersion.WithResource("whoamirequests").GroupResource(), "whatever"),
wantError: true,
wantStderr: "Error: could not complete WhoAmIRequest (is the Pinniped WhoAmI API running and healthy?): whoamirequests.identity.concierge.pinniped.dev \"whatever\" not found\n",
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
getClientset := func(clientConfig clientcmd.ClientConfig, apiGroupSuffix string) (conciergeclientset.Interface, error) {
if test.gettingClientsetErr != nil {
return nil, test.gettingClientsetErr
}
clientset := fakeconciergeclientset.NewSimpleClientset()
clientset.PrependReactor("create", "whoamirequests", func(_ kubetesting.Action) (bool, runtime.Object, error) {
if test.callingAPIErr != nil {
return true, nil, test.callingAPIErr
}
groups := []string{"some-group-0", "some-group-1"}
if test.groupsOverride != nil {
groups = test.groupsOverride
}
return true, &identityv1alpha1.WhoAmIRequest{
Status: identityv1alpha1.WhoAmIRequestStatus{
KubernetesUserInfo: identityv1alpha1.KubernetesUserInfo{
User: identityv1alpha1.UserInfo{
Username: "some-username",
Groups: groups,
},
},
},
}, nil
})
return clientset, nil
}
cmd := newWhoamiCommand(getClientset)
stdout, stderr := bytes.NewBuffer([]byte{}), bytes.NewBuffer([]byte{})
cmd.SetOut(stdout)
cmd.SetErr(stderr)
cmd.SetArgs(test.args)
err := cmd.Execute()
if test.wantError {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, test.wantStdout, stdout.String())
require.Equal(t, test.wantStderr, stderr.String())
})
}
}
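
Aside: the expected outputs above rely on here.Doc, which strips the common leading indentation from a raw string literal so the wanted stdout can stay indented alongside the test code. A tiny illustrative sketch (hypothetical function, same pattern as the cases above):

// exampleExpectedOutput shows the dedenting that here.Doc performs on its literal.
func exampleExpectedOutput() string {
	// the shared leading tabs below are removed, leaving unindented lines
	return here.Doc(`
		Current cluster info:
		  Name: kind-kind
		  URL: https://fake-server-url-value
	`)
}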


@@ -24,8 +24,19 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"gopkg.in/square/go-jose.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
identityapi "go.pinniped.dev/generated/latest/apis/concierge/identity"
identityv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/identity/v1alpha1"
loginapi "go.pinniped.dev/generated/latest/apis/concierge/login"
loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
"go.pinniped.dev/internal/groupsuffix"
"go.pinniped.dev/internal/plog"
"go.pinniped.dev/internal/testutil"
"go.pinniped.dev/pkg/oidcclient"
"go.pinniped.dev/pkg/oidcclient/filesession"
@@ -36,8 +47,6 @@ import (
func TestCLIGetKubeconfigStaticToken(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
// Create a test webhook configuration to use with the CLI.
ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancelFunc()
@@ -104,6 +113,19 @@ func TestCLIGetKubeconfigStaticToken(t *testing.T) {
group := group
t.Run("access as group "+group+" with client-go", library.AccessAsGroupTest(ctx, group, kubeClient))
}
// Validate that `pinniped whoami` returns the correct identity.
kubeconfigPath := filepath.Join(testutil.TempDir(t), "whoami-kubeconfig")
require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(stdout), 0600))
assertWhoami(
ctx,
t,
false,
pinnipedExe,
kubeconfigPath,
env.TestUser.ExpectedUsername,
append(env.TestUser.ExpectedGroups, "system:authenticated"),
)
})
}
}
@@ -119,6 +141,49 @@ func runPinnipedCLI(t *testing.T, envVars []string, pinnipedExe string, args ...
return stdout.String(), stderr.String()
}
func assertWhoami(ctx context.Context, t *testing.T, useProxy bool, pinnipedExe, kubeconfigPath, wantUsername string, wantGroups []string) {
t.Helper()
apiGroupSuffix := library.IntegrationEnv(t).APIGroupSuffix
var stdout, stderr bytes.Buffer
cmd := exec.CommandContext(
ctx,
pinnipedExe,
"whoami",
"--kubeconfig",
kubeconfigPath,
"--output",
"yaml",
"--api-group-suffix",
apiGroupSuffix,
)
if useProxy {
cmd.Env = append(os.Environ(), library.IntegrationEnv(t).ProxyEnv()...)
}
cmd.Stdout = &stdout
cmd.Stderr = &stderr
require.NoErrorf(t, cmd.Run(), "stderr:\n%s\n\nstdout:\n%s\n\n", stderr.String(), stdout.String())
whoAmI := deserializeWhoAmIRequest(t, stdout.String(), apiGroupSuffix)
require.Equal(t, wantUsername, whoAmI.Status.KubernetesUserInfo.User.Username)
require.ElementsMatch(t, wantGroups, whoAmI.Status.KubernetesUserInfo.User.Groups)
}
func deserializeWhoAmIRequest(t *testing.T, data string, apiGroupSuffix string) *identityv1alpha1.WhoAmIRequest {
t.Helper()
scheme, _, _ := conciergeschemeNew(apiGroupSuffix)
codecs := serializer.NewCodecFactory(scheme)
respInfo, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeYAML)
require.True(t, ok)
obj, err := runtime.Decode(respInfo.Serializer, []byte(data))
require.NoError(t, err)
return obj.(*identityv1alpha1.WhoAmIRequest)
}
func TestCLILoginOIDC(t *testing.T) {
env := library.IntegrationEnv(t)
@@ -355,3 +420,110 @@ func oidcLoginCommand(ctx context.Context, t *testing.T, pinnipedExe string, ses
cmd.Env = append(os.Environ(), env.ProxyEnv()...)
return cmd
}
// conciergeschemeNew is a temporary private function to stand in place for
// "go.pinniped.dev/internal/concierge/scheme".New until the latter function is merged to main.
func conciergeschemeNew(apiGroupSuffix string) (_ *runtime.Scheme, login, identity schema.GroupVersion) {
// standard setup of the server-side scheme
scheme := runtime.NewScheme()
// add the options to empty v1
metav1.AddToGroupVersion(scheme, metav1.Unversioned)
// nothing fancy is required if using the standard group suffix
if apiGroupSuffix == groupsuffix.PinnipedDefaultSuffix {
schemeBuilder := runtime.NewSchemeBuilder(
loginv1alpha1.AddToScheme,
loginapi.AddToScheme,
identityv1alpha1.AddToScheme,
identityapi.AddToScheme,
)
utilruntime.Must(schemeBuilder.AddToScheme(scheme))
return scheme, loginv1alpha1.SchemeGroupVersion, identityv1alpha1.SchemeGroupVersion
}
loginConciergeGroupData, identityConciergeGroupData := groupsuffix.ConciergeAggregatedGroups(apiGroupSuffix)
addToSchemeAtNewGroup(scheme, loginv1alpha1.GroupName, loginConciergeGroupData.Group, loginv1alpha1.AddToScheme, loginapi.AddToScheme)
addToSchemeAtNewGroup(scheme, identityv1alpha1.GroupName, identityConciergeGroupData.Group, identityv1alpha1.AddToScheme, identityapi.AddToScheme)
// manually register conversions and defaulting into the correct scheme since we cannot directly call AddToScheme
schemeBuilder := runtime.NewSchemeBuilder(
loginv1alpha1.RegisterConversions,
loginv1alpha1.RegisterDefaults,
identityv1alpha1.RegisterConversions,
identityv1alpha1.RegisterDefaults,
)
utilruntime.Must(schemeBuilder.AddToScheme(scheme))
// we do not want to return errors from the scheme and instead would prefer to defer
// to the REST storage layer for consistency. The simplest way to do this is to force
// a cache miss from the authenticator cache. Kube API groups are validated via the
// IsDNS1123Subdomain func, so we can easily create a group that is guaranteed never
// to be in the authenticator cache. Add a timestamp just to be extra sure.
const authenticatorCacheMissPrefix = "_INVALID_API_GROUP_"
authenticatorCacheMiss := authenticatorCacheMissPrefix + time.Now().UTC().String()
// we do not have any defaulting functions for *loginv1alpha1.TokenCredentialRequest
// today, but we may have some in the future. Calling AddTypeDefaultingFunc overwrites
// any previously registered defaulting function. Thus to make sure that we catch
// a situation where we add a defaulting func, we attempt to call it here with a nil
// *loginv1alpha1.TokenCredentialRequest. This will do nothing when there is no
// defaulting func registered, but it will almost certainly panic if one is added.
scheme.Default((*loginv1alpha1.TokenCredentialRequest)(nil))
// on incoming requests, restore the authenticator API group to the standard group
// note that we are responsible for duplicating this logic for every external API version
scheme.AddTypeDefaultingFunc(&loginv1alpha1.TokenCredentialRequest{}, func(obj interface{}) {
credentialRequest := obj.(*loginv1alpha1.TokenCredentialRequest)
if credentialRequest.Spec.Authenticator.APIGroup == nil {
// force a cache miss because this is an invalid request
plog.Debug("invalid token credential request, nil group", "authenticator", credentialRequest.Spec.Authenticator)
credentialRequest.Spec.Authenticator.APIGroup = &authenticatorCacheMiss
return
}
restoredGroup, ok := groupsuffix.Unreplace(*credentialRequest.Spec.Authenticator.APIGroup, apiGroupSuffix)
if !ok {
// force a cache miss because this is an invalid request
plog.Debug("invalid token credential request, wrong group", "authenticator", credentialRequest.Spec.Authenticator)
credentialRequest.Spec.Authenticator.APIGroup = &authenticatorCacheMiss
return
}
credentialRequest.Spec.Authenticator.APIGroup = &restoredGroup
})
return scheme, schema.GroupVersion(loginConciergeGroupData), schema.GroupVersion(identityConciergeGroupData)
}
func addToSchemeAtNewGroup(scheme *runtime.Scheme, oldGroup, newGroup string, funcs ...func(*runtime.Scheme) error) {
// we need a temporary place to register our types to avoid double registering them
tmpScheme := runtime.NewScheme()
schemeBuilder := runtime.NewSchemeBuilder(funcs...)
utilruntime.Must(schemeBuilder.AddToScheme(tmpScheme))
for gvk := range tmpScheme.AllKnownTypes() {
if gvk.GroupVersion() == metav1.Unversioned {
continue // metav1.AddToGroupVersion registers types outside of our aggregated API group that we need to ignore
}
if gvk.Group != oldGroup {
panic(fmt.Errorf("tmp scheme has type not in the old aggregated API group %s: %s", oldGroup, gvk)) // programmer error
}
obj, err := tmpScheme.New(gvk)
if err != nil {
panic(err) // programmer error, scheme internal code is broken
}
newGVK := schema.GroupVersionKind{
Group: newGroup,
Version: gvk.Version,
Kind: gvk.Kind,
}
// register the existing type but with the new group in the correct scheme
scheme.AddKnownTypeWithName(newGVK, obj)
}
}
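
Aside: a hedged sketch (hypothetical helper; assumes Spec.Authenticator is a corev1.TypedLocalObjectReference and that k8s.io/api/core/v1 is imported as corev1) of the defaulting behavior registered in conciergeschemeNew — defaulting a TokenCredentialRequest whose authenticator group carries the custom suffix restores the standard Pinniped group.

// exampleRestoreAuthenticatorGroup is an illustrative sketch, not part of this commit.
func exampleRestoreAuthenticatorGroup(t *testing.T) {
	scheme, _, _ := conciergeschemeNew("tuna.io")
	group := "authentication.concierge.tuna.io"
	credentialRequest := &loginv1alpha1.TokenCredentialRequest{
		Spec: loginv1alpha1.TokenCredentialRequestSpec{
			Authenticator: corev1.TypedLocalObjectReference{
				APIGroup: &group,
				Kind:     "WebhookAuthenticator",
				Name:     "some-webhook-authenticator",
			},
		},
	}
	scheme.Default(credentialRequest)
	// the suffixed group has been un-replaced back to the standard group
	require.Equal(t, "authentication.concierge.pinniped.dev", *credentialRequest.Spec.Authenticator.APIGroup)
}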


@@ -23,8 +23,6 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
env := library.IntegrationEnv(t)
defaultServingCertResourceName := env.ConciergeAppName + "-api-tls-serving-certificate"
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
tests := []struct {
name string
forceRotation func(context.Context, kubernetes.Interface, string) error


@@ -57,8 +57,6 @@ var maskKey = func(s string) string { return strings.ReplaceAll(s, "TESTING KEY"
func TestClient(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()


@@ -23,8 +23,6 @@ func TestCredentialIssuer(t *testing.T) {
client := library.NewConciergeClientset(t)
aggregatedClientset := library.NewAggregatedClientset(t)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()


@@ -25,8 +25,6 @@ import (
func TestUnsuccessfulCredentialRequest(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.AnonymousAuthenticationSupported)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
@@ -49,8 +47,6 @@ func TestUnsuccessfulCredentialRequest(t *testing.T) {
func TestSuccessfulCredentialRequest(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
defer cancel()
@@ -135,9 +131,7 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
}
func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
_ = library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
// Create a testWebhook so we have a legitimate authenticator to pass to the
// TokenCredentialRequest API.
@@ -157,9 +151,7 @@ func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthentic
}
func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
_ = library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
// Create a testWebhook so we have a legitimate authenticator to pass to the
// TokenCredentialRequest API.


@@ -29,8 +29,6 @@ const (
func TestKubeCertAgent(t *testing.T) {
env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()


@@ -46,9 +46,6 @@ func TestE2EFullIntegration(t *testing.T) {
defer library.DumpLogs(t, env.SupervisorNamespace, "")
defer library.DumpLogs(t, "dex", "app=proxy")
library.AssertNoRestartsDuringTest(t, env.ConciergeNamespace, "")
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancelFunc()
@@ -338,4 +335,15 @@ status:
username: `+env.SupervisorTestUpstream.Username+`
`,
string(kubectlOutput3))
// Validate that `pinniped whoami` returns the correct identity.
assertWhoami(
ctx,
t,
true,
pinnipedExe,
kubeconfigPath,
env.SupervisorTestUpstream.Username,
append(env.SupervisorTestUpstream.ExpectedGroups, "system:authenticated"),
)
}


@@ -44,8 +44,6 @@ func TestSupervisorOIDCDiscovery(t *testing.T) {
env := library.IntegrationEnv(t)
client := library.NewSupervisorClientset(t)
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ns := env.SupervisorNamespace
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
@@ -151,8 +149,6 @@ func TestSupervisorTLSTerminationWithSNI(t *testing.T) {
pinnipedClient := library.NewSupervisorClientset(t)
kubeClient := library.NewKubernetesClientset(t)
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ns := env.SupervisorNamespace
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
@@ -224,8 +220,6 @@ func TestSupervisorTLSTerminationWithDefaultCerts(t *testing.T) {
pinnipedClient := library.NewSupervisorClientset(t)
kubeClient := library.NewKubernetesClientset(t)
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ns := env.SupervisorNamespace
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()


@@ -29,8 +29,6 @@ func TestSupervisorHealthz(t *testing.T) {
t.Skip("PINNIPED_TEST_SUPERVISOR_HTTP_ADDRESS not defined")
}
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()


@@ -43,8 +43,6 @@ func TestSupervisorLogin(t *testing.T) {
defer library.DumpLogs(t, env.SupervisorNamespace, "")
defer library.DumpLogs(t, "dex", "app=proxy")
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()


@@ -24,8 +24,6 @@ func TestSupervisorSecrets(t *testing.T) {
kubeClient := library.NewKubernetesClientset(t)
supervisorClient := library.NewSupervisorClientset(t)
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()


@@ -17,8 +17,6 @@ import (
func TestSupervisorUpstreamOIDCDiscovery(t *testing.T) {
env := library.IntegrationEnv(t)
library.AssertNoRestartsDuringTest(t, env.SupervisorNamespace, "")
t.Run("invalid missing secret and bad issuer", func(t *testing.T) {
t.Parallel()
spec := v1alpha1.OIDCIdentityProviderSpec{


@@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
)
// RequireEventuallyWithoutError is similar to require.Eventually() except that it also allows the caller to
@@ -52,52 +53,77 @@ func RequireNeverWithoutError(
}
}
// NewRestartAssertion allows a caller to assert that there were no restarts for a Pod in the
// assertNoRestartsDuringTest allows a caller to assert that there were no restarts for a Pod in the
// provided namespace with the provided labelSelector during the lifetime of a test.
func AssertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
func assertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
t.Helper()
kubeClient := NewKubernetesClientset(t)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
previousRestartCounts := getRestartCounts(t, namespace, labelSelector)
previousRestartCounts := getRestartCounts(ctx, t, kubeClient, namespace, labelSelector)
t.Cleanup(func() {
currentRestartCounts := getRestartCounts(t, namespace, labelSelector)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
currentRestartCounts := getRestartCounts(ctx, t, kubeClient, namespace, labelSelector)
for key, previousRestartCount := range previousRestartCounts {
currentRestartCount, ok := currentRestartCounts[key]
if assert.Truef(
// If the container no longer exists, that's a test failure.
if !assert.Truef(
t,
ok,
"pod namespace/name/container %s existed at beginning of the test, but not the end",
key,
"container %s existed at beginning of the test, but not the end",
key.String(),
) {
assert.Equal(
t,
previousRestartCount,
currentRestartCount,
"pod namespace/name/container %s has restarted %d times (original count was %d)",
key,
currentRestartCount,
previousRestartCount,
)
continue
}
// Expect the restart count to be the same as it was before the test.
if !assert.Equal(
t,
previousRestartCount,
currentRestartCount,
"container %s has restarted %d times (original count was %d)",
key.String(),
currentRestartCount,
previousRestartCount,
) {
// Attempt to dump the logs from the previous container that crashed.
dumpContainerLogs(ctx, t, kubeClient, key.namespace, key.pod, key.container, true)
}
}
})
}
func getRestartCounts(t *testing.T, namespace, labelSelector string) map[string]int32 {
t.Helper()
type containerRestartKey struct {
namespace string
pod string
container string
}
kubeClient := NewKubernetesClientset(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
func (k containerRestartKey) String() string {
return fmt.Sprintf("%s/%s/%s", k.namespace, k.pod, k.container)
}
type containerRestartMap map[containerRestartKey]int32
func getRestartCounts(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, labelSelector string) containerRestartMap {
t.Helper()
pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
require.NoError(t, err)
restartCounts := make(map[string]int32)
restartCounts := make(containerRestartMap)
for _, pod := range pods.Items {
for _, container := range pod.Status.ContainerStatuses {
key := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Name)
key := containerRestartKey{
namespace: pod.Namespace,
pod: pod.Name,
container: container.Name,
}
restartCounts[key] = container.RestartCount
}
}


@@ -6,12 +6,15 @@ package library
import (
"bufio"
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// DumpLogs is meant to be called in a `defer` to dump the logs of components in the cluster on a test failure.
@@ -25,25 +28,37 @@ func DumpLogs(t *testing.T, namespace string, labelSelector string) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
logTailLines := int64(40)
pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
require.NoError(t, err)
for _, pod := range pods.Items {
for _, container := range pod.Status.ContainerStatuses {
t.Logf("pod %s/%s container %s restarted %d times:", pod.Namespace, pod.Name, container.Name, container.RestartCount)
req := kubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
Container: container.Name,
TailLines: &logTailLines,
})
logReader, err := req.Stream(ctx)
require.NoError(t, err)
scanner := bufio.NewScanner(logReader)
for scanner.Scan() {
t.Logf("%s/%s/%s > %s", pod.Namespace, pod.Name, container.Name, scanner.Text())
if container.RestartCount > 0 {
dumpContainerLogs(ctx, t, kubeClient, pod.Namespace, pod.Name, container.Name, true)
}
require.NoError(t, scanner.Err())
dumpContainerLogs(ctx, t, kubeClient, pod.Namespace, pod.Name, container.Name, false)
}
}
}
func dumpContainerLogs(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, pod, container string, prev bool) {
logTailLines := int64(40)
shortName := fmt.Sprintf("%s/%s/%s", namespace, pod, container)
logReader, err := kubeClient.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{
Container: container,
TailLines: &logTailLines,
Previous: prev,
}).Stream(ctx)
if !assert.NoErrorf(t, err, "failed to stream logs for container %s", shortName) {
return
}
scanner := bufio.NewScanner(logReader)
for scanner.Scan() {
prefix := shortName
if prev {
prefix += " (previous)"
}
t.Logf("%s > %s", prefix, scanner.Text())
}
assert.NoError(t, scanner.Err(), "failed to read logs from container %s", shortName)
}


@@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/require"
@@ -74,9 +75,17 @@ func (e *TestEnv) ProxyEnv() []string {
return []string{"http_proxy=" + e.Proxy, "https_proxy=" + e.Proxy, "no_proxy=127.0.0.1"}
}
// memoizedTestEnvsByTest maps *testing.T pointers to *TestEnv. It exists so that we don't do all the
// environment parsing N times per test and so that any implicit assertions happen only once.
var memoizedTestEnvsByTest sync.Map //nolint: gochecknoglobals
// IntegrationEnv gets the integration test environment from OS environment variables. This
// method also implies SkipUnlessIntegration().
func IntegrationEnv(t *testing.T) *TestEnv {
if existing, exists := memoizedTestEnvsByTest.Load(t); exists {
return existing.(*TestEnv)
}
t.Helper()
SkipUnlessIntegration(t)
@@ -97,8 +106,12 @@ func IntegrationEnv(t *testing.T) *TestEnv {
require.NoErrorf(t, err, "capabilities specification was invalid YAML")
loadEnvVars(t, &result)
result.t = t
memoizedTestEnvsByTest.Store(t, &result)
// In every integration test, assert that no pods in our namespaces restart during the test.
assertNoRestartsDuringTest(t, result.ConciergeNamespace, "")
assertNoRestartsDuringTest(t, result.SupervisorNamespace, "")
return &result
}
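
Aside: a hypothetical usage sketch (written from a test package that imports test/library; not an actual test in this commit) of the memoization added above — repeated IntegrationEnv calls inside one test return the same *TestEnv, so the restart assertions are registered only once per test.

// TestEnvMemoizationExample is an illustrative sketch, not part of this commit.
func TestEnvMemoizationExample(t *testing.T) {
	first := library.IntegrationEnv(t)
	second := library.IntegrationEnv(t)
	// same pointer for the same *testing.T, thanks to memoizedTestEnvsByTest
	require.Same(t, first, second)
}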