diff --git a/cmd/local-user-authenticator/main.go b/cmd/local-user-authenticator/main.go index bd00bf36..54d0d18c 100644 --- a/cmd/local-user-authenticator/main.go +++ b/cmd/local-user-authenticator/main.go @@ -31,13 +31,13 @@ import ( kubeinformers "k8s.io/client-go/informers" corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" "k8s.io/klog/v2" "go.pinniped.dev/internal/constable" "go.pinniped.dev/internal/controller/apicerts" "go.pinniped.dev/internal/controllerlib" "go.pinniped.dev/internal/dynamiccert" + "go.pinniped.dev/internal/kubeclient" "go.pinniped.dev/internal/plog" ) @@ -279,21 +279,6 @@ func respondWithAuthenticated( } } -func newK8sClient() (kubernetes.Interface, error) { - kubeConfig, err := restclient.InClusterConfig() - if err != nil { - return nil, fmt.Errorf("could not load in-cluster configuration: %w", err) - } - - // Connect to the core Kubernetes API. - kubeClient, err := kubernetes.NewForConfig(kubeConfig) - if err != nil { - return nil, fmt.Errorf("could not load in-cluster configuration: %w", err) - } - - return kubeClient, nil -} - func startControllers( ctx context.Context, dynamicCertProvider dynamiccert.Provider, @@ -359,20 +344,20 @@ func run() error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - kubeClient, err := newK8sClient() + client, err := kubeclient.New() if err != nil { return fmt.Errorf("cannot create k8s client: %w", err) } kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions( - kubeClient, + client.Kubernetes, defaultResyncInterval, kubeinformers.WithNamespace(namespace), ) dynamicCertProvider := dynamiccert.New() - startControllers(ctx, dynamicCertProvider, kubeClient, kubeInformers) + startControllers(ctx, dynamicCertProvider, client.Kubernetes, kubeInformers) plog.Debug("controllers are ready") //nolint: gosec // Intentionally binding to all network interfaces. 
diff --git a/cmd/pinniped-supervisor/main.go b/cmd/pinniped-supervisor/main.go index 484b4935..0db874ae 100644 --- a/cmd/pinniped-supervisor/main.go +++ b/cmd/pinniped-supervisor/main.go @@ -17,13 +17,11 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/pkg/version" "k8s.io/client-go/rest" - restclient "k8s.io/client-go/rest" "k8s.io/component-base/logs" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" @@ -37,7 +35,9 @@ import ( "go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatcher" "go.pinniped.dev/internal/controller/supervisorstorage" "go.pinniped.dev/internal/controllerlib" + "go.pinniped.dev/internal/deploymentref" "go.pinniped.dev/internal/downward" + "go.pinniped.dev/internal/kubeclient" "go.pinniped.dev/internal/oidc/jwks" "go.pinniped.dev/internal/oidc/provider" "go.pinniped.dev/internal/oidc/provider/manager" @@ -252,81 +252,31 @@ func startControllers( go controllerManager.Start(ctx) } -func getSupervisorDeployment( - ctx context.Context, - kubeClient kubernetes.Interface, - podInfo *downward.PodInfo, -) (*appsv1.Deployment, error) { - ns := podInfo.Namespace - - pod, err := kubeClient.CoreV1().Pods(ns).Get(ctx, podInfo.Name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("could not get pod: %w", err) - } - - podOwner := metav1.GetControllerOf(pod) - if podOwner == nil { - return nil, fmt.Errorf("pod %s/%s is missing owner", ns, podInfo.Name) - } - - rs, err := kubeClient.AppsV1().ReplicaSets(ns).Get(ctx, podOwner.Name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("could not get replicaset: %w", err) - } - - rsOwner := metav1.GetControllerOf(rs) - if rsOwner == nil { - return nil, fmt.Errorf("replicaset %s/%s is missing owner", ns, podInfo.Name) - } - - d, err := kubeClient.AppsV1().Deployments(ns).Get(ctx, rsOwner.Name, 
metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("could not get deployment: %w", err) - } - - return d, nil -} - -func newClients() (kubernetes.Interface, pinnipedclientset.Interface, error) { - kubeConfig, err := restclient.InClusterConfig() - if err != nil { - return nil, nil, fmt.Errorf("could not load in-cluster configuration: %w", err) - } - - // Connect to the core Kubernetes API. - kubeClient, err := kubernetes.NewForConfig(kubeConfig) - if err != nil { - return nil, nil, fmt.Errorf("could not create kube client: %w", err) - } - - // Connect to the Pinniped API. - pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig) - if err != nil { - return nil, nil, fmt.Errorf("could not create pinniped client: %w", err) - } - - return kubeClient, pinnipedClient, nil -} - func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error { serverInstallationNamespace := podInfo.Namespace ctx, cancel := context.WithCancel(context.Background()) defer cancel() - kubeClient, pinnipedClient, err := newClients() + // TODO remove code that relies on supervisorDeployment directly + dref, supervisorDeployment, err := deploymentref.New(podInfo) + if err != nil { + return fmt.Errorf("cannot create deployment ref: %w", err) + } + + client, err := kubeclient.New(dref) if err != nil { return fmt.Errorf("cannot create k8s client: %w", err) } kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions( - kubeClient, + client.Kubernetes, defaultResyncInterval, kubeinformers.WithNamespace(serverInstallationNamespace), ) pinnipedInformers := pinnipedinformers.NewSharedInformerFactoryWithOptions( - pinnipedClient, + client.PinnipedSupervisor, defaultResyncInterval, pinnipedinformers.WithNamespace(serverInstallationNamespace), ) @@ -348,14 +298,9 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error { dynamicJWKSProvider, dynamicUpstreamIDPProvider, &secretCache, - kubeClient.CoreV1().Secrets(serverInstallationNamespace), + 
client.Kubernetes.CoreV1().Secrets(serverInstallationNamespace), ) - supervisorDeployment, err := getSupervisorDeployment(ctx, kubeClient, podInfo) - if err != nil { - return fmt.Errorf("cannot get supervisor deployment: %w", err) - } - startControllers( ctx, cfg, @@ -365,8 +310,8 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error { dynamicUpstreamIDPProvider, &secretCache, supervisorDeployment, - kubeClient, - pinnipedClient, + client.Kubernetes, + client.PinnipedSupervisor, kubeInformers, pinnipedInformers, ) diff --git a/cmd/pinniped/cmd/kubeconfig.go b/cmd/pinniped/cmd/kubeconfig.go index 913e7ca4..bbb9fa80 100644 --- a/cmd/pinniped/cmd/kubeconfig.go +++ b/cmd/pinniped/cmd/kubeconfig.go @@ -26,6 +26,7 @@ import ( conciergev1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/authentication/v1alpha1" conciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned" + "go.pinniped.dev/internal/kubeclient" ) type kubeconfigDeps struct { @@ -41,7 +42,11 @@ func kubeconfigRealDeps() kubeconfigDeps { if err != nil { return nil, err } - return conciergeclientset.NewForConfig(restConfig) + client, err := kubeclient.New(kubeclient.WithConfig(restConfig)) + if err != nil { + return nil, err + } + return client.PinnipedConcierge, nil }, } } diff --git a/deploy/concierge/deployment.yaml b/deploy/concierge/deployment.yaml index b4d6351f..90a40e9e 100644 --- a/deploy/concierge/deployment.yaml +++ b/deploy/concierge/deployment.yaml @@ -148,6 +148,9 @@ spec: - path: "labels" fieldRef: fieldPath: metadata.labels + - path: "name" + fieldRef: + fieldPath: metadata.name - path: "namespace" fieldRef: fieldPath: metadata.namespace diff --git a/deploy/concierge/rbac.yaml b/deploy/concierge/rbac.yaml index 7ebc75d1..94eb1a74 100644 --- a/deploy/concierge/rbac.yaml +++ b/deploy/concierge/rbac.yaml @@ -69,6 +69,9 @@ rules: - apiGroups: [ config.concierge.pinniped.dev, authentication.concierge.pinniped.dev ] resources: [ "*" ] verbs: [ create, 
get, list, update, watch ] + - apiGroups: [apps] + resources: [replicasets,deployments] + verbs: [get] --- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/internal/client/client.go b/internal/client/client.go index 8300f984..88f29546 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -17,6 +17,7 @@ import ( "go.pinniped.dev/generated/1.19/apis/concierge/login/v1alpha1" "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned" + "go.pinniped.dev/internal/kubeclient" ) // ErrLoginFailed is returned by ExchangeToken when the server rejects the login request. @@ -84,5 +85,9 @@ func getClient(apiEndpoint string, caBundle string) (versioned.Interface, error) if err != nil { return nil, err } - return versioned.NewForConfig(cfg) + client, err := kubeclient.New(kubeclient.WithConfig(cfg)) + if err != nil { + return nil, err + } + return client.PinnipedConcierge, nil } diff --git a/internal/concierge/server/server.go b/internal/concierge/server/server.go index d01b7233..a43e3d0f 100644 --- a/internal/concierge/server/server.go +++ b/internal/concierge/server/server.go @@ -105,7 +105,6 @@ func (a *App) runServer(ctx context.Context) error { if err != nil { return fmt.Errorf("could not read pod metadata: %w", err) } - serverInstallationNamespace := podInfo.Namespace // Initialize the cache of active authenticators. authenticators := authncache.New() @@ -125,16 +124,16 @@ func (a *App) runServer(ctx context.Context) error { // post start hook of the aggregated API server. 
startControllersFunc, err := controllermanager.PrepareControllers( &controllermanager.Config{ - ServerInstallationNamespace: serverInstallationNamespace, - NamesConfig: &cfg.NamesConfig, - Labels: cfg.Labels, - KubeCertAgentConfig: &cfg.KubeCertAgentConfig, - DiscoveryURLOverride: cfg.DiscoveryInfo.URL, - DynamicServingCertProvider: dynamicServingCertProvider, - DynamicSigningCertProvider: dynamicSigningCertProvider, - ServingCertDuration: time.Duration(*cfg.APIConfig.ServingCertificateConfig.DurationSeconds) * time.Second, - ServingCertRenewBefore: time.Duration(*cfg.APIConfig.ServingCertificateConfig.RenewBeforeSeconds) * time.Second, - AuthenticatorCache: authenticators, + ServerInstallationInfo: podInfo, + NamesConfig: &cfg.NamesConfig, + Labels: cfg.Labels, + KubeCertAgentConfig: &cfg.KubeCertAgentConfig, + DiscoveryURLOverride: cfg.DiscoveryInfo.URL, + DynamicServingCertProvider: dynamicServingCertProvider, + DynamicSigningCertProvider: dynamicSigningCertProvider, + ServingCertDuration: time.Duration(*cfg.APIConfig.ServingCertificateConfig.DurationSeconds) * time.Second, + ServingCertRenewBefore: time.Duration(*cfg.APIConfig.ServingCertificateConfig.RenewBeforeSeconds) * time.Second, + AuthenticatorCache: authenticators, }, ) if err != nil { diff --git a/internal/controller/supervisorconfig/generator/supervisor_secrets.go b/internal/controller/supervisorconfig/generator/supervisor_secrets.go index b9e0cc5e..e0d70ccb 100644 --- a/internal/controller/supervisorconfig/generator/supervisor_secrets.go +++ b/internal/controller/supervisorconfig/generator/supervisor_secrets.go @@ -13,7 +13,6 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" @@ -200,13 +199,7 @@ func generateSecret(namespace, name string, labels map[string]string, 
secretData return nil, err } - deploymentGVK := schema.GroupVersionKind{ - Group: appsv1.SchemeGroupVersion.Group, - Version: appsv1.SchemeGroupVersion.Version, - Kind: "Deployment", - } - - isController := false + deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment") return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -218,7 +211,6 @@ func generateSecret(namespace, name string, labels map[string]string, secretData Kind: deploymentGVK.Kind, Name: owner.GetName(), UID: owner.GetUID(), - Controller: &isController, }, }, Labels: labels, diff --git a/internal/controller/supervisorconfig/generator/supervisor_secrets_test.go b/internal/controller/supervisorconfig/generator/supervisor_secrets_test.go index 4e16de9d..e027a585 100644 --- a/internal/controller/supervisorconfig/generator/supervisor_secrets_test.go +++ b/internal/controller/supervisorconfig/generator/supervisor_secrets_test.go @@ -268,7 +268,6 @@ func TestSupervisorSecretsControllerSync(t *testing.T) { generatedSymmetricKey = []byte("some-neato-32-byte-generated-key") otherGeneratedSymmetricKey = []byte("some-funio-32-byte-generated-key") - isController = false generatedSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: generatedSecretName, @@ -279,7 +278,6 @@ func TestSupervisorSecretsControllerSync(t *testing.T) { Kind: ownerGVK.Kind, Name: owner.GetName(), UID: owner.GetUID(), - Controller: &isController, }, }, Labels: labels, @@ -300,7 +298,6 @@ func TestSupervisorSecretsControllerSync(t *testing.T) { Kind: ownerGVK.Kind, Name: owner.GetName(), UID: owner.GetUID(), - Controller: &isController, }, }, Labels: labels, diff --git a/internal/controllermanager/prepare_controllers.go b/internal/controllermanager/prepare_controllers.go index 22ce266d..dd9607ab 100644 --- a/internal/controllermanager/prepare_controllers.go +++ b/internal/controllermanager/prepare_controllers.go @@ -10,14 +10,10 @@ import ( "fmt" "time" - "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/util/clock" k8sinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - restclient "k8s.io/client-go/rest" "k8s.io/klog/v2/klogr" - aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/login/v1alpha1" pinnipedclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned" @@ -31,7 +27,10 @@ import ( "go.pinniped.dev/internal/controller/issuerconfig" "go.pinniped.dev/internal/controller/kubecertagent" "go.pinniped.dev/internal/controllerlib" + "go.pinniped.dev/internal/deploymentref" + "go.pinniped.dev/internal/downward" "go.pinniped.dev/internal/dynamiccert" + "go.pinniped.dev/internal/kubeclient" ) const ( @@ -43,8 +42,8 @@ const ( // // It is used to inject parameters into PrepareControllers. type Config struct { - // ServerInstallationNamespace provides the namespace in which Pinniped is deployed. - ServerInstallationNamespace string + // ServerInstallationInfo provides the name of the pod in which Pinniped is running and the namespace in which Pinniped is deployed. + ServerInstallationInfo *downward.PodInfo // NamesConfig comes from the Pinniped config API (see api.Config). It specifies how Kubernetes // objects should be named. @@ -81,29 +80,29 @@ type Config struct { // Prepare the controllers and their informers and return a function that will start them when called. //nolint:funlen // Eh, fair, it is a really long function...but it is wiring the world...so... func PrepareControllers(c *Config) (func(ctx context.Context), error) { - // Create k8s clients. 
- kubeConfig, err := createConfig() + dref, _, err := deploymentref.New(c.ServerInstallationInfo) if err != nil { - return nil, fmt.Errorf("could not create config for the controllers: %w", err) + return nil, fmt.Errorf("cannot create deployment ref: %w", err) } - k8sClient, aggregatorClient, pinnipedClient, err := createClients(kubeConfig) + + client, err := kubeclient.New(dref) if err != nil { return nil, fmt.Errorf("could not create clients for the controllers: %w", err) } // Create informers. Don't forget to make sure they get started in the function returned below. - informers := createInformers(c.ServerInstallationNamespace, k8sClient, pinnipedClient) + informers := createInformers(c.ServerInstallationInfo.Namespace, client.Kubernetes, client.PinnipedConcierge) // Configuration for the kubecertagent controllers created below. agentPodConfig := &kubecertagent.AgentPodConfig{ - Namespace: c.ServerInstallationNamespace, + Namespace: c.ServerInstallationInfo.Namespace, ContainerImage: *c.KubeCertAgentConfig.Image, PodNamePrefix: *c.KubeCertAgentConfig.NamePrefix, ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets, AdditionalLabels: c.Labels, } credentialIssuerLocationConfig := &kubecertagent.CredentialIssuerLocationConfig{ - Namespace: c.ServerInstallationNamespace, + Namespace: c.ServerInstallationInfo.Namespace, Name: c.NamesConfig.CredentialIssuer, } @@ -115,11 +114,11 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { // CredentialIssuer resource and keeping that information up to date. 
WithController( issuerconfig.NewKubeConfigInfoPublisherController( - c.ServerInstallationNamespace, + c.ServerInstallationInfo.Namespace, c.NamesConfig.CredentialIssuer, c.Labels, c.DiscoveryURLOverride, - pinnipedClient, + client.PinnipedConcierge, informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(), controllerlib.WithInformer, ), @@ -129,10 +128,10 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { // API certs controllers are responsible for managing the TLS certificates used to serve Pinniped's API. WithController( apicerts.NewCertsManagerController( - c.ServerInstallationNamespace, + c.ServerInstallationInfo.Namespace, c.NamesConfig.ServingCertificateSecret, c.Labels, - k8sClient, + client.Kubernetes, informers.installationNamespaceK8s.Core().V1().Secrets(), controllerlib.WithInformer, controllerlib.WithInitialEvent, @@ -144,10 +143,10 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { ). WithController( apicerts.NewAPIServiceUpdaterController( - c.ServerInstallationNamespace, + c.ServerInstallationInfo.Namespace, c.NamesConfig.ServingCertificateSecret, loginv1alpha1.SchemeGroupVersion.Version+"."+loginv1alpha1.GroupName, - aggregatorClient, + client.Aggregation, informers.installationNamespaceK8s.Core().V1().Secrets(), controllerlib.WithInformer, ), @@ -155,7 +154,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { ). WithController( apicerts.NewCertsObserverController( - c.ServerInstallationNamespace, + c.ServerInstallationInfo.Namespace, c.NamesConfig.ServingCertificateSecret, c.DynamicServingCertProvider, informers.installationNamespaceK8s.Core().V1().Secrets(), @@ -165,9 +164,9 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { ). 
WithController( apicerts.NewCertsExpirerController( - c.ServerInstallationNamespace, + c.ServerInstallationInfo.Namespace, c.NamesConfig.ServingCertificateSecret, - k8sClient, + client.Kubernetes, informers.installationNamespaceK8s.Core().V1().Secrets(), controllerlib.WithInformer, c.ServingCertRenewBefore, @@ -183,8 +182,8 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { credentialIssuerLocationConfig, c.Labels, clock.RealClock{}, - k8sClient, - pinnipedClient, + client.Kubernetes, + client.PinnipedConcierge, informers.kubeSystemNamespaceK8s.Core().V1().Pods(), informers.installationNamespaceK8s.Core().V1().Pods(), controllerlib.WithInformer, @@ -197,8 +196,8 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { agentPodConfig, credentialIssuerLocationConfig, clock.RealClock{}, - k8sClient, - pinnipedClient, + client.Kubernetes, + client.PinnipedConcierge, informers.kubeSystemNamespaceK8s.Core().V1().Pods(), informers.installationNamespaceK8s.Core().V1().Pods(), controllerlib.WithInformer, @@ -209,8 +208,8 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { kubecertagent.NewExecerController( credentialIssuerLocationConfig, c.DynamicSigningCertProvider, - kubecertagent.NewPodCommandExecutor(kubeConfig, k8sClient), - pinnipedClient, + kubecertagent.NewPodCommandExecutor(client.JSONConfig, client.Kubernetes), + client.PinnipedConcierge, clock.RealClock{}, informers.installationNamespaceK8s.Core().V1().Pods(), controllerlib.WithInformer, @@ -220,7 +219,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { WithController( kubecertagent.NewDeleterController( agentPodConfig, - k8sClient, + client.Kubernetes, informers.kubeSystemNamespaceK8s.Core().V1().Pods(), informers.installationNamespaceK8s.Core().V1().Pods(), controllerlib.WithInformer, @@ -263,51 +262,6 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) { }, nil } -// Create the rest config that will 
be used by the clients for the controllers. -func createConfig() (*rest.Config, error) { - // Load the Kubernetes client configuration. - kubeConfig, err := restclient.InClusterConfig() - if err != nil { - return nil, fmt.Errorf("could not load in-cluster configuration: %w", err) - } - - return kubeConfig, nil -} - -// Create the k8s clients that will be used by the controllers. -func createClients(kubeConfig *rest.Config) ( - k8sClient *kubernetes.Clientset, - aggregatorClient *aggregatorclient.Clientset, - pinnipedClient *pinnipedclientset.Clientset, - err error, -) { - // explicitly use protobuf when talking to built-in kube APIs - protoKubeConfig := createProtoKubeConfig(kubeConfig) - - // Connect to the core Kubernetes API. - k8sClient, err = kubernetes.NewForConfig(protoKubeConfig) - if err != nil { - return nil, nil, nil, fmt.Errorf("could not initialize Kubernetes client: %w", err) - } - - // Connect to the Kubernetes aggregation API. - aggregatorClient, err = aggregatorclient.NewForConfig(protoKubeConfig) - if err != nil { - return nil, nil, nil, fmt.Errorf("could not initialize Kubernetes client: %w", err) - } - - // Connect to the pinniped API. - // I think we can't use protobuf encoding here because we are using CRDs - // (for which protobuf encoding is not supported). - pinnipedClient, err = pinnipedclientset.NewForConfig(kubeConfig) - if err != nil { - return nil, nil, nil, fmt.Errorf("could not initialize pinniped client: %w", err) - } - - //nolint: nakedret // Short function. Makes the order of return values more clear. - return -} - type informers struct { kubePublicNamespaceK8s k8sinformers.SharedInformerFactory kubeSystemNamespaceK8s k8sinformers.SharedInformerFactory @@ -318,8 +272,8 @@ type informers struct { // Create the informers that will be used by the controllers. 
func createInformers( serverInstallationNamespace string, - k8sClient *kubernetes.Clientset, - pinnipedClient *pinnipedclientset.Clientset, + k8sClient kubernetes.Interface, + pinnipedClient pinnipedclientset.Interface, ) *informers { return &informers{ kubePublicNamespaceK8s: k8sinformers.NewSharedInformerFactoryWithOptions( @@ -356,13 +310,3 @@ func (i *informers) startAndWaitForSync(ctx context.Context) { i.installationNamespaceK8s.WaitForCacheSync(ctx.Done()) i.installationNamespacePinniped.WaitForCacheSync(ctx.Done()) } - -// Returns a copy of the input config with the ContentConfig set to use protobuf. -// Do not use this config to communicate with any CRD based APIs. -func createProtoKubeConfig(kubeConfig *restclient.Config) *restclient.Config { - protoKubeConfig := restclient.CopyConfig(kubeConfig) - const protoThenJSON = runtime.ContentTypeProtobuf + "," + runtime.ContentTypeJSON - protoKubeConfig.AcceptContentTypes = protoThenJSON - protoKubeConfig.ContentType = runtime.ContentTypeProtobuf - return protoKubeConfig -} diff --git a/internal/deploymentref/deploymentref.go b/internal/deploymentref/deploymentref.go new file mode 100644 index 00000000..cee32f25 --- /dev/null +++ b/internal/deploymentref/deploymentref.go @@ -0,0 +1,72 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package deploymentref + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "go.pinniped.dev/internal/downward" + "go.pinniped.dev/internal/kubeclient" + "go.pinniped.dev/internal/ownerref" +) + +func New(podInfo *downward.PodInfo) (kubeclient.Option, *appsv1.Deployment, error) { + tempClient, err := kubeclient.New() + if err != nil { + return nil, nil, fmt.Errorf("cannot create temp client: %w", err) + } + + deployment, err := getDeployment(tempClient.Kubernetes, podInfo) + if err != nil { + return nil, nil, fmt.Errorf("cannot get deployment: %w", err) + } + + ref := metav1.OwnerReference{ + Name: deployment.Name, + UID: deployment.UID, + } + ref.APIVersion, ref.Kind = appsv1.SchemeGroupVersion.WithKind("Deployment").ToAPIVersionAndKind() + + return kubeclient.WithMiddleware(ownerref.New(ref)), deployment, nil +} + +func getDeployment(kubeClient kubernetes.Interface, podInfo *downward.PodInfo) (*appsv1.Deployment, error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + ns := podInfo.Namespace + + pod, err := kubeClient.CoreV1().Pods(ns).Get(ctx, podInfo.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pod: %w", err) + } + + podOwner := metav1.GetControllerOf(pod) + if podOwner == nil { + return nil, fmt.Errorf("pod %s/%s is missing owner", ns, podInfo.Name) + } + + rs, err := kubeClient.AppsV1().ReplicaSets(ns).Get(ctx, podOwner.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get replicaset: %w", err) + } + + rsOwner := metav1.GetControllerOf(rs) + if rsOwner == nil { + return nil, fmt.Errorf("replicaset %s/%s is missing owner", ns, podInfo.Name) + } + + d, err := kubeClient.AppsV1().Deployments(ns).Get(ctx, rsOwner.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get 
deployment: %w", err) + } + + return d, nil +} diff --git a/internal/kubeclient/kubeclient.go b/internal/kubeclient/kubeclient.go new file mode 100644 index 00000000..3aadd16b --- /dev/null +++ b/internal/kubeclient/kubeclient.go @@ -0,0 +1,117 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package kubeclient + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + kubescheme "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" + aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + aggregatorclientscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" + + pinnipedconciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned" + pinnipedconciergeclientsetscheme "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned/scheme" + pinnipedsupervisorclientset "go.pinniped.dev/generated/1.19/client/supervisor/clientset/versioned" + pinnipedsupervisorclientsetscheme "go.pinniped.dev/generated/1.19/client/supervisor/clientset/versioned/scheme" +) + +type Client struct { + Kubernetes kubernetes.Interface + Aggregation aggregatorclient.Interface + PinnipedConcierge pinnipedconciergeclientset.Interface + PinnipedSupervisor pinnipedsupervisorclientset.Interface + + JSONConfig, ProtoConfig *restclient.Config +} + +// TODO expand this interface to address more complex use cases. 
+type Middleware interface { + Handles(httpMethod string) bool + Mutate(obj metav1.Object) (mutated bool) +} + +func New(opts ...Option) (*Client, error) { + c := &clientConfig{} + + for _, opt := range opts { + opt(c) + } + + // default to assuming we are running in a pod with the service account token mounted + if c.config == nil { + inClusterConfig, err := restclient.InClusterConfig() + if err != nil { + return nil, fmt.Errorf("could not load in-cluster configuration: %w", err) + } + WithConfig(inClusterConfig)(c) // make sure all writes to clientConfig flow through one code path + } + + // explicitly use json when talking to CRD APIs + jsonKubeConfig := createJSONKubeConfig(c.config) + + // explicitly use protobuf when talking to built-in kube APIs + protoKubeConfig := createProtoKubeConfig(c.config) + + // Connect to the core Kubernetes API. + k8sClient, err := kubernetes.NewForConfig(configWithWrapper(protoKubeConfig, kubescheme.Codecs, c.middlewares)) + if err != nil { + return nil, fmt.Errorf("could not initialize Kubernetes client: %w", err) + } + + // Connect to the Kubernetes aggregation API. + aggregatorClient, err := aggregatorclient.NewForConfig(configWithWrapper(protoKubeConfig, aggregatorclientscheme.Codecs, c.middlewares)) + if err != nil { + return nil, fmt.Errorf("could not initialize aggregation client: %w", err) + } + + // Connect to the pinniped concierge API. + // We cannot use protobuf encoding here because we are using CRDs + // (for which protobuf encoding is not yet supported). + // TODO we should try to add protobuf support to TokenCredentialRequests since it is an aggregated API + pinnipedConciergeClient, err := pinnipedconciergeclientset.NewForConfig(configWithWrapper(jsonKubeConfig, pinnipedconciergeclientsetscheme.Codecs, c.middlewares)) + if err != nil { + return nil, fmt.Errorf("could not initialize pinniped client: %w", err) + } + + // Connect to the pinniped supervisor API. 
+ // We cannot use protobuf encoding here because we are using CRDs + // (for which protobuf encoding is not yet supported). + pinnipedSupervisorClient, err := pinnipedsupervisorclientset.NewForConfig(configWithWrapper(jsonKubeConfig, pinnipedsupervisorclientsetscheme.Codecs, c.middlewares)) + if err != nil { + return nil, fmt.Errorf("could not initialize pinniped client: %w", err) + } + + return &Client{ + Kubernetes: k8sClient, + Aggregation: aggregatorClient, + PinnipedConcierge: pinnipedConciergeClient, + PinnipedSupervisor: pinnipedSupervisorClient, + + JSONConfig: jsonKubeConfig, + ProtoConfig: protoKubeConfig, + }, nil +} + +// Returns a copy of the input config with the ContentConfig set to use json. +// Use this config to communicate with all CRD based APIs. +func createJSONKubeConfig(kubeConfig *restclient.Config) *restclient.Config { + jsonKubeConfig := restclient.CopyConfig(kubeConfig) + jsonKubeConfig.AcceptContentTypes = runtime.ContentTypeJSON + jsonKubeConfig.ContentType = runtime.ContentTypeJSON + return jsonKubeConfig +} + +// Returns a copy of the input config with the ContentConfig set to use protobuf. +// Do not use this config to communicate with any CRD based APIs. +func createProtoKubeConfig(kubeConfig *restclient.Config) *restclient.Config { + protoKubeConfig := restclient.CopyConfig(kubeConfig) + const protoThenJSON = runtime.ContentTypeProtobuf + "," + runtime.ContentTypeJSON + protoKubeConfig.AcceptContentTypes = protoThenJSON + protoKubeConfig.ContentType = runtime.ContentTypeProtobuf + return protoKubeConfig +} diff --git a/internal/kubeclient/option.go b/internal/kubeclient/option.go new file mode 100644 index 00000000..ba4ada35 --- /dev/null +++ b/internal/kubeclient/option.go @@ -0,0 +1,25 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package kubeclient + +import restclient "k8s.io/client-go/rest" + +type Option func(*clientConfig) + +type clientConfig struct { + config *restclient.Config + middlewares []Middleware +} + +func WithConfig(config *restclient.Config) Option { + return func(c *clientConfig) { + c.config = config + } +} + +func WithMiddleware(middleware Middleware) Option { + return func(c *clientConfig) { + c.middlewares = append(c.middlewares, middleware) + } +} diff --git a/internal/kubeclient/roundtrip.go b/internal/kubeclient/roundtrip.go new file mode 100644 index 00000000..38240cdd --- /dev/null +++ b/internal/kubeclient/roundtrip.go @@ -0,0 +1,122 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package kubeclient + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + restclient "k8s.io/client-go/rest" +) + +// TODO unit test + +func configWithWrapper(config *restclient.Config, negotiatedSerializer runtime.NegotiatedSerializer, middlewares []Middleware) *restclient.Config { + // no need for any wrapping when we have no middleware to inject + if len(middlewares) == 0 { + return config + } + + info, ok := runtime.SerializerInfoForMediaType(negotiatedSerializer.SupportedMediaTypes(), config.ContentType) + if !ok { + panic(fmt.Errorf("unknown content type: %s ", config.ContentType)) // static input, programmer error + } + serializer := info.Serializer // should perform no conversion + + f := func(rt http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(req *http.Request) (*http.Response, error) { + // ignore everything that has an unreadable body + if req.GetBody == nil { + return rt.RoundTrip(req) + } + + var reqMiddlewares []Middleware + for _, middleware := range middlewares { + middleware := middleware + if middleware.Handles(req.Method) { + reqMiddlewares = 
append(reqMiddlewares, middleware)
+			}
+		}
+
+		// no middleware to handle this request
+		if len(reqMiddlewares) == 0 {
+			return rt.RoundTrip(req)
+		}
+
+		body, err := req.GetBody()
+		if err != nil {
+			return nil, fmt.Errorf("get body failed: %w", err)
+		}
+		defer body.Close()
+		data, err := ioutil.ReadAll(body)
+		if err != nil {
+			return nil, fmt.Errorf("read body failed: %w", err)
+		}
+
+		// attempt to decode with no defaults or into specified, i.e. defer to the decoder
+		// this should result in a straight decode with no conversion
+		obj, _, err := serializer.Decode(data, nil, nil)
+		if err != nil {
+			return nil, fmt.Errorf("body decode failed: %w", err)
+		}
+
+		accessor, err := meta.Accessor(obj)
+		if err != nil {
+			return rt.RoundTrip(req) // ignore everything that has no object meta for now
+		}
+
+		// run all the mutating operations
+		var reqMutated bool
+		for _, reqMiddleware := range reqMiddlewares {
+			mutated := reqMiddleware.Mutate(accessor)
+			reqMutated = mutated || reqMutated
+		}
+
+		// no mutation occurred, keep the original request
+		if !reqMutated {
+			return rt.RoundTrip(req)
+		}
+
+		// we plan on making a new request so make sure to close the original request's body
+		_ = req.Body.Close()
+
+		newData, err := runtime.Encode(serializer, obj)
+		if err != nil {
+			return nil, fmt.Errorf("new body encode failed: %w", err)
+		}
+
+		// TODO log newData at high loglevel similar to REST client
+
+		// simplest way to reuse the body creation logic
+		newReqForBody, err := http.NewRequest(req.Method, req.URL.String(), bytes.NewReader(newData))
+		if err != nil {
+			return nil, fmt.Errorf("failed to create new req for body: %w", err) // this should never happen
+		}
+
+		// shallow copy because we want to preserve all the headers and such but not mutate the original request
+		newReq := req.WithContext(req.Context())
+
+		// replace the body with the new data
+		newReq.ContentLength = newReqForBody.ContentLength
+		newReq.Body = newReqForBody.Body
+		newReq.GetBody = 
newReqForBody.GetBody + + return rt.RoundTrip(newReq) + }) + } + + cc := restclient.CopyConfig(config) + cc.Wrap(f) + return cc +} + +type roundTripperFunc func(req *http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} diff --git a/internal/ownerref/ownerref.go b/internal/ownerref/ownerref.go new file mode 100644 index 00000000..1afac4b7 --- /dev/null +++ b/internal/ownerref/ownerref.go @@ -0,0 +1,39 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package ownerref + +import ( + "net/http" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "go.pinniped.dev/internal/kubeclient" +) + +func New(ref metav1.OwnerReference) kubeclient.Middleware { + return ownerRefMiddleware(ref) +} + +var _ kubeclient.Middleware = ownerRefMiddleware(metav1.OwnerReference{}) + +type ownerRefMiddleware metav1.OwnerReference + +func (o ownerRefMiddleware) Handles(httpMethod string) bool { + return httpMethod == http.MethodPost // only handle create requests +} + +// TODO this func assumes all objects are namespace scoped and are in the same namespace. +// i.e. it assumes all objects are safe to set an owner ref on +// i.e. 
the owner could be namespace scoped and thus cannot own a cluster scoped object +// this could be fixed by using a rest mapper to confirm the REST scoping +// or we could always use an owner ref to a cluster scoped object +func (o ownerRefMiddleware) Mutate(obj metav1.Object) (mutated bool) { + // we only want to set the owner ref on create and when one is not already present + if len(obj.GetOwnerReferences()) != 0 { + return false + } + + obj.SetOwnerReferences([]metav1.OwnerReference{metav1.OwnerReference(o)}) + return true +} diff --git a/internal/ownerref/ownerref_test.go b/internal/ownerref/ownerref_test.go new file mode 100644 index 00000000..7a12efa2 --- /dev/null +++ b/internal/ownerref/ownerref_test.go @@ -0,0 +1,142 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package ownerref + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestOwnerReferenceMiddleware(t *testing.T) { + ref1 := metav1.OwnerReference{ + Name: "earth", + UID: "0x11", + } + ref2 := metav1.OwnerReference{ + Name: "mars", + UID: "0x12", + } + ref3 := metav1.OwnerReference{ + Name: "sun", + UID: "0x13", + } + + secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "twizzlers"}} + configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "pandas"}} + + secretWithOwner := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "twizzlers", OwnerReferences: []metav1.OwnerReference{ref3}}} + configMapWithOwner := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "pandas", OwnerReferences: []metav1.OwnerReference{ref3}}} + + type args struct { + ref metav1.OwnerReference + httpMethod string + obj metav1.Object + } + tests := []struct { + name string + args args + wantHandles, wantMutates bool + wantObj metav1.Object + }{ + { + 
name: "on update", + args: args{ + ref: ref1, + httpMethod: http.MethodPut, + obj: secret.DeepCopy(), + }, + wantHandles: false, + wantMutates: false, + wantObj: nil, + }, + { + name: "on create", + args: args{ + ref: ref1, + httpMethod: http.MethodPost, + obj: secret.DeepCopy(), + }, + wantHandles: true, + wantMutates: true, + wantObj: withOwnerRef(t, secret, ref1), + }, + { + name: "on create config map", + args: args{ + ref: ref2, + httpMethod: http.MethodPost, + obj: configMap.DeepCopy(), + }, + wantHandles: true, + wantMutates: true, + wantObj: withOwnerRef(t, configMap, ref2), + }, + { + name: "on create with pre-existing ref", + args: args{ + ref: ref1, + httpMethod: http.MethodPost, + obj: secretWithOwner.DeepCopy(), + }, + wantHandles: true, + wantMutates: false, + wantObj: nil, + }, + { + name: "on create with pre-existing ref config map", + args: args{ + ref: ref2, + httpMethod: http.MethodPost, + obj: configMapWithOwner.DeepCopy(), + }, + wantHandles: true, + wantMutates: false, + wantObj: nil, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + middleware := New(tt.args.ref) + + handles := middleware.Handles(tt.args.httpMethod) + require.Equal(t, tt.wantHandles, handles) + + if !handles { + return + } + + orig := tt.args.obj.(runtime.Object).DeepCopyObject() + + mutates := middleware.Mutate(tt.args.obj) + require.Equal(t, tt.wantMutates, mutates) + + if mutates { + require.NotEqual(t, orig, tt.args.obj) + require.Equal(t, tt.wantObj, tt.args.obj) + } else { + require.Equal(t, orig, tt.args.obj) + } + }) + } +} + +func withOwnerRef(t *testing.T, obj runtime.Object, ref metav1.OwnerReference) metav1.Object { + t.Helper() + + obj = obj.DeepCopyObject() + accessor, err := meta.Accessor(obj) + require.NoError(t, err) + + require.Len(t, accessor.GetOwnerReferences(), 0) + accessor.SetOwnerReferences([]metav1.OwnerReference{ref}) + + return accessor +} diff --git a/pkg/conciergeclient/conciergeclient.go 
b/pkg/conciergeclient/conciergeclient.go index 736ee03c..369e16a8 100644 --- a/pkg/conciergeclient/conciergeclient.go +++ b/pkg/conciergeclient/conciergeclient.go @@ -22,6 +22,7 @@ import ( loginv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/login/v1alpha1" conciergeclientset "go.pinniped.dev/generated/1.19/client/concierge/clientset/versioned" "go.pinniped.dev/internal/constable" + "go.pinniped.dev/internal/kubeclient" ) // ErrLoginFailed is returned by Client.ExchangeToken when the concierge server rejects the login request for any reason. @@ -150,7 +151,11 @@ func (c *Client) clientset() (conciergeclientset.Interface, error) { if err != nil { return nil, err } - return conciergeclientset.NewForConfig(cfg) + client, err := kubeclient.New(kubeclient.WithConfig(cfg)) + if err != nil { + return nil, err + } + return client.PinnipedConcierge, nil } // ExchangeToken performs a TokenCredentialRequest against the Pinniped concierge and returns the result as an ExecCredential. diff --git a/test/integration/kubeclient_test.go b/test/integration/kubeclient_test.go new file mode 100644 index 00000000..59f5069f --- /dev/null +++ b/test/integration/kubeclient_test.go @@ -0,0 +1,259 @@ +// Copyright 2021 the Pinniped contributors. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package integration + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + + conciergeconfigv1alpha1 "go.pinniped.dev/generated/1.19/apis/concierge/config/v1alpha1" + supervisorconfigv1alpha1 "go.pinniped.dev/generated/1.19/apis/supervisor/config/v1alpha1" + "go.pinniped.dev/internal/kubeclient" + "go.pinniped.dev/internal/ownerref" + "go.pinniped.dev/test/library" +) + +func TestKubeClientOwnerRef(t *testing.T) { + env := library.IntegrationEnv(t) + + regularClient := library.NewClientset(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + namespaces := regularClient.CoreV1().Namespaces() + + namespace, err := namespaces.Create( + ctx, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "test-owner-ref-"}}, + metav1.CreateOptions{}, + ) + require.NoError(t, err) + + defer func() { + if t.Failed() { + return + } + err := namespaces.Delete(ctx, namespace.Name, metav1.DeleteOptions{}) + require.NoError(t, err) + }() + + // create something that we can point to + parentSecret, err := regularClient.CoreV1().Secrets(namespace.Name).Create( + ctx, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "parent-", + OwnerReferences: nil, // no owner refs set + }, + Data: map[string][]byte{"A": []byte("B")}, + }, + metav1.CreateOptions{}, + ) + require.NoError(t, err) + require.Len(t, parentSecret.OwnerReferences, 0) + + // create a client that should set an owner ref back to parent on create + ref := metav1.OwnerReference{ + APIVersion: "v1", + Kind: "Secret", + Name: parentSecret.Name, + UID: parentSecret.UID, + } + ownerRefClient, err := kubeclient.New( + kubeclient.WithMiddleware(ownerref.New(ref)), + 
kubeclient.WithConfig(library.NewClientConfig(t)),
+	)
+	require.NoError(t, err)
+
+	ownerRefSecrets := ownerRefClient.Kubernetes.CoreV1().Secrets(namespace.Name)
+
+	// we expect this secret to have the owner ref set even though we did not set it explicitly
+	childSecret, err := ownerRefSecrets.Create(
+		ctx,
+		&corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName:    "child-",
+				OwnerReferences: nil, // no owner refs set
+			},
+			Data: map[string][]byte{"C": []byte("D")},
+		},
+		metav1.CreateOptions{},
+	)
+	require.NoError(t, err)
+	hasOwnerRef(t, childSecret, ref)
+
+	preexistingRef := *ref.DeepCopy()
+	preexistingRef.Name = "different"
+	preexistingRef.UID = "different"
+
+	// we expect this secret to keep the owner ref that it was created with
+	otherSecret, err := ownerRefSecrets.Create(
+		ctx,
+		&corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				GenerateName:    "child-",
+				OwnerReferences: []metav1.OwnerReference{preexistingRef}, // owner ref set explicitly
+			},
+			Data: map[string][]byte{"C": []byte("D")},
+		},
+		metav1.CreateOptions{},
+	)
+	require.NoError(t, err)
+	hasOwnerRef(t, otherSecret, preexistingRef)
+	require.NotEqual(t, ref, preexistingRef)
+	// the owner referenced by this secret does not exist so the secret should be immediately deleted
+	isEventuallyDeleted(t, func() error {
+		_, err := ownerRefSecrets.Get(ctx, otherSecret.Name, metav1.GetOptions{})
+		return err
+	})
+
+	// we expect no owner ref to be set on update
+	parentSecretUpdate := parentSecret.DeepCopy()
+	parentSecretUpdate.Data = map[string][]byte{"E": []byte("F ")}
+	updatedParentSecret, err := ownerRefSecrets.Update(ctx, parentSecretUpdate, metav1.UpdateOptions{})
+	require.NoError(t, err)
+	require.Equal(t, parentSecret.UID, updatedParentSecret.UID)
+	require.NotEqual(t, parentSecret.ResourceVersion, updatedParentSecret.ResourceVersion)
+	require.Len(t, updatedParentSecret.OwnerReferences, 0)
+
+	// delete the parent object
+	err = ownerRefSecrets.Delete(ctx, parentSecret.Name, metav1.DeleteOptions{})
+	
require.NoError(t, err) + + // the child object should be cleaned up on its own + isEventuallyDeleted(t, func() error { + _, err := ownerRefSecrets.Get(ctx, childSecret.Name, metav1.GetOptions{}) + return err + }) + + // sanity check API service client + apiService, err := ownerRefClient.Aggregation.ApiregistrationV1().APIServices().Create( + ctx, + &apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "v1.pandas.dev", + OwnerReferences: nil, // no owner refs set + }, + Spec: apiregistrationv1.APIServiceSpec{ + Version: "v1", + Group: "pandas.dev", + GroupPriorityMinimum: 10_000, + VersionPriority: 500, + }, + }, + metav1.CreateOptions{}, + ) + require.NoError(t, err) + hasOwnerRef(t, apiService, ref) + // this owner ref is invalid for an API service so it should be immediately deleted + isEventuallyDeleted(t, func() error { + _, err := ownerRefClient.Aggregation.ApiregistrationV1().APIServices().Get(ctx, apiService.Name, metav1.GetOptions{}) + return err + }) + + // sanity check concierge client + credentialIssuer, err := ownerRefClient.PinnipedConcierge.ConfigV1alpha1().CredentialIssuers(namespace.Name).Create( + ctx, + &conciergeconfigv1alpha1.CredentialIssuer{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "owner-ref-test-", + OwnerReferences: nil, // no owner refs set + }, + Status: conciergeconfigv1alpha1.CredentialIssuerStatus{ + Strategies: []conciergeconfigv1alpha1.CredentialIssuerStrategy{}, + }, + }, + metav1.CreateOptions{}, + ) + require.NoError(t, err) + hasOwnerRef(t, credentialIssuer, ref) + // this owner has already been deleted so the cred issuer should be immediately deleted + isEventuallyDeleted(t, func() error { + _, err := ownerRefClient.PinnipedConcierge.ConfigV1alpha1().CredentialIssuers(namespace.Name).Get(ctx, credentialIssuer.Name, metav1.GetOptions{}) + return err + }) + + // sanity check supervisor client + federationDomain, err := 
ownerRefClient.PinnipedSupervisor.ConfigV1alpha1().FederationDomains(namespace.Name).Create( + ctx, + &supervisorconfigv1alpha1.FederationDomain{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "owner-ref-test-", + OwnerReferences: nil, // no owner refs set + }, + Spec: supervisorconfigv1alpha1.FederationDomainSpec{ + Issuer: "https://pandas.dev", + }, + }, + metav1.CreateOptions{}, + ) + require.NoError(t, err) + hasOwnerRef(t, federationDomain, ref) + // this owner has already been deleted so the federation domain should be immediately deleted + isEventuallyDeleted(t, func() error { + _, err := ownerRefClient.PinnipedSupervisor.ConfigV1alpha1().FederationDomains(namespace.Name).Get(ctx, federationDomain.Name, metav1.GetOptions{}) + return err + }) + + // check some well-known, always created secrets to make sure they have an owner ref back to their deployment + + dref := metav1.OwnerReference{} + dref.APIVersion, dref.Kind = appsv1.SchemeGroupVersion.WithKind("Deployment").ToAPIVersionAndKind() + + supervisorDeployment, err := ownerRefClient.Kubernetes.AppsV1().Deployments(env.SupervisorNamespace).Get(ctx, env.SupervisorAppName, metav1.GetOptions{}) + require.NoError(t, err) + + supervisorKey, err := ownerRefClient.Kubernetes.CoreV1().Secrets(env.SupervisorNamespace).Get(ctx, env.SupervisorAppName+"-key", metav1.GetOptions{}) + require.NoError(t, err) + + supervisorDref := *dref.DeepCopy() + supervisorDref.Name = env.SupervisorAppName + supervisorDref.UID = supervisorDeployment.UID + hasOwnerRef(t, supervisorKey, supervisorDref) + + conciergeDeployment, err := ownerRefClient.Kubernetes.AppsV1().Deployments(env.ConciergeNamespace).Get(ctx, env.ConciergeAppName, metav1.GetOptions{}) + require.NoError(t, err) + + conciergeCert, err := ownerRefClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, env.ConciergeAppName+"-api-tls-serving-certificate", metav1.GetOptions{}) + require.NoError(t, err) + + conciergeDref := *dref.DeepCopy() + conciergeDref.Name 
= env.ConciergeAppName + conciergeDref.UID = conciergeDeployment.UID + hasOwnerRef(t, conciergeCert, conciergeDref) +} + +func hasOwnerRef(t *testing.T, obj metav1.Object, ref metav1.OwnerReference) { + t.Helper() + + ownerReferences := obj.GetOwnerReferences() + require.Len(t, ownerReferences, 1) + require.Equal(t, ref, ownerReferences[0]) +} + +func isEventuallyDeleted(t *testing.T, f func() error) { + t.Helper() + + require.Eventually(t, func() bool { + err := f() + switch { + case err == nil: + return false + case errors.IsNotFound(err): + return true + default: + require.NoError(t, err) + return false + } + }, time.Minute, time.Second) +}