Merge branch 'main' of github.com:vmware-tanzu/pinniped into kubernetes-1.20
This commit is contained in: commit 6f04613aed
@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package main provides a authentication webhook program.
@ -31,13 +31,13 @@ import (
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/klog/v2"

"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/controller/apicerts"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/plog"
)

@ -279,21 +279,6 @@ func respondWithAuthenticated(
}
}

func newK8sClient() (kubernetes.Interface, error) {
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}

// Connect to the core Kubernetes API.
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}

return kubeClient, nil
}

func startControllers(
ctx context.Context,
dynamicCertProvider dynamiccert.Provider,
@ -359,20 +344,20 @@ func run() error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

kubeClient, err := newK8sClient()
client, err := kubeclient.New()
if err != nil {
return fmt.Errorf("cannot create k8s client: %w", err)
}

kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
kubeClient,
client.Kubernetes,
defaultResyncInterval,
kubeinformers.WithNamespace(namespace),
)

dynamicCertProvider := dynamiccert.New()

startControllers(ctx, dynamicCertProvider, kubeClient, kubeInformers)
startControllers(ctx, dynamicCertProvider, client.Kubernetes, kubeInformers)
plog.Debug("controllers are ready")

//nolint: gosec // Intentionally binding to all network interfaces.
@ -17,13 +17,11 @@ import (

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/version"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/logs"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
@ -37,7 +35,9 @@ import (
"go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatcher"
"go.pinniped.dev/internal/controller/supervisorstorage"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/deploymentref"
"go.pinniped.dev/internal/downward"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/oidc/jwks"
"go.pinniped.dev/internal/oidc/provider"
"go.pinniped.dev/internal/oidc/provider/manager"
@ -252,81 +252,31 @@ func startControllers(
go controllerManager.Start(ctx)
}

func getSupervisorDeployment(
ctx context.Context,
kubeClient kubernetes.Interface,
podInfo *downward.PodInfo,
) (*appsv1.Deployment, error) {
ns := podInfo.Namespace

pod, err := kubeClient.CoreV1().Pods(ns).Get(ctx, podInfo.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not get pod: %w", err)
}

podOwner := metav1.GetControllerOf(pod)
if podOwner == nil {
return nil, fmt.Errorf("pod %s/%s is missing owner", ns, podInfo.Name)
}

rs, err := kubeClient.AppsV1().ReplicaSets(ns).Get(ctx, podOwner.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not get replicaset: %w", err)
}

rsOwner := metav1.GetControllerOf(rs)
if rsOwner == nil {
return nil, fmt.Errorf("replicaset %s/%s is missing owner", ns, podInfo.Name)
}

d, err := kubeClient.AppsV1().Deployments(ns).Get(ctx, rsOwner.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not get deployment: %w", err)
}

return d, nil
}

func newClients() (kubernetes.Interface, pinnipedclientset.Interface, error) {
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}

// Connect to the core Kubernetes API.
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {
return nil, nil, fmt.Errorf("could not create kube client: %w", err)
}

// Connect to the Pinniped API.
pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig)
if err != nil {
return nil, nil, fmt.Errorf("could not create pinniped client: %w", err)
}

return kubeClient, pinnipedClient, nil
}

func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
serverInstallationNamespace := podInfo.Namespace

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

kubeClient, pinnipedClient, err := newClients()
// TODO remove code that relies on supervisorDeployment directly
dref, supervisorDeployment, err := deploymentref.New(podInfo)
if err != nil {
return fmt.Errorf("cannot create deployment ref: %w", err)
}

client, err := kubeclient.New(dref)
if err != nil {
return fmt.Errorf("cannot create k8s client: %w", err)
}

kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
kubeClient,
client.Kubernetes,
defaultResyncInterval,
kubeinformers.WithNamespace(serverInstallationNamespace),
)

pinnipedInformers := pinnipedinformers.NewSharedInformerFactoryWithOptions(
pinnipedClient,
client.PinnipedSupervisor,
defaultResyncInterval,
pinnipedinformers.WithNamespace(serverInstallationNamespace),
)
@ -348,14 +298,9 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
dynamicJWKSProvider,
dynamicUpstreamIDPProvider,
&secretCache,
kubeClient.CoreV1().Secrets(serverInstallationNamespace),
client.Kubernetes.CoreV1().Secrets(serverInstallationNamespace),
)

supervisorDeployment, err := getSupervisorDeployment(ctx, kubeClient, podInfo)
if err != nil {
return fmt.Errorf("cannot get supervisor deployment: %w", err)
}

startControllers(
ctx,
cfg,
@ -365,8 +310,8 @@ func run(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
dynamicUpstreamIDPProvider,
&secretCache,
supervisorDeployment,
kubeClient,
pinnipedClient,
client.Kubernetes,
client.PinnipedSupervisor,
kubeInformers,
pinnipedInformers,
)
@ -26,6 +26,7 @@ import (

conciergev1alpha1 "go.pinniped.dev/generated/1.20/apis/concierge/authentication/v1alpha1"
conciergeclientset "go.pinniped.dev/generated/1.20/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/kubeclient"
)

type kubeconfigDeps struct {
@ -41,7 +42,11 @@ func kubeconfigRealDeps() kubeconfigDeps {
if err != nil {
return nil, err
}
return conciergeclientset.NewForConfig(restConfig)
client, err := kubeclient.New(kubeclient.WithConfig(restConfig))
if err != nil {
return nil, err
}
return client.PinnipedConcierge, nil
},
}
}
@ -1,4 +1,4 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
@ -148,6 +148,9 @@ spec:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "name"
fieldRef:
fieldPath: metadata.name
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
@ -1,4 +1,4 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")
@ -69,6 +69,9 @@ rules:
- apiGroups: [ config.concierge.pinniped.dev, authentication.concierge.pinniped.dev ]
resources: [ "*" ]
verbs: [ create, get, list, update, watch ]
- apiGroups: [apps]
resources: [replicasets,deployments]
verbs: [get]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
generated/1.20/apis/go.mod (4 lines changed)
@ -4,6 +4,6 @@ module go.pinniped.dev/generated/1.20/apis
go 1.13

require (
k8s.io/api v0.20.0
k8s.io/apimachinery v0.20.0
k8s.io/api v0.20.1
k8s.io/apimachinery v0.20.1
)
generated/1.20/apis/go.sum (8 lines changed)
@ -180,10 +180,10 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
generated/1.20/client/go.mod (4 lines changed)
@ -6,8 +6,8 @@ go 1.13
require (
github.com/go-openapi/spec v0.19.9
go.pinniped.dev/generated/1.20/apis v0.0.0-00010101000000-000000000000
k8s.io/apimachinery v0.20.0
k8s.io/client-go v0.20.0
k8s.io/apimachinery v0.20.1
k8s.io/client-go v0.20.1
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
)
generated/1.20/client/go.sum (12 lines changed)
@ -415,12 +415,12 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0=
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/client-go v0.20.1 h1:Qquik0xNFbK9aUG92pxHYsyfea5/RPO9o9bSywNor+M=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
go.sum (6 lines changed)
@ -1550,18 +1550,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apiserver v0.20.1 h1:yEqdkxlnQbxi/3e74cp0X16h140fpvPrNnNRAJBDuBk=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0=
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
k8s.io/client-go v0.20.1 h1:Qquik0xNFbK9aUG92pxHYsyfea5/RPO9o9bSywNor+M=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
@ -1,4 +1,4 @@
1.20.0
1.20.1
1.19.5
1.18.2
1.17.11
@ -17,6 +17,7 @@ import (

"go.pinniped.dev/generated/1.20/apis/concierge/login/v1alpha1"
"go.pinniped.dev/generated/1.20/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/kubeclient"
)

// ErrLoginFailed is returned by ExchangeToken when the server rejects the login request.
@ -84,5 +85,9 @@ func getClient(apiEndpoint string, caBundle string) (versioned.Interface, error)
if err != nil {
return nil, err
}
return versioned.NewForConfig(cfg)
client, err := kubeclient.New(kubeclient.WithConfig(cfg))
if err != nil {
return nil, err
}
return client.PinnipedConcierge, nil
}
@ -105,7 +105,6 @@ func (a *App) runServer(ctx context.Context) error {
if err != nil {
return fmt.Errorf("could not read pod metadata: %w", err)
}
serverInstallationNamespace := podInfo.Namespace

// Initialize the cache of active authenticators.
authenticators := authncache.New()
@ -125,7 +124,7 @@ func (a *App) runServer(ctx context.Context) error {
// post start hook of the aggregated API server.
startControllersFunc, err := controllermanager.PrepareControllers(
&controllermanager.Config{
ServerInstallationNamespace: serverInstallationNamespace,
ServerInstallationInfo: podInfo,
NamesConfig: &cfg.NamesConfig,
Labels: cfg.Labels,
KubeCertAgentConfig: &cfg.KubeCertAgentConfig,
@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package secretgenerator provides a supervisorSecretsController that can ensure existence of a generated secret.
@ -13,7 +13,6 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
@ -200,13 +199,7 @@ func generateSecret(namespace, name string, labels map[string]string, secretData
return nil, err
}

deploymentGVK := schema.GroupVersionKind{
Group: appsv1.SchemeGroupVersion.Group,
Version: appsv1.SchemeGroupVersion.Version,
Kind: "Deployment",
}

isController := false
deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment")

return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@ -218,7 +211,6 @@ func generateSecret(namespace, name string, labels map[string]string, secretData
Kind: deploymentGVK.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
Controller: &isController,
},
},
Labels: labels,
@ -268,7 +268,6 @@ func TestSupervisorSecretsControllerSync(t *testing.T) {
generatedSymmetricKey = []byte("some-neato-32-byte-generated-key")
otherGeneratedSymmetricKey = []byte("some-funio-32-byte-generated-key")

isController = false
generatedSecret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: generatedSecretName,
@ -279,7 +278,6 @@ func TestSupervisorSecretsControllerSync(t *testing.T) {
Kind: ownerGVK.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
Controller: &isController,
},
},
Labels: labels,
@ -300,7 +298,6 @@ func TestSupervisorSecretsControllerSync(t *testing.T) {
Kind: ownerGVK.Kind,
Name: owner.GetName(),
UID: owner.GetUID(),
Controller: &isController,
},
},
Labels: labels,
@ -10,14 +10,10 @@ import (
"fmt"
"time"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
k8sinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/klog/v2/klogr"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"

loginv1alpha1 "go.pinniped.dev/generated/1.20/apis/concierge/login/v1alpha1"
pinnipedclientset "go.pinniped.dev/generated/1.20/client/concierge/clientset/versioned"
@ -31,7 +27,10 @@ import (
"go.pinniped.dev/internal/controller/issuerconfig"
"go.pinniped.dev/internal/controller/kubecertagent"
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/deploymentref"
"go.pinniped.dev/internal/downward"
"go.pinniped.dev/internal/dynamiccert"
"go.pinniped.dev/internal/kubeclient"
)

const (
@ -43,8 +42,8 @@ const (
//
// It is used to inject parameters into PrepareControllers.
type Config struct {
// ServerInstallationNamespace provides the namespace in which Pinniped is deployed.
ServerInstallationNamespace string
// ServerInstallationInfo provides the name of the pod in which Pinniped is running and the namespace in which Pinniped is deployed.
ServerInstallationInfo *downward.PodInfo

// NamesConfig comes from the Pinniped config API (see api.Config). It specifies how Kubernetes
// objects should be named.
@ -81,29 +80,29 @@ type Config struct {
// Prepare the controllers and their informers and return a function that will start them when called.
//nolint:funlen // Eh, fair, it is a really long function...but it is wiring the world...so...
func PrepareControllers(c *Config) (func(ctx context.Context), error) {
// Create k8s clients.
kubeConfig, err := createConfig()
dref, _, err := deploymentref.New(c.ServerInstallationInfo)
if err != nil {
return nil, fmt.Errorf("could not create config for the controllers: %w", err)
return nil, fmt.Errorf("cannot create deployment ref: %w", err)
}
k8sClient, aggregatorClient, pinnipedClient, err := createClients(kubeConfig)

client, err := kubeclient.New(dref)
if err != nil {
return nil, fmt.Errorf("could not create clients for the controllers: %w", err)
}

// Create informers. Don't forget to make sure they get started in the function returned below.
informers := createInformers(c.ServerInstallationNamespace, k8sClient, pinnipedClient)
informers := createInformers(c.ServerInstallationInfo.Namespace, client.Kubernetes, client.PinnipedConcierge)

// Configuration for the kubecertagent controllers created below.
agentPodConfig := &kubecertagent.AgentPodConfig{
Namespace: c.ServerInstallationNamespace,
Namespace: c.ServerInstallationInfo.Namespace,
ContainerImage: *c.KubeCertAgentConfig.Image,
PodNamePrefix: *c.KubeCertAgentConfig.NamePrefix,
ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets,
AdditionalLabels: c.Labels,
}
credentialIssuerLocationConfig := &kubecertagent.CredentialIssuerLocationConfig{
Namespace: c.ServerInstallationNamespace,
Namespace: c.ServerInstallationInfo.Namespace,
Name: c.NamesConfig.CredentialIssuer,
}

@ -115,11 +114,11 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
// CredentialIssuer resource and keeping that information up to date.
WithController(
issuerconfig.NewKubeConfigInfoPublisherController(
c.ServerInstallationNamespace,
c.ServerInstallationInfo.Namespace,
c.NamesConfig.CredentialIssuer,
c.Labels,
c.DiscoveryURLOverride,
pinnipedClient,
client.PinnipedConcierge,
informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
controllerlib.WithInformer,
),
@ -129,10 +128,10 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
// API certs controllers are responsible for managing the TLS certificates used to serve Pinniped's API.
WithController(
apicerts.NewCertsManagerController(
c.ServerInstallationNamespace,
c.ServerInstallationInfo.Namespace,
c.NamesConfig.ServingCertificateSecret,
c.Labels,
k8sClient,
client.Kubernetes,
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
controllerlib.WithInitialEvent,
@ -144,10 +143,10 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
).
WithController(
apicerts.NewAPIServiceUpdaterController(
c.ServerInstallationNamespace,
c.ServerInstallationInfo.Namespace,
c.NamesConfig.ServingCertificateSecret,
loginv1alpha1.SchemeGroupVersion.Version+"."+loginv1alpha1.GroupName,
aggregatorClient,
client.Aggregation,
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
),
@ -155,7 +154,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
).
WithController(
apicerts.NewCertsObserverController(
c.ServerInstallationNamespace,
c.ServerInstallationInfo.Namespace,
c.NamesConfig.ServingCertificateSecret,
c.DynamicServingCertProvider,
informers.installationNamespaceK8s.Core().V1().Secrets(),
@ -165,9 +164,9 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
).
WithController(
apicerts.NewCertsExpirerController(
c.ServerInstallationNamespace,
c.ServerInstallationInfo.Namespace,
c.NamesConfig.ServingCertificateSecret,
k8sClient,
client.Kubernetes,
informers.installationNamespaceK8s.Core().V1().Secrets(),
controllerlib.WithInformer,
c.ServingCertRenewBefore,
@ -183,8 +182,8 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
credentialIssuerLocationConfig,
c.Labels,
clock.RealClock{},
k8sClient,
pinnipedClient,
client.Kubernetes,
client.PinnipedConcierge,
informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
@ -197,8 +196,8 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
agentPodConfig,
credentialIssuerLocationConfig,
clock.RealClock{},
k8sClient,
pinnipedClient,
client.Kubernetes,
client.PinnipedConcierge,
informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
@ -209,8 +208,8 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
kubecertagent.NewExecerController(
credentialIssuerLocationConfig,
c.DynamicSigningCertProvider,
kubecertagent.NewPodCommandExecutor(kubeConfig, k8sClient),
pinnipedClient,
kubecertagent.NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
client.PinnipedConcierge,
clock.RealClock{},
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
@ -220,7 +219,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
WithController(
kubecertagent.NewDeleterController(
agentPodConfig,
k8sClient,
client.Kubernetes,
informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
informers.installationNamespaceK8s.Core().V1().Pods(),
controllerlib.WithInformer,
@ -263,51 +262,6 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
}, nil
}

// Create the rest config that will be used by the clients for the controllers.
func createConfig() (*rest.Config, error) {
// Load the Kubernetes client configuration.
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}

return kubeConfig, nil
}

// Create the k8s clients that will be used by the controllers.
func createClients(kubeConfig *rest.Config) (
k8sClient *kubernetes.Clientset,
aggregatorClient *aggregatorclient.Clientset,
pinnipedClient *pinnipedclientset.Clientset,
err error,
) {
// explicitly use protobuf when talking to built-in kube APIs
protoKubeConfig := createProtoKubeConfig(kubeConfig)

// Connect to the core Kubernetes API.
k8sClient, err = kubernetes.NewForConfig(protoKubeConfig)
if err != nil {
return nil, nil, nil, fmt.Errorf("could not initialize Kubernetes client: %w", err)
}

// Connect to the Kubernetes aggregation API.
aggregatorClient, err = aggregatorclient.NewForConfig(protoKubeConfig)
if err != nil {
return nil, nil, nil, fmt.Errorf("could not initialize Kubernetes client: %w", err)
}

// Connect to the pinniped API.
// I think we can't use protobuf encoding here because we are using CRDs
// (for which protobuf encoding is not supported).
pinnipedClient, err = pinnipedclientset.NewForConfig(kubeConfig)
if err != nil {
return nil, nil, nil, fmt.Errorf("could not initialize pinniped client: %w", err)
}

//nolint: nakedret // Short function. Makes the order of return values more clear.
return
}

type informers struct {
kubePublicNamespaceK8s k8sinformers.SharedInformerFactory
kubeSystemNamespaceK8s k8sinformers.SharedInformerFactory
@ -318,8 +272,8 @@ type informers struct {
// Create the informers that will be used by the controllers.
func createInformers(
serverInstallationNamespace string,
k8sClient *kubernetes.Clientset,
pinnipedClient *pinnipedclientset.Clientset,
k8sClient kubernetes.Interface,
pinnipedClient pinnipedclientset.Interface,
) *informers {
return &informers{
kubePublicNamespaceK8s: k8sinformers.NewSharedInformerFactoryWithOptions(
@ -356,13 +310,3 @@ func (i *informers) startAndWaitForSync(ctx context.Context) {
i.installationNamespaceK8s.WaitForCacheSync(ctx.Done())
i.installationNamespacePinniped.WaitForCacheSync(ctx.Done())
}

// Returns a copy of the input config with the ContentConfig set to use protobuf.
// Do not use this config to communicate with any CRD based APIs.
func createProtoKubeConfig(kubeConfig *restclient.Config) *restclient.Config {
protoKubeConfig := restclient.CopyConfig(kubeConfig)
const protoThenJSON = runtime.ContentTypeProtobuf + "," + runtime.ContentTypeJSON
protoKubeConfig.AcceptContentTypes = protoThenJSON
protoKubeConfig.ContentType = runtime.ContentTypeProtobuf
return protoKubeConfig
}
internal/deploymentref/deploymentref.go (new file, 72 lines)
@ -0,0 +1,72 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package deploymentref

import (
"context"
"fmt"
"time"

appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"

"go.pinniped.dev/internal/downward"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/ownerref"
)

func New(podInfo *downward.PodInfo) (kubeclient.Option, *appsv1.Deployment, error) {
tempClient, err := kubeclient.New()
if err != nil {
return nil, nil, fmt.Errorf("cannot create temp client: %w", err)
}

deployment, err := getDeployment(tempClient.Kubernetes, podInfo)
if err != nil {
return nil, nil, fmt.Errorf("cannot get deployment: %w", err)
}

ref := metav1.OwnerReference{
Name: deployment.Name,
UID: deployment.UID,
}
ref.APIVersion, ref.Kind = appsv1.SchemeGroupVersion.WithKind("Deployment").ToAPIVersionAndKind()

return kubeclient.WithMiddleware(ownerref.New(ref)), deployment, nil
}

func getDeployment(kubeClient kubernetes.Interface, podInfo *downward.PodInfo) (*appsv1.Deployment, error) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

ns := podInfo.Namespace

pod, err := kubeClient.CoreV1().Pods(ns).Get(ctx, podInfo.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not get pod: %w", err)
}

podOwner := metav1.GetControllerOf(pod)
if podOwner == nil {
return nil, fmt.Errorf("pod %s/%s is missing owner", ns, podInfo.Name)
}

rs, err := kubeClient.AppsV1().ReplicaSets(ns).Get(ctx, podOwner.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not get replicaset: %w", err)
}

rsOwner := metav1.GetControllerOf(rs)
if rsOwner == nil {
return nil, fmt.Errorf("replicaset %s/%s is missing owner", ns, podInfo.Name)
}

d, err := kubeClient.AppsV1().Deployments(ns).Get(ctx, rsOwner.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not get deployment: %w", err)
}

return d, nil
}
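Taken together, deploymentref resolves the Deployment that owns the current pod by walking pod → ReplicaSet → Deployment, and hands back a client option that stamps that Deployment as the owner of everything the client creates. A minimal sketch of how a server wires this up at startup, as the supervisor's run() above does (the helper name buildOwnedClient is illustrative, not part of this commit):

package main

import (
	"fmt"

	"go.pinniped.dev/internal/deploymentref"
	"go.pinniped.dev/internal/downward"
	"go.pinniped.dev/internal/kubeclient"
)

// buildOwnedClient resolves the owning Deployment once at startup and
// returns a client whose create requests carry an owner reference back to
// that Deployment, so created objects are garbage collected along with it.
func buildOwnedClient(podInfo *downward.PodInfo) (*kubeclient.Client, error) {
	dref, _, err := deploymentref.New(podInfo) // dref is a kubeclient.Option
	if err != nil {
		return nil, fmt.Errorf("cannot create deployment ref: %w", err)
	}
	return kubeclient.New(dref)
}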
internal/kubeclient/kubeclient.go (new file, 117 lines)
@ -0,0 +1,117 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubeclient

import (
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
kubescheme "k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
aggregatorclientscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"

pinnipedconciergeclientset "go.pinniped.dev/generated/1.20/client/concierge/clientset/versioned"
pinnipedconciergeclientsetscheme "go.pinniped.dev/generated/1.20/client/concierge/clientset/versioned/scheme"
pinnipedsupervisorclientset "go.pinniped.dev/generated/1.20/client/supervisor/clientset/versioned"
pinnipedsupervisorclientsetscheme "go.pinniped.dev/generated/1.20/client/supervisor/clientset/versioned/scheme"
)

type Client struct {
Kubernetes kubernetes.Interface
Aggregation aggregatorclient.Interface
PinnipedConcierge pinnipedconciergeclientset.Interface
PinnipedSupervisor pinnipedsupervisorclientset.Interface

JSONConfig, ProtoConfig *restclient.Config
}

// TODO expand this interface to address more complex use cases.
type Middleware interface {
Handles(httpMethod string) bool
Mutate(obj metav1.Object) (mutated bool)
}

func New(opts ...Option) (*Client, error) {
c := &clientConfig{}

for _, opt := range opts {
opt(c)
}

// default to assuming we are running in a pod with the service account token mounted
if c.config == nil {
inClusterConfig, err := restclient.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
}
WithConfig(inClusterConfig)(c) // make sure all writes to clientConfig flow through one code path
}

// explicitly use json when talking to CRD APIs
jsonKubeConfig := createJSONKubeConfig(c.config)

// explicitly use protobuf when talking to built-in kube APIs
protoKubeConfig := createProtoKubeConfig(c.config)

// Connect to the core Kubernetes API.
k8sClient, err := kubernetes.NewForConfig(configWithWrapper(protoKubeConfig, kubescheme.Codecs, c.middlewares))
if err != nil {
return nil, fmt.Errorf("could not initialize Kubernetes client: %w", err)
}

// Connect to the Kubernetes aggregation API.
aggregatorClient, err := aggregatorclient.NewForConfig(configWithWrapper(protoKubeConfig, aggregatorclientscheme.Codecs, c.middlewares))
if err != nil {
return nil, fmt.Errorf("could not initialize aggregation client: %w", err)
}

// Connect to the pinniped concierge API.
// We cannot use protobuf encoding here because we are using CRDs
// (for which protobuf encoding is not yet supported).
// TODO we should try to add protobuf support to TokenCredentialRequests since it is an aggregated API
pinnipedConciergeClient, err := pinnipedconciergeclientset.NewForConfig(configWithWrapper(jsonKubeConfig, pinnipedconciergeclientsetscheme.Codecs, c.middlewares))
if err != nil {
return nil, fmt.Errorf("could not initialize pinniped client: %w", err)
}

// Connect to the pinniped supervisor API.
// We cannot use protobuf encoding here because we are using CRDs
// (for which protobuf encoding is not yet supported).
pinnipedSupervisorClient, err := pinnipedsupervisorclientset.NewForConfig(configWithWrapper(jsonKubeConfig, pinnipedsupervisorclientsetscheme.Codecs, c.middlewares))
if err != nil {
return nil, fmt.Errorf("could not initialize pinniped client: %w", err)
}

return &Client{
Kubernetes: k8sClient,
Aggregation: aggregatorClient,
PinnipedConcierge: pinnipedConciergeClient,
PinnipedSupervisor: pinnipedSupervisorClient,

JSONConfig: jsonKubeConfig,
ProtoConfig: protoKubeConfig,
}, nil
}

// Returns a copy of the input config with the ContentConfig set to use json.
// Use this config to communicate with all CRD based APIs.
func createJSONKubeConfig(kubeConfig *restclient.Config) *restclient.Config {
jsonKubeConfig := restclient.CopyConfig(kubeConfig)
jsonKubeConfig.AcceptContentTypes = runtime.ContentTypeJSON
jsonKubeConfig.ContentType = runtime.ContentTypeJSON
return jsonKubeConfig
}

// Returns a copy of the input config with the ContentConfig set to use protobuf.
// Do not use this config to communicate with any CRD based APIs.
func createProtoKubeConfig(kubeConfig *restclient.Config) *restclient.Config {
protoKubeConfig := restclient.CopyConfig(kubeConfig)
const protoThenJSON = runtime.ContentTypeProtobuf + "," + runtime.ContentTypeJSON
protoKubeConfig.AcceptContentTypes = protoThenJSON
protoKubeConfig.ContentType = runtime.ContentTypeProtobuf
return protoKubeConfig
}
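As a usage sketch (the namespace literal is hypothetical; error handling trimmed to the essentials): New() with no options falls back to the in-cluster service account config, and the returned Client bundles all four clientsets, speaking protobuf to the built-in APIs and JSON to the CRD-backed Pinniped APIs:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"go.pinniped.dev/internal/kubeclient"
)

func listPods(ctx context.Context) error {
	client, err := kubeclient.New() // in-cluster config by default
	if err != nil {
		return fmt.Errorf("could not build client: %w", err)
	}

	// client.Kubernetes is a plain kubernetes.Interface using the protobuf config.
	pods, err := client.Kubernetes.CoreV1().Pods("pinniped-concierge").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
	return nil
}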
internal/kubeclient/option.go (new file, 25 lines)
@ -0,0 +1,25 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubeclient

import restclient "k8s.io/client-go/rest"

type Option func(*clientConfig)

type clientConfig struct {
config *restclient.Config
middlewares []Middleware
}

func WithConfig(config *restclient.Config) Option {
return func(c *clientConfig) {
c.config = config
}
}

func WithMiddleware(middleware Middleware) Option {
return func(c *clientConfig) {
c.middlewares = append(c.middlewares, middleware)
}
}
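These are plain functional options. Because clientConfig is unexported, new options must be defined inside this package; a hypothetical WithUserAgent option (not in this commit) sketches the shape. Note that options apply in order, so an option that edits c.config has to come after WithConfig:

package kubeclient

import restclient "k8s.io/client-go/rest"

// WithUserAgent is a hypothetical example option. It copies the config
// before editing it so the caller's rest.Config is never mutated.
func WithUserAgent(userAgent string) Option {
	return func(c *clientConfig) {
		if c.config == nil {
			return // no config yet; pass this option after WithConfig
		}
		c.config = restclient.CopyConfig(c.config)
		c.config.UserAgent = userAgent
	}
}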
internal/kubeclient/roundtrip.go (new file, 122 lines)
@ -0,0 +1,122 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubeclient

import (
"bytes"
"fmt"
"io/ioutil"
"net/http"

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
restclient "k8s.io/client-go/rest"
)

// TODO unit test

func configWithWrapper(config *restclient.Config, negotiatedSerializer runtime.NegotiatedSerializer, middlewares []Middleware) *restclient.Config {
// no need for any wrapping when we have no middleware to inject
if len(middlewares) == 0 {
return config
}

info, ok := runtime.SerializerInfoForMediaType(negotiatedSerializer.SupportedMediaTypes(), config.ContentType)
if !ok {
panic(fmt.Errorf("unknown content type: %s ", config.ContentType)) // static input, programmer error
}
serializer := info.Serializer // should perform no conversion

f := func(rt http.RoundTripper) http.RoundTripper {
return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
// ignore everything that has an unreadable body
if req.GetBody == nil {
return rt.RoundTrip(req)
}

var reqMiddlewares []Middleware
for _, middleware := range middlewares {
middleware := middleware
if middleware.Handles(req.Method) {
reqMiddlewares = append(reqMiddlewares, middleware)
}
}

// no middleware to handle this request
if len(reqMiddlewares) == 0 {
return rt.RoundTrip(req)
}

body, err := req.GetBody()
if err != nil {
return nil, fmt.Errorf("get body failed: %w", err)
}
defer body.Close()
data, err := ioutil.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("read body failed: %w", err)
}

// attempt to decode with no defaults or into specified, i.e. defer to the decoder
// this should result in the a straight decode with no conversion
obj, _, err := serializer.Decode(data, nil, nil)
if err != nil {
return nil, fmt.Errorf("body decode failed: %w", err)
}

accessor, err := meta.Accessor(obj)
if err != nil {
return rt.RoundTrip(req) // ignore everything that has no object meta for now
}

// run all the mutating operations
var reqMutated bool
for _, reqMiddleware := range reqMiddlewares {
mutated := reqMiddleware.Mutate(accessor)
reqMutated = mutated || reqMutated
}

// no mutation occurred, keep the original request
if !reqMutated {
return rt.RoundTrip(req)
}

// we plan on making a new request so make sure to close the original request's body
_ = req.Body.Close()

newData, err := runtime.Encode(serializer, obj)
if err != nil {
return nil, fmt.Errorf("new body encode failed: %w", err)
}

// TODO log newData at high loglevel similar to REST client

// simplest way to reuse the body creation logic
newReqForBody, err := http.NewRequest(req.Method, req.URL.String(), bytes.NewReader(newData))
if err != nil {
return nil, fmt.Errorf("failed to create new req for body: %w", err) // this should never happen
}

// shallow copy because we want to preserve all the headers and such but not mutate the original request
newReq := req.WithContext(req.Context())

// replace the body with the new data
newReq.ContentLength = newReqForBody.ContentLength
newReq.Body = newReqForBody.Body
newReq.GetBody = newReqForBody.GetBody

return rt.RoundTrip(newReq)
})
}

cc := restclient.CopyConfig(config)
cc.Wrap(f)
return cc
}

type roundTripperFunc func(req *http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) {
return f(req)
}
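The wrapper only rebuilds a request when some Middleware both claims the HTTP method (Handles) and actually changes the decoded object (Mutate returns true); otherwise the original request passes through untouched. A sketch of a second, hypothetical Middleware (not in this commit) that stamps a label onto every created object shows both halves of that contract:

package example

import (
	"net/http"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"go.pinniped.dev/internal/kubeclient"
)

var _ kubeclient.Middleware = labelMiddleware{}

type labelMiddleware struct{ key, value string }

// Handles filters requests by method before any body is decoded.
func (l labelMiddleware) Handles(httpMethod string) bool {
	return httpMethod == http.MethodPost
}

// Mutate edits the decoded object; returning true makes the round tripper
// re-encode the request body, returning false keeps the original request.
func (l labelMiddleware) Mutate(obj metav1.Object) bool {
	labels := obj.GetLabels()
	if labels[l.key] == l.value {
		return false
	}
	if labels == nil {
		labels = map[string]string{}
	}
	labels[l.key] = l.value
	obj.SetLabels(labels)
	return true
}

Such a middleware would be installed the same way as ownerref below, via kubeclient.New(kubeclient.WithMiddleware(labelMiddleware{key: "app", value: "pinniped"})).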
internal/ownerref/ownerref.go (new file, 39 lines)
@ -0,0 +1,39 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package ownerref

import (
"net/http"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"go.pinniped.dev/internal/kubeclient"
)

func New(ref metav1.OwnerReference) kubeclient.Middleware {
return ownerRefMiddleware(ref)
}

var _ kubeclient.Middleware = ownerRefMiddleware(metav1.OwnerReference{})

type ownerRefMiddleware metav1.OwnerReference

func (o ownerRefMiddleware) Handles(httpMethod string) bool {
return httpMethod == http.MethodPost // only handle create requests
}

// TODO this func assumes all objects are namespace scoped and are in the same namespace.
// i.e. it assumes all objects are safe to set an owner ref on
// i.e. the owner could be namespace scoped and thus cannot own a cluster scoped object
// this could be fixed by using a rest mapper to confirm the REST scoping
// or we could always use an owner ref to a cluster scoped object
func (o ownerRefMiddleware) Mutate(obj metav1.Object) (mutated bool) {
// we only want to set the owner ref on create and when one is not already present
if len(obj.GetOwnerReferences()) != 0 {
return false
}

obj.SetOwnerReferences([]metav1.OwnerReference{metav1.OwnerReference(o)})
return true
}
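Once a parent object is known, usage is a one-liner. A sketch, assuming (per the TODO above) that the parent is namespace-scoped and lives in the same namespace as everything the client will create; the unit test that follows and the integration test later in this commit exercise the same behavior:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"go.pinniped.dev/internal/kubeclient"
	"go.pinniped.dev/internal/ownerref"
)

// clientOwnedBy returns a client whose created objects point back at parent,
// so Kubernetes garbage collection deletes them when parent is deleted.
func clientOwnedBy(parent metav1.Object, apiVersion, kind string) (*kubeclient.Client, error) {
	ref := metav1.OwnerReference{
		APIVersion: apiVersion,
		Kind:       kind,
		Name:       parent.GetName(),
		UID:        parent.GetUID(),
	}
	return kubeclient.New(kubeclient.WithMiddleware(ownerref.New(ref)))
}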
internal/ownerref/ownerref_test.go (new file, 142 lines)
@ -0,0 +1,142 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package ownerref

import (
"net/http"
"testing"

"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)

func TestOwnerReferenceMiddleware(t *testing.T) {
ref1 := metav1.OwnerReference{
Name: "earth",
UID: "0x11",
}
ref2 := metav1.OwnerReference{
Name: "mars",
UID: "0x12",
}
ref3 := metav1.OwnerReference{
Name: "sun",
UID: "0x13",
}

secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "twizzlers"}}
configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "pandas"}}

secretWithOwner := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "twizzlers", OwnerReferences: []metav1.OwnerReference{ref3}}}
configMapWithOwner := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "pandas", OwnerReferences: []metav1.OwnerReference{ref3}}}

type args struct {
ref metav1.OwnerReference
httpMethod string
obj metav1.Object
}
tests := []struct {
name string
args args
wantHandles, wantMutates bool
wantObj metav1.Object
}{
{
name: "on update",
args: args{
ref: ref1,
httpMethod: http.MethodPut,
obj: secret.DeepCopy(),
},
wantHandles: false,
wantMutates: false,
wantObj: nil,
},
{
name: "on create",
args: args{
ref: ref1,
httpMethod: http.MethodPost,
obj: secret.DeepCopy(),
},
wantHandles: true,
wantMutates: true,
wantObj: withOwnerRef(t, secret, ref1),
},
{
name: "on create config map",
args: args{
ref: ref2,
httpMethod: http.MethodPost,
obj: configMap.DeepCopy(),
},
wantHandles: true,
wantMutates: true,
wantObj: withOwnerRef(t, configMap, ref2),
},
{
name: "on create with pre-existing ref",
args: args{
ref: ref1,
httpMethod: http.MethodPost,
obj: secretWithOwner.DeepCopy(),
},
wantHandles: true,
wantMutates: false,
wantObj: nil,
},
{
name: "on create with pre-existing ref config map",
args: args{
ref: ref2,
httpMethod: http.MethodPost,
obj: configMapWithOwner.DeepCopy(),
},
wantHandles: true,
wantMutates: false,
wantObj: nil,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
middleware := New(tt.args.ref)

handles := middleware.Handles(tt.args.httpMethod)
require.Equal(t, tt.wantHandles, handles)

if !handles {
return
}

orig := tt.args.obj.(runtime.Object).DeepCopyObject()

mutates := middleware.Mutate(tt.args.obj)
require.Equal(t, tt.wantMutates, mutates)

if mutates {
require.NotEqual(t, orig, tt.args.obj)
require.Equal(t, tt.wantObj, tt.args.obj)
} else {
require.Equal(t, orig, tt.args.obj)
}
})
}
}

func withOwnerRef(t *testing.T, obj runtime.Object, ref metav1.OwnerReference) metav1.Object {
t.Helper()

obj = obj.DeepCopyObject()
accessor, err := meta.Accessor(obj)
require.NoError(t, err)

require.Len(t, accessor.GetOwnerReferences(), 0)
accessor.SetOwnerReferences([]metav1.OwnerReference{ref})

return accessor
}
@ -22,6 +22,7 @@ import (
loginv1alpha1 "go.pinniped.dev/generated/1.20/apis/concierge/login/v1alpha1"
conciergeclientset "go.pinniped.dev/generated/1.20/client/concierge/clientset/versioned"
"go.pinniped.dev/internal/constable"
"go.pinniped.dev/internal/kubeclient"
)

// ErrLoginFailed is returned by Client.ExchangeToken when the concierge server rejects the login request for any reason.
@ -150,7 +151,11 @@ func (c *Client) clientset() (conciergeclientset.Interface, error) {
if err != nil {
return nil, err
}
return conciergeclientset.NewForConfig(cfg)
client, err := kubeclient.New(kubeclient.WithConfig(cfg))
if err != nil {
return nil, err
}
return client.PinnipedConcierge, nil
}

// ExchangeToken performs a TokenCredentialRequest against the Pinniped concierge and returns the result as an ExecCredential.
test/integration/kubeclient_test.go (new file, 259 lines)
@ -0,0 +1,259 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package integration

import (
"context"
"testing"
"time"

"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"

conciergeconfigv1alpha1 "go.pinniped.dev/generated/1.20/apis/concierge/config/v1alpha1"
supervisorconfigv1alpha1 "go.pinniped.dev/generated/1.20/apis/supervisor/config/v1alpha1"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/ownerref"
"go.pinniped.dev/test/library"
)

func TestKubeClientOwnerRef(t *testing.T) {
env := library.IntegrationEnv(t)

regularClient := library.NewClientset(t)

ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()

namespaces := regularClient.CoreV1().Namespaces()

namespace, err := namespaces.Create(
ctx,
&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "test-owner-ref-"}},
metav1.CreateOptions{},
)
require.NoError(t, err)

defer func() {
if t.Failed() {
return
}
err := namespaces.Delete(ctx, namespace.Name, metav1.DeleteOptions{})
require.NoError(t, err)
}()

// create something that we can point to
parentSecret, err := regularClient.CoreV1().Secrets(namespace.Name).Create(
ctx,
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "parent-",
OwnerReferences: nil, // no owner refs set
},
Data: map[string][]byte{"A": []byte("B")},
},
metav1.CreateOptions{},
)
require.NoError(t, err)
require.Len(t, parentSecret.OwnerReferences, 0)

// create a client that should set an owner ref back to parent on create
ref := metav1.OwnerReference{
APIVersion: "v1",
Kind: "Secret",
Name: parentSecret.Name,
UID: parentSecret.UID,
}
ownerRefClient, err := kubeclient.New(
kubeclient.WithMiddleware(ownerref.New(ref)),
kubeclient.WithConfig(library.NewClientConfig(t)),
)
require.NoError(t, err)

ownerRefSecrets := ownerRefClient.Kubernetes.CoreV1().Secrets(namespace.Name)

// we expect this secret to have the owner ref set even though we did not set it explicitly
childSecret, err := ownerRefSecrets.Create(
ctx,
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "child-",
OwnerReferences: nil, // no owner refs set
},
Data: map[string][]byte{"C": []byte("D")},
},
metav1.CreateOptions{},
)
require.NoError(t, err)
hasOwnerRef(t, childSecret, ref)

preexistingRef := *ref.DeepCopy()
preexistingRef.Name = "different"
preexistingRef.UID = "different"

// we expect this secret to keep the owner ref that is was created with
otherSecret, err := ownerRefSecrets.Create(
ctx,
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "child-",
OwnerReferences: []metav1.OwnerReference{preexistingRef}, // owner ref set explicitly
},
Data: map[string][]byte{"C": []byte("D")},
},
metav1.CreateOptions{},
)
require.NoError(t, err)
hasOwnerRef(t, otherSecret, preexistingRef)
require.NotEqual(t, ref, preexistingRef)
// the secret has no owner so it should be immediately deleted
isEventuallyDeleted(t, func() error {
_, err := ownerRefSecrets.Get(ctx, otherSecret.Name, metav1.GetOptions{})
return err
})

// we expect no owner ref to be set on update
parentSecretUpdate := parentSecret.DeepCopy()
parentSecretUpdate.Data = map[string][]byte{"E": []byte("F ")}
updatedParentSecret, err := ownerRefSecrets.Update(ctx, parentSecretUpdate, metav1.UpdateOptions{})
require.NoError(t, err)
require.Equal(t, parentSecret.UID, updatedParentSecret.UID)
require.NotEqual(t, parentSecret.ResourceVersion, updatedParentSecret.ResourceVersion)
require.Len(t, updatedParentSecret.OwnerReferences, 0)

// delete the parent object
err = ownerRefSecrets.Delete(ctx, parentSecret.Name, metav1.DeleteOptions{})
require.NoError(t, err)

// the child object should be cleaned up on its own
isEventuallyDeleted(t, func() error {
_, err := ownerRefSecrets.Get(ctx, childSecret.Name, metav1.GetOptions{})
return err
})

// sanity check API service client
apiService, err := ownerRefClient.Aggregation.ApiregistrationV1().APIServices().Create(
ctx,
&apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{
Name: "v1.pandas.dev",
OwnerReferences: nil, // no owner refs set
},
Spec: apiregistrationv1.APIServiceSpec{
Version: "v1",
Group: "pandas.dev",
GroupPriorityMinimum: 10_000,
VersionPriority: 500,
},
},
metav1.CreateOptions{},
)
require.NoError(t, err)
hasOwnerRef(t, apiService, ref)
// this owner ref is invalid for an API service so it should be immediately deleted
isEventuallyDeleted(t, func() error {
_, err := ownerRefClient.Aggregation.ApiregistrationV1().APIServices().Get(ctx, apiService.Name, metav1.GetOptions{})
return err
})

// sanity check concierge client
credentialIssuer, err := ownerRefClient.PinnipedConcierge.ConfigV1alpha1().CredentialIssuers(namespace.Name).Create(
ctx,
&conciergeconfigv1alpha1.CredentialIssuer{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "owner-ref-test-",
OwnerReferences: nil, // no owner refs set
},
Status: conciergeconfigv1alpha1.CredentialIssuerStatus{
Strategies: []conciergeconfigv1alpha1.CredentialIssuerStrategy{},
},
},
metav1.CreateOptions{},
)
require.NoError(t, err)
hasOwnerRef(t, credentialIssuer, ref)
// this owner has already been deleted so the cred issuer should be immediately deleted
isEventuallyDeleted(t, func() error {
_, err := ownerRefClient.PinnipedConcierge.ConfigV1alpha1().CredentialIssuers(namespace.Name).Get(ctx, credentialIssuer.Name, metav1.GetOptions{})
return err
})

// sanity check supervisor client
federationDomain, err := ownerRefClient.PinnipedSupervisor.ConfigV1alpha1().FederationDomains(namespace.Name).Create(
ctx,
&supervisorconfigv1alpha1.FederationDomain{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "owner-ref-test-",
OwnerReferences: nil, // no owner refs set
},
Spec: supervisorconfigv1alpha1.FederationDomainSpec{
Issuer: "https://pandas.dev",
},
},
metav1.CreateOptions{},
)
require.NoError(t, err)
hasOwnerRef(t, federationDomain, ref)
// this owner has already been deleted so the federation domain should be immediately deleted
isEventuallyDeleted(t, func() error {
_, err := ownerRefClient.PinnipedSupervisor.ConfigV1alpha1().FederationDomains(namespace.Name).Get(ctx, federationDomain.Name, metav1.GetOptions{})
return err
})

// check some well-known, always created secrets to make sure they have an owner ref back to their deployment

dref := metav1.OwnerReference{}
dref.APIVersion, dref.Kind = appsv1.SchemeGroupVersion.WithKind("Deployment").ToAPIVersionAndKind()

supervisorDeployment, err := ownerRefClient.Kubernetes.AppsV1().Deployments(env.SupervisorNamespace).Get(ctx, env.SupervisorAppName, metav1.GetOptions{})
require.NoError(t, err)

supervisorKey, err := ownerRefClient.Kubernetes.CoreV1().Secrets(env.SupervisorNamespace).Get(ctx, env.SupervisorAppName+"-key", metav1.GetOptions{})
require.NoError(t, err)

supervisorDref := *dref.DeepCopy()
supervisorDref.Name = env.SupervisorAppName
supervisorDref.UID = supervisorDeployment.UID
hasOwnerRef(t, supervisorKey, supervisorDref)

conciergeDeployment, err := ownerRefClient.Kubernetes.AppsV1().Deployments(env.ConciergeNamespace).Get(ctx, env.ConciergeAppName, metav1.GetOptions{})
require.NoError(t, err)

conciergeCert, err := ownerRefClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, env.ConciergeAppName+"-api-tls-serving-certificate", metav1.GetOptions{})
require.NoError(t, err)

conciergeDref := *dref.DeepCopy()
conciergeDref.Name = env.ConciergeAppName
conciergeDref.UID = conciergeDeployment.UID
hasOwnerRef(t, conciergeCert, conciergeDref)
}

func hasOwnerRef(t *testing.T, obj metav1.Object, ref metav1.OwnerReference) {
t.Helper()

ownerReferences := obj.GetOwnerReferences()
require.Len(t, ownerReferences, 1)
require.Equal(t, ref, ownerReferences[0])
}

func isEventuallyDeleted(t *testing.T, f func() error) {
t.Helper()

require.Eventually(t, func() bool {
err := f()
switch {
case err == nil:
return false
case errors.IsNotFound(err):
return true
default:
require.NoError(t, err)
return false
}
}, time.Minute, time.Second)
}