Merge pull request #627 from mattmoyer/use-informers-for-credentialissuer-updates

Create CredentialIssuer at install, not runtime.
Matt Moyer 2021-05-20 10:13:41 -05:00 committed by GitHub
commit 562942cdbf
6 changed files with 144 additions and 58 deletions

View File

@@ -247,3 +247,9 @@ spec:
     name: #@ defaultResourceNameWithSuffix("api")
     namespace: #@ namespace()
     port: 443
+---
+apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1"
+kind: CredentialIssuer
+metadata:
+  name: #@ defaultResourceNameWithSuffix("config")
+  labels: #@ labels()

View File

@@ -5,8 +5,12 @@ package issuerconfig
 import (
 	"context"
+	"fmt"
 	"sort"
 
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
 	"go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
 	"go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
 )
@@ -28,6 +32,23 @@ func UpdateStrategy(ctx context.Context,
 	)
 }
 
+// Update a strategy on an existing CredentialIssuer, merging into any existing strategy entries.
+func Update(ctx context.Context, client versioned.Interface, issuer *v1alpha1.CredentialIssuer, strategy v1alpha1.CredentialIssuerStrategy) error {
+	// Update the existing object to merge in the new strategy.
+	updated := issuer.DeepCopy()
+	mergeStrategy(&updated.Status, strategy)
+
+	// If the status has not changed, we're done.
+	if apiequality.Semantic.DeepEqual(issuer.Status, updated.Status) {
+		return nil
+	}
+
+	if _, err := client.ConfigV1alpha1().CredentialIssuers().UpdateStatus(ctx, updated, metav1.UpdateOptions{}); err != nil {
+		return fmt.Errorf("failed to update CredentialIssuer status: %w", err)
+	}
+	return nil
+}
+
 func mergeStrategy(configToUpdate *v1alpha1.CredentialIssuerStatus, strategy v1alpha1.CredentialIssuerStrategy) {
 	var existing *v1alpha1.CredentialIssuerStrategy
 	for i := range configToUpdate.Strategies {
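
A minimal usage sketch of the new issuerconfig.Update helper (illustrative only, not part of this diff): it assumes the CredentialIssuer was pre-created at install time, simulated here with the generated fake clientset, and it reuses the object name "pinniped-concierge-config" from the test changes below.

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	conciergefake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
	"go.pinniped.dev/internal/controller/issuerconfig"
)

func main() {
	ctx := context.Background()

	// Assume the installer already created this object, as the ytt template above now does.
	existing := &configv1alpha1.CredentialIssuer{
		ObjectMeta: metav1.ObjectMeta{Name: "pinniped-concierge-config"},
	}
	client := conciergefake.NewSimpleClientset(existing)

	// Merge one strategy into the existing status; Update skips the write when the status is unchanged.
	err := issuerconfig.Update(ctx, client, existing, configv1alpha1.CredentialIssuerStrategy{
		Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
		Status:         configv1alpha1.SuccessStrategyStatus,
		Reason:         configv1alpha1.FetchedKeyStrategyReason,
		Message:        "key was fetched successfully",
		LastUpdateTime: metav1.NewTime(time.Now()),
	})
	if err != nil {
		panic(err)
	}
}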

View File

@@ -32,6 +32,7 @@ import (
 	"k8s.io/utils/pointer"
 
 	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
+	configv1alpha1informers "go.pinniped.dev/generated/latest/client/concierge/informers/externalversions/config/v1alpha1"
 	pinnipedcontroller "go.pinniped.dev/internal/controller"
 	"go.pinniped.dev/internal/controller/issuerconfig"
 	"go.pinniped.dev/internal/controllerlib"
@@ -101,6 +102,7 @@ type agentController struct {
 	agentDeployments     appsv1informers.DeploymentInformer
 	agentPods            corev1informers.PodInformer
 	kubePublicConfigMaps corev1informers.ConfigMapInformer
+	credentialIssuers    configv1alpha1informers.CredentialIssuerInformer
 	executor             PodCommandExecutor
 	dynamicCertProvider  dynamiccert.Private
 	clock                clock.Clock
@@ -129,6 +131,7 @@ func NewAgentController(
 	agentDeployments appsv1informers.DeploymentInformer,
 	agentPods corev1informers.PodInformer,
 	kubePublicConfigMaps corev1informers.ConfigMapInformer,
+	credentialIssuers configv1alpha1informers.CredentialIssuerInformer,
 	dynamicCertProvider dynamiccert.Private,
 ) controllerlib.Controller {
 	return newAgentController(
@@ -138,6 +141,7 @@ func NewAgentController(
 		agentDeployments,
 		agentPods,
 		kubePublicConfigMaps,
+		credentialIssuers,
 		NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
 		dynamicCertProvider,
 		&clock.RealClock{},
@@ -153,6 +157,7 @@ func newAgentController(
 	agentDeployments appsv1informers.DeploymentInformer,
 	agentPods corev1informers.PodInformer,
 	kubePublicConfigMaps corev1informers.ConfigMapInformer,
+	credentialIssuers configv1alpha1informers.CredentialIssuerInformer,
 	podCommandExecutor PodCommandExecutor,
 	dynamicCertProvider dynamiccert.Private,
 	clock clock.Clock,
@@ -170,6 +175,7 @@ func newAgentController(
 			agentDeployments:     agentDeployments,
 			agentPods:            agentPods,
 			kubePublicConfigMaps: kubePublicConfigMaps,
+			credentialIssuers:    credentialIssuers,
 			executor:             podCommandExecutor,
 			dynamicCertProvider:  dynamicCertProvider,
 			clock:                clock,
@@ -206,6 +212,13 @@ func newAgentController(
 			}),
 			controllerlib.InformerOption{},
 		),
+		controllerlib.WithInformer(
+			credentialIssuers,
+			pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+				return obj.GetName() == cfg.CredentialIssuerName
+			}),
+			controllerlib.InformerOption{},
+		),
 		// Be sure to run once even to make sure the CredentialIssuer is updated if there are no controller manager
 		// pods. We should be able to pass an empty key since we don't use the key in the sync (we sync
 		// the world).
@@ -216,11 +229,17 @@
 
 // Sync implements controllerlib.Syncer.
 func (c *agentController) Sync(ctx controllerlib.Context) error {
+	// Load the CredentialIssuer that we'll update with status.
+	credIssuer, err := c.credentialIssuers.Lister().Get(c.cfg.CredentialIssuerName)
+	if err != nil {
+		return fmt.Errorf("could not get CredentialIssuer to update: %w", err)
+	}
+
 	// Find the latest healthy kube-controller-manager Pod in kube-system..
 	controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
 	if err != nil {
 		err := fmt.Errorf("could not list controller manager pods: %w", err)
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
 	}
 
 	newestControllerManager := newestRunningPod(controllerManagerPods)
@@ -228,19 +247,19 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
 	// the CredentialIssuer.
 	if newestControllerManager == nil {
 		err := fmt.Errorf("could not find a healthy kube-controller-manager pod (%s)", pluralize(controllerManagerPods))
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
 	}
 
 	if err := c.createOrUpdateDeployment(ctx, newestControllerManager); err != nil {
 		err := fmt.Errorf("could not ensure agent deployment: %w", err)
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
 	}
 
 	// Find the latest healthy agent Pod in our namespace.
 	agentPods, err := c.agentPods.Lister().Pods(c.cfg.Namespace).List(agentLabels)
 	if err != nil {
 		err := fmt.Errorf("could not list agent pods: %w", err)
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
 	}
 
 	newestAgentPod := newestRunningPod(agentPods)
@@ -248,45 +267,39 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
 	// the CredentialIssuer.
 	if newestAgentPod == nil {
 		err := fmt.Errorf("could not find a healthy agent pod (%s)", pluralize(agentPods))
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
 	}
 
 	// Load the Kubernetes API info from the kube-public/cluster-info ConfigMap.
 	configMap, err := c.kubePublicConfigMaps.Lister().ConfigMaps(ClusterInfoNamespace).Get(clusterInfoName)
 	if err != nil {
 		err := fmt.Errorf("failed to get %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
 	}
 
 	apiInfo, err := c.extractAPIInfo(configMap)
 	if err != nil {
 		err := fmt.Errorf("could not extract Kubernetes API endpoint info from %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
 	}
 
 	// Load the certificate and key from the agent pod into our in-memory signer.
 	if err := c.loadSigningKey(newestAgentPod); err != nil {
-		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+		return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
 	}
 
 	// Set the CredentialIssuer strategy to successful.
-	return issuerconfig.UpdateStrategy(
-		ctx.Context,
-		c.cfg.CredentialIssuerName,
-		c.cfg.Labels,
-		c.client.PinnipedConcierge,
-		configv1alpha1.CredentialIssuerStrategy{
-			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
-			Status:         configv1alpha1.SuccessStrategyStatus,
-			Reason:         configv1alpha1.FetchedKeyStrategyReason,
-			Message:        "key was fetched successfully",
-			LastUpdateTime: metav1.NewTime(c.clock.Now()),
-			Frontend: &configv1alpha1.CredentialIssuerFrontend{
-				Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
-				TokenCredentialRequestAPIInfo: apiInfo,
-			},
-		},
-	)
+	return issuerconfig.Update(ctx.Context, c.client.PinnipedConcierge, credIssuer, configv1alpha1.CredentialIssuerStrategy{
+		Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+		Status:         configv1alpha1.SuccessStrategyStatus,
+		Reason:         configv1alpha1.FetchedKeyStrategyReason,
+		Message:        "key was fetched successfully",
+		LastUpdateTime: metav1.NewTime(c.clock.Now()),
+		Frontend: &configv1alpha1.CredentialIssuerFrontend{
+			Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
+			TokenCredentialRequestAPIInfo: apiInfo,
+		},
+	})
 }
 
 func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
@@ -358,20 +371,15 @@ func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, ne
 	return err
 }
 
-func (c *agentController) failStrategyAndErr(ctx context.Context, err error, reason configv1alpha1.StrategyReason) error {
-	return utilerrors.NewAggregate([]error{err, issuerconfig.UpdateStrategy(
-		ctx,
-		c.cfg.CredentialIssuerName,
-		c.cfg.Labels,
-		c.client.PinnipedConcierge,
-		configv1alpha1.CredentialIssuerStrategy{
-			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
-			Status:         configv1alpha1.ErrorStrategyStatus,
-			Reason:         reason,
-			Message:        err.Error(),
-			LastUpdateTime: metav1.NewTime(c.clock.Now()),
-		},
-	)})
+func (c *agentController) failStrategyAndErr(ctx context.Context, credIssuer *configv1alpha1.CredentialIssuer, err error, reason configv1alpha1.StrategyReason) error {
+	updateErr := issuerconfig.Update(ctx, c.client.PinnipedConcierge, credIssuer, configv1alpha1.CredentialIssuerStrategy{
+		Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+		Status:         configv1alpha1.ErrorStrategyStatus,
+		Reason:         reason,
+		Message:        err.Error(),
+		LastUpdateTime: metav1.NewTime(c.clock.Now()),
+	})
+	return utilerrors.NewAggregate([]error{err, updateErr})
 }
 
 func (c *agentController) extractAPIInfo(configMap *corev1.ConfigMap) (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
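
A note on the reshaped failStrategyAndErr (the sketch below is illustrative, not from this commit): the original sync error is aggregated with any error from writing the CredentialIssuer status, using the standard apimachinery helper already imported by this file; the import path shown is the upstream one, not something added by this change.

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	syncErr := errors.New("could not list agent pods: some lister failure")
	updateErr := errors.New("failed to update CredentialIssuer status: some write failure")

	// NewAggregate skips nil entries, so when the status write succeeds the
	// caller sees only the original sync error.
	fmt.Println(utilerrors.NewAggregate([]error{syncErr, nil}))

	// When both fail, both messages surface in the aggregated error.
	fmt.Println(utilerrors.NewAggregate([]error{syncErr, updateErr}))
}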

View File

@@ -27,6 +27,7 @@ import (
 
 	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
 	conciergefake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
+	conciergeinformers "go.pinniped.dev/generated/latest/client/concierge/informers/externalversions"
 	"go.pinniped.dev/internal/controller/kubecertagent/mocks"
 	"go.pinniped.dev/internal/controllerlib"
 	"go.pinniped.dev/internal/here"
@@ -38,6 +39,10 @@ func TestAgentController(t *testing.T) {
 	t.Parallel()
 
 	now := time.Date(2021, 4, 13, 9, 57, 0, 0, time.UTC)
+	initialCredentialIssuer := &configv1alpha1.CredentialIssuer{
+		ObjectMeta: metav1.ObjectMeta{Name: "pinniped-concierge-config"},
+	}
+
 	healthyKubeControllerManagerPod := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "kube-system",
@@ -204,6 +209,7 @@ func TestAgentController(t *testing.T) {
 	tests := []struct {
 		name                 string
 		discoveryURLOverride *string
+		pinnipedObjects      []runtime.Object
 		kubeObjects          []runtime.Object
 		addKubeReactions     func(*kubefake.Clientset)
 		mocks                func(*testing.T, *mocks.MockPodCommandExecutorMockRecorder, *mocks.MockDynamicCertPrivateMockRecorder, *cache.Expiring)
@@ -212,8 +218,17 @@ func TestAgentController(t *testing.T) {
 		wantAgentDeployment  *appsv1.Deployment
 		wantStrategy         *configv1alpha1.CredentialIssuerStrategy
 	}{
+		{
+			name: "no CredentialIssuer found",
+			wantDistinctErrors: []string{
+				`could not get CredentialIssuer to update: credentialissuer.config.concierge.pinniped.dev "pinniped-concierge-config" not found`,
+			},
+		},
 		{
 			name: "no kube-controller-manager pods",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				&corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
@@ -236,6 +251,9 @@
 		},
 		{
 			name: "only unhealthy kube-controller-manager pods",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				&corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
@@ -276,6 +294,9 @@
 		},
 		{
 			name: "failed to created new deployment",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 			},
@@ -300,6 +321,9 @@
 		},
 		{
 			name: "created new deployment, no agent pods running yet",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				&corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
@@ -341,6 +365,9 @@
 		},
 		{
 			name: "created new deployment with defaulted paths, no agent pods running yet",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				&corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
@@ -382,6 +409,9 @@
 		},
 		{
 			name: "update to existing deployment, no running agent pods yet",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				&corev1.Pod{
@@ -424,6 +454,9 @@
 		},
 		{
 			name: "deployment exists, configmap missing",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -443,6 +476,9 @@
 		},
 		{
 			name: "deployment exists, configmap missing key",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -466,6 +502,9 @@
 		},
 		{
 			name: "deployment exists, configmap key has invalid data",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -489,6 +528,9 @@
 		},
 		{
 			name: "deployment exists, configmap kubeconfig has no clusters",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -512,6 +554,9 @@
 		},
 		{
 			name: "deployment exists, configmap is valid,, exec into agent pod fails",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -537,6 +582,9 @@
 		},
 		{
 			name: "deployment exists, configmap is valid, exec into agent pod returns bogus certs",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -565,6 +613,9 @@
 		},
 		{
 			name: "deployment exists, configmap is valid, exec is cached",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -594,6 +645,9 @@
 		},
 		{
 			name: "deployment exists, configmap is valid, exec succeeds",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -620,6 +674,9 @@
 		},
 		{
 			name: "deployment exists, configmap is valid, exec succeeds, overridden discovery URL",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
 			kubeObjects: []runtime.Object{
 				healthyKubeControllerManagerPod,
 				healthyAgentDeployment,
@@ -651,12 +708,13 @@
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
 
+			conciergeClientset := conciergefake.NewSimpleClientset(tt.pinnipedObjects...)
+			conciergeInformers := conciergeinformers.NewSharedInformerFactory(conciergeClientset, 0)
 			kubeClientset := kubefake.NewSimpleClientset(tt.kubeObjects...)
 			if tt.addKubeReactions != nil {
 				tt.addKubeReactions(kubeClientset)
 			}
 
-			conciergeClientset := conciergefake.NewSimpleClientset()
 			kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
 			log := testlogger.New(t)
@@ -676,7 +734,7 @@
 					ServiceAccountName:        "test-service-account-name",
 					NamePrefix:                "pinniped-concierge-kube-cert-agent-",
 					ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"},
-					CredentialIssuerName:      "pinniped-concierge-config",
+					CredentialIssuerName:      initialCredentialIssuer.Name,
 					Labels:                    map[string]string{"extralabel": "labelvalue"},
 					DiscoveryURLOverride:      tt.discoveryURLOverride,
 				},
@@ -685,6 +743,7 @@
 				kubeInformers.Apps().V1().Deployments(),
 				kubeInformers.Core().V1().Pods(),
 				kubeInformers.Core().V1().ConfigMaps(),
+				conciergeInformers.Config().V1alpha1().CredentialIssuers(),
 				mockExecutor,
 				mockDynamicCert,
 				fakeClock,
@@ -696,7 +755,7 @@
 			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 			defer cancel()
 
-			errorMessages := runControllerUntilQuiet(ctx, t, controller, kubeInformers)
+			errorMessages := runControllerUntilQuiet(ctx, t, controller, kubeInformers, conciergeInformers)
 
 			assert.Equal(t, tt.wantDistinctErrors, deduplicate(errorMessages), "unexpected errors")
 			assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
@@ -711,10 +770,12 @@
 			}
 
 			// Assert that the CredentialIssuer is in the expected final state
-			credIssuer, err := conciergeClientset.ConfigV1alpha1().CredentialIssuers().Get(ctx, "pinniped-concierge-config", metav1.GetOptions{})
-			require.NoError(t, err)
-			require.Len(t, credIssuer.Status.Strategies, 1, "expected a single strategy in the CredentialIssuer")
-			require.Equal(t, tt.wantStrategy, &credIssuer.Status.Strategies[0])
+			if tt.wantStrategy != nil {
+				credIssuer, err := conciergeClientset.ConfigV1alpha1().CredentialIssuers().Get(ctx, initialCredentialIssuer.Name, metav1.GetOptions{})
+				require.NoError(t, err)
+				require.Len(t, credIssuer.Status.Strategies, 1, "expected a single strategy in the CredentialIssuer")
+				require.Equal(t, tt.wantStrategy, &credIssuer.Status.Strategies[0])
+			}
 		})
 	}
 }
@@ -794,7 +855,7 @@ func deduplicate(strings []string) []string {
 	return result
 }
 
-func runControllerUntilQuiet(ctx context.Context, t *testing.T, controller controllerlib.Controller, informers ...informers.SharedInformerFactory) []string {
+func runControllerUntilQuiet(ctx context.Context, t *testing.T, controller controllerlib.Controller, informers ...interface{ Start(<-chan struct{}) }) []string {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
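
The widened runControllerUntilQuiet parameter works because both informer factories in play (the client-go one and the generated Pinniped one) expose Start(stopCh <-chan struct{}). A minimal sketch of that idea, using a hypothetical helper name:

// startAll accepts anything with a Start method matching the shared informer
// factories, so the test helper does not need to import both generated packages.
func startAll(stopCh <-chan struct{}, factories ...interface{ Start(<-chan struct{}) }) {
	for _, f := range factories {
		f.Start(stopCh) // Start is non-blocking; it launches each registered informer.
	}
}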

View File

@@ -204,6 +204,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 				informers.installationNamespaceK8s.Apps().V1().Deployments(),
 				informers.installationNamespaceK8s.Core().V1().Pods(),
 				informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
+				informers.pinniped.Config().V1alpha1().CredentialIssuers(),
 				c.DynamicSigningCertProvider,
 			),
 			singletonWorker,

View File

@@ -43,23 +43,12 @@ func TestCredentialIssuer(t *testing.T) {
 		}
 		require.Equal(t, env.ConciergeAppName, actualConfig.Labels["app"])
 
-		// verify owner ref is set
-		require.Len(t, actualConfig.OwnerReferences, 1)
-
 		apiService, err := aggregatedClientset.ApiregistrationV1().APIServices().Get(ctx, "v1alpha1.login.concierge."+env.APIGroupSuffix, metav1.GetOptions{})
 		require.NoError(t, err)
 
 		// work around stupid behavior of WithoutVersionDecoder.Decode
 		apiService.APIVersion, apiService.Kind = apiregistrationv1.SchemeGroupVersion.WithKind("APIService").ToAPIVersionAndKind()
 
-		ref := metav1.OwnerReference{
-			APIVersion: apiService.APIVersion,
-			Kind:       apiService.Kind,
-			Name:       apiService.Name,
-			UID:        apiService.UID,
-		}
-		require.Equal(t, ref, actualConfig.OwnerReferences[0])
-
 		// Verify the cluster strategy status based on what's expected of the test cluster's ability to share signing keys.
 		actualStatusStrategies := actualConfigList.Items[0].Status.Strategies