Merge pull request #839 from vmware-tanzu/deployment_selectors

Improve the selectors of Deployments and Services
Ryan Richard 2021-09-15 14:46:31 -07:00 committed by GitHub
commit 7859a7b5c2
12 changed files with 346 additions and 54 deletions

View File

@@ -3,7 +3,8 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
@@ -108,10 +109,16 @@ metadata:
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels: #@ defaultLabel()
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently, we also added the unique deploymentPodLabel() so Services can select these Pods specifically,
#! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods.
_: #@ template.replace(deploymentPodLabel())
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
@@ -225,7 +232,7 @@ spec:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ defaultLabel()
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
---
apiVersion: v1
@@ -235,9 +242,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! Prevent kapp from altering the selector of our services (leaving it unaltered matches plain kubectl behavior).
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
@@ -249,9 +259,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("proxy")
namespace: #@ namespace()
labels: #@ labels()
#! Prevent kapp from altering the selector of our services (leaving it unaltered matches plain kubectl behavior).
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
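
To make the effect of the new selectors concrete, here is a small illustrative Go sketch (not part of this commit) using the Kubernetes label-matching helpers. It assumes the default app_name of pinniped-concierge, the deploymentPodLabel() value defined in helpers.lib.yaml below, and the v3 agent label introduced later in this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// After this change, the main Concierge Deployment's Pods carry both labels.
	conciergePod := labels.Set{"app": "pinniped-concierge", "deployment.pinniped.dev": "concierge"}
	// The kube cert agent Pods carry only their own unique label (and never "app").
	agentPod := labels.Set{"kube-cert-agent.pinniped.dev": "v3"}

	// The Services now select on the unique deploymentPodLabel()...
	newSelector := labels.SelectorFromSet(labels.Set{"deployment.pinniped.dev": "concierge"})
	fmt.Println(newSelector.Matches(conciergePod)) // true
	fmt.Println(newSelector.Matches(agentPod))     // false

	// ...whereas the old defaultLabel() selector also matched the agent Pods of older
	// versions of Pinniped, which labelled them with the same "app" label.
	oldSelector := labels.SelectorFromSet(labels.Set{"app": "pinniped-concierge"})
	oldAgentPod := labels.Set{"app": "pinniped-concierge", "kube-cert-agent.pinniped.dev": "v2"}
	fmt.Println(oldSelector.Matches(oldAgentPod)) // true: the Services could route to agent Pods
}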

View File

@@ -25,9 +25,14 @@
#@ end
#@ def defaultLabel():
#! Note that the name of this label's key is also assumed by kubecertagent.go and impersonator_config.go
app: #@ data.values.app_name
#@ end
#@ def deploymentPodLabel():
deployment.pinniped.dev: concierge
#@ end
#@ def labels():
_: #@ template.replace(defaultLabel())
_: #@ template.replace(data.values.custom_labels)

View File

@@ -145,7 +145,7 @@ rules:
#! We need to be able to create and update deployments in our namespace so we can manage the kube-cert-agent Deployment.
- apiGroups: [ apps ]
resources: [ deployments ]
verbs: [ create, get, list, patch, update, watch ]
verbs: [ create, get, list, patch, update, watch, delete ]
#! We need to be able to get replicasets so we can form the correct owner references on our generated objects.
- apiGroups: [ apps ]
resources: [ replicasets ]

View File

@@ -73,6 +73,9 @@ metadata:
namespace: local-user-authenticator
labels:
app: local-user-authenticator
#! Prevent kapp from altering the selector of our services (leaving it unaltered matches plain kubectl behavior).
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector:

View File

@@ -3,7 +3,8 @@
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
@@ -59,10 +60,16 @@ metadata:
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels: #@ defaultLabel()
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently, we also added the unique deploymentPodLabel() so Services can select these Pods specifically,
#! without accidentally selecting pods from any future Deployments which might also want to use the defaultLabel().
_: #@ template.replace(deploymentPodLabel())
spec:
securityContext:
runAsUser: #@ data.values.run_as_user
@@ -156,5 +163,5 @@ spec:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ defaultLabel()
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname

View File

@@ -28,6 +28,10 @@
app: #@ data.values.app_name
#@ end
#@ def deploymentPodLabel():
deployment.pinniped.dev: supervisor
#@ end
#@ def labels():
_: #@ template.replace(defaultLabel())
_: #@ template.replace(data.values.custom_labels)

View File

@@ -1,8 +1,8 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")
#@ load("helpers.lib.yaml", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")
#@ if data.values.service_http_nodeport_port or data.values.service_https_nodeport_port:
---
@@ -12,10 +12,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("nodeport")
namespace: #@ namespace()
labels: #@ labels()
#! Prevent kapp from altering the selector of our services (leaving it unaltered matches plain kubectl behavior).
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: NodePort
selector:
app: #@ data.values.app_name
selector: #@ deploymentPodLabel()
ports:
#@ if data.values.service_http_nodeport_port:
- name: http
@@ -45,9 +47,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("clusterip")
namespace: #@ namespace()
labels: #@ labels()
#! Prevent kapp from altering the selector of our services (leaving it unaltered matches plain kubectl behavior).
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
ports:
#@ if data.values.service_http_clusterip_port:
- name: http
@@ -71,9 +76,12 @@ metadata:
name: #@ defaultResourceNameWithSuffix("loadbalancer")
namespace: #@ namespace()
labels: #@ labels()
#! Prevent kapp from altering the selector of our services (leaving it unaltered matches plain kubectl behavior).
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: LoadBalancer
selector: #@ defaultLabel()
selector: #@ deploymentPodLabel()
#@ if data.values.service_loadbalancer_ip:
loadBalancerIP: #@ data.values.service_loadbalancer_ip
#@ end

View File

@@ -24,3 +24,18 @@ nodes:
containerPort: 31235
hostPort: 12346
listenAddress: 127.0.0.1
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
extraArgs:
# To make sure the endpoints on our service are correct (this mostly matters for kubectl-based
# installs, where kapp is not making its usual changes to the deployment and service selectors).
# Setting this flag to true makes the API server do the service cluster IP to endpoint IP
# translations internally instead of relying on the network stack (i.e. kube-proxy).
# The logic inside the API server is straightforward: randomly pick an IP from the list of
# available endpoints. This means that over time, all endpoints associated with the service
# are exercised. For whatever reason, leaving this as false (i.e. using kube-proxy) appears to
# hide some network misconfigurations when the service is used internally by the API server aggregation layer.
enable-aggregator-routing: "true"

View File

@@ -48,7 +48,11 @@ const (
// agentPodLabelKey is used to identify which pods are created by the kube-cert-agent
// controllers.
agentPodLabelKey = "kube-cert-agent.pinniped.dev"
agentPodLabelValue = "v2"
agentPodLabelValue = "v3"
// conciergeDefaultLabelKeyName is the name of the key of the label applied to all Concierge resources.
// This name is determined in the YAML manifests, but this controller needs to treat it as a special case below.
conciergeDefaultLabelKeyName = "app"
ClusterInfoNamespace = "kube-public"
clusterInfoName = "cluster-info"
@@ -84,10 +88,26 @@ type AgentConfig struct {
DiscoveryURLOverride *string
}
func (a *AgentConfig) agentLabels() map[string]string {
// Only select using the unique label which will not match the pods of any other Deployment.
// Older versions of Pinniped had multiple labels here.
func (a *AgentConfig) agentPodSelectorLabels() map[string]string {
return map[string]string{agentPodLabelKey: agentPodLabelValue}
}
// Label the agent pod using the configured labels plus the unique label which we will use in the selector.
func (a *AgentConfig) agentPodLabels() map[string]string {
allLabels := map[string]string{agentPodLabelKey: agentPodLabelValue}
for k, v := range a.Labels {
allLabels[k] = v
// Never label the agent pod with any label whose key is "app" because that could unfortunately match
// the selector of the main Concierge Deployment. This is sadly inconsistent because all other resources
// get labelled with the "app" label, but unfortunately the selector of the main Concierge Deployment is
// an immutable field, so we cannot update it to make it use a more specific label without breaking upgrades.
// Therefore, we take extra care here to avoid allowing the kube cert agent pods to match the selector of
// the main Concierge Deployment. Note that older versions of Pinniped included this "app" label, so during
// an upgrade we must take care to perform an update to remove it.
if k != conciergeDefaultLabelKeyName {
allLabels[k] = v
}
}
return allLabels
}
@@ -236,7 +256,7 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
return fmt.Errorf("could not get CredentialIssuer to update: %w", err)
}
// Find the latest healthy kube-controller-manager Pod in kube-system..
// Find the latest healthy kube-controller-manager Pod in kube-system.
controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
if err != nil {
err := fmt.Errorf("could not list controller manager pods: %w", err)
@@ -336,6 +356,7 @@ func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
if err := c.dynamicCertProvider.SetCertKeyContent(certPEM, keyPEM); err != nil {
return fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
}
c.log.Info("successfully loaded signing key from agent pod into cache")
// Remember that we've successfully loaded the key from this pod so we can skip the exec+load if nothing has changed.
c.execCache.Set(agentPod.UID, struct{}{}, 15*time.Minute)
@@ -365,16 +386,42 @@ func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, ne
return err
}
// Otherwise update the spec of the Deployment to match our desired state.
// Update the spec of the Deployment to match our desired state.
updatedDeployment := existingDeployment.DeepCopy()
updatedDeployment.Spec = expectedDeployment.Spec
updatedDeployment.ObjectMeta = mergeLabelsAndAnnotations(updatedDeployment.ObjectMeta, expectedDeployment.ObjectMeta)
desireSelectorUpdate := !apiequality.Semantic.DeepEqual(updatedDeployment.Spec.Selector, existingDeployment.Spec.Selector)
desireTemplateLabelsUpdate := !apiequality.Semantic.DeepEqual(updatedDeployment.Spec.Template.Labels, existingDeployment.Spec.Template.Labels)
// If the existing Deployment already matches our desired spec, we're done.
if apiequality.Semantic.DeepDerivative(updatedDeployment, existingDeployment) {
return nil
// DeepDerivative allows the map fields of updatedDeployment to be a subset of existingDeployment,
// but we want to check that certain of those map fields are exactly equal before deciding to skip the update.
if !desireSelectorUpdate && !desireTemplateLabelsUpdate {
return nil // already equal enough, so skip update
}
}
// Selector is an immutable field, so if we want to update it then we must delete and recreate the Deployment,
// and then we're done. Older versions of Pinniped had multiple labels in the Selector, so to support upgrades from
// those versions we take extra care to handle this case.
if desireSelectorUpdate {
log.Info("deleting deployment to update immutable Selector field")
err = c.client.Kubernetes.AppsV1().Deployments(existingDeployment.Namespace).Delete(ctx.Context, existingDeployment.Name, metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
UID: &existingDeployment.UID,
ResourceVersion: &existingDeployment.ResourceVersion,
},
})
if err != nil {
return err
}
log.Info("creating new deployment to update immutable Selector field")
_, err = c.client.Kubernetes.AppsV1().Deployments(expectedDeployment.Namespace).Create(ctx.Context, expectedDeployment, metav1.CreateOptions{})
return err
}
// Otherwise, update the Deployment.
log.Info("updating existing deployment")
_, err = c.client.Kubernetes.AppsV1().Deployments(updatedDeployment.Namespace).Update(ctx.Context, updatedDeployment, metav1.UpdateOptions{})
return err
@@ -457,10 +504,10 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(1),
Selector: metav1.SetAsLabelSelector(c.cfg.agentLabels()),
Selector: metav1.SetAsLabelSelector(c.cfg.agentPodSelectorLabels()),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: c.cfg.agentLabels(),
Labels: c.cfg.agentPodLabels(),
},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
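
As an aside on the skip-update check in createOrUpdateDeployment above: apiequality.Semantic.DeepDerivative ignores map keys which are missing from its first argument, which is why the Selector and pod template labels get their own exact comparisons. A standalone sketch of the difference, with hypothetical label maps but the real apimachinery API:

package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

func main() {
	desired := map[string]string{"kube-cert-agent.pinniped.dev": "v3"}
	existing := map[string]string{"kube-cert-agent.pinniped.dev": "v3", "app": "concierge"}

	// DeepDerivative treats the first argument as a partial spec: keys absent from it are
	// ignored, so a desired label map which is a subset of the existing one looks "equal".
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, existing)) // true

	// DeepEqual is strict, which is why the controller compares the Selector and the pod
	// template labels with DeepEqual before deciding that no update is needed.
	fmt.Println(apiequality.Semantic.DeepEqual(desired, existing)) // false
}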

View File

@@ -32,6 +32,7 @@ import (
"go.pinniped.dev/internal/controllerlib"
"go.pinniped.dev/internal/here"
"go.pinniped.dev/internal/kubeclient"
"go.pinniped.dev/internal/testutil"
"go.pinniped.dev/internal/testutil/testlogger"
)
@@ -85,19 +86,18 @@ func TestAgentController(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: "concierge",
Name: "pinniped-concierge-kube-cert-agent",
Labels: map[string]string{"extralabel": "labelvalue"},
Labels: map[string]string{"extralabel": "labelvalue", "app": "anything"},
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.Int32Ptr(1),
Selector: metav1.SetAsLabelSelector(map[string]string{
"extralabel": "labelvalue",
"kube-cert-agent.pinniped.dev": "v2",
"kube-cert-agent.pinniped.dev": "v3",
}),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"extralabel": "labelvalue",
"kube-cert-agent.pinniped.dev": "v2",
"kube-cert-agent.pinniped.dev": "v3",
},
},
Spec: corev1.PodSpec{
@@ -151,6 +151,19 @@ func TestAgentController(t *testing.T) {
},
}
// Older versions of Pinniped had a selector which included "app: app_name", e.g. "app: concierge".
// Selector is an immutable field, but we want to support upgrading from those older versions anyway.
oldStyleLabels := map[string]string{
"app": "concierge",
"extralabel": "labelvalue",
"kube-cert-agent.pinniped.dev": "v2",
}
healthyAgentDeploymentWithOldStyleSelector := healthyAgentDeployment.DeepCopy()
healthyAgentDeploymentWithOldStyleSelector.Spec.Selector = metav1.SetAsLabelSelector(oldStyleLabels)
healthyAgentDeploymentWithOldStyleSelector.Spec.Template.ObjectMeta.Labels = oldStyleLabels
healthyAgentDeploymentWithOldStyleSelector.UID = "fake-uid-abc123" // needs UID to test delete options
healthyAgentDeploymentWithOldStyleSelector.ResourceVersion = "fake-resource-version-1234" // needs ResourceVersion to test delete options
// The host network setting from the kube-controller-manager pod should be applied on the
// deployment.
healthyKubeControllerManagerPodWithHostNetwork := healthyKubeControllerManagerPod.DeepCopy()
@@ -186,7 +199,7 @@ func TestAgentController(t *testing.T) {
Namespace: "concierge",
Name: "pinniped-concierge-kube-cert-agent-xyz-1234",
UID: types.UID("pinniped-concierge-kube-cert-agent-xyz-1234-test-uid"),
Labels: map[string]string{"kube-cert-agent.pinniped.dev": "v2"},
Labels: map[string]string{"kube-cert-agent.pinniped.dev": "v3"},
CreationTimestamp: metav1.NewTime(now.Add(-2 * time.Hour)),
},
Spec: corev1.PodSpec{},
@@ -227,6 +240,8 @@ func TestAgentController(t *testing.T) {
alsoAllowUndesiredDistinctErrors []string
wantDistinctLogs []string
wantAgentDeployment *appsv1.Deployment
wantDeploymentActionVerbs []string
wantDeploymentDeleteActionOpts []metav1.DeleteOptions
wantStrategy *configv1alpha1.CredentialIssuerStrategy
}{
{
@@ -369,7 +384,8 @@ func TestAgentController(t *testing.T) {
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch", "create"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -417,7 +433,8 @@ func TestAgentController(t *testing.T) {
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeploymentWithDefaultedPaths,
wantAgentDeployment: healthyAgentDeploymentWithDefaultedPaths,
wantDeploymentActionVerbs: []string{"list", "watch", "create"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -426,6 +443,111 @@ func TestAgentController(t *testing.T) {
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, no running agent pods yet",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPod,
healthyAgentDeploymentWithOldStyleSelector,
pendingAgentPod,
},
wantDistinctErrors: []string{
"could not find a healthy agent pod (1 candidate)",
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create"}, // must recreate deployment when Selector field changes
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "could not find a healthy agent pod (1 candidate)",
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, when delete fails",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPod,
healthyAgentDeploymentWithOldStyleSelector,
pendingAgentPod,
},
addKubeReactions: func(clientset *kubefake.Clientset) {
clientset.PrependReactor("delete", "deployments", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, fmt.Errorf("some delete error")
})
},
wantDistinctErrors: []string{
"could not ensure agent deployment: some delete error",
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeploymentWithOldStyleSelector, // couldn't be deleted, so it didn't change
// tries to delete and then recreate the deployment when the Selector field changes, but the delete always fails, so it keeps retrying the delete
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "delete", "delete", "delete"},
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "could not ensure agent deployment: some delete error",
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, when delete succeeds but create fails",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPod,
healthyAgentDeploymentWithOldStyleSelector,
pendingAgentPod,
},
addKubeReactions: func(clientset *kubefake.Clientset) {
clientset.PrependReactor("create", "deployments", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, fmt.Errorf("some create error")
})
},
wantDistinctErrors: []string{
"could not ensure agent deployment: some create error",
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: nil, // was deleted, but couldn't be recreated
// deletes and then tries to recreate the deployment when the Selector field changes, but the create always fails, so it keeps retrying the create
wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create", "create", "create", "create"},
wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
Message: "could not ensure agent deployment: some create error",
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "update to existing deployment, no running agent pods yet",
pinnipedObjects: []runtime.Object{
@@ -462,7 +584,8 @@ func TestAgentController(t *testing.T) {
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="updating existing deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
wantAgentDeployment: healthyAgentDeploymentWithExtraLabels,
wantAgentDeployment: healthyAgentDeploymentWithExtraLabels,
wantDeploymentActionVerbs: []string{"list", "watch", "update"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -484,7 +607,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
},
wantAgentDeployment: healthyAgentDeploymentWithHostNetwork,
wantAgentDeployment: healthyAgentDeploymentWithHostNetwork,
wantDeploymentActionVerbs: []string{"list", "watch", "update"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -509,7 +633,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -535,7 +660,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: missing \"kubeconfig\" key",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -561,7 +687,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: key \"kubeconfig\" does not contain a valid kubeconfig",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -587,7 +714,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: kubeconfig in key \"kubeconfig\" does not contain any clusters",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -615,7 +743,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"could not exec into agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: some exec error",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -643,7 +772,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
`failed to decode signing cert/key JSON from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: invalid character 'b' looking for beginning of value`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -671,7 +801,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
`failed to decode signing cert base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -699,7 +830,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
`failed to decode signing key base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -730,7 +862,8 @@ func TestAgentController(t *testing.T) {
wantDistinctErrors: []string{
"failed to set signing cert/key content from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: some dynamic cert error",
},
wantAgentDeployment: healthyAgentDeployment,
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
@@ -754,8 +887,9 @@ func TestAgentController(t *testing.T) {
// If we pre-fill the cache here, we should never see any calls to the executor or dynamicCert mocks.
execCache.Set(healthyAgentPod.UID, struct{}{}, 1*time.Hour)
},
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
@@ -782,9 +916,13 @@ func TestAgentController(t *testing.T) {
healthyAgentPod,
validClusterInfoConfigMap,
},
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
@@ -811,10 +949,14 @@ func TestAgentController(t *testing.T) {
healthyAgentPod,
validClusterInfoConfigMap,
},
discoveryURLOverride: pointer.StringPtr("https://overridden-server.example.com/some/path"),
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
discoveryURLOverride: pointer.StringPtr("https://overridden-server.example.com/some/path"),
mocks: mockExecSucceeds,
wantDistinctErrors: []string{""},
wantAgentDeployment: healthyAgentDeployment,
wantDeploymentActionVerbs: []string{"list", "watch"},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="successfully loaded signing key from agent pod into cache"`,
},
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.SuccessStrategyStatus,
@@ -843,6 +985,10 @@ func TestAgentController(t *testing.T) {
if tt.addKubeReactions != nil {
tt.addKubeReactions(kubeClientset)
}
actualDeleteActionOpts := &[]metav1.DeleteOptions{}
trackDeleteKubeClient := testutil.NewDeleteOptionsRecorder(kubeClientset, actualDeleteActionOpts)
kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
log := testlogger.New(t)
@@ -863,10 +1009,16 @@ func TestAgentController(t *testing.T) {
NamePrefix: "pinniped-concierge-kube-cert-agent-",
ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"},
CredentialIssuerName: initialCredentialIssuer.Name,
Labels: map[string]string{"extralabel": "labelvalue"},
DiscoveryURLOverride: tt.discoveryURLOverride,
Labels: map[string]string{
"extralabel": "labelvalue",
// The special label "app" should never be added to the Pods of the kube cert agent Deployment.
// Older versions of Pinniped added this label, but it matches the Selector of the main
// Concierge Deployment, so we do not want it to exist on the kube cert agent pods.
"app": "anything",
},
DiscoveryURLOverride: tt.discoveryURLOverride,
},
&kubeclient.Client{Kubernetes: kubeClientset, PinnipedConcierge: conciergeClientset},
&kubeclient.Client{Kubernetes: trackDeleteKubeClient, PinnipedConcierge: conciergeClientset},
kubeInformers.Core().V1().Pods(),
kubeInformers.Apps().V1().Deployments(),
kubeInformers.Core().V1().Pods(),
@@ -894,6 +1046,20 @@ func TestAgentController(t *testing.T) {
assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
// Assert on all actions that happened to deployments.
var actualDeploymentActionVerbs []string
for _, a := range kubeClientset.Actions() {
if a.GetResource().Resource == "deployments" {
actualDeploymentActionVerbs = append(actualDeploymentActionVerbs, a.GetVerb())
}
}
if tt.wantDeploymentActionVerbs != nil {
require.Equal(t, tt.wantDeploymentActionVerbs, actualDeploymentActionVerbs)
}
if tt.wantDeploymentDeleteActionOpts != nil {
require.Equal(t, tt.wantDeploymentDeleteActionOpts, *actualDeleteActionOpts)
}
// Assert that the agent deployment is in the expected final state.
deployments, err := kubeClientset.AppsV1().Deployments("concierge").List(ctx, metav1.ListOptions{})
require.NoError(t, err)

View File

@@ -9,6 +9,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
@@ -28,6 +29,10 @@ func (c *clientWrapper) CoreV1() corev1client.CoreV1Interface {
return &coreWrapper{CoreV1Interface: c.Interface.CoreV1(), opts: c.opts}
}
func (c *clientWrapper) AppsV1() appsv1client.AppsV1Interface {
return &appsWrapper{AppsV1Interface: c.Interface.AppsV1(), opts: c.opts}
}
type coreWrapper struct {
corev1client.CoreV1Interface
opts *[]metav1.DeleteOptions
@@ -41,6 +46,15 @@ func (c *coreWrapper) Secrets(namespace string) corev1client.SecretInterface {
return &secretsWrapper{SecretInterface: c.CoreV1Interface.Secrets(namespace), opts: c.opts}
}
type appsWrapper struct {
appsv1client.AppsV1Interface
opts *[]metav1.DeleteOptions
}
func (c *appsWrapper) Deployments(namespace string) appsv1client.DeploymentInterface {
return &deploymentsWrapper{DeploymentInterface: c.AppsV1Interface.Deployments(namespace), opts: c.opts}
}
type podsWrapper struct {
corev1client.PodInterface
opts *[]metav1.DeleteOptions
@@ -61,6 +75,16 @@ func (s *secretsWrapper) Delete(ctx context.Context, name string, opts metav1.De
return s.SecretInterface.Delete(ctx, name, opts)
}
type deploymentsWrapper struct {
appsv1client.DeploymentInterface
opts *[]metav1.DeleteOptions
}
func (s *deploymentsWrapper) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
*s.opts = append(*s.opts, opts)
return s.DeploymentInterface.Delete(ctx, name, opts)
}
func NewPreconditions(uid types.UID, rv string) metav1.DeleteOptions {
return metav1.DeleteOptions{
Preconditions: &metav1.Preconditions{
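
For orientation, a hedged sketch of how this recorder and NewPreconditions are meant to fit together in a test (mirroring the controller test above; the client setup and names here are illustrative):

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubefake "k8s.io/client-go/kubernetes/fake"

	"go.pinniped.dev/internal/testutil"
)

func main() {
	clientset := kubefake.NewSimpleClientset()
	recordedOpts := &[]metav1.DeleteOptions{}
	client := testutil.NewDeleteOptionsRecorder(clientset, recordedOpts)

	// Deletes performed through the wrapper append their DeleteOptions to recordedOpts,
	// so a test can later assert on the exact preconditions which were sent.
	_ = client.AppsV1().Deployments("concierge").Delete(context.Background(),
		"pinniped-concierge-kube-cert-agent",
		testutil.NewPreconditions("fake-uid", "fake-resource-version"))
}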

View File

@@ -32,7 +32,7 @@ func TestKubeCertAgent(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
agentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
LabelSelector: "kube-cert-agent.pinniped.dev=v2",
LabelSelector: "kube-cert-agent.pinniped.dev=v3",
})
if err != nil {
return false, fmt.Errorf("failed to list pods: %w", err)