Improve the selectors of Deployments and Services
Fixes #801.

The solution is complicated by the fact that the Selector field of Deployments is immutable. It would have been easy to just make the Selectors of the main Concierge Deployment, the Kube cert agent Deployment, and the various Services use more specific labels, but that would break upgrades. Instead, we make the Pod template labels and the Service selectors more specific, because those are not immutable, and then handle the Deployment selectors in a special way.

For the main Concierge and Supervisor Deployments, we cannot change their selectors, so they remain "app: app_name", and we make other changes to ensure that only the intended pods are selected. We keep the original "app" label on those pods and remove the "app" label from the pods of the Kube cert agent Deployment. With the label removed from the Kube cert agent pods, there is no longer any chance that they will accidentally get selected by the main Concierge Deployment.

For the Kube cert agent Deployment, we can change the immutable selector by deleting and recreating the Deployment. The new selector uses only the unique label that has always been applied to the pods of that Deployment. Upon recreation, these pods no longer have the "app" label, so they will not be selected by the main Concierge Deployment's selector.

The selectors of all Services have been updated to use new labels that more specifically target the intended pods. For the Concierge Services, this prevents them from accidentally including the Kube cert agent pods. For the Supervisor Services, we follow the same convention, both for consistency and to help future-proof the Supervisor app in case a second Deployment is ever added to it.

The selector of the auto-created impersonation proxy Service was also previously using the "app" label. There is no change to this Service, because that label will now select the correct pods, since the Kube cert agent pods no longer have that label. It would be possible to update that selector to use the new, more specific label, but then we would need to invent a way to pass that label into the controller, so it seemed like more work than was justified.
commit cec9f3c4d7 (parent 16f562e81c)
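To make the selector interplay concrete, here is a minimal standalone Go sketch (not part of this commit) using the k8s.io/apimachinery labels package. It assumes the default app_name of "pinniped-concierge"; the label keys and values mirror the manifests changed below.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The main Concierge Deployment's selector is stuck on the old label, because Selector is immutable.
	conciergeSelector := labels.SelectorFromSet(labels.Set{"app": "pinniped-concierge"})

	// Before this change, the kube cert agent pods also carried the "app" label, so they matched.
	oldAgentPodLabels := labels.Set{
		"app":                          "pinniped-concierge",
		"kube-cert-agent.pinniped.dev": "v2",
	}

	// After this change, the agent pods carry only their unique label, so they no longer match.
	newAgentPodLabels := labels.Set{
		"kube-cert-agent.pinniped.dev": "v2",
	}

	fmt.Println(conciergeSelector.Matches(oldAgentPodLabels)) // true  (the old overlap)
	fmt.Println(conciergeSelector.Matches(newAgentPodLabels)) // false (fixed)
}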
@@ -3,7 +3,8 @@

 #@ load("@ytt:data", "data")
 #@ load("@ytt:json", "json")
-#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
+#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
+#@ load("@ytt:template", "template")

 #@ if not data.values.into_namespace:
 ---
@@ -108,10 +109,16 @@ metadata:
 spec:
   replicas: #@ data.values.replicas
   selector:
+    #! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
     matchLabels: #@ defaultLabel()
   template:
     metadata:
-      labels: #@ defaultLabel()
+      labels:
+        #! This has always included defaultLabel(), which is used by this Deployment's selector.
+        _: #@ template.replace(defaultLabel())
+        #! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
+        #! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods.
+        _: #@ template.replace(deploymentPodLabel())
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ""
     spec:
@@ -225,7 +232,7 @@ spec:
           - weight: 50
             podAffinityTerm:
               labelSelector:
-                matchLabels: #@ defaultLabel()
+                matchLabels: #@ deploymentPodLabel()
               topologyKey: kubernetes.io/hostname
 ---
 apiVersion: v1
@@ -237,7 +244,7 @@ metadata:
   labels: #@ labels()
 spec:
   type: ClusterIP
-  selector: #@ defaultLabel()
+  selector: #@ deploymentPodLabel()
   ports:
     - protocol: TCP
       port: 443
@@ -251,7 +258,7 @@ metadata:
   labels: #@ labels()
 spec:
   type: ClusterIP
-  selector: #@ defaultLabel()
+  selector: #@ deploymentPodLabel()
   ports:
     - protocol: TCP
       port: 443
@@ -25,9 +25,14 @@
 #@ end

 #@ def defaultLabel():
+#! Note that the name of this label's key is also assumed by kubecertagent.go and impersonator_config.go
 app: #@ data.values.app_name
 #@ end

+#@ def deploymentPodLabel():
+deployment.pinniped.dev: concierge
+#@ end
+
 #@ def labels():
 _: #@ template.replace(defaultLabel())
 _: #@ template.replace(data.values.custom_labels)
@@ -145,7 +145,7 @@ rules:
   #! We need to be able to create and update deployments in our namespace so we can manage the kube-cert-agent Deployment.
   - apiGroups: [ apps ]
     resources: [ deployments ]
-    verbs: [ create, get, list, patch, update, watch ]
+    verbs: [ create, get, list, patch, update, watch, delete ]
   #! We need to be able to get replicasets so we can form the correct owner references on our generated objects.
   - apiGroups: [ apps ]
     resources: [ replicasets ]
@@ -3,7 +3,8 @@

 #@ load("@ytt:data", "data")
 #@ load("@ytt:json", "json")
-#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
+#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
+#@ load("@ytt:template", "template")

 #@ if not data.values.into_namespace:
 ---
@@ -59,10 +60,16 @@ metadata:
 spec:
   replicas: #@ data.values.replicas
   selector:
+    #! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
     matchLabels: #@ defaultLabel()
   template:
     metadata:
-      labels: #@ defaultLabel()
+      labels:
+        #! This has always included defaultLabel(), which is used by this Deployment's selector.
+        _: #@ template.replace(defaultLabel())
+        #! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
+        #! without accidentally selecting pods from any future Deployments which might also want to use the defaultLabel().
+        _: #@ template.replace(deploymentPodLabel())
     spec:
       securityContext:
         runAsUser: #@ data.values.run_as_user
@@ -156,5 +163,5 @@ spec:
           - weight: 50
             podAffinityTerm:
               labelSelector:
-                matchLabels: #@ defaultLabel()
+                matchLabels: #@ deploymentPodLabel()
               topologyKey: kubernetes.io/hostname
@@ -28,6 +28,10 @@
 app: #@ data.values.app_name
 #@ end

+#@ def deploymentPodLabel():
+deployment.pinniped.dev: supervisor
+#@ end
+
 #@ def labels():
 _: #@ template.replace(defaultLabel())
 _: #@ template.replace(data.values.custom_labels)
@@ -1,8 +1,8 @@
-#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
+#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 #! SPDX-License-Identifier: Apache-2.0

 #@ load("@ytt:data", "data")
-#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")
+#@ load("helpers.lib.yaml", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")

 #@ if data.values.service_http_nodeport_port or data.values.service_https_nodeport_port:
 ---
@@ -14,8 +14,7 @@ metadata:
   labels: #@ labels()
 spec:
   type: NodePort
-  selector:
-    app: #@ data.values.app_name
+  selector: #@ deploymentPodLabel()
   ports:
     #@ if data.values.service_http_nodeport_port:
     - name: http
@@ -47,7 +46,7 @@ metadata:
   labels: #@ labels()
 spec:
   type: ClusterIP
-  selector: #@ defaultLabel()
+  selector: #@ deploymentPodLabel()
   ports:
     #@ if data.values.service_http_clusterip_port:
     - name: http
@@ -73,7 +72,7 @@ metadata:
   labels: #@ labels()
 spec:
   type: LoadBalancer
-  selector: #@ defaultLabel()
+  selector: #@ deploymentPodLabel()
   #@ if data.values.service_loadbalancer_ip:
   loadBalancerIP: #@ data.values.service_loadbalancer_ip
   #@ end
@@ -50,6 +50,10 @@ const (
 	agentPodLabelKey   = "kube-cert-agent.pinniped.dev"
 	agentPodLabelValue = "v2"

+	// conciergeDefaultLabelKeyName is the name of the key of the label applied to all Concierge resources.
+	// This name is determined in the YAML manifests, but this controller needs to treat it as a special case below.
+	conciergeDefaultLabelKeyName = "app"
+
 	ClusterInfoNamespace    = "kube-public"
 	clusterInfoName         = "cluster-info"
 	clusterInfoConfigMapKey = "kubeconfig"
@@ -84,11 +88,27 @@ type AgentConfig struct {
 	DiscoveryURLOverride *string
 }

-func (a *AgentConfig) agentLabels() map[string]string {
+// Only select using the unique label which will not match the pods of any other Deployment.
+// Older versions of Pinniped had multiple labels here.
+func (a *AgentConfig) agentPodSelectorLabels() map[string]string {
+	return map[string]string{agentPodLabelKey: agentPodLabelValue}
+}
+
+// Label the agent pod using the configured labels plus the unique label which we will use in the selector.
+func (a *AgentConfig) agentPodLabels() map[string]string {
 	allLabels := map[string]string{agentPodLabelKey: agentPodLabelValue}
 	for k, v := range a.Labels {
+		// Never label the agent pod with any label whose key is "app" because that could unfortunately match
+		// the selector of the main Concierge Deployment. This is sadly inconsistent because all other resources
+		// get labelled with the "app" label, but unfortunately the selector of the main Concierge Deployment is
+		// an immutable field, so we cannot update it to make it use a more specific label without breaking upgrades.
+		// Therefore, we take extra care here to avoid allowing the kube cert agent pods to match the selector of
+		// the main Concierge Deployment. Note that older versions of Pinniped included this "app" label, so during
+		// an upgrade we must take care to perform an update to remove it.
+		if k != conciergeDefaultLabelKeyName {
 			allLabels[k] = v
 		}
+	}
 	return allLabels
 }

@@ -236,7 +256,7 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
 		return fmt.Errorf("could not get CredentialIssuer to update: %w", err)
 	}

-	// Find the latest healthy kube-controller-manager Pod in kube-system..
+	// Find the latest healthy kube-controller-manager Pod in kube-system.
 	controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
 	if err != nil {
 		err := fmt.Errorf("could not list controller manager pods: %w", err)
@@ -365,16 +385,42 @@ func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, ne
 		return err
 	}

-	// Otherwise update the spec of the Deployment to match our desired state.
+	// Update the spec of the Deployment to match our desired state.
 	updatedDeployment := existingDeployment.DeepCopy()
 	updatedDeployment.Spec = expectedDeployment.Spec
 	updatedDeployment.ObjectMeta = mergeLabelsAndAnnotations(updatedDeployment.ObjectMeta, expectedDeployment.ObjectMeta)
+	desireSelectorUpdate := !apiequality.Semantic.DeepEqual(updatedDeployment.Spec.Selector, existingDeployment.Spec.Selector)
+	desireTemplateLabelsUpdate := !apiequality.Semantic.DeepEqual(updatedDeployment.Spec.Template.Labels, existingDeployment.Spec.Template.Labels)

 	// If the existing Deployment already matches our desired spec, we're done.
 	if apiequality.Semantic.DeepDerivative(updatedDeployment, existingDeployment) {
-		return nil
+		// DeepDerivative allows the map fields of updatedDeployment to be a subset of existingDeployment,
+		// but we want to check that certain of those map fields are exactly equal before deciding to skip the update.
+		if !desireSelectorUpdate && !desireTemplateLabelsUpdate {
+			return nil // already equal enough, so skip update
+		}
 	}

+	// Selector is an immutable field, so if we want to update it then we must delete and recreate the Deployment,
+	// and then we're done. Older versions of Pinniped had multiple labels in the Selector, so to support upgrades from
+	// those versions we take extra care to handle this case.
+	if desireSelectorUpdate {
+		log.Info("deleting deployment to update immutable Selector field")
+		err = c.client.Kubernetes.AppsV1().Deployments(existingDeployment.Namespace).Delete(ctx.Context, existingDeployment.Name, metav1.DeleteOptions{
+			Preconditions: &metav1.Preconditions{
+				UID:             &existingDeployment.UID,
+				ResourceVersion: &existingDeployment.ResourceVersion,
+			},
+		})
+		if err != nil {
+			return err
+		}
+		log.Info("creating new deployment to update immutable Selector field")
+		_, err = c.client.Kubernetes.AppsV1().Deployments(expectedDeployment.Namespace).Create(ctx.Context, expectedDeployment, metav1.CreateOptions{})
+		return err
+	}
+
+	// Otherwise, update the Deployment.
 	log.Info("updating existing deployment")
 	_, err = c.client.Kubernetes.AppsV1().Deployments(updatedDeployment.Namespace).Update(ctx.Context, updatedDeployment, metav1.UpdateOptions{})
 	return err
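The comment above about DeepDerivative is worth unpacking. The following small standalone sketch (not part of this commit) shows why the extra DeepEqual checks on the Selector and the Pod template labels are needed: DeepDerivative treats a map that is a subset of another map as matching, so a leftover old-style label (for example the "app" label from an older Pinniped install) would not trigger an update on its own.

package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

func main() {
	// The labels we now want on the kube cert agent pods.
	desired := map[string]string{"kube-cert-agent.pinniped.dev": "v2"}

	// The labels that might already exist on pods created by an older Pinniped version.
	existing := map[string]string{
		"kube-cert-agent.pinniped.dev": "v2",
		"app":                          "concierge", // leftover label that we want removed
	}

	// DeepDerivative says these "match" because desired is a subset of existing...
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, existing)) // true

	// ...while DeepEqual reports the difference, which is what the controller checks for
	// the Selector and the Pod template labels before deciding to skip the update.
	fmt.Println(apiequality.Semantic.DeepEqual(desired, existing)) // false
}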
@@ -457,10 +503,10 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
 		},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: pointer.Int32Ptr(1),
-			Selector: metav1.SetAsLabelSelector(c.cfg.agentLabels()),
+			Selector: metav1.SetAsLabelSelector(c.cfg.agentPodSelectorLabels()),
 			Template: corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
-					Labels: c.cfg.agentLabels(),
+					Labels: c.cfg.agentPodLabels(),
 				},
 				Spec: corev1.PodSpec{
 					TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
@@ -32,6 +32,7 @@ import (
 	"go.pinniped.dev/internal/controllerlib"
 	"go.pinniped.dev/internal/here"
 	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/testutil"
 	"go.pinniped.dev/internal/testutil/testlogger"
 )

@@ -85,12 +86,11 @@ func TestAgentController(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "concierge",
 			Name:      "pinniped-concierge-kube-cert-agent",
-			Labels:    map[string]string{"extralabel": "labelvalue"},
+			Labels:    map[string]string{"extralabel": "labelvalue", "app": "anything"},
 		},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: pointer.Int32Ptr(1),
 			Selector: metav1.SetAsLabelSelector(map[string]string{
-				"extralabel":                   "labelvalue",
 				"kube-cert-agent.pinniped.dev": "v2",
 			}),
 			Template: corev1.PodTemplateSpec{
@@ -151,6 +151,19 @@ func TestAgentController(t *testing.T) {
 		},
 	}

+	// Older versions of Pinniped had a selector which included "app: app_name", e.g. "app: concierge".
+	// Selector is an immutable field, but we want to support upgrading from those older versions anyway.
+	oldStyleLabels := map[string]string{
+		"app":                          "concierge",
+		"extralabel":                   "labelvalue",
+		"kube-cert-agent.pinniped.dev": "v2",
+	}
+	healthyAgentDeploymentWithOldStyleSelector := healthyAgentDeployment.DeepCopy()
+	healthyAgentDeploymentWithOldStyleSelector.Spec.Selector = metav1.SetAsLabelSelector(oldStyleLabels)
+	healthyAgentDeploymentWithOldStyleSelector.Spec.Template.ObjectMeta.Labels = oldStyleLabels
+	healthyAgentDeploymentWithOldStyleSelector.UID = "fake-uid-abc123"                        // needs UID to test delete options
+	healthyAgentDeploymentWithOldStyleSelector.ResourceVersion = "fake-resource-version-1234" // needs ResourceVersion to test delete options
+
 	// The host network setting from the kube-controller-manager pod should be applied on the
 	// deployment.
 	healthyKubeControllerManagerPodWithHostNetwork := healthyKubeControllerManagerPod.DeepCopy()
@@ -227,6 +240,8 @@ func TestAgentController(t *testing.T) {
 		alsoAllowUndesiredDistinctErrors []string
 		wantDistinctLogs                 []string
 		wantAgentDeployment              *appsv1.Deployment
+		wantDeploymentActionVerbs        []string
+		wantDeploymentDeleteActionOpts   []metav1.DeleteOptions
 		wantStrategy                     *configv1alpha1.CredentialIssuerStrategy
 	}{
 		{
@@ -370,6 +385,7 @@ func TestAgentController(t *testing.T) {
 				`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch", "create"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -418,6 +434,7 @@ func TestAgentController(t *testing.T) {
 				`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
 			},
 			wantAgentDeployment: healthyAgentDeploymentWithDefaultedPaths,
+			wantDeploymentActionVerbs: []string{"list", "watch", "create"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -426,6 +443,111 @@ func TestAgentController(t *testing.T) {
 				LastUpdateTime: metav1.NewTime(now),
 			},
 		},
+		{
+			name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, no running agent pods yet",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
+			kubeObjects: []runtime.Object{
+				healthyKubeControllerManagerPod,
+				healthyAgentDeploymentWithOldStyleSelector,
+				pendingAgentPod,
+			},
+			wantDistinctErrors: []string{
+				"could not find a healthy agent pod (1 candidate)",
+			},
+			wantDistinctLogs: []string{
+				`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+				`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+			},
+			wantAgentDeployment:       healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create"}, // must recreate deployment when Selector field changes
+			wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
+				testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
+			},
+			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
+				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+				Status:         configv1alpha1.ErrorStrategyStatus,
+				Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
+				Message:        "could not find a healthy agent pod (1 candidate)",
+				LastUpdateTime: metav1.NewTime(now),
+			},
+		},
+		{
+			name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, when delete fails",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
+			kubeObjects: []runtime.Object{
+				healthyKubeControllerManagerPod,
+				healthyAgentDeploymentWithOldStyleSelector,
+				pendingAgentPod,
+			},
+			addKubeReactions: func(clientset *kubefake.Clientset) {
+				clientset.PrependReactor("delete", "deployments", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, fmt.Errorf("some delete error")
+				})
+			},
+			wantDistinctErrors: []string{
+				"could not ensure agent deployment: some delete error",
+			},
+			wantDistinctLogs: []string{
+				`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+			},
+			wantAgentDeployment: healthyAgentDeploymentWithOldStyleSelector, // couldn't be deleted, so it didn't change
+			// delete to try to recreate deployment when Selector field changes, but delete always fails, so keeps trying to delete
+			wantDeploymentActionVerbs: []string{"list", "watch", "delete", "delete", "delete", "delete"},
+			wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
+				testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
+				testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
+				testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
+				testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
+			},
+			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
+				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+				Status:         configv1alpha1.ErrorStrategyStatus,
+				Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
+				Message:        "could not ensure agent deployment: some delete error",
+				LastUpdateTime: metav1.NewTime(now),
+			},
+		},
+		{
+			name: "to support upgrade from old versions, update to immutable selector field of existing deployment causes delete and recreate, when delete succeeds but create fails",
+			pinnipedObjects: []runtime.Object{
+				initialCredentialIssuer,
+			},
+			kubeObjects: []runtime.Object{
+				healthyKubeControllerManagerPod,
+				healthyAgentDeploymentWithOldStyleSelector,
+				pendingAgentPod,
+			},
+			addKubeReactions: func(clientset *kubefake.Clientset) {
+				clientset.PrependReactor("create", "deployments", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, fmt.Errorf("some create error")
+				})
+			},
+			wantDistinctErrors: []string{
+				"could not ensure agent deployment: some create error",
+			},
+			wantDistinctLogs: []string{
+				`kube-cert-agent-controller "level"=0 "msg"="deleting deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+				`kube-cert-agent-controller "level"=0 "msg"="creating new deployment to update immutable Selector field" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+				`kube-cert-agent-controller "level"=0 "msg"="creating new deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
+			},
+			wantAgentDeployment: nil, // was deleted, but couldn't be recreated
+			// delete to try to recreate deployment when Selector field changes, but create always fails, so keeps trying to recreate
+			wantDeploymentActionVerbs: []string{"list", "watch", "delete", "create", "create", "create", "create"},
+			wantDeploymentDeleteActionOpts: []metav1.DeleteOptions{
+				testutil.NewPreconditions(healthyAgentDeploymentWithOldStyleSelector.UID, healthyAgentDeploymentWithOldStyleSelector.ResourceVersion),
+			},
+			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
+				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+				Status:         configv1alpha1.ErrorStrategyStatus,
+				Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
+				Message:        "could not ensure agent deployment: some create error",
+				LastUpdateTime: metav1.NewTime(now),
+			},
+		},
 		{
 			name: "update to existing deployment, no running agent pods yet",
 			pinnipedObjects: []runtime.Object{
@@ -463,6 +585,7 @@ func TestAgentController(t *testing.T) {
 				`kube-cert-agent-controller "level"=0 "msg"="updating existing deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
 			},
 			wantAgentDeployment: healthyAgentDeploymentWithExtraLabels,
+			wantDeploymentActionVerbs: []string{"list", "watch", "update"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -485,6 +608,7 @@ func TestAgentController(t *testing.T) {
 				"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
 			},
 			wantAgentDeployment: healthyAgentDeploymentWithHostNetwork,
+			wantDeploymentActionVerbs: []string{"list", "watch", "update"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -510,6 +634,7 @@ func TestAgentController(t *testing.T) {
 				"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -536,6 +661,7 @@ func TestAgentController(t *testing.T) {
 				"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: missing \"kubeconfig\" key",
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -562,6 +688,7 @@ func TestAgentController(t *testing.T) {
 				"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: key \"kubeconfig\" does not contain a valid kubeconfig",
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -588,6 +715,7 @@ func TestAgentController(t *testing.T) {
 				"could not extract Kubernetes API endpoint info from kube-public/cluster-info configmap: kubeconfig in key \"kubeconfig\" does not contain any clusters",
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -616,6 +744,7 @@ func TestAgentController(t *testing.T) {
 				"could not exec into agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: some exec error",
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -644,6 +773,7 @@ func TestAgentController(t *testing.T) {
 				`failed to decode signing cert/key JSON from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: invalid character 'b' looking for beginning of value`,
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -672,6 +802,7 @@ func TestAgentController(t *testing.T) {
 				`failed to decode signing cert base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -700,6 +831,7 @@ func TestAgentController(t *testing.T) {
 				`failed to decode signing key base64 from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: illegal base64 data at input byte 4`,
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -731,6 +863,7 @@ func TestAgentController(t *testing.T) {
 				"failed to set signing cert/key content from agent pod concierge/pinniped-concierge-kube-cert-agent-xyz-1234: some dynamic cert error",
 			},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.ErrorStrategyStatus,
@@ -756,6 +889,7 @@ func TestAgentController(t *testing.T) {
 			},
 			wantDistinctErrors:  []string{""},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.SuccessStrategyStatus,
@@ -785,6 +919,7 @@ func TestAgentController(t *testing.T) {
 			mocks:               mockExecSucceeds,
 			wantDistinctErrors:  []string{""},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.SuccessStrategyStatus,
@@ -815,6 +950,7 @@ func TestAgentController(t *testing.T) {
 			mocks:               mockExecSucceeds,
 			wantDistinctErrors:  []string{""},
 			wantAgentDeployment: healthyAgentDeployment,
+			wantDeploymentActionVerbs: []string{"list", "watch"},
 			wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
 				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
 				Status:         configv1alpha1.SuccessStrategyStatus,
@@ -843,6 +979,10 @@ func TestAgentController(t *testing.T) {
 			if tt.addKubeReactions != nil {
 				tt.addKubeReactions(kubeClientset)
 			}

+			actualDeleteActionOpts := &[]metav1.DeleteOptions{}
+			trackDeleteKubeClient := testutil.NewDeleteOptionsRecorder(kubeClientset, actualDeleteActionOpts)
+
 			kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
 			log := testlogger.New(t)

@@ -863,10 +1003,16 @@ func TestAgentController(t *testing.T) {
 					NamePrefix:                "pinniped-concierge-kube-cert-agent-",
 					ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"},
 					CredentialIssuerName:      initialCredentialIssuer.Name,
-					Labels:                    map[string]string{"extralabel": "labelvalue"},
+					Labels: map[string]string{
+						"extralabel": "labelvalue",
+						// The special label "app" should never be added to the Pods of the kube cert agent Deployment.
+						// Older versions of Pinniped added this label, but it matches the Selector of the main
+						// Concierge Deployment, so we do not want it to exist on the Kube cert agent pods.
+						"app": "anything",
+					},
 					DiscoveryURLOverride: tt.discoveryURLOverride,
 				},
-				&kubeclient.Client{Kubernetes: kubeClientset, PinnipedConcierge: conciergeClientset},
+				&kubeclient.Client{Kubernetes: trackDeleteKubeClient, PinnipedConcierge: conciergeClientset},
 				kubeInformers.Core().V1().Pods(),
 				kubeInformers.Apps().V1().Deployments(),
 				kubeInformers.Core().V1().Pods(),
@@ -894,6 +1040,20 @@ func TestAgentController(t *testing.T) {

 			assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")

+			// Assert on all actions that happened to deployments.
+			var actualDeploymentActionVerbs []string
+			for _, a := range kubeClientset.Actions() {
+				if a.GetResource().Resource == "deployments" {
+					actualDeploymentActionVerbs = append(actualDeploymentActionVerbs, a.GetVerb())
+				}
+			}
+			if tt.wantDeploymentActionVerbs != nil {
+				require.Equal(t, tt.wantDeploymentActionVerbs, actualDeploymentActionVerbs)
+			}
+			if tt.wantDeploymentDeleteActionOpts != nil {
+				require.Equal(t, tt.wantDeploymentDeleteActionOpts, *actualDeleteActionOpts)
+			}
+
 			// Assert that the agent deployment is in the expected final state.
 			deployments, err := kubeClientset.AppsV1().Deployments("concierge").List(ctx, metav1.ListOptions{})
 			require.NoError(t, err)
@@ -9,6 +9,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
+	appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 )

@@ -28,6 +29,10 @@ func (c *clientWrapper) CoreV1() corev1client.CoreV1Interface {
 	return &coreWrapper{CoreV1Interface: c.Interface.CoreV1(), opts: c.opts}
 }

+func (c *clientWrapper) AppsV1() appsv1client.AppsV1Interface {
+	return &appsWrapper{AppsV1Interface: c.Interface.AppsV1(), opts: c.opts}
+}
+
 type coreWrapper struct {
 	corev1client.CoreV1Interface
 	opts *[]metav1.DeleteOptions
@@ -41,6 +46,15 @@ func (c *coreWrapper) Secrets(namespace string) corev1client.SecretInterface {
 	return &secretsWrapper{SecretInterface: c.CoreV1Interface.Secrets(namespace), opts: c.opts}
 }

+type appsWrapper struct {
+	appsv1client.AppsV1Interface
+	opts *[]metav1.DeleteOptions
+}
+
+func (c *appsWrapper) Deployments(namespace string) appsv1client.DeploymentInterface {
+	return &deploymentsWrapper{DeploymentInterface: c.AppsV1Interface.Deployments(namespace), opts: c.opts}
+}
+
 type podsWrapper struct {
 	corev1client.PodInterface
 	opts *[]metav1.DeleteOptions
@@ -61,6 +75,16 @@ func (s *secretsWrapper) Delete(ctx context.Context, name string, opts metav1.De
 	return s.SecretInterface.Delete(ctx, name, opts)
 }

+type deploymentsWrapper struct {
+	appsv1client.DeploymentInterface
+	opts *[]metav1.DeleteOptions
+}
+
+func (s *deploymentsWrapper) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	*s.opts = append(*s.opts, opts)
+	return s.DeploymentInterface.Delete(ctx, name, opts)
+}
+
 func NewPreconditions(uid types.UID, rv string) metav1.DeleteOptions {
 	return metav1.DeleteOptions{
 		Preconditions: &metav1.Preconditions{