Copy hostNetwork field for kube-cert-agent

For clusters where the control plane nodes aren't running a CNI, the
kube-cert-agent pods deployed by the Concierge cannot be scheduled because
they are not configured to use `hostNetwork: true`. This change embeds the
host network setting in the Concierge configuration by copying it from
the kube-controller-manager pod spec when generating the kube-cert-agent
Deployment.

Also fixed a stray double comma in one of the nearby tests.
Mayank Bhatt 2021-08-23 10:49:09 -07:00
parent f579b1cb9f
commit 68547f767d
2 changed files with 37 additions and 1 deletion
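
As a rough illustration of the mechanism, here is a minimal, self-contained Go sketch. This is not the actual Pinniped code: only the `newAgentDeployment` name, the deployment name/namespace, and the copied `HostNetwork` field come from the diff below; everything else is simplified for illustration.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newAgentDeployment sketches the idea from this commit: the generated
// kube-cert-agent Deployment inherits the HostNetwork setting from the
// kube-controller-manager pod that serves as its template.
// (Simplified; the real function builds a full pod spec.)
func newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pinniped-concierge-kube-cert-agent",
			Namespace: "concierge",
		},
		Spec: appsv1.DeploymentSpec{
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					// The one-line change: copy hostNetwork so the agent pod can run
					// on control plane nodes that have no CNI.
					HostNetwork: controllerManagerPod.Spec.HostNetwork,
				},
			},
		},
	}
}

func main() {
	// A control plane running with host networking...
	controllerManagerPod := &corev1.Pod{
		Spec: corev1.PodSpec{HostNetwork: true},
	}
	// ...produces an agent Deployment with the same setting.
	deployment := newAgentDeployment(controllerManagerPod)
	fmt.Println(deployment.Spec.Template.Spec.HostNetwork) // prints "true"
}
```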

@@ -501,6 +501,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
RunAsUser: pointer.Int64Ptr(0),
RunAsGroup: pointer.Int64Ptr(0),
},
HostNetwork: controllerManagerPod.Spec.HostNetwork,
},
},

@@ -151,6 +151,16 @@ func TestAgentController(t *testing.T) {
},
}
// The host network setting from the kube-controller-manager pod should be applied on the
// deployment.
healthyKubeControllerManagerPodWithHostNetwork := healthyKubeControllerManagerPod.DeepCopy()
healthyKubeControllerManagerPodWithHostNetwork.Spec.HostNetwork = true
// We create an agent deployment that does not use host network and expect the
// controller to add 'hostNetwork: true' to the spec.
healthyAgentDeploymentWithHostNetwork := healthyAgentDeployment.DeepCopy()
healthyAgentDeploymentWithHostNetwork.Spec.Template.Spec.HostNetwork = true
// Make another kube-controller-manager pod that's similar, but does not have the CLI flags we're expecting.
// We should handle this by falling back to default values for the cert and key paths.
healthyKubeControllerManagerPodWithoutArgs := healthyKubeControllerManagerPod.DeepCopy()
@@ -461,6 +471,31 @@ func TestAgentController(t *testing.T) {
LastUpdateTime: metav1.NewTime(now),
},
},
{
name: "deployment exists, but missing host network from kube-controller-manager",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},
kubeObjects: []runtime.Object{
healthyKubeControllerManagerPodWithHostNetwork,
healthyAgentDeployment,
healthyAgentPod,
},
wantDistinctErrors: []string{
"failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
},
wantAgentDeployment: healthyAgentDeploymentWithHostNetwork,
wantStrategy: &configv1alpha1.CredentialIssuerStrategy{
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: configv1alpha1.ErrorStrategyStatus,
Reason: configv1alpha1.CouldNotGetClusterInfoStrategyReason,
Message: "failed to get kube-public/cluster-info configmap: configmap \"cluster-info\" not found",
LastUpdateTime: metav1.NewTime(now),
},
wantDistinctLogs: []string{
`kube-cert-agent-controller "level"=0 "msg"="updating existing deployment" "deployment"={"name":"pinniped-concierge-kube-cert-agent","namespace":"concierge"} "templatePod"={"name":"kube-controller-manager-1","namespace":"kube-system"}`,
},
},
{
name: "deployment exists, configmap missing",
pinnipedObjects: []runtime.Object{
@@ -562,7 +597,7 @@ func TestAgentController(t *testing.T) {
},
},
{
name: "deployment exists, configmap is valid,, exec into agent pod fails",
name: "deployment exists, configmap is valid, exec into agent pod fails",
pinnipedObjects: []runtime.Object{
initialCredentialIssuer,
},