Refactor kube-cert-agent controllers to use a Deployment.

This is a relatively large rewrite of much of the kube-cert-agent controllers. Instead of managing raw Pod objects, they now create a single Deployment and let the built-in k8s controller handle it from there. This reduces the amount of code we need and should handle a number of edge cases better, especially those where a Pod becomes "wedged" and needs to be recreated.

Signed-off-by: Matt Moyer <moyerm@vmware.com>
commit 2843c4f8cb
parent cc51c72c12
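For orientation before the diff: the commit message says the controllers now create a single Deployment instead of raw Pods. Below is a minimal, hypothetical sketch (not code from this commit) of what that approach can look like with client-go; the Deployment name, labels, image, and the ensureAgentDeployment helper are illustrative assumptions only.

package main

import (
  "context"

  appsv1 "k8s.io/api/apps/v1"
  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/client-go/kubernetes"
)

// ensureAgentDeployment creates one Deployment for the agent and then relies on
// the built-in Deployment controller to keep its pod running, recreating it if
// it ever becomes wedged.
func ensureAgentDeployment(ctx context.Context, client kubernetes.Interface, agentNamespace string) error {
  replicas := int32(1)
  agentLabels := map[string]string{"kube-cert-agent.pinniped.dev": "v2"} // hypothetical label
  deployment := &appsv1.Deployment{
    ObjectMeta: metav1.ObjectMeta{Name: "pinniped-concierge-kube-cert-agent", Namespace: agentNamespace},
    Spec: appsv1.DeploymentSpec{
      Replicas: &replicas,
      Selector: &metav1.LabelSelector{MatchLabels: agentLabels},
      Template: corev1.PodTemplateSpec{
        ObjectMeta: metav1.ObjectMeta{Labels: agentLabels},
        Spec: corev1.PodSpec{
          Containers: []corev1.Container{{
            Name:    "sleeper",
            Image:   "some-agent-image",
            Command: []string{"/bin/sleep", "infinity"},
          }},
        },
      },
    },
  }
  // Create is enough for a sketch; a real controller would also diff and update.
  _, err := client.AppsV1().Deployments(agentNamespace).Create(ctx, deployment, metav1.CreateOptions{})
  return err
}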
@ -82,16 +82,21 @@ rules:
  - apiGroups: [ "" ]
    resources: [ secrets ]
    verbs: [ create, get, list, patch, update, watch, delete ]
- #! We need to be able to CRUD pods in our namespace so we can reconcile the kube-cert-agent pods.
+ #! We need to be able to watch pods in our namespace so we can find the kube-cert-agent pods.
  - apiGroups: [ "" ]
    resources: [ pods ]
-   verbs: [ create, get, list, patch, update, watch, delete ]
+   verbs: [ get, list, watch ]
  #! We need to be able to exec into pods in our namespace so we can grab the API server's private key
  - apiGroups: [ "" ]
    resources: [ pods/exec ]
    verbs: [ create ]
+ #! We need to be able to create and update deployments in our namespace so we can manage the kube-cert-agent Deployment.
  - apiGroups: [ apps ]
-   resources: [ replicasets,deployments ]
+   resources: [ deployments ]
+   verbs: [ create, get, list, patch, update, watch ]
+ #! We need to be able to get replicasets so we can form the correct owner references on our generated objects.
+ - apiGroups: [ apps ]
+   resources: [ replicasets ]
    verbs: [ get ]
  - apiGroups: [ "" ]
    resources: [ configmaps ]
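The new replicasets rule exists because a Deployment does not own its pods directly: the Deployment owns a ReplicaSet, which owns the pods, so walking from a pod up to its Deployment takes exactly one `get` on replicasets. A hedged sketch of that walk, with a hypothetical helper that is not from this commit:

package example

import (
  "context"

  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/client-go/kubernetes"
)

// findOwningDeploymentRef walks pod -> ReplicaSet -> Deployment using a single
// `get` on replicasets, which is the permission the new RBAC rule grants.
func findOwningDeploymentRef(ctx context.Context, client kubernetes.Interface, pod *corev1.Pod) (*metav1.OwnerReference, error) {
  for _, ref := range pod.OwnerReferences {
    if ref.Kind != "ReplicaSet" {
      continue
    }
    rs, err := client.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, ref.Name, metav1.GetOptions{})
    if err != nil {
      return nil, err
    }
    for _, rsRef := range rs.OwnerReferences {
      if rsRef.Kind == "Deployment" {
        return rsRef.DeepCopy(), nil
      }
    }
  }
  return nil, nil // pod is not managed by a Deployment
}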
@ -1,219 +0,0 @@ (deleted file)
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
  "context"
  "fmt"

  "github.com/spf13/pflag"
  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/util/clock"
  corev1informers "k8s.io/client-go/informers/core/v1"
  "k8s.io/client-go/kubernetes"
  "k8s.io/client-go/util/retry"
  "k8s.io/klog/v2"

  pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
  pinnipedcontroller "go.pinniped.dev/internal/controller"
  "go.pinniped.dev/internal/controller/issuerconfig"
  "go.pinniped.dev/internal/controllerlib"
  "go.pinniped.dev/internal/plog"
)

// These constants are the default values for the kube-controller-manager flags. If the flags are
// not properly set on the kube-controller-manager process, then we will fallback to using these.
const (
  k8sAPIServerCACertPEMDefaultPath = "/etc/kubernetes/ca/ca.pem"
  k8sAPIServerCAKeyPEMDefaultPath  = "/etc/kubernetes/ca/ca.key"
)

type annotaterController struct {
  agentPodConfig                 *AgentPodConfig
  credentialIssuerLocationConfig *CredentialIssuerLocationConfig
  credentialIssuerLabels         map[string]string
  clock                          clock.Clock
  k8sClient                      kubernetes.Interface
  pinnipedAPIClient              pinnipedclientset.Interface
  kubeSystemPodInformer          corev1informers.PodInformer
  agentPodInformer               corev1informers.PodInformer
}

// NewAnnotaterController returns a controller that updates agent pods with the path to the kube
// API's certificate and key.
//
// This controller will add annotations to agent pods with the best-guess paths to the kube API's
// certificate and key.
//
// It also is tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewAnnotaterController(
  agentPodConfig *AgentPodConfig,
  credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
  credentialIssuerLabels map[string]string,
  clock clock.Clock,
  k8sClient kubernetes.Interface,
  pinnipedAPIClient pinnipedclientset.Interface,
  kubeSystemPodInformer corev1informers.PodInformer,
  agentPodInformer corev1informers.PodInformer,
  withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
  return controllerlib.New(
    controllerlib.Config{
      Name: "kube-cert-agent-annotater-controller",
      Syncer: &annotaterController{
        agentPodConfig:                 agentPodConfig,
        credentialIssuerLocationConfig: credentialIssuerLocationConfig,
        credentialIssuerLabels:         credentialIssuerLabels,
        clock:                          clock,
        k8sClient:                      k8sClient,
        pinnipedAPIClient:              pinnipedAPIClient,
        kubeSystemPodInformer:          kubeSystemPodInformer,
        agentPodInformer:               agentPodInformer,
      },
    },
    withInformer(
      kubeSystemPodInformer,
      pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
      controllerlib.InformerOption{},
    ),
    withInformer(
      agentPodInformer,
      pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
      controllerlib.InformerOption{},
    ),
  )
}

// Sync implements controllerlib.Syncer.
func (c *annotaterController) Sync(ctx controllerlib.Context) error {
  agentPods, err := c.agentPodInformer.
    Lister().
    Pods(c.agentPodConfig.Namespace).
    List(c.agentPodConfig.AgentSelector())
  if err != nil {
    return fmt.Errorf("informer cannot list agent pods: %w", err)
  }

  for _, agentPod := range agentPods {
    controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
    if err != nil {
      return err
    }
    if controllerManagerPod == nil {
      // The deleter will clean this orphaned agent.
      continue
    }

    certPath := getContainerArgByName(
      controllerManagerPod,
      "cluster-signing-cert-file",
      k8sAPIServerCACertPEMDefaultPath,
    )
    keyPath := getContainerArgByName(
      controllerManagerPod,
      "cluster-signing-key-file",
      k8sAPIServerCAKeyPEMDefaultPath,
    )
    if err := c.maybeUpdateAgentPod(
      ctx.Context,
      agentPod.Name,
      agentPod.Namespace,
      certPath,
      keyPath,
    ); err != nil {
      err = fmt.Errorf("cannot update agent pod: %w", err)
      strategyResultUpdateErr := issuerconfig.UpdateStrategy(
        ctx.Context,
        c.credentialIssuerLocationConfig.Name,
        c.credentialIssuerLabels,
        c.pinnipedAPIClient,
        strategyError(c.clock, err),
      )
      if strategyResultUpdateErr != nil {
        // If the CI update fails, then we probably want to try again. This controller will get
        // called again because of the pod create failure, so just try the CI update again then.
        klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuer")
      }

      return err
    }
  }

  return nil
}

func (c *annotaterController) maybeUpdateAgentPod(
  ctx context.Context,
  name string,
  namespace string,
  certPath string,
  keyPath string,
) error {
  return retry.RetryOnConflict(retry.DefaultRetry, func() error {
    agentPod, err := c.k8sClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
    if err != nil {
      return err
    }

    if agentPod.Annotations[agentPodCertPathAnnotationKey] != certPath ||
      agentPod.Annotations[agentPodKeyPathAnnotationKey] != keyPath {
      if err := c.reallyUpdateAgentPod(
        ctx,
        agentPod,
        certPath,
        keyPath,
      ); err != nil {
        return err
      }
    }

    return nil
  })
}

func (c *annotaterController) reallyUpdateAgentPod(
  ctx context.Context,
  agentPod *corev1.Pod,
  certPath string,
  keyPath string,
) error {
  // Create a deep copy of the agent pod since it is coming straight from the cache.
  updatedAgentPod := agentPod.DeepCopy()
  if updatedAgentPod.Annotations == nil {
    updatedAgentPod.Annotations = make(map[string]string)
  }
  updatedAgentPod.Annotations[agentPodCertPathAnnotationKey] = certPath
  updatedAgentPod.Annotations[agentPodKeyPathAnnotationKey] = keyPath

  plog.Debug(
    "updating agent pod annotations",
    "pod", klog.KObj(updatedAgentPod),
    "certPath", certPath,
    "keyPath", keyPath,
  )
  _, err := c.k8sClient.
    CoreV1().
    Pods(agentPod.Namespace).
    Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
  return err
}

func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
  for _, container := range pod.Spec.Containers {
    flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
    flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
    var val string
    flagset.StringVar(&val, name, "", "")
    _ = flagset.Parse(append(container.Command, container.Args...))
    if val != "" {
      return val
    }
  }
  return fallbackValue
}
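getContainerArgByName above leans on pflag's tolerance for undeclared flags to pull one value out of a full kube-controller-manager command line. A small runnable sketch of that behavior (the command line shown is illustrative, not from this commit):

package main

import (
  "fmt"

  "github.com/spf13/pflag"
)

func main() {
  command := []string{
    "kube-controller-manager",
    "--use-service-account-credentials=true", // undeclared flag, silently skipped
    "--cluster-signing-cert-file", "/etc/kubernetes/pki/ca.crt",
  }
  flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
  flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
  var val string
  flagset.StringVar(&val, "cluster-signing-cert-file", "", "")
  _ = flagset.Parse(command) // handles both "--flag value" and "--flag=value"
  fmt.Println(val)           // /etc/kubernetes/pki/ca.crt
}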
@ -1,727 +0,0 @@ (deleted file)
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
  "context"
  "errors"
  "testing"
  "time"

  "github.com/sclevine/spec"
  "github.com/sclevine/spec/report"
  "github.com/stretchr/testify/require"
  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/apimachinery/pkg/runtime/schema"
  "k8s.io/apimachinery/pkg/util/clock"
  kubeinformers "k8s.io/client-go/informers"
  corev1informers "k8s.io/client-go/informers/core/v1"
  kubernetesfake "k8s.io/client-go/kubernetes/fake"
  coretesting "k8s.io/client-go/testing"

  configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
  pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
  "go.pinniped.dev/internal/controllerlib"
  "go.pinniped.dev/internal/testutil"
)

func TestAnnotaterControllerFilter(t *testing.T) {
  defineSharedKubecertagentFilterSpecs(
    t,
    "AnnotaterControllerFilter",
    func(
      agentPodConfig *AgentPodConfig,
      _ *CredentialIssuerLocationConfig,
      kubeSystemPodInformer corev1informers.PodInformer,
      agentPodInformer corev1informers.PodInformer,
      observableWithInformerOption *testutil.ObservableWithInformerOption,
    ) {
      _ = NewAnnotaterController(
        agentPodConfig,
        nil, // credentialIssuerLocationConfig, shouldn't matter
        nil, // credentialIssuerLabels, shouldn't matter
        nil, // clock, shouldn't matter
        nil, // k8sClient, shouldn't matter
        nil, // pinnipedClient, shouldn't matter
        kubeSystemPodInformer,
        agentPodInformer,
        observableWithInformerOption.WithInformer,
      )
    },
  )
}

func TestAnnotaterControllerSync(t *testing.T) {
  spec.Run(t, "AnnotaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
    const kubeSystemNamespace = "kube-system"
    const agentPodNamespace = "agent-pod-namespace"
    const defaultKubeControllerManagerClusterSigningCertFileFlagValue = "/etc/kubernetes/ca/ca.pem"
    const defaultKubeControllerManagerClusterSigningKeyFileFlagValue = "/etc/kubernetes/ca/ca.key"
    const credentialIssuerResourceName = "ci-resource-name"

    const (
      certPath           = "some-cert-path"
      certPathAnnotation = "kube-cert-agent.pinniped.dev/cert-path"

      keyPath           = "some-key-path"
      keyPathAnnotation = "kube-cert-agent.pinniped.dev/key-path"
    )

    var r *require.Assertions

    var subject controllerlib.Controller
    var kubeAPIClient *kubernetesfake.Clientset
    var kubeSystemInformerClient *kubernetesfake.Clientset
    var kubeSystemInformers kubeinformers.SharedInformerFactory
    var agentInformerClient *kubernetesfake.Clientset
    var agentInformers kubeinformers.SharedInformerFactory
    var pinnipedAPIClient *pinnipedfake.Clientset
    var cancelContext context.Context
    var cancelContextCancelFunc context.CancelFunc
    var syncContext *controllerlib.Context
    var controllerManagerPod, agentPod *corev1.Pod
    var podsGVR schema.GroupVersionResource
    var credentialIssuerGVR schema.GroupVersionResource
    var frozenNow time.Time
    var credentialIssuerLabels map[string]string

    // Defer starting the informers until the last possible moment so that the
    // nested Before's can keep adding things to the informer caches.
    var startInformersAndController = func() {
      // Set this at the last second to allow for injection of server override.
      subject = NewAnnotaterController(
        &AgentPodConfig{
          Namespace:      agentPodNamespace,
          ContainerImage: "some-agent-image",
          PodNamePrefix:  "some-agent-name-",
          AdditionalLabels: map[string]string{
            "myLabelKey1": "myLabelValue1",
            "myLabelKey2": "myLabelValue2",
          },
        },
        &CredentialIssuerLocationConfig{
          Name: credentialIssuerResourceName,
        },
        credentialIssuerLabels,
        clock.NewFakeClock(frozenNow),
        kubeAPIClient,
        pinnipedAPIClient,
        kubeSystemInformers.Core().V1().Pods(),
        agentInformers.Core().V1().Pods(),
        controllerlib.WithInformer,
      )

      // Set this at the last second to support calling subject.Name().
      syncContext = &controllerlib.Context{
        Context: cancelContext,
        Name:    subject.Name(),
        Key: controllerlib.Key{
          Namespace: kubeSystemNamespace,
          Name:      "should-not-matter",
        },
      }

      // Must start informers before calling TestRunSynchronously()
      kubeSystemInformers.Start(cancelContext.Done())
      agentInformers.Start(cancelContext.Done())
      controllerlib.TestRunSynchronously(t, subject)
    }

    it.Before(func() {
      r = require.New(t)

      kubeAPIClient = kubernetesfake.NewSimpleClientset()

      kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
      kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)

      agentInformerClient = kubernetesfake.NewSimpleClientset()
      agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)

      pinnipedAPIClient = pinnipedfake.NewSimpleClientset()

      cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())

      controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
        kubeSystemNamespace, agentPodNamespace, certPath, keyPath,
      )

      podsGVR = schema.GroupVersionResource{
        Group:    corev1.SchemeGroupVersion.Group,
        Version:  corev1.SchemeGroupVersion.Version,
        Resource: "pods",
      }

      credentialIssuerGVR = schema.GroupVersionResource{
        Group:    configv1alpha1.GroupName,
        Version:  configv1alpha1.SchemeGroupVersion.Version,
        Resource: "credentialissuers",
      }

      frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)

      // Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
      // logic on this thing.
      ignorablePod := corev1.Pod{}
      ignorablePod.Name = "some-ignorable-pod"
      r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
      r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
      r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
    })

    it.After(func() {
      cancelContextCancelFunc()
    })

    when("there is an agent pod without annotations set", func() {
      it.Before(func() {
        r.NoError(agentInformerClient.Tracker().Add(agentPod))
        r.NoError(kubeAPIClient.Tracker().Add(agentPod))
      })

      when("there is a matching controller manager pod", func() {
        it.Before(func() {
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the annotations according to the controller manager pod", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = certPath
          updatedAgentPod.Annotations[keyPathAnnotation] = keyPath

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })

        when("updating the agent pod fails", func() {
          it.Before(func() {
            kubeAPIClient.PrependReactor(
              "update",
              "pods",
              func(_ coretesting.Action) (bool, runtime.Object, error) {
                return true, nil, errors.New("some update error")
              },
            )
          })

          it("returns the error", func() {
            startInformersAndController()
            err := controllerlib.TestSync(t, subject, *syncContext)
            r.EqualError(err, "cannot update agent pod: some update error")
          })

          when("there is already a CredentialIssuer", func() {
            var initialCredentialIssuer *configv1alpha1.CredentialIssuer

            it.Before(func() {
              initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
                TypeMeta: metav1.TypeMeta{},
                ObjectMeta: metav1.ObjectMeta{
                  Name: credentialIssuerResourceName,
                },
                Status: configv1alpha1.CredentialIssuerStatus{
                  Strategies: []configv1alpha1.CredentialIssuerStrategy{},
                },
              }
              r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
            })

            it("updates the CredentialIssuer status with the error", func() {
              startInformersAndController()
              err := controllerlib.TestSync(t, subject, *syncContext)

              expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
              expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
                {
                  Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
                  Status:         configv1alpha1.ErrorStrategyStatus,
                  Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
                  Message:        "cannot update agent pod: some update error",
                  LastUpdateTime: metav1.NewTime(frozenNow),
                },
              }
              expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
              expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)

              r.EqualError(err, "cannot update agent pod: some update error")
              r.Equal(
                []coretesting.Action{expectedGetAction, expectedUpdateAction},
                pinnipedAPIClient.Actions(),
              )
            })

            when("updating the CredentialIssuer fails", func() {
              it.Before(func() {
                pinnipedAPIClient.PrependReactor(
                  "update",
                  "credentialissuers",
                  func(_ coretesting.Action) (bool, runtime.Object, error) {
                    return true, nil, errors.New("some update error")
                  },
                )
              })

              it("returns the original pod update error so the controller gets scheduled again", func() {
                startInformersAndController()
                err := controllerlib.TestSync(t, subject, *syncContext)
                r.EqualError(err, "cannot update agent pod: some update error")
              })
            })
          })

          when("there is not already a CredentialIssuer", func() {
            it.Before(func() {
              credentialIssuerLabels = map[string]string{"foo": "bar"}
            })

            it("creates the CredentialIssuer status with the error", func() {
              startInformersAndController()
              err := controllerlib.TestSync(t, subject, *syncContext)

              expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
                TypeMeta: metav1.TypeMeta{},
                ObjectMeta: metav1.ObjectMeta{
                  Name:   credentialIssuerResourceName,
                  Labels: map[string]string{"foo": "bar"},
                },
              }

              expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
                TypeMeta: metav1.TypeMeta{},
                ObjectMeta: metav1.ObjectMeta{
                  Name:   credentialIssuerResourceName,
                  Labels: map[string]string{"foo": "bar"},
                },
                Status: configv1alpha1.CredentialIssuerStatus{
                  Strategies: []configv1alpha1.CredentialIssuerStrategy{
                    {
                      Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
                      Status:         configv1alpha1.ErrorStrategyStatus,
                      Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
                      Message:        "cannot update agent pod: some update error",
                      LastUpdateTime: metav1.NewTime(frozenNow),
                    },
                  },
                },
              }
              expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
              expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
              expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)

              r.EqualError(err, "cannot update agent pod: some update error")
              r.Equal(
                []coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction},
                pinnipedAPIClient.Actions(),
              )
            })
          })
        })
      })

      when("there is a controller manager pod with CLI flag values separated by spaces", func() {
        it.Before(func() {
          controllerManagerPod.Spec.Containers[0].Command = []string{
            "kube-controller-manager",
            "--cluster-signing-cert-file", certPath,
            "--cluster-signing-key-file", keyPath,
          }
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the annotations according to the controller manager pod", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = certPath
          updatedAgentPod.Annotations[keyPathAnnotation] = keyPath

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })

      when("there is a controller manager pod with no CLI flags", func() {
        it.Before(func() {
          controllerManagerPod.Spec.Containers[0].Command = []string{
            "kube-controller-manager",
          }
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the annotations with the default values", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
          updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })

      when("there is a controller manager pod with unparsable CLI flags", func() {
        it.Before(func() {
          controllerManagerPod.Spec.Containers[0].Command = []string{
            "kube-controller-manager",
            "--cluster-signing-cert-file-blah", certPath,
            "--cluster-signing-key-file-blah", keyPath,
          }
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the annotations with the default values", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
          updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })

      when("there is a controller manager pod with unparsable cert CLI flag", func() {
        it.Before(func() {
          controllerManagerPod.Spec.Containers[0].Command = []string{
            "kube-controller-manager",
            "--cluster-signing-cert-file-blah", certPath,
            "--cluster-signing-key-file", keyPath,
          }
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the key annotation with the default cert flag value", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
          updatedAgentPod.Annotations[keyPathAnnotation] = keyPath

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })

      when("there is a controller manager pod with unparsable key CLI flag", func() {
        it.Before(func() {
          controllerManagerPod.Spec.Containers[0].Command = []string{
            "kube-controller-manager",
            "--cluster-signing-cert-file", certPath,
            "--cluster-signing-key-file-blah", keyPath,
          }
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the cert annotation with the default key flag value", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = certPath
          updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })

      when("there is a non-matching controller manager pod via uid", func() {
        it.Before(func() {
          controllerManagerPod.UID = "some-other-controller-manager-uid"
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("does nothing; the deleter will delete this pod to trigger resync", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))
          r.Equal([]coretesting.Action{}, kubeAPIClient.Actions())
        })
      })

      when("there is a non-matching controller manager pod via name", func() {
        it.Before(func() {
          controllerManagerPod.Name = "some-other-controller-manager-name"
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("does nothing; the deleter will delete this pod to trigger resync", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))
          r.Equal([]coretesting.Action{}, kubeAPIClient.Actions())
        })
      })
    })

    when("there is an agent pod without annotations set which does not have the configured additional labels", func() {
      it.Before(func() {
        delete(agentPod.ObjectMeta.Labels, "myLabelKey1")
        r.NoError(agentInformerClient.Tracker().Add(agentPod))
        r.NoError(kubeAPIClient.Tracker().Add(agentPod))
      })

      when("there is a matching controller manager pod", func() {
        it.Before(func() {
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the annotations according to the controller manager pod", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = certPath
          updatedAgentPod.Annotations[keyPathAnnotation] = keyPath

          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })
    })

    when("there is an agent pod with correct annotations set", func() {
      it.Before(func() {
        agentPod.Annotations = make(map[string]string)
        agentPod.Annotations[certPathAnnotation] = certPath
        agentPod.Annotations[keyPathAnnotation] = keyPath
        r.NoError(agentInformerClient.Tracker().Add(agentPod))
        r.NoError(kubeAPIClient.Tracker().Add(agentPod))
      })

      when("there is a matching controller manager pod", func() {
        it.Before(func() {
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("does nothing since the pod is up to date", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))
          r.Equal([]coretesting.Action{}, kubeAPIClient.Actions())
        })
      })
    })

    when("there is an agent pod with the wrong cert annotation", func() {
      it.Before(func() {
        agentPod.Annotations[certPathAnnotation] = "wrong"
        agentPod.Annotations[keyPathAnnotation] = keyPath
        r.NoError(agentInformerClient.Tracker().Add(agentPod))
        r.NoError(kubeAPIClient.Tracker().Add(agentPod))
      })

      when("there is a matching controller manager pod", func() {
        it.Before(func() {
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the agent with the correct cert annotation", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[certPathAnnotation] = certPath
          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })
    })

    when("there is an agent pod with the wrong key annotation", func() {
      it.Before(func() {
        agentPod.Annotations[certPathAnnotation] = certPath
        agentPod.Annotations[keyPathAnnotation] = "key"
        r.NoError(agentInformerClient.Tracker().Add(agentPod))
        r.NoError(kubeAPIClient.Tracker().Add(agentPod))
      })

      when("there is a matching controller manager pod", func() {
        it.Before(func() {
          r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
          r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
        })

        it("updates the agent with the correct key annotation", func() {
          startInformersAndController()
          r.NoError(controllerlib.TestSync(t, subject, *syncContext))

          updatedAgentPod := agentPod.DeepCopy()
          updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
          r.Equal(
            []coretesting.Action{
              coretesting.NewGetAction(podsGVR, agentPodNamespace, updatedAgentPod.Name),
              coretesting.NewUpdateAction(podsGVR, agentPodNamespace, updatedAgentPod),
            },
            kubeAPIClient.Actions(),
          )
        })
      })
    })
  }, spec.Parallel(), spec.Report(report.Terminal{}))
}
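These tests lean repeatedly on the same failure-injection pattern from client-go's fake clientset: a prepended reactor short-circuits a verb/resource pair with a canned error, while the fake still records every intercepted call in Actions() for assertions. A standalone sketch of the pattern (namespace and pod name are placeholders):

package main

import (
  "context"
  "errors"
  "fmt"

  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/runtime"
  kubernetesfake "k8s.io/client-go/kubernetes/fake"
  coretesting "k8s.io/client-go/testing"
)

func main() {
  client := kubernetesfake.NewSimpleClientset()
  client.PrependReactor(
    "update",
    "pods",
    func(_ coretesting.Action) (bool, runtime.Object, error) {
      return true, nil, errors.New("some update error") // short-circuit every pod update
    },
  )
  _, err := client.CoreV1().Pods("some-namespace").Update(
    context.Background(),
    &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "some-agent-pod", Namespace: "some-namespace"}},
    metav1.UpdateOptions{},
  )
  fmt.Println(err)                    // some update error
  fmt.Println(len(client.Actions())) // 1: the intercepted update is still recorded
}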
@ -1,185 +0,0 @@ (deleted file)
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
  "fmt"

  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/labels"
  "k8s.io/apimachinery/pkg/util/clock"
  corev1informers "k8s.io/client-go/informers/core/v1"
  "k8s.io/client-go/kubernetes"
  "k8s.io/klog/v2"

  pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
  "go.pinniped.dev/internal/constable"
  pinnipedcontroller "go.pinniped.dev/internal/controller"
  "go.pinniped.dev/internal/controller/issuerconfig"
  "go.pinniped.dev/internal/controllerlib"
  "go.pinniped.dev/internal/plog"
)

type createrController struct {
  agentPodConfig                 *AgentPodConfig
  credentialIssuerLocationConfig *CredentialIssuerLocationConfig
  credentialIssuerLabels         map[string]string
  clock                          clock.Clock
  k8sClient                      kubernetes.Interface
  pinnipedAPIClient              pinnipedclientset.Interface
  kubeSystemPodInformer          corev1informers.PodInformer
  agentPodInformer               corev1informers.PodInformer
}

// NewCreaterController returns a controller that creates new kube-cert-agent pods for every known
// kube-controller-manager pod.
//
// It also is tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewCreaterController(
  agentPodConfig *AgentPodConfig,
  credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
  credentialIssuerLabels map[string]string,
  clock clock.Clock,
  k8sClient kubernetes.Interface,
  pinnipedAPIClient pinnipedclientset.Interface,
  kubeSystemPodInformer corev1informers.PodInformer,
  agentPodInformer corev1informers.PodInformer,
  withInformer pinnipedcontroller.WithInformerOptionFunc,
  withInitialEvent pinnipedcontroller.WithInitialEventOptionFunc,
) controllerlib.Controller {
  return controllerlib.New(
    controllerlib.Config{
      //nolint: misspell
      Name: "kube-cert-agent-creater-controller",
      Syncer: &createrController{
        agentPodConfig:                 agentPodConfig,
        credentialIssuerLocationConfig: credentialIssuerLocationConfig,
        credentialIssuerLabels:         credentialIssuerLabels,
        clock:                          clock,
        k8sClient:                      k8sClient,
        pinnipedAPIClient:              pinnipedAPIClient,
        kubeSystemPodInformer:          kubeSystemPodInformer,
        agentPodInformer:               agentPodInformer,
      },
    },
    withInformer(
      kubeSystemPodInformer,
      pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
      controllerlib.InformerOption{},
    ),
    withInformer(
      agentPodInformer,
      pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
      controllerlib.InformerOption{},
    ),
    // Be sure to run once even to make sure the CI is updated if there are no controller manager
    // pods. We should be able to pass an empty key since we don't use the key in the sync (we sync
    // the world).
    withInitialEvent(controllerlib.Key{}),
  )
}

// Sync implements controllerlib.Syncer.
func (c *createrController) Sync(ctx controllerlib.Context) error {
  controllerManagerSelector, err := labels.Parse("component=kube-controller-manager")
  if err != nil {
    return fmt.Errorf("cannot create controller manager selector: %w", err)
  }

  controllerManagerPods, err := c.kubeSystemPodInformer.Lister().List(controllerManagerSelector)
  if err != nil {
    return fmt.Errorf("informer cannot list controller manager pods: %w", err)
  }

  if len(controllerManagerPods) == 0 {
    // If there are no controller manager pods, we alert the user that we can't find the keypair via
    // the CredentialIssuer.
    return issuerconfig.UpdateStrategy(
      ctx.Context,
      c.credentialIssuerLocationConfig.Name,
      c.credentialIssuerLabels,
      c.pinnipedAPIClient,
      strategyError(c.clock, constable.Error("did not find kube-controller-manager pod(s)")),
    )
  }

  for _, controllerManagerPod := range controllerManagerPods {
    agentPod, err := findAgentPodForSpecificControllerManagerPod(
      controllerManagerPod,
      c.kubeSystemPodInformer,
      c.agentPodInformer,
      c.agentPodConfig.AgentSelector(),
    )
    if err != nil {
      return err
    }
    if agentPod == nil {
      agentPod = c.agentPodConfig.newAgentPod(controllerManagerPod)

      plog.Debug(
        "creating agent pod",
        "pod", klog.KObj(agentPod),
        "controller", klog.KObj(controllerManagerPod),
      )
      _, err := c.k8sClient.CoreV1().
        Pods(c.agentPodConfig.Namespace).
        Create(ctx.Context, agentPod, metav1.CreateOptions{})
      if err != nil {
        err = fmt.Errorf("cannot create agent pod: %w", err)
        strategyResultUpdateErr := issuerconfig.UpdateStrategy(
          ctx.Context,
          c.credentialIssuerLocationConfig.Name,
          c.credentialIssuerLabels,
          c.pinnipedAPIClient,
          strategyError(c.clock, err),
        )
        if strategyResultUpdateErr != nil {
          // If the CI update fails, then we probably want to try again. This controller will get
          // called again because of the pod create failure, so just try the CI update again then.
          klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuer")
        }

        return err
      }
    }

    // The deleter controller handles the case where the expected fields do not match in the agent pod.
  }

  return nil
}

func findAgentPodForSpecificControllerManagerPod(
  controllerManagerPod *corev1.Pod,
  kubeSystemPodInformer corev1informers.PodInformer,
  agentPodInformer corev1informers.PodInformer,
  agentSelector labels.Selector,
) (*corev1.Pod, error) {
  agentPods, err := agentPodInformer.
    Lister().
    List(agentSelector)
  if err != nil {
    return nil, fmt.Errorf("informer cannot list agent pods: %w", err)
  }

  for _, maybeAgentPod := range agentPods {
    maybeControllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(
      maybeAgentPod,
      kubeSystemPodInformer,
    )
    if err != nil {
      return nil, err
    }
    if maybeControllerManagerPod != nil &&
      maybeControllerManagerPod.UID == controllerManagerPod.UID {
      return maybeAgentPod, nil
    }
  }

  return nil, nil
}
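The pairing between agent pods and kube-controller-manager pods above runs through annotations that the creater stamps onto each agent pod; the keys are visible in the tests that follow. An illustrative reduction of that matching rule, where agentPodMatches is a hypothetical helper and not the commit's actual code:

package main

import (
  "fmt"

  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
  controllerManagerNameAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-name"
  controllerManagerUIDAnnotationKey  = "kube-cert-agent.pinniped.dev/controller-manager-uid"
)

// agentPodMatches reports whether agentPod was stamped with the name and UID of
// controllerManagerPod when it was created.
func agentPodMatches(agentPod, controllerManagerPod *corev1.Pod) bool {
  return agentPod.Annotations[controllerManagerNameAnnotationKey] == controllerManagerPod.Name &&
    agentPod.Annotations[controllerManagerUIDAnnotationKey] == string(controllerManagerPod.UID)
}

func main() {
  cm := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "kube-controller-manager-1", UID: "uid-1"}}
  agent := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
    controllerManagerNameAnnotationKey: "kube-controller-manager-1",
    controllerManagerUIDAnnotationKey:  "uid-1",
  }}}
  fmt.Println(agentPodMatches(agent, cm)) // true
}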
@ -1,623 +0,0 @@
|
|||||||
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
|
|
||||||
// SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
package kubecertagent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/sclevine/spec"
|
|
||||||
"github.com/sclevine/spec/report"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
|
||||||
"k8s.io/apimachinery/pkg/util/clock"
|
|
||||||
kubeinformers "k8s.io/client-go/informers"
|
|
||||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
|
||||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
|
||||||
coretesting "k8s.io/client-go/testing"
|
|
||||||
|
|
||||||
configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
|
|
||||||
pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
|
|
||||||
"go.pinniped.dev/internal/controllerlib"
|
|
||||||
"go.pinniped.dev/internal/testutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCreaterControllerFilter(t *testing.T) {
|
|
||||||
defineSharedKubecertagentFilterSpecs(
|
|
||||||
t,
|
|
||||||
"CreaterControllerFilter",
|
|
||||||
func(
|
|
||||||
agentPodConfig *AgentPodConfig,
|
|
||||||
credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
|
|
||||||
kubeSystemPodInformer corev1informers.PodInformer,
|
|
||||||
agentPodInformer corev1informers.PodInformer,
|
|
||||||
observableWithInformerOption *testutil.ObservableWithInformerOption,
|
|
||||||
) {
|
|
||||||
_ = NewCreaterController(
|
|
||||||
agentPodConfig,
|
|
||||||
credentialIssuerLocationConfig,
|
|
||||||
map[string]string{},
|
|
||||||
nil, // clock, shouldn't matter
|
|
||||||
nil, // k8sClient, shouldn't matter
|
|
||||||
nil, // pinnipedAPIClient, shouldn't matter
|
|
||||||
kubeSystemPodInformer,
|
|
||||||
agentPodInformer,
|
|
||||||
observableWithInformerOption.WithInformer,
|
|
||||||
controllerlib.WithInitialEvent,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreaterControllerInitialEvent(t *testing.T) {
|
|
||||||
kubeSystemInformerClient := kubernetesfake.NewSimpleClientset()
|
|
||||||
kubeSystemInformers := kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
|
|
||||||
|
|
||||||
agentInformerClient := kubernetesfake.NewSimpleClientset()
|
|
||||||
agentInformers := kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
|
|
||||||
|
|
||||||
observableWithInitialEventOption := testutil.NewObservableWithInitialEventOption()
|
|
||||||
|
|
||||||
_ = NewCreaterController(
|
|
||||||
nil, // agentPodConfig, shouldn't matter
|
|
||||||
nil, // credentialIssuerLocationConfig, shouldn't matter
|
|
||||||
map[string]string{},
|
|
||||||
nil, // clock, shouldn't matter
|
|
||||||
nil, // k8sClient, shouldn't matter
|
|
||||||
nil, // pinnipedAPIClient, shouldn't matter
|
|
||||||
kubeSystemInformers.Core().V1().Pods(),
|
|
||||||
agentInformers.Core().V1().Pods(),
|
|
||||||
controllerlib.WithInformer,
|
|
||||||
observableWithInitialEventOption.WithInitialEvent,
|
|
||||||
)
|
|
||||||
require.Equal(t, &controllerlib.Key{}, observableWithInitialEventOption.GetInitialEventKey())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreaterControllerSync(t *testing.T) {
|
|
||||||
spec.Run(t, "CreaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
|
|
||||||
const kubeSystemNamespace = "kube-system"
|
|
||||||
const agentPodNamespace = "agent-pod-namespace"
|
|
||||||
const credentialIssuerResourceName = "ci-resource-name"
|
|
||||||
|
|
||||||
var r *require.Assertions
|
|
||||||
|
|
||||||
var subject controllerlib.Controller
|
|
||||||
var kubeAPIClient *kubernetesfake.Clientset
|
|
||||||
var kubeSystemInformerClient *kubernetesfake.Clientset
|
|
||||||
var kubeSystemInformers kubeinformers.SharedInformerFactory
|
|
||||||
var agentInformerClient *kubernetesfake.Clientset
|
|
||||||
var agentInformers kubeinformers.SharedInformerFactory
|
|
||||||
var pinnipedAPIClient *pinnipedfake.Clientset
|
|
||||||
var cancelContext context.Context
|
|
||||||
var cancelContextCancelFunc context.CancelFunc
|
|
||||||
var syncContext *controllerlib.Context
|
|
||||||
var controllerManagerPod, agentPod *corev1.Pod
|
|
||||||
var podsGVR schema.GroupVersionResource
|
|
||||||
var credentialIssuerGVR schema.GroupVersionResource
|
|
||||||
var frozenNow time.Time
|
|
||||||
|
|
||||||
// Defer starting the informers until the last possible moment so that the
|
|
||||||
// nested Before's can keep adding things to the informer caches.
|
|
||||||
var startInformersAndController = func() {
|
|
||||||
// Set this at the last second to allow for injection of server override.
|
|
||||||
subject = NewCreaterController(
|
|
||||||
&AgentPodConfig{
|
|
||||||
Namespace: agentPodNamespace,
|
|
||||||
ContainerImage: "some-agent-image",
|
|
||||||
PodNamePrefix: "some-agent-name-",
|
|
||||||
ContainerImagePullSecrets: []string{"some-image-pull-secret"},
|
|
||||||
AdditionalLabels: map[string]string{
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&CredentialIssuerLocationConfig{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
map[string]string{
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
},
|
|
||||||
clock.NewFakeClock(frozenNow),
|
|
||||||
kubeAPIClient,
|
|
||||||
pinnipedAPIClient,
|
|
||||||
kubeSystemInformers.Core().V1().Pods(),
|
|
||||||
agentInformers.Core().V1().Pods(),
|
|
||||||
controllerlib.WithInformer,
|
|
||||||
controllerlib.WithInitialEvent,
|
|
||||||
)
|
|
||||||
|
|
||||||
// Set this at the last second to support calling subject.Name().
|
|
||||||
syncContext = &controllerlib.Context{
|
|
||||||
Context: cancelContext,
|
|
||||||
Name: subject.Name(),
|
|
||||||
Key: controllerlib.Key{
|
|
||||||
Namespace: kubeSystemNamespace,
|
|
||||||
Name: "should-not-matter",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must start informers before calling TestRunSynchronously()
|
|
||||||
kubeSystemInformers.Start(cancelContext.Done())
|
|
||||||
agentInformers.Start(cancelContext.Done())
|
|
||||||
controllerlib.TestRunSynchronously(t, subject)
|
|
||||||
}
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
r = require.New(t)
|
|
||||||
|
|
||||||
kubeAPIClient = kubernetesfake.NewSimpleClientset()
|
|
||||||
|
|
||||||
kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
|
|
||||||
kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
|
|
||||||
|
|
||||||
agentInformerClient = kubernetesfake.NewSimpleClientset()
|
|
||||||
agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
|
|
||||||
|
|
||||||
pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
|
|
||||||
|
|
||||||
cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
|
|
||||||
kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
|
|
||||||
)
|
|
||||||
|
|
||||||
podsGVR = schema.GroupVersionResource{
|
|
||||||
Group: corev1.SchemeGroupVersion.Group,
|
|
||||||
Version: corev1.SchemeGroupVersion.Version,
|
|
||||||
Resource: "pods",
|
|
||||||
}
|
|
||||||
|
|
||||||
credentialIssuerGVR = schema.GroupVersionResource{
|
|
||||||
Group: configv1alpha1.GroupName,
|
|
||||||
Version: configv1alpha1.SchemeGroupVersion.Version,
|
|
||||||
Resource: "credentialissuers",
|
|
||||||
}
|
|
||||||
|
|
||||||
frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
|
|
||||||
|
|
||||||
// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
|
|
||||||
// logic on this thing.
|
|
||||||
			ignorablePod := corev1.Pod{}
			ignorablePod.Name = "some-ignorable-pod"
			r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
			r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))

			// Add another valid agent pod to make sure our logic works for just the pod we care about.
			otherAgentPod := agentPod.DeepCopy()
			otherAgentPod.Name = "some-other-agent"
			otherAgentPod.Annotations = map[string]string{
				"kube-cert-agent.pinniped.dev/controller-manager-name": "some-other-controller-manager-name",
				"kube-cert-agent.pinniped.dev/controller-manager-uid":  "some-other-controller-manager-uid",
			}
			r.NoError(agentInformerClient.Tracker().Add(otherAgentPod))
			r.NoError(kubeAPIClient.Tracker().Add(otherAgentPod))
		})

		it.After(func() {
			cancelContextCancelFunc()
		})

		when("there is a controller manager pod", func() {
			it.Before(func() {
				r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
				r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
			})

			when("there is a matching agent pod", func() {
				it.Before(func() {
					r.NoError(agentInformerClient.Tracker().Add(agentPod))
					r.NoError(kubeAPIClient.Tracker().Add(agentPod))
				})

				it("does nothing", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					r.Empty(kubeAPIClient.Actions())
				})
			})

			when("there is a matching agent pod that is missing some of the configured additional labels", func() {
				it.Before(func() {
					nonMatchingAgentPod := agentPod.DeepCopy()
					delete(nonMatchingAgentPod.ObjectMeta.Labels, "myLabelKey1")
					r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
					r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
				})

				it("does nothing because the deleter controller is responsible for deleting it", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					r.Empty(kubeAPIClient.Actions())
				})
			})

			when("there is a non-matching agent pod", func() {
				it.Before(func() {
					nonMatchingAgentPod := agentPod.DeepCopy()
					nonMatchingAgentPod.Name = "some-agent-name-85da432e"
					nonMatchingAgentPod.Annotations[controllerManagerUIDAnnotationKey] = "some-non-matching-uid"
					r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
					r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
				})

				it("creates a matching agent pod", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					r.Equal(
						[]coretesting.Action{
							coretesting.NewCreateAction(
								podsGVR,
								agentPodNamespace,
								agentPod,
							),
						},
						kubeAPIClient.Actions(),
					)
				})
			})

			when("there is no matching agent pod", func() {
				it("creates a matching agent pod", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					r.Equal(
						[]coretesting.Action{
							coretesting.NewCreateAction(
								podsGVR,
								agentPodNamespace,
								agentPod,
							),
						},
						kubeAPIClient.Actions(),
					)
				})

				when("creating the matching agent pod fails", func() {
					it.Before(func() {
						kubeAPIClient.PrependReactor(
							"create",
							"pods",
							func(_ coretesting.Action) (bool, runtime.Object, error) {
								return true, nil, errors.New("some create error")
							},
						)
					})

					when("there is already a CredentialIssuer", func() {
						var initialCredentialIssuer *configv1alpha1.CredentialIssuer

						it.Before(func() {
							initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
								TypeMeta: metav1.TypeMeta{},
								ObjectMeta: metav1.ObjectMeta{
									Name: credentialIssuerResourceName,
								},
								Status: configv1alpha1.CredentialIssuerStatus{
									Strategies: []configv1alpha1.CredentialIssuerStrategy{},
								},
							}
							r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
						})

it("updates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
|
||||||
							startInformersAndController()
							err := controllerlib.TestSync(t, subject, *syncContext)

							expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
							expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
								{
									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
									Status:         configv1alpha1.ErrorStrategyStatus,
									Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
									Message:        "cannot create agent pod: some create error",
									LastUpdateTime: metav1.NewTime(frozenNow),
								},
							}
							expectedGetAction := coretesting.NewRootGetAction(
								credentialIssuerGVR,
								credentialIssuerResourceName,
							)
							expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
								credentialIssuerGVR,
								"status",
								expectedCredentialIssuer,
							)

							r.EqualError(err, "cannot create agent pod: some create error")
							r.Equal(
								[]coretesting.Action{
									expectedGetAction,
									expectedUpdateAction,
								},
								pinnipedAPIClient.Actions(),
							)
						})

						when("the CredentialIssuer operation fails", func() {
							it.Before(func() {
								pinnipedAPIClient.PrependReactor(
									"update",
									"credentialissuers",
									func(_ coretesting.Action) (bool, runtime.Object, error) {
										return true, nil, errors.New("some update error")
									},
								)

								it("still returns the pod create error, since the controller will get rescheduled", func() {
									startInformersAndController()
									err := controllerlib.TestSync(t, subject, *syncContext)
									r.EqualError(err, "cannot create agent pod: some create error")
								})
							})
						})
					})

					when("there is not already a CredentialIssuer", func() {
						it("returns an error and updates the CredentialIssuer status", func() {
							startInformersAndController()
							err := controllerlib.TestSync(t, subject, *syncContext)

							expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
								TypeMeta: metav1.TypeMeta{},
								ObjectMeta: metav1.ObjectMeta{
									Name: credentialIssuerResourceName,
									Labels: map[string]string{
										"myLabelKey1": "myLabelValue1",
										"myLabelKey2": "myLabelValue2",
									},
								},
							}

							expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
								TypeMeta: metav1.TypeMeta{},
								ObjectMeta: metav1.ObjectMeta{
									Name: credentialIssuerResourceName,
									Labels: map[string]string{
										"myLabelKey1": "myLabelValue1",
										"myLabelKey2": "myLabelValue2",
									},
								},
								Status: configv1alpha1.CredentialIssuerStatus{
									Strategies: []configv1alpha1.CredentialIssuerStrategy{
										{
											Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
											Status:         configv1alpha1.ErrorStrategyStatus,
											Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
											Message:        "cannot create agent pod: some create error",
											LastUpdateTime: metav1.NewTime(frozenNow),
										},
									},
								},
							}
							expectedGetAction := coretesting.NewRootGetAction(
								credentialIssuerGVR,
								credentialIssuerResourceName,
							)
							expectedCreateAction := coretesting.NewRootCreateAction(
								credentialIssuerGVR,
								expectedCreateCredentialIssuer,
							)
							expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
								credentialIssuerGVR,
								"status",
								expectedCredentialIssuer,
							)

							r.EqualError(err, "cannot create agent pod: some create error")
							r.Equal(
								[]coretesting.Action{
									expectedGetAction,
									expectedCreateAction,
									expectedUpdateAction,
								},
								pinnipedAPIClient.Actions(),
							)
						})
					})
				})
			})
		})

		when("there is no controller manager pod", func() {
			when("there is already a CredentialIssuer", func() {
				var initialCredentialIssuer *configv1alpha1.CredentialIssuer

				it.Before(func() {
					initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
						Status: configv1alpha1.CredentialIssuerStatus{
							Strategies: []configv1alpha1.CredentialIssuerStrategy{},
						},
					}
					r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
				})

				it("updates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
					startInformersAndController()
					r.NoError(controllerlib.TestSync(t, subject, *syncContext))

					expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
					expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
						{
							Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
							Status:         configv1alpha1.ErrorStrategyStatus,
							Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
							Message:        "did not find kube-controller-manager pod(s)",
							LastUpdateTime: metav1.NewTime(frozenNow),
						},
					}
					expectedGetAction := coretesting.NewRootGetAction(
						credentialIssuerGVR,
						credentialIssuerResourceName,
					)
					expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
						credentialIssuerGVR,
						"status",
						expectedCredentialIssuer,
					)

					r.Equal(
						[]coretesting.Action{
							expectedGetAction,
							expectedUpdateAction,
						},
						pinnipedAPIClient.Actions(),
					)
				})

when("when updating the CredentialIssuer fails", func() {
|
|
||||||
					it.Before(func() {
						pinnipedAPIClient.PrependReactor(
							"update",
							"credentialissuers",
							func(_ coretesting.Action) (bool, runtime.Object, error) {
								return true, nil, errors.New("some update error")
							},
						)
					})

					it("returns an error", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)
						r.EqualError(err, "could not create or update credentialissuer: some update error")
					})
				})

when("when getting the CredentialIssuer fails", func() {
|
|
||||||
					it.Before(func() {
						pinnipedAPIClient.PrependReactor(
							"get",
							"credentialissuers",
							func(_ coretesting.Action) (bool, runtime.Object, error) {
								return true, nil, errors.New("some get error")
							},
						)
					})

					it("returns an error", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)
						r.EqualError(err, "could not create or update credentialissuer: get failed: some get error")
					})
				})
			})

			when("there is not already a CredentialIssuer", func() {
it("creates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
|
||||||
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
							Labels: map[string]string{
								"myLabelKey1": "myLabelValue1",
								"myLabelKey2": "myLabelValue2",
							},
						},
					}

					expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
							Labels: map[string]string{
								"myLabelKey1": "myLabelValue1",
								"myLabelKey2": "myLabelValue2",
							},
						},
						Status: configv1alpha1.CredentialIssuerStatus{
							Strategies: []configv1alpha1.CredentialIssuerStrategy{
								{
									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
									Status:         configv1alpha1.ErrorStrategyStatus,
									Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
									Message:        "did not find kube-controller-manager pod(s)",
									LastUpdateTime: metav1.NewTime(frozenNow),
								},
							},
						},
					}
					expectedGetAction := coretesting.NewRootGetAction(
						credentialIssuerGVR,
						credentialIssuerResourceName,
					)
					expectedCreateAction := coretesting.NewRootCreateAction(
						credentialIssuerGVR,
						expectedCreateCredentialIssuer,
					)
					expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
						credentialIssuerGVR,
						"status",
						expectedCredentialIssuer,
					)

					r.NoError(err)
					r.Equal(
						[]coretesting.Action{
							expectedGetAction,
							expectedCreateAction,
							expectedUpdateAction,
						},
						pinnipedAPIClient.Actions(),
					)
				})

when("when creating the CredentialIssuer fails", func() {
|
|
||||||
					it.Before(func() {
						pinnipedAPIClient.PrependReactor(
							"create",
							"credentialissuers",
							func(_ coretesting.Action) (bool, runtime.Object, error) {
								return true, nil, errors.New("some create error")
							},
						)
					})

					it("returns an error", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)
						r.EqualError(err, "could not create or update credentialissuer: create failed: some create error")
					})
				})

when("when getting the CredentialIssuer fails", func() {
|
|
||||||
					it.Before(func() {
						pinnipedAPIClient.PrependReactor(
							"get",
							"credentialissuers",
							func(_ coretesting.Action) (bool, runtime.Object, error) {
								return true, nil, errors.New("some get error")
							},
						)
					})

					it("returns an error", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)
						r.EqualError(err, "could not create or update credentialissuer: get failed: some get error")
					})
				})
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}
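// Hypothetical sketch of the get -> create -> update-status sequence that the assertions
// above expect, assuming the shape of the generated Pinniped clientset; this is an
// illustration, not the real issuerconfig implementation. Assumes the package's imports
// plus k8serrors ("k8s.io/apimachinery/pkg/api/errors") and the generated pinnipedclientset.
func ensureCredentialIssuerSketch(
	ctx context.Context,
	client pinnipedclientset.Interface,
	name string,
	strategy configv1alpha1.CredentialIssuerStrategy,
) error {
	// Get the existing CredentialIssuer, if any (the expectedGetAction above).
	existing, err := client.ConfigV1alpha1().CredentialIssuers().Get(ctx, name, metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		// Not found, so create a bare CredentialIssuer first (the expectedCreateAction above).
		existing, err = client.ConfigV1alpha1().CredentialIssuers().Create(
			ctx,
			&configv1alpha1.CredentialIssuer{ObjectMeta: metav1.ObjectMeta{Name: name}},
			metav1.CreateOptions{},
		)
	}
	if err != nil {
		return err
	}
	// Finally, write the strategy via the status subresource (the expectedUpdateAction above).
	existing.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{strategy}
	_, err = client.ConfigV1alpha1().CredentialIssuers().UpdateStatus(ctx, existing, metav1.UpdateOptions{})
	return err
}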
@@ -1,87 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/plog"
)

type deleterController struct {
	agentPodConfig        *AgentPodConfig
	k8sClient             kubernetes.Interface
	kubeSystemPodInformer corev1informers.PodInformer
	agentPodInformer      corev1informers.PodInformer
}

// NewDeleterController returns a controller that deletes any kube-cert-agent pods that are out of
// sync with the known kube-controller-manager pods.
func NewDeleterController(
	agentPodConfig *AgentPodConfig,
	k8sClient kubernetes.Interface,
	kubeSystemPodInformer corev1informers.PodInformer,
	agentPodInformer corev1informers.PodInformer,
	withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
	return controllerlib.New(
		controllerlib.Config{
			Name: "kube-cert-agent-deleter-controller",
			Syncer: &deleterController{
				agentPodConfig:        agentPodConfig,
				k8sClient:             k8sClient,
				kubeSystemPodInformer: kubeSystemPodInformer,
				agentPodInformer:      agentPodInformer,
			},
		},
		withInformer(
			kubeSystemPodInformer,
			pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
			controllerlib.InformerOption{},
		),
		withInformer(
			agentPodInformer,
			pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
			controllerlib.InformerOption{},
		),
	)
}
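// Hypothetical sketch of the isAgentPod predicate referenced in the informer filters above,
// inferred from the agent label that the tests in this package use; the real helper is
// defined elsewhere in this package and may check more than this. Assumes corev1
// ("k8s.io/api/core/v1") is imported.
func isAgentPodSketch(pod *corev1.Pod) bool {
	// Agent pods carry the label "kube-cert-agent.pinniped.dev": "true" in these tests.
	return pod.Labels["kube-cert-agent.pinniped.dev"] == "true"
}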

// Sync implements controllerlib.Syncer.
func (c *deleterController) Sync(ctx controllerlib.Context) error {
	agentPods, err := c.agentPodInformer.
		Lister().
		Pods(c.agentPodConfig.Namespace).
		List(c.agentPodConfig.AgentSelector())
	if err != nil {
		return fmt.Errorf("informer cannot list agent pods: %w", err)
	}

	for _, agentPod := range agentPods {
		controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
		if err != nil {
			return err
		}
		if controllerManagerPod == nil ||
			!isAgentPodUpToDate(agentPod, c.agentPodConfig.newAgentPod(controllerManagerPod)) {
			plog.Debug("deleting agent pod", "pod", klog.KObj(agentPod))
			err := c.k8sClient.
				CoreV1().
				Pods(agentPod.Namespace).
				Delete(ctx.Context, agentPod.Name, metav1.DeleteOptions{})
			if err != nil {
				return fmt.Errorf("cannot delete agent pod: %w", err)
			}
		}
	}

	return nil
}
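// Hypothetical sketch of the kind of field-by-field comparison that isAgentPodUpToDate
// performs, based only on the out-of-sync cases exercised by the tests below (volumes,
// volume mounts, node selector, node name, tolerations, restart policy, container
// name/image/command, labels). The real helper compares more than shown here. Assumes
// corev1 and equality ("k8s.io/apimachinery/pkg/api/equality") are imported.
func agentPodLooksUpToDateSketch(actual, desired *corev1.Pod) bool {
	return equality.Semantic.DeepEqual(actual.Spec.Volumes, desired.Spec.Volumes) &&
		equality.Semantic.DeepEqual(actual.Spec.Containers[0].VolumeMounts, desired.Spec.Containers[0].VolumeMounts) &&
		equality.Semantic.DeepEqual(actual.Spec.NodeSelector, desired.Spec.NodeSelector) &&
		actual.Spec.NodeName == desired.Spec.NodeName &&
		equality.Semantic.DeepEqual(actual.Spec.Tolerations, desired.Spec.Tolerations) &&
		actual.Spec.RestartPolicy == desired.Spec.RestartPolicy &&
		actual.Spec.Containers[0].Image == desired.Spec.Containers[0].Image &&
		equality.Semantic.DeepEqual(actual.Spec.Containers[0].Command, desired.Spec.Containers[0].Command)
}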
@@ -1,506 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"context"
	"testing"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kubeinformers "k8s.io/client-go/informers"
	corev1informers "k8s.io/client-go/informers/core/v1"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"

	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/testutil"
)

func TestDeleterControllerFilter(t *testing.T) {
	defineSharedKubecertagentFilterSpecs(
		t,
		"DeleterControllerFilter",
		func(
			agentPodConfig *AgentPodConfig,
			_ *CredentialIssuerLocationConfig,
			kubeSystemPodInformer corev1informers.PodInformer,
			agentPodInformer corev1informers.PodInformer,
			observableWithInformerOption *testutil.ObservableWithInformerOption,
		) {
			_ = NewDeleterController(
				agentPodConfig,
				nil, // k8sClient, shouldn't matter
				kubeSystemPodInformer,
				agentPodInformer,
				observableWithInformerOption.WithInformer,
			)
		},
	)
}

func TestDeleterControllerSync(t *testing.T) {
	spec.Run(t, "DeleterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
		const kubeSystemNamespace = "kube-system"
		const agentPodNamespace = "agent-pod-namespace"

		var r *require.Assertions

		var subject controllerlib.Controller
		var kubeAPIClient *kubernetesfake.Clientset
		var kubeSystemInformerClient *kubernetesfake.Clientset
		var kubeSystemInformers kubeinformers.SharedInformerFactory
		var agentInformerClient *kubernetesfake.Clientset
		var agentInformers kubeinformers.SharedInformerFactory
		var cancelContext context.Context
		var cancelContextCancelFunc context.CancelFunc
		var syncContext *controllerlib.Context
		var controllerManagerPod, agentPod *corev1.Pod
		var podsGVR schema.GroupVersionResource

		// Defer starting the informers until the last possible moment so that the
		// nested Before's can keep adding things to the informer caches.
		var startInformersAndController = func() {
			// Set this at the last second to allow for injection of server override.
			subject = NewDeleterController(
				&AgentPodConfig{
					Namespace:      agentPodNamespace,
					ContainerImage: "some-agent-image",
					PodNamePrefix:  "some-agent-name-",
					AdditionalLabels: map[string]string{
						"myLabelKey1": "myLabelValue1",
						"myLabelKey2": "myLabelValue2",
					},
				},
				kubeAPIClient,
				kubeSystemInformers.Core().V1().Pods(),
				agentInformers.Core().V1().Pods(),
				controllerlib.WithInformer,
			)

			// Set this at the last second to support calling subject.Name().
			syncContext = &controllerlib.Context{
				Context: cancelContext,
				Name:    subject.Name(),
				Key: controllerlib.Key{
					Namespace: kubeSystemNamespace,
					Name:      "should-not-matter",
				},
			}

			// Must start informers before calling TestRunSynchronously()
			kubeSystemInformers.Start(cancelContext.Done())
			agentInformers.Start(cancelContext.Done())
			controllerlib.TestRunSynchronously(t, subject)
		}

		var requireAgentPodWasDeleted = func() {
			r.Equal(
				[]coretesting.Action{coretesting.NewDeleteAction(podsGVR, agentPodNamespace, agentPod.Name)},
				kubeAPIClient.Actions(),
			)
		}

		it.Before(func() {
			r = require.New(t)

			cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())

			kubeAPIClient = kubernetesfake.NewSimpleClientset()

			kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
			kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)

			agentInformerClient = kubernetesfake.NewSimpleClientset()
			agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)

			controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
				kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
			)

			podsGVR = schema.GroupVersionResource{
				Group:    corev1.SchemeGroupVersion.Group,
				Version:  corev1.SchemeGroupVersion.Version,
				Resource: "pods",
			}

			// Add a pod into the test that doesn't matter, to make sure we don't accidentally
			// trigger any logic on it.
			ignorablePod := corev1.Pod{}
			ignorablePod.Name = "some-ignorable-pod"
			r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
			r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
			r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
		})

		it.After(func() {
			cancelContextCancelFunc()
		})

		when("there is an agent pod", func() {
			it.Before(func() {
				r.NoError(agentInformerClient.Tracker().Add(agentPod))
				r.NoError(kubeAPIClient.Tracker().Add(agentPod))
			})

			when("there is a matching controller manager pod", func() {
				it.Before(func() {
					r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
					r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
				})

				it("does nothing", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					r.Empty(kubeAPIClient.Actions())
				})

				when("the agent pod is out of sync with the controller manager via volume mounts", func() {
					it.Before(func() {
						controllerManagerPod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{Name: "some-other-volume-mount"}}
						r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the controller manager via volumes", func() {
					it.Before(func() {
						controllerManagerPod.Spec.Volumes = []corev1.Volume{{Name: "some-other-volume"}}
						r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the controller manager via node selector", func() {
					it.Before(func() {
						controllerManagerPod.Spec.NodeSelector = map[string]string{
							"some-other-node-selector-key": "some-other-node-selector-value",
						}
						r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the controller manager via node name", func() {
					it.Before(func() {
						controllerManagerPod.Spec.NodeName = "some-other-node-name"
						r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the controller manager via tolerations", func() {
					it.Before(func() {
						controllerManagerPod.Spec.Tolerations = []corev1.Toleration{{Key: "some-other-toleration-key"}}
						r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync via restart policy", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						updatedAgentPod.Spec.RestartPolicy = corev1.RestartPolicyAlways
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync via automount service account token", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						trueValue := true
						updatedAgentPod.Spec.AutomountServiceAccountToken = &trueValue
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the template via name", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						updatedAgentPod.Spec.Containers[0].Name = "some-new-name"
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the template via image", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						updatedAgentPod.Spec.Containers[0].Image = "new-image"
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the template via runAsUser", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						notRoot := int64(1234)
						updatedAgentPod.Spec.SecurityContext.RunAsUser = &notRoot
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the template via runAsGroup", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						notRoot := int64(1234)
						updatedAgentPod.Spec.SecurityContext.RunAsGroup = &notRoot
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the template via having a nil SecurityContext", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						updatedAgentPod.Spec.SecurityContext = nil
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})

				when("the agent pod is out of sync with the template via labels", func() {
					when("an additional label's value was changed", func() {
						it.Before(func() {
							updatedAgentPod := agentPod.DeepCopy()
							updatedAgentPod.ObjectMeta.Labels = map[string]string{
								"kube-cert-agent.pinniped.dev": "true",
								// the value of a label is wrong so the pod should be deleted so it can get recreated with the new labels
								"myLabelKey1": "myLabelValue1-outdated-value",
								"myLabelKey2": "myLabelValue2-outdated-value",
							}
							r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
							r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						})

						it("deletes the agent pod", func() {
							startInformersAndController()
							err := controllerlib.TestSync(t, subject, *syncContext)

							r.NoError(err)
							requireAgentPodWasDeleted()
						})
					})

					when("an additional custom label was added since the agent pod was created", func() {
						it.Before(func() {
							updatedAgentPod := agentPod.DeepCopy()
							updatedAgentPod.ObjectMeta.Labels = map[string]string{
								"kube-cert-agent.pinniped.dev": "true",
								"myLabelKey1":                  "myLabelValue1",
								// "myLabelKey2" is missing so the pod should be deleted so it can get recreated with the new labels
							}
							r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
							r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						})

						it("deletes the agent pod", func() {
							startInformersAndController()
							err := controllerlib.TestSync(t, subject, *syncContext)

							r.NoError(err)
							requireAgentPodWasDeleted()
						})
					})

					when("the agent pod has extra labels that seem unrelated to the additional labels", func() {
						it.Before(func() {
							updatedAgentPod := agentPod.DeepCopy()
							updatedAgentPod.ObjectMeta.Labels = map[string]string{
								"kube-cert-agent.pinniped.dev": "true",
								"myLabelKey1":                  "myLabelValue1",
								"myLabelKey2":                  "myLabelValue2",
"extra-label": "not-related-to-the-sepcified-additional-labels",
|
|
||||||
							}
							r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
							r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						})

						it("does not delete the agent pod because someone else might have put those labels on it", func() {
							startInformersAndController()
							err := controllerlib.TestSync(t, subject, *syncContext)

							r.NoError(err)
							r.Empty(kubeAPIClient.Actions())
						})
					})
				})

				when("the agent pod is out of sync with the template via command", func() {
					it.Before(func() {
						updatedAgentPod := agentPod.DeepCopy()
						updatedAgentPod.Spec.Containers[0].Command = []string{"some", "new", "command"}
						r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
						r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
					})

					it("deletes the agent pod", func() {
						startInformersAndController()
						err := controllerlib.TestSync(t, subject, *syncContext)

						r.NoError(err)
						requireAgentPodWasDeleted()
					})
				})
			})

			when("there is a non-matching controller manager pod via uid", func() {
				it.Before(func() {
					controllerManagerPod.UID = "some-other-controller-manager-uid"
					r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
					r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
				})

				it("deletes the agent pod", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					requireAgentPodWasDeleted()
				})
			})

			when("there is a non-matching controller manager pod via name", func() {
				it.Before(func() {
					controllerManagerPod.Name = "some-other-controller-manager-name"
					r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
					r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
				})

				it("deletes the agent pod", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					requireAgentPodWasDeleted()
				})
			})

			when("there is no matching controller manager pod", func() {
				it("deletes the agent pod", func() {
					startInformersAndController()
					err := controllerlib.TestSync(t, subject, *syncContext)

					r.NoError(err)
					requireAgentPodWasDeleted()
				})
			})
		})

		when("there is no agent pod", func() {
			it("does nothing", func() {
				startInformersAndController()
				err := controllerlib.TestSync(t, subject, *syncContext)

				r.NoError(err)
				r.Empty(kubeAPIClient.Actions())
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}
@@ -1,232 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"encoding/base64"
	"fmt"

	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/errors"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/tools/clientcmd"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controller/issuerconfig"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/dynamiccert"
)

const (
	ClusterInfoNamespace    = "kube-public"
	clusterInfoName         = "cluster-info"
	clusterInfoConfigMapKey = "kubeconfig"
)

type execerController struct {
	credentialIssuerLocationConfig *CredentialIssuerLocationConfig
	credentialIssuerLabels         map[string]string
	discoveryURLOverride           *string
	dynamicCertProvider            dynamiccert.Private
	podCommandExecutor             PodCommandExecutor
	clock                          clock.Clock
	pinnipedAPIClient              pinnipedclientset.Interface
	agentPodInformer               corev1informers.PodInformer
	configMapInformer              corev1informers.ConfigMapInformer
}

// NewExecerController returns a controllerlib.Controller that listens for agent pods with proper
// cert/key path annotations and execs into them to get the cert/key material. It sets the retrieved
// key material in a provided dynamicCertProvider.
//
// It also is tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewExecerController(
	credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
	credentialIssuerLabels map[string]string,
	discoveryURLOverride *string,
	dynamicCertProvider dynamiccert.Private,
	podCommandExecutor PodCommandExecutor,
	pinnipedAPIClient pinnipedclientset.Interface,
	clock clock.Clock,
	agentPodInformer corev1informers.PodInformer,
	configMapInformer corev1informers.ConfigMapInformer,
	withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
	return controllerlib.New(
		controllerlib.Config{
			Name: "kube-cert-agent-execer-controller",
			Syncer: &execerController{
				credentialIssuerLocationConfig: credentialIssuerLocationConfig,
				credentialIssuerLabels:         credentialIssuerLabels,
				discoveryURLOverride:           discoveryURLOverride,
				dynamicCertProvider:            dynamicCertProvider,
				podCommandExecutor:             podCommandExecutor,
				pinnipedAPIClient:              pinnipedAPIClient,
				clock:                          clock,
				agentPodInformer:               agentPodInformer,
				configMapInformer:              configMapInformer,
			},
		},
		withInformer(
			agentPodInformer,
			pinnipedcontroller.SimpleFilter(isAgentPod, nil), // nil parent func is fine because each event is distinct
			controllerlib.InformerOption{},
		),
		withInformer(
			configMapInformer,
			pinnipedcontroller.NameAndNamespaceExactMatchFilterFactory(clusterInfoName, ClusterInfoNamespace),
			controllerlib.InformerOption{},
		),
	)
}

func (c *execerController) Sync(ctx controllerlib.Context) error {
	maybeAgentPod, err := c.agentPodInformer.Lister().Pods(ctx.Key.Namespace).Get(ctx.Key.Name)
	notFound := k8serrors.IsNotFound(err)
	if err != nil && !notFound {
		return fmt.Errorf("failed to get %s/%s pod: %w", ctx.Key.Namespace, ctx.Key.Name, err)
	}
	if notFound {
		// The pod in question does not exist, so it was probably deleted
		return nil
	}

	certPath, keyPath := c.getKeypairFilePaths(maybeAgentPod)
	if certPath == "" || keyPath == "" {
		// The annotator controller has not annotated this agent pod yet, or it is not an agent pod at all
		return nil
	}
	agentPod := maybeAgentPod

	if agentPod.Status.Phase != v1.PodRunning {
		// Seems to be an agent pod, but it is not ready yet
		return nil
	}

	certPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", certPath)
	if err != nil {
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, err),
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	keyPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", keyPath)
	if err != nil {
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, err),
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
		err = fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, err),
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	apiInfo, err := c.getTokenCredentialRequestAPIInfo()
	if err != nil {
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			configv1alpha1.CredentialIssuerStrategy{
				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
				Status:         configv1alpha1.ErrorStrategyStatus,
				Reason:         configv1alpha1.CouldNotGetClusterInfoStrategyReason,
				Message:        err.Error(),
				LastUpdateTime: metav1.NewTime(c.clock.Now()),
			},
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	return issuerconfig.UpdateStrategy(
		ctx.Context,
		c.credentialIssuerLocationConfig.Name,
		c.credentialIssuerLabels,
		c.pinnipedAPIClient,
		configv1alpha1.CredentialIssuerStrategy{
			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
			Status:         configv1alpha1.SuccessStrategyStatus,
			Reason:         configv1alpha1.FetchedKeyStrategyReason,
			Message:        "Key was fetched successfully",
			LastUpdateTime: metav1.NewTime(c.clock.Now()),
			Frontend: &configv1alpha1.CredentialIssuerFrontend{
				Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
				TokenCredentialRequestAPIInfo: apiInfo,
			},
		},
	)
}
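// Hypothetical sketch of a PodCommandExecutor implementation, based on the
// Exec(namespace, name, command...) (string, error) contract used above: the controller
// only ever runs `cat <path>` inside the agent pod and expects the file contents back.
// A real implementation would drive the pods/exec subresource via client-go's
// remotecommand machinery; this in-memory stand-in just illustrates the contract.
type staticPodCommandExecutorSketch struct {
	files map[string]string // path -> file contents
}

func (e *staticPodCommandExecutorSketch) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
	if len(commandAndArgs) == 2 && commandAndArgs[0] == "cat" {
		if contents, ok := e.files[commandAndArgs[1]]; ok {
			return contents, nil
		}
	}
	return "", fmt.Errorf("no such file in pod %s/%s: %v", podNamespace, podName, commandAndArgs)
}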

func (c *execerController) getTokenCredentialRequestAPIInfo() (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
	configMap, err := c.configMapInformer.
		Lister().
		ConfigMaps(ClusterInfoNamespace).
		Get(clusterInfoName)
	if err != nil {
		return nil, fmt.Errorf("failed to get %s configmap: %w", clusterInfoName, err)
	}

	kubeConfigYAML, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
	if !kubeConfigPresent {
		return nil, fmt.Errorf("failed to get %s key from %s configmap", clusterInfoConfigMapKey, clusterInfoName)
	}

	kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
	if err != nil {
		return nil, fmt.Errorf("failed to load data from %s key in %s configmap", clusterInfoConfigMapKey, clusterInfoName)
	}

	for _, v := range kubeconfig.Clusters {
		result := &configv1alpha1.TokenCredentialRequestAPIInfo{
			Server:                   v.Server,
			CertificateAuthorityData: base64.StdEncoding.EncodeToString(v.CertificateAuthorityData),
		}
		if c.discoveryURLOverride != nil {
			result.Server = *c.discoveryURLOverride
		}
		return result, nil
	}
	return nil, fmt.Errorf("kubeconfig in %s key in %s configmap did not contain any clusters", clusterInfoConfigMapKey, clusterInfoName)
}
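// For reference, a hypothetical example of the kube-public/cluster-info ConfigMap data that
// the lookup above parses: the "kubeconfig" key holds a kubeconfig whose first cluster
// provides the API server URL and base64-encoded CA bundle (placeholder values below).
//
//	kind: Config
//	apiVersion: v1
//	clusters:
//	- name: ""
//	  cluster:
//	    certificate-authority-data: "<base64-encoded-CA-bundle>"
//	    server: "https://example-api-server.example.com:443"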

func (c *execerController) getKeypairFilePaths(pod *v1.Pod) (string, string) {
	annotations := pod.Annotations
	if annotations == nil {
		annotations = make(map[string]string)
	}

	certPath := annotations[agentPodCertPathAnnotationKey]
	keyPath := annotations[agentPodKeyPathAnnotationKey]

	return certPath, keyPath
}

func newAggregate(errs ...error) error {
	return errors.NewAggregate(errs)
}
@@ -1,733 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"testing"
	"time"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/clock"
	kubeinformers "k8s.io/client-go/informers"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/dynamiccert"
	"go.pinniped.dev/internal/here"
	"go.pinniped.dev/internal/testutil"
)

func TestExecerControllerOptions(t *testing.T) {
	spec.Run(t, "options", func(t *testing.T, when spec.G, it spec.S) {
		var r *require.Assertions
		var observableWithInformerOption *testutil.ObservableWithInformerOption
		var agentPodInformerFilter controllerlib.Filter

		whateverPod := &corev1.Pod{}

		it.Before(func() {
			r = require.New(t)
			observableWithInformerOption = testutil.NewObservableWithInformerOption()
			informerFactory := kubeinformers.NewSharedInformerFactory(nil, 0)
			agentPodsInformer := informerFactory.Core().V1().Pods()
			configMapsInformer := informerFactory.Core().V1().ConfigMaps()
			_ = NewExecerController(
				&CredentialIssuerLocationConfig{
					Name: "ignored by this test",
				},
				nil, // credentialIssuerLabels, not needed for this test
				nil, // discoveryURLOverride, not needed for this test
				nil, // dynamicCertProvider, not needed for this test
				nil, // podCommandExecutor, not needed for this test
				nil, // pinnipedAPIClient, not needed for this test
				nil, // clock, not needed for this test
				agentPodsInformer,
				configMapsInformer,
				observableWithInformerOption.WithInformer,
			)
			agentPodInformerFilter = observableWithInformerOption.GetFilterForInformer(agentPodsInformer)
		})

		when("the change is happening in the agent's namespace", func() {
			when("a pod with all agent labels is added/updated/deleted", func() {
				it("returns true", func() {
					pod := &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"kube-cert-agent.pinniped.dev": "true",
							},
						},
					}

					r.True(agentPodInformerFilter.Add(pod))
					r.True(agentPodInformerFilter.Update(whateverPod, pod))
					r.True(agentPodInformerFilter.Update(pod, whateverPod))
					r.True(agentPodInformerFilter.Delete(pod))
				})
			})

			when("a pod missing the agent label is added/updated/deleted", func() {
				it("returns false", func() {
					pod := &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"some-other-label-key": "some-other-label-value",
							},
						},
					}

					r.False(agentPodInformerFilter.Add(pod))
					r.False(agentPodInformerFilter.Update(whateverPod, pod))
					r.False(agentPodInformerFilter.Update(pod, whateverPod))
					r.False(agentPodInformerFilter.Delete(pod))
				})
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}

type fakePodExecutor struct {
	r *require.Assertions

	resultsToReturn []string
	errorsToReturn  []error

	calledWithPodName        []string
	calledWithPodNamespace   []string
	calledWithCommandAndArgs [][]string

	callCount int
}

func (s *fakePodExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
	s.calledWithPodNamespace = append(s.calledWithPodNamespace, podNamespace)
	s.calledWithPodName = append(s.calledWithPodName, podName)
	s.calledWithCommandAndArgs = append(s.calledWithCommandAndArgs, commandAndArgs)
	s.r.Less(s.callCount, len(s.resultsToReturn), "unexpected extra invocation of fakePodExecutor")
	result := s.resultsToReturn[s.callCount]
	var err error
	if s.errorsToReturn != nil {
		s.r.Less(s.callCount, len(s.errorsToReturn), "unexpected extra invocation of fakePodExecutor")
		err = s.errorsToReturn[s.callCount]
	}
	s.callCount++
	if err != nil {
		return "", err
	}
	return result, nil
}
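// Hypothetical sketch of how this fake is typically seeded, assuming the controller's first
// Exec call is `cat <certPath>` and its second is `cat <keyPath>` (placeholder PEM values).
func newSeededFakePodExecutorSketch(r *require.Assertions) *fakePodExecutor {
	return &fakePodExecutor{
		r:               r,
		resultsToReturn: []string{"placeholder cert PEM", "placeholder key PEM"},
	}
}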
|
|
||||||
|
|
||||||
func TestManagerControllerSync(t *testing.T) {
	name := t.Name()
	spec.Run(t, "Sync", func(t *testing.T, when spec.G, it spec.S) {
		const agentPodNamespace = "some-namespace"
		const agentPodName = "some-agent-pod-name-123"
		const certPathAnnotationName = "kube-cert-agent.pinniped.dev/cert-path"
		const keyPathAnnotationName = "kube-cert-agent.pinniped.dev/key-path"
		const fakeCertPath = "/some/cert/path"
		const fakeKeyPath = "/some/key/path"
		const credentialIssuerResourceName = "ci-resource-name"

		var r *require.Assertions

		var subject controllerlib.Controller
		var cancelContext context.Context
		var cancelContextCancelFunc context.CancelFunc
		var syncContext *controllerlib.Context
		var pinnipedAPIClient *pinnipedfake.Clientset
		var kubeInformerFactory kubeinformers.SharedInformerFactory
		var kubeClientset *kubernetesfake.Clientset
		var fakeExecutor *fakePodExecutor
		var credentialIssuerLabels map[string]string
		var discoveryURLOverride *string
		var dynamicCertProvider dynamiccert.Provider
		var fakeCertPEM, fakeKeyPEM string
		var credentialIssuerGVR schema.GroupVersionResource
		var frozenNow time.Time
		var defaultDynamicCertProviderCert string
		var defaultDynamicCertProviderKey string

		// Defer starting the informers until the last possible moment so that the
		// nested Before's can keep adding things to the informer caches.
		var startInformersAndController = func() {
			// Set this at the last second to allow for injection of server override.
			subject = NewExecerController(
				&CredentialIssuerLocationConfig{
					Name: credentialIssuerResourceName,
				},
				credentialIssuerLabels,
				discoveryURLOverride,
				dynamicCertProvider,
				fakeExecutor,
				pinnipedAPIClient,
				clock.NewFakeClock(frozenNow),
				kubeInformerFactory.Core().V1().Pods(),
				kubeInformerFactory.Core().V1().ConfigMaps(),
				controllerlib.WithInformer,
			)

			// Set this at the last second to support calling subject.Name().
			syncContext = &controllerlib.Context{
				Context: cancelContext,
				Name:    subject.Name(),
				Key: controllerlib.Key{
					Namespace: agentPodNamespace,
					Name:      agentPodName,
				},
			}

			// Must start informers before calling TestRunSynchronously()
			kubeInformerFactory.Start(cancelContext.Done())
			controllerlib.TestRunSynchronously(t, subject)
		}

		var newAgentPod = func(agentPodName string, hasCertPathAnnotations bool) *corev1.Pod {
			pod := &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      agentPodName,
					Namespace: agentPodNamespace,
					Labels: map[string]string{
						"some-label-key": "some-label-value",
					},
				},
			}
			if hasCertPathAnnotations {
				pod.Annotations = map[string]string{
					certPathAnnotationName: fakeCertPath,
					keyPathAnnotationName:  fakeKeyPath,
				}
			}
			return pod
		}

		var requireDynamicCertProviderHasDefaultValues = func() {
			actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
			r.Equal(defaultDynamicCertProviderCert, string(actualCertPEM))
			r.Equal(defaultDynamicCertProviderKey, string(actualKeyPEM))
		}

		var requireNoExternalActionsTaken = func() {
			r.Empty(pinnipedAPIClient.Actions())
			r.Zero(fakeExecutor.callCount)
			requireDynamicCertProviderHasDefaultValues()
		}

		it.Before(func() {
			r = require.New(t)

			crt, key, err := testutil.CreateCertificate(
				time.Now().Add(-time.Hour),
				time.Now().Add(time.Hour),
			)
			require.NoError(t, err)
			defaultDynamicCertProviderCert = string(crt)
			defaultDynamicCertProviderKey = string(key)

			cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
			pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
			kubeClientset = kubernetesfake.NewSimpleClientset()
			kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeClientset, 0)
			fakeExecutor = &fakePodExecutor{r: r}
			frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
			dynamicCertProvider = dynamiccert.NewCA(name)
			err = dynamicCertProvider.SetCertKeyContent([]byte(defaultDynamicCertProviderCert), []byte(defaultDynamicCertProviderKey))
			r.NoError(err)

			loadFile := func(filename string) string {
				bytes, err := ioutil.ReadFile(filename)
				r.NoError(err)
				return string(bytes)
			}
			fakeCertPEM = loadFile("./testdata/test.crt")
			fakeKeyPEM = loadFile("./testdata/test.key")

			credentialIssuerGVR = schema.GroupVersionResource{
				Group:    configv1alpha1.GroupName,
				Version:  configv1alpha1.SchemeGroupVersion.Version,
				Resource: "credentialissuers",
			}
		})

		it.After(func() {
			cancelContextCancelFunc()
		})
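
Note: the frozen fake clock is what makes the LastUpdateTime assertions below exact. A minimal sketch of that pattern, using only the imports this file already has:

	frozen := time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
	fakeClock := clock.NewFakeClock(frozen) // k8s.io/apimachinery/pkg/util/clock
	// A controller that stamps status with fakeClock.Now() always produces exactly
	// metav1.NewTime(frozen), so whole status structs can be compared with r.Equal.
	_ = fakeClock
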
when("there is not yet any agent pods or they were deleted", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
unrelatedPod := &corev1.Pod{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: "some other pod",
|
|
||||||
Namespace: agentPodNamespace,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(unrelatedPod))
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing", func() {
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
requireNoExternalActionsTaken()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is not yet annotated by the annotater controller", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
agentPod := newAgentPod(agentPodName, false)
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(agentPod))
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing", func() {
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
requireNoExternalActionsTaken()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod, as determined by its labels matching the agent pod template labels, and it was annotated by the annotater controller, but it is not Running", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
agentPod := newAgentPod(agentPodName, true)
|
|
||||||
agentPod.Status.Phase = corev1.PodPending // not Running
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(agentPod))
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing", func() {
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
requireNoExternalActionsTaken()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is already annotated by the annotater controller, and it is Running", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
targetAgentPod := newAgentPod(agentPodName, true)
|
|
||||||
targetAgentPod.Status.Phase = corev1.PodRunning
|
|
||||||
anotherAgentPod := newAgentPod("some-other-agent-pod-which-is-not-the-context-of-this-sync", true)
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(targetAgentPod))
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(anotherAgentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the resulting pod execs will succeed", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
fakeExecutor.resultsToReturn = []string{fakeCertPEM, fakeKeyPEM}
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the cluster-info ConfigMap is not found", func() {
|
|
||||||
it("returns an error and updates the strategy with an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to get cluster-info configmap: configmap "cluster-info" not found`)
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotGetClusterInfoStrategyReason,
|
|
||||||
Message: `failed to get cluster-info configmap: configmap "cluster-info" not found`,
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
|
||||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the cluster-info ConfigMap is missing a key", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Namespace: ClusterInfoNamespace,
|
|
||||||
Name: clusterInfoName,
|
|
||||||
},
|
|
||||||
Data: map[string]string{"uninteresting-key": "uninteresting-value"},
|
|
||||||
}))
|
|
||||||
})
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to get kubeconfig key from cluster-info configmap`)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the cluster-info ConfigMap is contains invalid YAML", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Namespace: ClusterInfoNamespace,
|
|
||||||
Name: clusterInfoName,
|
|
||||||
},
|
|
||||||
Data: map[string]string{"kubeconfig": "invalid-yaml"},
|
|
||||||
}))
|
|
||||||
})
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to load data from kubeconfig key in cluster-info configmap`)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the cluster-info ConfigMap is contains an empty list of clusters", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Namespace: ClusterInfoNamespace,
|
|
||||||
Name: clusterInfoName,
|
|
||||||
},
|
|
||||||
Data: map[string]string{
|
|
||||||
"kubeconfig": here.Doc(`
|
|
||||||
kind: Config
|
|
||||||
apiVersion: v1
|
|
||||||
clusters: []
|
|
||||||
`),
|
|
||||||
"uninteresting-key": "uninteresting-value",
|
|
||||||
},
|
|
||||||
}))
|
|
||||||
})
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `kubeconfig in kubeconfig key in cluster-info configmap did not contain any clusters`)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the cluster-info ConfigMap is valid", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
const caData = "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=" // "some-certificate-authority-data" base64 encoded
|
|
||||||
const kubeServerURL = "https://some-server"
|
|
||||||
r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Namespace: ClusterInfoNamespace,
|
|
||||||
Name: clusterInfoName,
|
|
||||||
},
|
|
||||||
Data: map[string]string{
|
|
||||||
"kubeconfig": here.Docf(`
|
|
||||||
kind: Config
|
|
||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- name: ""
|
|
||||||
cluster:
|
|
||||||
certificate-authority-data: "%s"
|
|
||||||
server: "%s"`,
|
|
||||||
caData, kubeServerURL),
|
|
||||||
"uninteresting-key": "uninteresting-value",
|
|
||||||
},
|
|
||||||
}))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("execs to the agent pod to get the keys and updates the dynamic certificates provider with the new certs", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
r.Equal(2, fakeExecutor.callCount)
|
|
||||||
|
|
||||||
r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[0])
|
|
||||||
r.Equal(agentPodName, fakeExecutor.calledWithPodName[0])
|
|
||||||
r.Equal([]string{"cat", fakeCertPath}, fakeExecutor.calledWithCommandAndArgs[0])
|
|
||||||
|
|
||||||
r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[1])
|
|
||||||
r.Equal(agentPodName, fakeExecutor.calledWithPodName[1])
|
|
||||||
r.Equal([]string{"cat", fakeKeyPath}, fakeExecutor.calledWithCommandAndArgs[1])
|
|
||||||
|
|
||||||
actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
|
|
||||||
r.Equal(fakeCertPEM, string(actualCertPEM))
|
|
||||||
r.Equal(fakeKeyPEM, string(actualKeyPEM))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is already a CredentialIssuer", func() {
|
|
||||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("also updates the the existing CredentialIssuer status field", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
// The first update to the CredentialIssuer will set the strategy entry
|
|
||||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
|
||||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.SuccessStrategyStatus,
|
|
||||||
Reason: configv1alpha1.FetchedKeyStrategyReason,
|
|
||||||
Message: "Key was fetched successfully",
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
Frontend: &configv1alpha1.CredentialIssuerFrontend{
|
|
||||||
Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
|
|
||||||
TokenCredentialRequestAPIInfo: &configv1alpha1.TokenCredentialRequestAPIInfo{
|
|
||||||
Server: "https://some-server",
|
|
||||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedCredentialIssuer.Status.KubeConfigInfo = &configv1alpha1.CredentialIssuerKubeConfigInfo{
|
|
||||||
Server: "https://some-server",
|
|
||||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
|
||||||
expectedCreateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
|
||||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
|
|
||||||
})
|
|
||||||
|
|
||||||
when("updating the CredentialIssuer fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
pinnipedAPIClient.PrependReactor(
|
|
||||||
"update",
|
|
||||||
"credentialissuers",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some update error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
r.EqualError(err, "could not create or update credentialissuer: some update error")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is not already a CredentialIssuer", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
server := "https://overridden-server-url.example.com"
|
|
||||||
discoveryURLOverride = &server
|
|
||||||
credentialIssuerLabels = map[string]string{"foo": "bar"}
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("also creates the the CredentialIssuer with the appropriate status field and labels", func() {
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
Labels: map[string]string{"foo": "bar"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
Labels: map[string]string{"foo": "bar"},
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.SuccessStrategyStatus,
|
|
||||||
Reason: configv1alpha1.FetchedKeyStrategyReason,
|
|
||||||
Message: "Key was fetched successfully",
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
Frontend: &configv1alpha1.CredentialIssuerFrontend{
|
|
||||||
Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
|
|
||||||
TokenCredentialRequestAPIInfo: &configv1alpha1.TokenCredentialRequestAPIInfo{
|
|
||||||
Server: "https://overridden-server-url.example.com",
|
|
||||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
KubeConfigInfo: &configv1alpha1.CredentialIssuerKubeConfigInfo{
|
|
||||||
Server: "https://overridden-server-url.example.com",
|
|
||||||
CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
|
||||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the first resulting pod exec will fail", func() {
|
|
||||||
var podExecErrorMessage string
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
podExecErrorMessage = "some pod exec error message"
|
|
||||||
fakeExecutor.errorsToReturn = []error{fmt.Errorf(podExecErrorMessage), nil}
|
|
||||||
fakeExecutor.resultsToReturn = []string{"", fakeKeyPEM}
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does not update the dynamic certificates provider", func() {
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
|
||||||
requireDynamicCertProviderHasDefaultValues()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("creates or updates the the CredentialIssuer status field with an error", func() {
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: podExecErrorMessage,
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
|
||||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the second resulting pod exec will fail", func() {
|
|
||||||
var podExecErrorMessage string
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
podExecErrorMessage = "some pod exec error message"
|
|
||||||
fakeExecutor.errorsToReturn = []error{nil, fmt.Errorf(podExecErrorMessage)}
|
|
||||||
fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does not update the dynamic certificates provider", func() {
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
|
||||||
requireDynamicCertProviderHasDefaultValues()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("creates or updates the the CredentialIssuer status field with an error", func() {
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: podExecErrorMessage,
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
|
||||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the third resulting pod exec has invalid key data", func() {
|
|
||||||
var keyParseErrorMessage string
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
keyParseErrorMessage = "failed to set signing cert/key content from agent pod some-namespace/some-agent-pod-name-123: TestManagerControllerSync: attempt to set invalid key pair: tls: failed to find any PEM data in key input"
|
|
||||||
fakeExecutor.errorsToReturn = []error{nil, nil}
|
|
||||||
fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
|
|
||||||
startInformersAndController()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does not update the dynamic certificates provider", func() {
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), keyParseErrorMessage)
|
|
||||||
requireDynamicCertProviderHasDefaultValues()
|
|
||||||
})
|
|
||||||
|
|
||||||
it("creates or updates the the CredentialIssuer status field with an error", func() {
|
|
||||||
r.EqualError(controllerlib.TestSync(t, subject, *syncContext), keyParseErrorMessage)
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: keyParseErrorMessage,
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
|
|
||||||
r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
|
||||||
}
|
|
@ -1,130 +1,461 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0

-// Package kubecertagent provides controllers that ensure a set of pods (the kube-cert-agent) is
-// colocated with the Kubernetes controller manager so that Pinniped can access its signing keys.
-//
-// Note: the controllers use a filter that accepts all pods that look like the controller manager or
-// an agent pod, across any add/update/delete event. Each of the controllers only cares about a
-// subset of these events in reality, but the liberal filter implementation serves as an MVP.
+// Package kubecertagent provides controllers that ensure a pod (the kube-cert-agent) is
+// co-located with the Kubernetes controller manager so that Pinniped can access its signing keys.
 package kubecertagent

 import (
-	"encoding/hex"
+	"context"
+	"encoding/base64"
 	"fmt"
-	"hash/fnv"
+	"strings"
+	"time"

+	"github.com/go-logr/logr"
+	"github.com/spf13/pflag"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/cache"
 	"k8s.io/apimachinery/pkg/util/clock"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	appsv1informers "k8s.io/client-go/informers/apps/v1"
 	corev1informers "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/klogr"
+	"k8s.io/utils/pointer"

 	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
-	"go.pinniped.dev/internal/plog"
+	pinnipedcontroller "go.pinniped.dev/internal/controller"
+	"go.pinniped.dev/internal/controller/issuerconfig"
+	"go.pinniped.dev/internal/controllerlib"
+	"go.pinniped.dev/internal/dynamiccert"
+	"go.pinniped.dev/internal/kubeclient"
 )

 const (
 	// ControllerManagerNamespace is the assumed namespace of the kube-controller-manager pod(s).
 	ControllerManagerNamespace = "kube-system"

-	// controllerManagerNameAnnotationKey is used to store an agent pod's parent's name, i.e., the
-	// name of the controller manager pod with which it is supposed to be in sync.
-	controllerManagerNameAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-name"
-	// controllerManagerUIDAnnotationKey is used to store an agent pod's parent's UID, i.e., the UID
-	// of the controller manager pod with which it is supposed to be in sync.
-	controllerManagerUIDAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-uid"
-
 	// agentPodLabelKey is used to identify which pods are created by the kube-cert-agent
 	// controllers.
 	agentPodLabelKey   = "kube-cert-agent.pinniped.dev"
-	agentPodLabelValue = "true"
+	agentPodLabelValue = "v2"

-	// agentPodCertPathAnnotationKey is the annotation that the kube-cert-agent pod will use
-	// to communicate the in-pod path to the kube API's certificate.
-	agentPodCertPathAnnotationKey = "kube-cert-agent.pinniped.dev/cert-path"
-
-	// agentPodKeyPathAnnotationKey is the annotation that the kube-cert-agent pod will use
-	// to communicate the in-pod path to the kube API's key.
-	agentPodKeyPathAnnotationKey = "kube-cert-agent.pinniped.dev/key-path"
+	ClusterInfoNamespace    = "kube-public"
+	clusterInfoName         = "cluster-info"
+	clusterInfoConfigMapKey = "kubeconfig"
 )
-type AgentPodConfig struct {
-	// The namespace in which agent pods will be created.
+// AgentConfig is the configuration for the kube-cert-agent controller.
+type AgentConfig struct {
+	// Namespace in which agent pods will be created.
 	Namespace string

-	// The container image used for the agent pods.
+	// ContainerImage specifies the container image used for the agent pods.
 	ContainerImage string

-	// The name prefix for each of the agent pods.
-	PodNamePrefix string
+	// NamePrefix will be prefixed to all agent pod names.
+	NamePrefix string

 	// ContainerImagePullSecrets is a list of names of Kubernetes Secret objects that will be used as
 	// ImagePullSecrets on the kube-cert-agent pods.
 	ContainerImagePullSecrets []string

-	// Additional labels that should be added to every agent pod during creation.
-	AdditionalLabels map[string]string
+	// CredentialIssuerName specifies the CredentialIssuer to be created/updated.
+	CredentialIssuerName string
+
+	// Labels to be applied to the CredentialIssuer and agent pods.
+	Labels map[string]string
+
+	// DiscoveryURLOverride is the Kubernetes server endpoint to report in the CredentialIssuer, overriding any
+	// value discovered in the kube-public/cluster-info ConfigMap.
+	DiscoveryURLOverride *string
 }

-type CredentialIssuerLocationConfig struct {
-	// The resource name for the CredentialIssuer to be created/updated.
-	Name string
-}
-
-func (c *AgentPodConfig) Labels() map[string]string {
-	allLabels := map[string]string{
-		agentPodLabelKey: agentPodLabelValue,
-	}
-	for k, v := range c.AdditionalLabels {
+func (a *AgentConfig) agentLabels() map[string]string {
+	allLabels := map[string]string{agentPodLabelKey: agentPodLabelValue}
+	for k, v := range a.Labels {
 		allLabels[k] = v
 	}
 	return allLabels
 }

-func (c *AgentPodConfig) AgentSelector() labels.Selector {
-	return labels.SelectorFromSet(map[string]string{agentPodLabelKey: agentPodLabelValue})
+func (a *AgentConfig) deploymentName() string {
+	return strings.TrimSuffix(a.NamePrefix, "-")
 }

-func (c *AgentPodConfig) newAgentPod(controllerManagerPod *corev1.Pod) *corev1.Pod {
-	terminateImmediately := int64(0)
-	rootID := int64(0)
-	f := false
-	falsePtr := &f
-
-	imagePullSecrets := []corev1.LocalObjectReference{}
-	for _, imagePullSecret := range c.ContainerImagePullSecrets {
-		imagePullSecrets = append(
-			imagePullSecrets,
-			corev1.LocalObjectReference{
-				Name: imagePullSecret,
-			},
-		)
-	}
+type agentController struct {
+	cfg                  AgentConfig
+	client               *kubeclient.Client
+	kubeSystemPods       corev1informers.PodInformer
+	agentDeployments     appsv1informers.DeploymentInformer
+	agentPods            corev1informers.PodInformer
+	kubePublicConfigMaps corev1informers.ConfigMapInformer
+	executor             PodCommandExecutor
+	dynamicCertProvider  dynamiccert.Private
+	clock                clock.Clock
+	log                  logr.Logger
+	execCache            *cache.Expiring
+}
+
+var (
+	// controllerManagerLabels are the Kubernetes labels we expect on the kube-controller-manager Pod.
+	controllerManagerLabels = labels.SelectorFromSet(map[string]string{ //nolint: gochecknoglobals
+		"component": "kube-controller-manager",
+	})
+
+	// agentLabels are the Kubernetes labels we always expect on the kube-cert-agent pods.
+	agentLabels = labels.SelectorFromSet(map[string]string{ //nolint: gochecknoglobals
+		agentPodLabelKey: agentPodLabelValue,
+	})
+)
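
Note: a quick illustration of the two helpers above; the prefix and label values are hypothetical.

	cfg := AgentConfig{
		NamePrefix: "pinniped-concierge-kube-cert-agent-", // assumed prefix
		Labels:     map[string]string{"app": "concierge"}, // assumed labels
	}
	name := cfg.deploymentName() // "pinniped-concierge-kube-cert-agent" (trailing "-" trimmed)
	podLabels := cfg.agentLabels() // {"kube-cert-agent.pinniped.dev": "v2", "app": "concierge"}
	_, _ = name, podLabels
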
+// NewAgentController returns a controller that manages the kube-cert-agent Deployment. It is also tasked with updating
+// the CredentialIssuer with any errors that it encounters.
+func NewAgentController(
+	cfg AgentConfig,
+	client *kubeclient.Client,
+	kubeSystemPods corev1informers.PodInformer,
+	agentDeployments appsv1informers.DeploymentInformer,
+	agentPods corev1informers.PodInformer,
+	kubePublicConfigMaps corev1informers.ConfigMapInformer,
+	dynamicCertProvider dynamiccert.Private,
+) controllerlib.Controller {
+	return newAgentController(
+		cfg,
+		client,
+		kubeSystemPods,
+		agentDeployments,
+		agentPods,
+		kubePublicConfigMaps,
+		NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
+		dynamicCertProvider,
+		&clock.RealClock{},
+		cache.NewExpiring(),
+		klogr.New(),
+	)
+}
+
+func newAgentController(
+	cfg AgentConfig,
+	client *kubeclient.Client,
+	kubeSystemPods corev1informers.PodInformer,
+	agentDeployments appsv1informers.DeploymentInformer,
+	agentPods corev1informers.PodInformer,
+	kubePublicConfigMaps corev1informers.ConfigMapInformer,
+	podCommandExecutor PodCommandExecutor,
+	dynamicCertProvider dynamiccert.Private,
+	clock clock.Clock,
+	execCache *cache.Expiring,
+	log logr.Logger,
+	options ...controllerlib.Option,
+) controllerlib.Controller {
+	return controllerlib.New(
+		controllerlib.Config{
+			Name: "kube-cert-agent-controller",
+			Syncer: &agentController{
+				cfg:                  cfg,
+				client:               client,
+				kubeSystemPods:       kubeSystemPods,
+				agentDeployments:     agentDeployments,
+				agentPods:            agentPods,
+				kubePublicConfigMaps: kubePublicConfigMaps,
+				executor:             podCommandExecutor,
+				dynamicCertProvider:  dynamicCertProvider,
+				clock:                clock,
+				log:                  log.WithName("kube-cert-agent-controller"),
+				execCache:            execCache,
+			},
+		},
+		append([]controllerlib.Option{
+			controllerlib.WithInformer(
+				kubeSystemPods,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return controllerManagerLabels.Matches(labels.Set(obj.GetLabels()))
+				}),
+				controllerlib.InformerOption{},
+			),
+			controllerlib.WithInformer(
+				agentDeployments,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return obj.GetNamespace() == cfg.Namespace && obj.GetName() == cfg.deploymentName()
+				}),
+				controllerlib.InformerOption{},
+			),
+			controllerlib.WithInformer(
+				agentPods,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return agentLabels.Matches(labels.Set(obj.GetLabels()))
+				}),
+				controllerlib.InformerOption{},
+			),
+			controllerlib.WithInformer(
+				kubePublicConfigMaps,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return obj.GetNamespace() == ClusterInfoNamespace && obj.GetName() == clusterInfoName
+				}),
+				controllerlib.InformerOption{},
+			),
+			// Be sure to run at least once, even when there are no controller manager pods, so that
+			// the CredentialIssuer still gets updated. We can pass an empty key since we don't use
+			// the key in the sync (we sync the world).
+			controllerlib.WithInitialEvent(controllerlib.Key{}),
+		}, options...)...,
+	)
+}
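
Note: a rough wiring sketch of how a caller might construct this controller; the config values and informer factory variables are assumptions, not code from this commit.

	controller := NewAgentController(
		AgentConfig{
			Namespace:            "pinniped-concierge",                  // assumed
			ContainerImage:       "some-agent-image:latest",             // assumed
			NamePrefix:           "pinniped-concierge-kube-cert-agent-", // assumed
			CredentialIssuerName: "pinniped-concierge-config",           // assumed
		},
		client, // *kubeclient.Client, assumed to already exist
		kubeSystemInformers.Core().V1().Pods(),
		conciergeInformers.Apps().V1().Deployments(),
		conciergeInformers.Core().V1().Pods(),
		kubePublicInformers.Core().V1().ConfigMaps(),
		dynamicCertProvider,
	)
	_ = controller
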
+// Sync implements controllerlib.Syncer.
+func (c *agentController) Sync(ctx controllerlib.Context) error {
+	// Find the latest healthy kube-controller-manager Pod in kube-system.
+	controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
+	if err != nil {
+		err := fmt.Errorf("could not list controller manager pods: %w", err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+	newestControllerManager := newestRunningPod(controllerManagerPods)
+
+	// If there are no healthy controller manager pods, we alert the user that we can't find the keypair via
+	// the CredentialIssuer.
+	if newestControllerManager == nil {
+		err := fmt.Errorf("could not find a healthy kube-controller-manager pod (%s)", pluralize(controllerManagerPods))
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	if err := c.createOrUpdateDeployment(ctx, newestControllerManager); err != nil {
+		err := fmt.Errorf("could not ensure agent deployment: %w", err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	// Find the latest healthy agent Pod in our namespace.
+	agentPods, err := c.agentPods.Lister().Pods(c.cfg.Namespace).List(agentLabels)
+	if err != nil {
+		err := fmt.Errorf("could not list agent pods: %w", err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+	newestAgentPod := newestRunningPod(agentPods)
+
+	// If there are no healthy agent pods, we alert the user that we can't find the keypair via
+	// the CredentialIssuer.
+	if newestAgentPod == nil {
+		err := fmt.Errorf("could not find a healthy agent pod (%s)", pluralize(agentPods))
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	// Load the Kubernetes API info from the kube-public/cluster-info ConfigMap.
+	configMap, err := c.kubePublicConfigMaps.Lister().ConfigMaps(ClusterInfoNamespace).Get(clusterInfoName)
+	if err != nil {
+		err := fmt.Errorf("failed to get %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
+	}
+
+	apiInfo, err := c.extractAPIInfo(configMap)
+	if err != nil {
+		err := fmt.Errorf("could not extract Kubernetes API endpoint info from %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
+	}
+
+	// Load the certificate and key from the agent pod into our in-memory signer.
+	if err := c.loadSigningKey(newestAgentPod); err != nil {
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	// Set the CredentialIssuer strategy to successful.
+	return issuerconfig.UpdateStrategy(
+		ctx.Context,
+		c.cfg.CredentialIssuerName,
+		c.cfg.Labels,
+		c.client.PinnipedConcierge,
+		configv1alpha1.CredentialIssuerStrategy{
+			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+			Status:         configv1alpha1.SuccessStrategyStatus,
+			Reason:         configv1alpha1.FetchedKeyStrategyReason,
+			Message:        "key was fetched successfully",
+			LastUpdateTime: metav1.NewTime(c.clock.Now()),
+			Frontend: &configv1alpha1.CredentialIssuerFrontend{
+				Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
+				TokenCredentialRequestAPIInfo: apiInfo,
+			},
+		},
+	)
+}
-	return &corev1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      fmt.Sprintf("%s%s", c.PodNamePrefix, hash(controllerManagerPod)),
-			Namespace: c.Namespace,
-			Labels:    c.Labels(),
-			Annotations: map[string]string{
-				controllerManagerNameAnnotationKey: controllerManagerPod.Name,
-				controllerManagerUIDAnnotationKey:  string(controllerManagerPod.UID),
-			},
-		},
+func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
+	// If we remember successfully loading the key from this pod recently, we can skip this step and return immediately.
+	if _, exists := c.execCache.Get(agentPod.UID); exists {
+		return nil
+	}
+
+	// Exec into the agent pod and cat out the certificate and the key.
+	combinedPEM, err := c.executor.Exec(
+		agentPod.Namespace, agentPod.Name,
+		"sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}",
+	)
+	if err != nil {
+		return fmt.Errorf("could not exec into agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
+	}
+
+	// Split up the output by looking for the block of newlines.
+	var certPEM, keyPEM string
+	if parts := strings.Split(combinedPEM, "\n\n\n"); len(parts) == 2 {
+		certPEM, keyPEM = parts[0], parts[1]
+	}
+
+	// Load the certificate and key into the dynamic signer.
+	if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
+		return fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
+	}
+
+	// Remember that we've successfully loaded the key from this pod so we can skip the exec+load if nothing has changed.
+	c.execCache.Set(agentPod.UID, struct{}{}, 15*time.Minute)
+	return nil
+}
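
Note: the split above relies on `cat` of a PEM file ending with a newline and the two `echo`s contributing one newline each, which yields exactly one "\n\n\n" separator. A standalone sketch with placeholder PEM bodies:

	combined := "-----BEGIN CERTIFICATE-----\nAAA\n-----END CERTIFICATE-----\n" +
		"\n\n" +
		"-----BEGIN RSA PRIVATE KEY-----\nBBB\n-----END RSA PRIVATE KEY-----\n"
	parts := strings.Split(combined, "\n\n\n")
	// len(parts) == 2: parts[0] is the certificate PEM, parts[1] is the key PEM.
	_ = parts
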
+func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, newestControllerManager *corev1.Pod) error {
+	// Build the expected Deployment based on the kube-controller-manager Pod as a template.
+	expectedDeployment := c.newAgentDeployment(newestControllerManager)
+
+	// Try to get the existing Deployment, if it exists.
+	existingDeployment, err := c.agentDeployments.Lister().Deployments(expectedDeployment.Namespace).Get(expectedDeployment.Name)
+	notFound := k8serrors.IsNotFound(err)
+	if err != nil && !notFound {
+		return fmt.Errorf("could not get deployments: %w", err)
+	}
+
+	log := c.log.WithValues(
+		"deployment", klog.KObj(expectedDeployment),
+		"templatePod", klog.KObj(newestControllerManager),
+	)
+
+	// If the Deployment did not exist, create it and be done.
+	if notFound {
+		log.Info("creating new deployment")
+		_, err := c.client.Kubernetes.AppsV1().Deployments(expectedDeployment.Namespace).Create(ctx.Context, expectedDeployment, metav1.CreateOptions{})
+		return err
+	}
+
+	// Otherwise update the spec of the Deployment to match our desired state.
+	updatedDeployment := existingDeployment.DeepCopy()
+	updatedDeployment.Spec = expectedDeployment.Spec
+	updatedDeployment.ObjectMeta = mergeLabelsAndAnnotations(updatedDeployment.ObjectMeta, expectedDeployment.ObjectMeta)
+
+	// If the existing Deployment already matches our desired spec, we're done.
+	if apiequality.Semantic.DeepDerivative(updatedDeployment, existingDeployment) {
+		return nil
+	}
+
+	log.Info("updating existing deployment")
+	_, err = c.client.Kubernetes.AppsV1().Deployments(updatedDeployment.Namespace).Update(ctx.Context, updatedDeployment, metav1.UpdateOptions{})
+	return err
+}
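
Note: DeepDerivative, unlike DeepEqual, ignores fields that are zero-valued in its first argument, which is why API-server-defaulted fields don't cause spurious updates above. A tiny illustration, reusing the imports already in this file:

	expected := corev1.PodSpec{NodeName: "node-1"} // desired state: only the fields we set
	actual := expected
	actual.RestartPolicy = corev1.RestartPolicyAlways // defaulted by the API server
	_ = apiequality.Semantic.DeepDerivative(expected, actual) // true: zero-valued fields in `expected` are skipped
	_ = apiequality.Semantic.DeepEqual(expected, actual)      // false: RestartPolicy differs
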
+func (c *agentController) failStrategyAndErr(ctx context.Context, err error, reason configv1alpha1.StrategyReason) error {
+	return utilerrors.NewAggregate([]error{err, issuerconfig.UpdateStrategy(
+		ctx,
+		c.cfg.CredentialIssuerName,
+		c.cfg.Labels,
+		c.client.PinnipedConcierge,
+		configv1alpha1.CredentialIssuerStrategy{
+			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+			Status:         configv1alpha1.ErrorStrategyStatus,
+			Reason:         reason,
+			Message:        err.Error(),
+			LastUpdateTime: metav1.NewTime(c.clock.Now()),
+		},
+	)})
+}
+func (c *agentController) extractAPIInfo(configMap *corev1.ConfigMap) (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
+	kubeConfigYAML, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
+	if !kubeConfigPresent {
+		return nil, fmt.Errorf("missing %q key", clusterInfoConfigMapKey)
+	}
+
+	kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
+	if err != nil {
+		// We purposefully don't wrap "err" here because it's very verbose.
+		return nil, fmt.Errorf("key %q does not contain a valid kubeconfig", clusterInfoConfigMapKey)
+	}
+
+	for _, v := range kubeconfig.Clusters {
+		result := &configv1alpha1.TokenCredentialRequestAPIInfo{
+			Server:                   v.Server,
+			CertificateAuthorityData: base64.StdEncoding.EncodeToString(v.CertificateAuthorityData),
+		}
+		if c.cfg.DiscoveryURLOverride != nil {
+			result.Server = *c.cfg.DiscoveryURLOverride
+		}
+		return result, nil
+	}
+	return nil, fmt.Errorf("kubeconfig in key %q does not contain any clusters", clusterInfoConfigMapKey)
+}
+
+// newestRunningPod takes a list of pods and returns the newest one with status.phase == "Running".
+func newestRunningPod(pods []*corev1.Pod) *corev1.Pod {
+	// Compare two pods based on creation timestamp, breaking ties by name.
+	newer := func(a, b *corev1.Pod) bool {
+		if a.CreationTimestamp.Time.Equal(b.CreationTimestamp.Time) {
+			return a.Name < b.Name
+		}
+		return a.CreationTimestamp.After(b.CreationTimestamp.Time)
+	}
+
+	var result *corev1.Pod
+	for _, pod := range pods {
+		if pod.Status.Phase == corev1.PodRunning && (result == nil || newer(pod, result)) {
+			result = pod
+		}
+	}
+	return result
+}
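
Note: concretely, given the cluster-info ConfigMap shape the old tests above construct, extraction behaves as sketched here (the values are the same test fixtures):

	cm := &corev1.ConfigMap{
		Data: map[string]string{
			"kubeconfig": "kind: Config\napiVersion: v1\nclusters:\n" +
				"- name: \"\"\n  cluster:\n" +
				"    certificate-authority-data: \"c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=\"\n" +
				"    server: \"https://some-server\"\n",
		},
	}
	// extractAPIInfo(cm) yields {Server: "https://some-server", CertificateAuthorityData: "c29tZS1..."};
	// clientcmd decodes the CA bundle and the controller re-encodes it, so the base64 round-trips.
	// DiscoveryURLOverride, when set, replaces the Server value.
	_ = cm
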
+func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment {
+	var volumeMounts []corev1.VolumeMount
+	if len(controllerManagerPod.Spec.Containers) > 0 {
+		volumeMounts = controllerManagerPod.Spec.Containers[0].VolumeMounts
+	}
+
+	var imagePullSecrets []corev1.LocalObjectReference
+	if len(c.cfg.ContainerImagePullSecrets) > 0 {
+		imagePullSecrets = make([]corev1.LocalObjectReference, 0, len(c.cfg.ContainerImagePullSecrets))
+		for _, name := range c.cfg.ContainerImagePullSecrets {
+			imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{Name: name})
+		}
+	}
+
+	return &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      c.cfg.deploymentName(),
+			Namespace: c.cfg.Namespace,
+			Labels:    c.cfg.Labels,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: pointer.Int32Ptr(1),
+			Selector: metav1.SetAsLabelSelector(c.cfg.agentLabels()),
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: c.cfg.agentLabels(),
+				},
 				Spec: corev1.PodSpec{
-					TerminationGracePeriodSeconds: &terminateImmediately,
+					TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
 					ImagePullSecrets:              imagePullSecrets,
 					Containers: []corev1.Container{
 						{
 							Name:            "sleeper",
-							Image:           c.ContainerImage,
+							Image:           c.cfg.ContainerImage,
 							ImagePullPolicy: corev1.PullIfNotPresent,
 							Command:         []string{"/bin/sleep", "infinity"},
-							VolumeMounts:    controllerManagerPod.Spec.Containers[0].VolumeMounts,
+							VolumeMounts:    volumeMounts,
+							Env: []corev1.EnvVar{
+								{Name: "CERT_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem")},
+								{Name: "KEY_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-key-file", "/etc/kubernetes/ca/ca.key")},
+							},
 							Resources: corev1.ResourceRequirements{
 								Limits: corev1.ResourceList{
 									corev1.ResourceMemory: resource.MustParse("16Mi"),
@ -138,159 +469,60 @@ func (c *AgentPodConfig) newAgentPod(controllerManagerPod *corev1.Pod) *corev1.P
 								},
 							},
 						},
 					},
 					Volumes:       controllerManagerPod.Spec.Volumes,
-					RestartPolicy: corev1.RestartPolicyNever,
+					RestartPolicy: corev1.RestartPolicyAlways,
 					NodeSelector:  controllerManagerPod.Spec.NodeSelector,
-					AutomountServiceAccountToken: falsePtr,
+					AutomountServiceAccountToken: pointer.BoolPtr(false),
 					NodeName:    controllerManagerPod.Spec.NodeName,
 					Tolerations: controllerManagerPod.Spec.Tolerations,
 					// We need to run the agent pod as root since the file permissions
 					// on the cluster keypair usually restrict access to only root.
 					SecurityContext: &corev1.PodSecurityContext{
-						RunAsUser:  &rootID,
-						RunAsGroup: &rootID,
+						RunAsUser:  pointer.Int64Ptr(0),
+						RunAsGroup: pointer.Int64Ptr(0),
 					},
 				},
+			},
+
+			// Setting MinReadySeconds prevents the agent pods from being churned too quickly by the deployments controller.
+			MinReadySeconds: 10,
+		},
 	}
 }
-func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
-	requiredLabelsAllPresentWithCorrectValues := true
-	actualLabels := actualAgentPod.ObjectMeta.Labels
-	for expectedLabelKey, expectedLabelValue := range expectedAgentPod.ObjectMeta.Labels {
-		if actualLabels[expectedLabelKey] != expectedLabelValue {
-			requiredLabelsAllPresentWithCorrectValues = false
-			break
-		}
-	}
-
-	if actualAgentPod.Spec.SecurityContext == nil {
-		return false
-	}
+func mergeLabelsAndAnnotations(existing metav1.ObjectMeta, desired metav1.ObjectMeta) metav1.ObjectMeta {
+	result := existing.DeepCopy()
+	for k, v := range desired.Labels {
+		if result.Labels == nil {
+			result.Labels = map[string]string{}
+		}
+		result.Labels[k] = v
+	}
+	for k, v := range desired.Annotations {
+		if result.Annotations == nil {
+			result.Annotations = map[string]string{}
+		}
+		result.Annotations[k] = v
+	}
+	return *result
+}
+
+func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
+	for _, container := range pod.Spec.Containers {
+		flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
+		flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
+		var val string
+		flagset.StringVar(&val, name, "", "")
+		_ = flagset.Parse(append(container.Command, container.Args...))
+		if val != "" {
+			return val
+		}
+	}
+	return fallbackValue
+}
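
Note: a quick illustration of the flag scraping above; the pod spec is hypothetical.

	pod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{{
		Command: []string{"kube-controller-manager"},
		Args:    []string{"--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt", "--v=2"},
	}}}}
	certPath := getContainerArgByName(pod, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem")
	// certPath == "/etc/kubernetes/pki/ca.crt"; unknown flags like --v are tolerated via
	// ParseErrorsWhitelist, and the fallback is returned only when no container defines the flag.
	_ = certPath
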
-	return requiredLabelsAllPresentWithCorrectValues &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].VolumeMounts,
-			expectedAgentPod.Spec.Containers[0].VolumeMounts,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].Name,
-			expectedAgentPod.Spec.Containers[0].Name,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].Image,
-			expectedAgentPod.Spec.Containers[0].Image,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].Command,
-			expectedAgentPod.Spec.Containers[0].Command,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Volumes,
-			expectedAgentPod.Spec.Volumes,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.RestartPolicy,
-			expectedAgentPod.Spec.RestartPolicy,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.NodeSelector,
-			expectedAgentPod.Spec.NodeSelector,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.AutomountServiceAccountToken,
-			expectedAgentPod.Spec.AutomountServiceAccountToken,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.NodeName,
-			expectedAgentPod.Spec.NodeName,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Tolerations,
-			expectedAgentPod.Spec.Tolerations,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.SecurityContext.RunAsUser,
-			expectedAgentPod.Spec.SecurityContext.RunAsUser,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.SecurityContext.RunAsGroup,
-			expectedAgentPod.Spec.SecurityContext.RunAsGroup,
-		)
-}
+func pluralize(pods []*corev1.Pod) string {
+	if len(pods) == 1 {
+		return "1 candidate"
+	}
+	return fmt.Sprintf("%d candidates", len(pods))
+}

-func isControllerManagerPod(obj metav1.Object) bool {
-	pod, ok := obj.(*corev1.Pod)
-	if !ok {
-		return false
-	}
-
-	if pod.Labels == nil {
-		return false
-	}
-
-	component, ok := pod.Labels["component"]
-	if !ok || component != "kube-controller-manager" {
-		return false
-	}
-
-	if pod.Status.Phase != corev1.PodRunning {
-		return false
-	}
-
-	return true
-}
-
-func isAgentPod(obj metav1.Object) bool {
-	value, foundLabel := obj.GetLabels()[agentPodLabelKey]
-	return foundLabel && value == agentPodLabelValue
-}
-
-func findControllerManagerPodForSpecificAgentPod(
-	agentPod *corev1.Pod,
-	kubeSystemPodInformer corev1informers.PodInformer,
-) (*corev1.Pod, error) {
-	name, ok := agentPod.Annotations[controllerManagerNameAnnotationKey]
-	if !ok {
-		plog.Debug("agent pod missing parent name annotation", "pod", agentPod.Name)
-		return nil, nil
-	}
-
-	uid, ok := agentPod.Annotations[controllerManagerUIDAnnotationKey]
-	if !ok {
-		plog.Debug("agent pod missing parent uid annotation", "pod", agentPod.Name)
-		return nil, nil
-	}
-
-	maybeControllerManagerPod, err := kubeSystemPodInformer.
-		Lister().
-		Pods(ControllerManagerNamespace).
-		Get(name)
-	notFound := k8serrors.IsNotFound(err)
-	if err != nil && !notFound {
|
|
||||||
return nil, fmt.Errorf("cannot get controller pod: %w", err)
|
|
||||||
} else if notFound ||
|
|
||||||
maybeControllerManagerPod == nil ||
|
|
||||||
string(maybeControllerManagerPod.UID) != uid {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return maybeControllerManagerPod, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func strategyError(clock clock.Clock, err error) configv1alpha1.CredentialIssuerStrategy {
|
|
||||||
return configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: err.Error(),
|
|
||||||
LastUpdateTime: metav1.NewTime(clock.Now()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func hash(controllerManagerPod *corev1.Pod) string {
|
|
||||||
// FNV should be faster than SHA, and we don't care about hash-reversibility here, and Kubernetes
|
|
||||||
// uses FNV for their pod templates, so should be good enough for us?
|
|
||||||
h := fnv.New32a()
|
|
||||||
_, _ = h.Write([]byte(controllerManagerPod.UID)) // Never returns an error, per godoc.
|
|
||||||
return hex.EncodeToString(h.Sum([]byte{}))
|
|
||||||
}
|
}
|
||||||
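The new getContainerArgByName helper works by leaning on pflag's tolerance for unknown flags: it registers only the single flag it cares about and then parses the container's entire command line. A minimal, self-contained sketch of the same technique (the command line and path below are illustrative, not taken from a real cluster):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// A command line shaped like a kube-controller-manager invocation, i.e.
	// what append(container.Command, container.Args...) might produce.
	commandAndArgs := []string{
		"kube-controller-manager",
		"--use-service-account-credentials=true",
		"--cluster-signing-cert-file=/path/to/ca.crt", // illustrative path
	}

	// Register only the flag we care about; skip every flag we don't know.
	flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
	flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}

	var val string
	flagset.StringVar(&val, "cluster-signing-cert-file", "", "")
	_ = flagset.Parse(commandAndArgs) // non-flag tokens are treated as positional args

	fmt.Println(val) // prints /path/to/ca.crt
}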
File diff suppressed because it is too large
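The suppressed file contains the bulk of the new Deployment-based controller. As a rough sketch of the Deployment shape implied by the visible hunks: RestartPolicy flips from Never to Always because Deployment pod templates only permit Always, and MinReadySeconds dampens pod churn. Names, namespace, and image here are placeholders; only the kube-cert-agent.pinniped.dev=v2 label comes from the integration test diff below.

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)

// agentDeploymentSketch returns a Deployment shaped like the one implied by
// the hunks above. All names, namespaces, and images are placeholders.
func agentDeploymentSketch() *appsv1.Deployment {
	// Label selector taken from the integration test diff below.
	labels := map[string]string{"kube-cert-agent.pinniped.dev": "v2"}
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "kube-cert-agent", Namespace: "concierge-namespace"}, // placeholders
		Spec: appsv1.DeploymentSpec{
			Replicas: pointer.Int32Ptr(1),
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			// Prevents the agent pods from being churned too quickly, per the comment above.
			MinReadySeconds: 10,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "sleeper", Image: "registry.example.com/agent:latest"}}, // placeholders
					// Deployment pod templates only allow Always, hence the
					// RestartPolicyNever -> RestartPolicyAlways change above.
					RestartPolicy:                corev1.RestartPolicyAlways,
					AutomountServiceAccountToken: pointer.BoolPtr(false),
					SecurityContext: &corev1.PodSecurityContext{
						// Root, because the cluster keypair is usually readable only by root.
						RunAsUser:  pointer.Int64Ptr(0),
						RunAsGroup: pointer.Int64Ptr(0),
					},
				},
			},
		},
	}
}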
@@ -30,6 +30,7 @@ func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.
 }
 
 func (s *kubeClientPodCommandExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
+	// TODO: see if we can add a timeout or make this cancelable somehow
 	request := s.kubeClient.
 		CoreV1().
 		RESTClient().
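One possible answer to that TODO is to bound the context-less Exec call with a deadline by running it in a goroutine. The sketch below is a hypothetical helper under that assumption, not the project's actual code; note that the goroutine can outlive the timeout, since the underlying exec stream is not itself cancelled.

package main

import (
	"fmt"
	"time"
)

// podCommandExecutor mirrors the Exec signature shown in the hunk above.
type podCommandExecutor interface {
	Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error)
}

// execWithTimeout runs Exec in a goroutine and gives up after the timeout.
func execWithTimeout(e podCommandExecutor, timeout time.Duration, namespace, name string, cmd ...string) (string, error) {
	type result struct {
		out string
		err error
	}
	results := make(chan result, 1) // buffered so the send never blocks after a timeout
	go func() {
		out, err := e.Exec(namespace, name, cmd...)
		results <- result{out, err}
	}()
	select {
	case r := <-results:
		return r.out, r.err
	case <-time.After(timeout):
		return "", fmt.Errorf("exec into pod %s/%s timed out after %s", namespace, name, timeout)
	}
}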
@@ -119,16 +119,14 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 	// Create informers. Don't forget to make sure they get started in the function returned below.
 	informers := createInformers(c.ServerInstallationInfo.Namespace, client.Kubernetes, client.PinnipedConcierge)
 
-	agentPodConfig := &kubecertagent.AgentPodConfig{
+	// Configuration for the kubecertagent controllers created below.
+	agentConfig := kubecertagent.AgentConfig{
 		Namespace:                 c.ServerInstallationInfo.Namespace,
 		ContainerImage:            *c.KubeCertAgentConfig.Image,
-		PodNamePrefix:             *c.KubeCertAgentConfig.NamePrefix,
+		NamePrefix:                *c.KubeCertAgentConfig.NamePrefix,
 		ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets,
-		AdditionalLabels:          c.Labels,
-	}
-	credentialIssuerLocationConfig := &kubecertagent.CredentialIssuerLocationConfig{
-		Name: c.NamesConfig.CredentialIssuer,
+		Labels:                    c.Labels,
+		CredentialIssuerName:      c.NamesConfig.CredentialIssuer,
+		DiscoveryURLOverride:      c.DiscoveryURLOverride,
 	}
 
 	// Create controller manager.
@@ -195,64 +193,20 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 			),
 			singletonWorker,
 		).
-		// Kube cert agent controllers are responsible for finding the cluster's signing keys and keeping them
+		// The kube-cert-agent controller is responsible for finding the cluster's signing keys and keeping them
 		// up to date in memory, as well as reporting status on this cluster integration strategy.
 		WithController(
-			kubecertagent.NewCreaterController(
-				agentPodConfig,
-				credentialIssuerLocationConfig,
-				c.Labels,
-				clock.RealClock{},
-				client.Kubernetes,
-				client.PinnipedConcierge,
+			kubecertagent.NewAgentController(
+				agentConfig,
+				client,
 				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
-				informers.installationNamespaceK8s.Core().V1().Pods(),
-				controllerlib.WithInformer,
-				controllerlib.WithInitialEvent,
-			),
-			singletonWorker,
-		).
-		WithController(
-			kubecertagent.NewAnnotaterController(
-				agentPodConfig,
-				credentialIssuerLocationConfig,
-				c.Labels,
-				clock.RealClock{},
-				client.Kubernetes,
-				client.PinnipedConcierge,
-				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
-				informers.installationNamespaceK8s.Core().V1().Pods(),
-				controllerlib.WithInformer,
-			),
-			singletonWorker,
-		).
-		WithController(
-			kubecertagent.NewExecerController(
-				credentialIssuerLocationConfig,
-				c.Labels,
-				c.DiscoveryURLOverride,
-				c.DynamicSigningCertProvider,
-				kubecertagent.NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
-				client.PinnipedConcierge,
-				clock.RealClock{},
+				informers.installationNamespaceK8s.Apps().V1().Deployments(),
 				informers.installationNamespaceK8s.Core().V1().Pods(),
 				informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
-				controllerlib.WithInformer,
+				c.DynamicSigningCertProvider,
 			),
 			singletonWorker,
 		).
-		WithController(
-			kubecertagent.NewDeleterController(
-				agentPodConfig,
-				client.Kubernetes,
-				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
-				informers.installationNamespaceK8s.Core().V1().Pods(),
-				controllerlib.WithInformer,
-			),
-			singletonWorker,
-		).
 
 		// The cache filler/cleaner controllers are responsible for keep an in-memory representation of active
 		// authenticators up to date.
 		WithController(
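This hunk collapses the four pod-juggling controllers (creater, annotater, execer, deleter) into a single NewAgentController. The create-or-update step that replaces the old creater/deleter pair might look roughly like the sketch below (a hypothetical helper under stated assumptions, not the project's actual code): compute the desired Deployment, apply it, and let the builtin Deployment controller manage the underlying pods.

package main

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureDeployment creates the Deployment if it is missing, or overwrites its
// spec if it already exists, leaving pod lifecycle to the builtin controller.
func ensureDeployment(ctx context.Context, client kubernetes.Interface, desired *appsv1.Deployment) error {
	existing, err := client.AppsV1().Deployments(desired.Namespace).Get(ctx, desired.Name, metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		_, err := client.AppsV1().Deployments(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}
	// Overwrite the spec but keep server-populated metadata intact.
	updated := existing.DeepCopy()
	updated.Spec = desired.Spec
	_, err = client.AppsV1().Deployments(desired.Namespace).Update(ctx, updated, metav1.UpdateOptions{})
	return err
}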
@@ -80,7 +80,7 @@ func TestCredentialIssuer(t *testing.T) {
 		if env.HasCapability(library.ClusterSigningKeyIsAvailable) {
 			require.Equal(t, configv1alpha1.SuccessStrategyStatus, actualStatusStrategy.Status)
 			require.Equal(t, configv1alpha1.FetchedKeyStrategyReason, actualStatusStrategy.Reason)
-			require.Equal(t, "Key was fetched successfully", actualStatusStrategy.Message)
+			require.Equal(t, "key was fetched successfully", actualStatusStrategy.Message)
 			require.NotNil(t, actualStatusStrategy.Frontend)
 			require.Equal(t, configv1alpha1.TokenCredentialRequestAPIFrontendType, actualStatusStrategy.Frontend.Type)
 			expectedTokenRequestAPIInfo := configv1alpha1.TokenCredentialRequestAPIInfo{
@@ -111,10 +111,7 @@ func TestCredentialIssuer(t *testing.T) {
 		} else {
 			require.Equal(t, configv1alpha1.ErrorStrategyStatus, actualStatusStrategy.Status)
 			require.Equal(t, configv1alpha1.CouldNotFetchKeyStrategyReason, actualStatusStrategy.Reason)
-			require.Contains(t, actualStatusStrategy.Message, "did not find kube-controller-manager pod(s)")
-			// For now, don't verify the kube config info because its not available on GKE. We'll need to address
-			// this somehow once we starting supporting those cluster types.
-			// Require `nil` to remind us to address this later for other types of clusters where it is available.
+			require.Contains(t, actualStatusStrategy.Message, "could not find a healthy kube-controller-manager pod (0 candidates)")
 			require.Nil(t, actualStatusKubeConfigInfo)
 		}
 	})
@@ -6,169 +6,85 @@ package integration
 import (
 	"context"
 	"fmt"
-	"sort"
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/apimachinery/pkg/util/wait"
 
 	conciergev1alpha "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
 	"go.pinniped.dev/test/library"
 )
 
-const (
-	kubeCertAgentLabelSelector = "kube-cert-agent.pinniped.dev=true"
-)
-
 func TestKubeCertAgent(t *testing.T) {
 	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
-	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
 	kubeClient := library.NewKubernetesClientset(t)
-
-	// Get the current number of kube-cert-agent pods.
-	//
-	// We can pretty safely assert there should be more than 1, since there should be a
-	// kube-cert-agent pod per kube-controller-manager pod, and there should probably be at least
-	// 1 kube-controller-manager for this to be a working kube API.
-	originalAgentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
-		LabelSelector: kubeCertAgentLabelSelector,
-	})
-	require.NoError(t, err)
-	require.NotEmpty(t, originalAgentPods.Items)
-	sortPods(originalAgentPods)
-
-	for _, agentPod := range originalAgentPods.Items {
-		// All agent pods should contain all custom labels
-		for k, v := range env.ConciergeCustomLabels {
-			require.Equalf(t, v, agentPod.Labels[k], "expected agent pod to have label `%s: %s`", k, v)
-		}
-		require.Equal(t, env.ConciergeAppName, agentPod.Labels["app"])
-	}
-
-	agentPodsReconciled := func() bool {
-		var currentAgentPods *corev1.PodList
-		currentAgentPods, err = kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
-			LabelSelector: kubeCertAgentLabelSelector,
-		})
-
-		if err != nil {
-			return false
-		}
-
-		if len(originalAgentPods.Items) != len(currentAgentPods.Items) {
-			err = fmt.Errorf(
-				"original agent pod len != current agent pod len: %s",
-				diff.ObjectDiff(originalAgentPods.Items, currentAgentPods.Items),
-			)
-			return false
-		}
-
-		sortPods(currentAgentPods)
-		for i := range originalAgentPods.Items {
-			if !equality.Semantic.DeepEqual(
-				originalAgentPods.Items[i].Spec,
-				currentAgentPods.Items[i].Spec,
-			) {
-				err = fmt.Errorf(
-					"original agent pod != current agent pod: %s",
-					diff.ObjectDiff(originalAgentPods.Items[i].Spec, currentAgentPods.Items[i].Spec),
-				)
-				return false
-			}
-		}
-
-		return true
-	}
-
-	t.Run("reconcile on update", func(t *testing.T) {
-		// Ensure that the next test will start from a known state.
-		defer ensureKubeCertAgentSteadyState(t, agentPodsReconciled)
-
-		// Update the image of the first pod. The controller should see it, and flip it back.
-		//
-		// Note that we update the toleration field here because it is the only field, currently, that
-		// 1) we are allowed to update on a running pod AND 2) the kube-cert-agent controllers care
-		// about.
-		updatedAgentPod := originalAgentPods.Items[0].DeepCopy()
-		updatedAgentPod.Spec.Tolerations = append(
-			updatedAgentPod.Spec.Tolerations,
-			corev1.Toleration{Key: "fake-toleration"},
-		)
-		_, err = kubeClient.CoreV1().Pods(env.ConciergeNamespace).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
-		require.NoError(t, err)
-
-		// Make sure the original pods come back.
-		assert.Eventually(t, agentPodsReconciled, 10*time.Second, 250*time.Millisecond)
-		require.NoError(t, err)
-	})
-
-	t.Run("reconcile on delete", func(t *testing.T) {
-		// Ensure that the next test will start from a known state.
-		defer ensureKubeCertAgentSteadyState(t, agentPodsReconciled)
-
-		// Delete the first pod. The controller should see it, and flip it back.
-		err = kubeClient.
-			CoreV1().
-			Pods(env.ConciergeNamespace).
-			Delete(ctx, originalAgentPods.Items[0].Name, metav1.DeleteOptions{})
-		require.NoError(t, err)
-
-		// Make sure the original pods come back.
-		assert.Eventually(t, agentPodsReconciled, 10*time.Second, 250*time.Millisecond)
-		require.NoError(t, err)
-	})
-
-	// Because the above tests have purposefully put the kube cert issuer strategy into a broken
-	// state, wait for it to become healthy again before moving on to other integration tests,
-	// otherwise those tests would be polluted by this test and would have to wait for the
-	// strategy to become successful again.
-	library.RequireEventuallyWithoutError(t, func() (bool, error) {
-		adminConciergeClient := library.NewConciergeClientset(t)
-		credentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
-		if err != nil || credentialIssuer.Status.Strategies == nil {
-			t.Log("Did not find any CredentialIssuer with any strategies")
-			return false, nil // didn't find it, but keep trying
-		}
-		for _, strategy := range credentialIssuer.Status.Strategies {
-			// There will be other strategy types in the list, so ignore those.
-			if strategy.Type == conciergev1alpha.KubeClusterSigningCertificateStrategyType && strategy.Status == conciergev1alpha.SuccessStrategyStatus { //nolint:nestif
-				if strategy.Frontend == nil {
-					return false, fmt.Errorf("did not find a Frontend") // unexpected, fail the test
-				}
-				return true, nil // found it, continue the test!
-			}
-		}
-		t.Log("Did not find any successful KubeClusterSigningCertificate strategy on CredentialIssuer")
-		return false, nil // didn't find it, but keep trying
-	}, 3*time.Minute, 3*time.Second)
-}
-
-func ensureKubeCertAgentSteadyState(t *testing.T, agentPodsReconciled func() bool) {
-	t.Helper()
-
-	const wantSteadyStateSnapshots = 3
-	var steadyStateSnapshots int
-	require.NoError(t, wait.Poll(250*time.Millisecond, 30*time.Second, func() (bool, error) {
-		if agentPodsReconciled() {
-			steadyStateSnapshots++
-		} else {
-			steadyStateSnapshots = 0
-		}
-		return steadyStateSnapshots == wantSteadyStateSnapshots, nil
-	}))
-}
-
-func sortPods(pods *corev1.PodList) {
-	sort.Slice(pods.Items, func(i, j int) bool {
-		return pods.Items[i].Name < pods.Items[j].Name
-	})
-}
+	adminConciergeClient := library.NewConciergeClientset(t)
+
+	// Expect there to be at least one healthy kube-cert-agent pod on this cluster.
+	library.RequireEventuallyWithoutError(t, func() (bool, error) {
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		defer cancel()
+		agentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
+			LabelSelector: "kube-cert-agent.pinniped.dev=v2",
+		})
+		if err != nil {
+			return false, fmt.Errorf("failed to list pods: %w", err)
+		}
+		for _, p := range agentPods.Items {
+			t.Logf("found agent pod %s/%s in phase %s", p.Namespace, p.Name, p.Status.Phase)
+		}
+
+		for _, p := range agentPods.Items {
+			if p.Status.Phase == corev1.PodRunning {
+				return true, nil
+			}
+		}
+		return false, nil
+	}, 1*time.Minute, 2*time.Second, "never saw a healthy kube-cert-agent Pod running")
+
+	// Expect that the CredentialIssuer will have a healthy KubeClusterSigningCertificate strategy.
+	library.RequireEventuallyWithoutError(t, func() (bool, error) {
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		defer cancel()
+		credentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
+		if err != nil {
+			t.Logf("could not get the CredentialIssuer: %v", err)
+			return false, nil
+		}
+
+		// If there's no successful strategy yet, wait until there is.
+		strategy := findSuccessfulStrategy(credentialIssuer, conciergev1alpha.KubeClusterSigningCertificateStrategyType)
+		if strategy == nil {
+			t.Log("could not find a successful TokenCredentialRequestAPI strategy in the CredentialIssuer:")
+			for _, s := range credentialIssuer.Status.Strategies {
+				t.Logf("  strategy %s has status %s/%s: %s", s.Type, s.Status, s.Reason, s.Message)
+			}
+			return false, nil
+		}
+
+		// The successful strategy must have a frontend of type TokenCredentialRequestAPI.
+		if strategy.Frontend == nil {
+			return false, fmt.Errorf("strategy did not find a Frontend")
+		}
+		if strategy.Frontend.Type != conciergev1alpha.TokenCredentialRequestAPIFrontendType {
+			return false, fmt.Errorf("strategy had unexpected frontend type %q", strategy.Frontend.Type)
+		}
+		return true, nil
+	}, 3*time.Minute, 2*time.Second)
+}
+
+func findSuccessfulStrategy(credentialIssuer *conciergev1alpha.CredentialIssuer, strategyType conciergev1alpha.StrategyType) *conciergev1alpha.CredentialIssuerStrategy {
+	for _, strategy := range credentialIssuer.Status.Strategies {
+		if strategy.Type != strategyType {
+			continue
+		}
+		if strategy.Status != conciergev1alpha.SuccessStrategyStatus {
+			continue
+		}
+		return &strategy
+	}
+	return nil
+}
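Both assertions in the rewritten test lean on library.RequireEventuallyWithoutError. A rough stand-in built on the apimachinery wait package, under the assumed semantics that it polls f every interval until it returns true and fails the test on error or timeout:

package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/util/wait"
)

// requireEventuallyWithoutError is a sketch of the test library helper used
// above: poll f until it reports done, failing the test on error or timeout.
func requireEventuallyWithoutError(t *testing.T, f func() (bool, error), timeout, interval time.Duration, msgAndArgs ...interface{}) {
	t.Helper()
	require.NoError(t, wait.PollImmediate(interval, timeout, f), msgAndArgs...)
}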