Merge pull request #627 from mattmoyer/use-informers-for-credentialissuer-updates
Create CredentialIssuer at install, not runtime.
Commit 562942cdbf
@@ -247,3 +247,9 @@ spec:
    name: #@ defaultResourceNameWithSuffix("api")
    namespace: #@ namespace()
    port: 443
---
apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1"
kind: CredentialIssuer
metadata:
  name: #@ defaultResourceNameWithSuffix("config")
  labels: #@ labels()
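Because the template above now creates the CredentialIssuer at install time, runtime code can assume it already exists and simply read it. A minimal standalone sketch using the generated clientset (the kubeconfig handling and the resource name "pinniped-concierge-config" are illustrative; the actual name depends on install-time values):

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"

    conciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
)

func main() {
    // Build a client from the local kubeconfig (illustrative; in-cluster config would be used by the Concierge itself).
    restConfig, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client, err := conciergeclientset.NewForConfig(restConfig)
    if err != nil {
        panic(err)
    }

    // The CredentialIssuer is cluster-scoped and created by the install YAML, so a plain Get is enough.
    issuer, err := client.ConfigV1alpha1().CredentialIssuers().Get(
        context.Background(), "pinniped-concierge-config", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println(issuer.Name, len(issuer.Status.Strategies))
}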
@@ -5,8 +5,12 @@ package issuerconfig

import (
    "context"
    "fmt"
    "sort"

    apiequality "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
    "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
)
@@ -28,6 +32,23 @@ func UpdateStrategy(ctx context.Context,
    )
}

// Update a strategy on an existing CredentialIssuer, merging into any existing strategy entries.
func Update(ctx context.Context, client versioned.Interface, issuer *v1alpha1.CredentialIssuer, strategy v1alpha1.CredentialIssuerStrategy) error {
    // Update the existing object to merge in the new strategy.
    updated := issuer.DeepCopy()
    mergeStrategy(&updated.Status, strategy)

    // If the status has not changed, we're done.
    if apiequality.Semantic.DeepEqual(issuer.Status, updated.Status) {
        return nil
    }

    if _, err := client.ConfigV1alpha1().CredentialIssuers().UpdateStatus(ctx, updated, metav1.UpdateOptions{}); err != nil {
        return fmt.Errorf("failed to update CredentialIssuer status: %w", err)
    }
    return nil
}

func mergeStrategy(configToUpdate *v1alpha1.CredentialIssuerStatus, strategy v1alpha1.CredentialIssuerStrategy) {
    var existing *v1alpha1.CredentialIssuerStrategy
    for i := range configToUpdate.Strategies {
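The hunk above cuts off inside mergeStrategy. For orientation, here is a plausible completion, assuming the function replaces an existing entry of the same Type (or appends one) and then keeps the slice in a deterministic order; the repository's actual ordering rule may differ:

// Plausible completion of mergeStrategy, sketched under the assumptions stated above.
package issuerconfig

import (
    "sort"

    "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
)

func mergeStrategy(configToUpdate *v1alpha1.CredentialIssuerStatus, strategy v1alpha1.CredentialIssuerStrategy) {
    var existing *v1alpha1.CredentialIssuerStrategy
    for i := range configToUpdate.Strategies {
        if configToUpdate.Strategies[i].Type == strategy.Type {
            existing = &configToUpdate.Strategies[i]
            break
        }
    }
    if existing != nil {
        strategy.DeepCopyInto(existing) // overwrite the existing entry of the same type in place
    } else {
        configToUpdate.Strategies = append(configToUpdate.Strategies, strategy)
    }

    // Assumed ordering: sort by strategy type so repeated updates do not reorder the status.
    sort.Slice(configToUpdate.Strategies, func(i, j int) bool {
        return configToUpdate.Strategies[i].Type < configToUpdate.Strategies[j].Type
    })
}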
@@ -32,6 +32,7 @@ import (
    "k8s.io/utils/pointer"

    configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
    configv1alpha1informers "go.pinniped.dev/generated/latest/client/concierge/informers/externalversions/config/v1alpha1"
    pinnipedcontroller "go.pinniped.dev/internal/controller"
    "go.pinniped.dev/internal/controller/issuerconfig"
    "go.pinniped.dev/internal/controllerlib"
@@ -101,6 +102,7 @@ type agentController struct {
    agentDeployments appsv1informers.DeploymentInformer
    agentPods corev1informers.PodInformer
    kubePublicConfigMaps corev1informers.ConfigMapInformer
    credentialIssuers configv1alpha1informers.CredentialIssuerInformer
    executor PodCommandExecutor
    dynamicCertProvider dynamiccert.Private
    clock clock.Clock
@@ -129,6 +131,7 @@ func NewAgentController(
    agentDeployments appsv1informers.DeploymentInformer,
    agentPods corev1informers.PodInformer,
    kubePublicConfigMaps corev1informers.ConfigMapInformer,
    credentialIssuers configv1alpha1informers.CredentialIssuerInformer,
    dynamicCertProvider dynamiccert.Private,
) controllerlib.Controller {
    return newAgentController(
@@ -138,6 +141,7 @@ func NewAgentController(
        agentDeployments,
        agentPods,
        kubePublicConfigMaps,
        credentialIssuers,
        NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
        dynamicCertProvider,
        &clock.RealClock{},
@@ -153,6 +157,7 @@ func newAgentController(
    agentDeployments appsv1informers.DeploymentInformer,
    agentPods corev1informers.PodInformer,
    kubePublicConfigMaps corev1informers.ConfigMapInformer,
    credentialIssuers configv1alpha1informers.CredentialIssuerInformer,
    podCommandExecutor PodCommandExecutor,
    dynamicCertProvider dynamiccert.Private,
    clock clock.Clock,
@@ -170,6 +175,7 @@ func newAgentController(
        agentDeployments: agentDeployments,
        agentPods: agentPods,
        kubePublicConfigMaps: kubePublicConfigMaps,
        credentialIssuers: credentialIssuers,
        executor: podCommandExecutor,
        dynamicCertProvider: dynamicCertProvider,
        clock: clock,
@@ -206,6 +212,13 @@ func newAgentController(
            }),
            controllerlib.InformerOption{},
        ),
        controllerlib.WithInformer(
            credentialIssuers,
            pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
                return obj.GetName() == cfg.CredentialIssuerName
            }),
            controllerlib.InformerOption{},
        ),
        // Be sure to run once even if there are no controller manager pods, to make sure the
        // CredentialIssuer is updated. We should be able to pass an empty key since we don't use
        // the key in the sync (we sync the world).
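pinnipedcontroller.SimpleFilterWithSingletonQueue watches only the named CredentialIssuer and collapses every event onto a single queue key, matching the "sync the world" comment. The sketch below expresses the same idea with plain client-go rather than Pinniped's controllerlib, so addSingletonHandler and singletonKey are illustrative names, not the project's API:

package example

import (
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

// singletonKey is the one queue key used for every matching event, since the sync handles the whole world.
const singletonKey = "sync-the-world"

// addSingletonHandler watches a single named object and funnels all of its add/update/delete
// events onto one fixed work-queue key, so the controller re-syncs everything on any change.
func addSingletonHandler(informer cache.SharedIndexInformer, name string, queue workqueue.Interface) {
    informer.AddEventHandler(cache.FilteringResourceEventHandler{
        FilterFunc: func(obj interface{}) bool {
            accessor, err := meta.Accessor(obj)
            if err != nil {
                return false // tombstones and non-objects are ignored here for brevity
            }
            return accessor.GetName() == name
        },
        Handler: cache.ResourceEventHandlerFuncs{
            AddFunc:    func(interface{}) { queue.Add(singletonKey) },
            UpdateFunc: func(interface{}, interface{}) { queue.Add(singletonKey) },
            DeleteFunc: func(interface{}) { queue.Add(singletonKey) },
        },
    })
}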
@@ -216,11 +229,17 @@ func newAgentController(

// Sync implements controllerlib.Syncer.
func (c *agentController) Sync(ctx controllerlib.Context) error {
    // Load the CredentialIssuer that we'll update with status.
    credIssuer, err := c.credentialIssuers.Lister().Get(c.cfg.CredentialIssuerName)
    if err != nil {
        return fmt.Errorf("could not get CredentialIssuer to update: %w", err)
    }

    // Find the latest healthy kube-controller-manager Pod in kube-system.
    controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
    if err != nil {
        err := fmt.Errorf("could not list controller manager pods: %w", err)
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
    }
    newestControllerManager := newestRunningPod(controllerManagerPods)
@@ -228,19 +247,19 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
    // the CredentialIssuer.
    if newestControllerManager == nil {
        err := fmt.Errorf("could not find a healthy kube-controller-manager pod (%s)", pluralize(controllerManagerPods))
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
    }

    if err := c.createOrUpdateDeployment(ctx, newestControllerManager); err != nil {
        err := fmt.Errorf("could not ensure agent deployment: %w", err)
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
    }

    // Find the latest healthy agent Pod in our namespace.
    agentPods, err := c.agentPods.Lister().Pods(c.cfg.Namespace).List(agentLabels)
    if err != nil {
        err := fmt.Errorf("could not list agent pods: %w", err)
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
    }
    newestAgentPod := newestRunningPod(agentPods)
@@ -248,34 +267,29 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
    // the CredentialIssuer.
    if newestAgentPod == nil {
        err := fmt.Errorf("could not find a healthy agent pod (%s)", pluralize(agentPods))
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
    }

    // Load the Kubernetes API info from the kube-public/cluster-info ConfigMap.
    configMap, err := c.kubePublicConfigMaps.Lister().ConfigMaps(ClusterInfoNamespace).Get(clusterInfoName)
    if err != nil {
        err := fmt.Errorf("failed to get %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
    }

    apiInfo, err := c.extractAPIInfo(configMap)
    if err != nil {
        err := fmt.Errorf("could not extract Kubernetes API endpoint info from %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
    }

    // Load the certificate and key from the agent pod into our in-memory signer.
    if err := c.loadSigningKey(newestAgentPod); err != nil {
        return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
        return c.failStrategyAndErr(ctx.Context, credIssuer, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
    }

    // Set the CredentialIssuer strategy to successful.
    return issuerconfig.UpdateStrategy(
        ctx.Context,
        c.cfg.CredentialIssuerName,
        c.cfg.Labels,
        c.client.PinnipedConcierge,
        configv1alpha1.CredentialIssuerStrategy{
    return issuerconfig.Update(ctx.Context, c.client.PinnipedConcierge, credIssuer, configv1alpha1.CredentialIssuerStrategy{
        Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
        Status: configv1alpha1.SuccessStrategyStatus,
        Reason: configv1alpha1.FetchedKeyStrategyReason,
@@ -285,8 +299,7 @@ func (c *agentController) Sync(ctx controllerlib.Context) error {
            Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
            TokenCredentialRequestAPIInfo: apiInfo,
        },
    },
    )
    })
}

func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
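Pieced together from the two hunks above, the success path writes one strategy entry roughly like the call below; the Message text and the assumption that Frontend is a pointer field are not shown in this diff and are illustrative:

// Reconstructed sketch of the success-path call, using the surrounding Sync variables.
return issuerconfig.Update(ctx.Context, c.client.PinnipedConcierge, credIssuer, configv1alpha1.CredentialIssuerStrategy{
    Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
    Status:         configv1alpha1.SuccessStrategyStatus,
    Reason:         configv1alpha1.FetchedKeyStrategyReason,
    Message:        "key was fetched successfully", // exact wording not shown in this diff
    LastUpdateTime: metav1.NewTime(c.clock.Now()),
    Frontend: &configv1alpha1.CredentialIssuerFrontend{ // assumed pointer field
        Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
        TokenCredentialRequestAPIInfo: apiInfo,
    },
})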
@@ -358,20 +371,15 @@ func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, ne
    return err
}

func (c *agentController) failStrategyAndErr(ctx context.Context, err error, reason configv1alpha1.StrategyReason) error {
    return utilerrors.NewAggregate([]error{err, issuerconfig.UpdateStrategy(
        ctx,
        c.cfg.CredentialIssuerName,
        c.cfg.Labels,
        c.client.PinnipedConcierge,
        configv1alpha1.CredentialIssuerStrategy{
func (c *agentController) failStrategyAndErr(ctx context.Context, credIssuer *configv1alpha1.CredentialIssuer, err error, reason configv1alpha1.StrategyReason) error {
    updateErr := issuerconfig.Update(ctx, c.client.PinnipedConcierge, credIssuer, configv1alpha1.CredentialIssuerStrategy{
        Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
        Status: configv1alpha1.ErrorStrategyStatus,
        Reason: reason,
        Message: err.Error(),
        LastUpdateTime: metav1.NewTime(c.clock.Now()),
        },
    )})
    })
    return utilerrors.NewAggregate([]error{err, updateErr})
}

func (c *agentController) extractAPIInfo(configMap *corev1.ConfigMap) (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
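failStrategyAndErr now reports the original sync error together with any status-update error. The aggregation helper it uses behaves as in this standalone example:

package main

import (
    "errors"
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    syncErr := errors.New("could not list agent pods: boom")
    updateErr := errors.New("failed to update CredentialIssuer status: conflict")

    // NewAggregate drops nil entries and combines the rest into one error value.
    agg := utilerrors.NewAggregate([]error{syncErr, updateErr})
    fmt.Println(agg) // [could not list agent pods: boom, failed to update CredentialIssuer status: conflict]

    // With a single non-nil error, the aggregate's message is just that error's message.
    fmt.Println(utilerrors.NewAggregate([]error{syncErr, nil}))
}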
@@ -27,6 +27,7 @@ import (

    configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
    conciergefake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
    conciergeinformers "go.pinniped.dev/generated/latest/client/concierge/informers/externalversions"
    "go.pinniped.dev/internal/controller/kubecertagent/mocks"
    "go.pinniped.dev/internal/controllerlib"
    "go.pinniped.dev/internal/here"
@@ -38,6 +39,10 @@ func TestAgentController(t *testing.T) {
    t.Parallel()
    now := time.Date(2021, 4, 13, 9, 57, 0, 0, time.UTC)

    initialCredentialIssuer := &configv1alpha1.CredentialIssuer{
        ObjectMeta: metav1.ObjectMeta{Name: "pinniped-concierge-config"},
    }

    healthyKubeControllerManagerPod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "kube-system",
@@ -204,6 +209,7 @@ func TestAgentController(t *testing.T) {
    tests := []struct {
        name string
        discoveryURLOverride *string
        pinnipedObjects []runtime.Object
        kubeObjects []runtime.Object
        addKubeReactions func(*kubefake.Clientset)
        mocks func(*testing.T, *mocks.MockPodCommandExecutorMockRecorder, *mocks.MockDynamicCertPrivateMockRecorder, *cache.Expiring)
@@ -212,8 +218,17 @@ func TestAgentController(t *testing.T) {
        wantAgentDeployment *appsv1.Deployment
        wantStrategy *configv1alpha1.CredentialIssuerStrategy
    }{
        {
            name: "no CredentialIssuer found",
            wantDistinctErrors: []string{
                `could not get CredentialIssuer to update: credentialissuer.config.concierge.pinniped.dev "pinniped-concierge-config" not found`,
            },
        },
        {
            name: "no kube-controller-manager pods",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                &corev1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
@@ -236,6 +251,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "only unhealthy kube-controller-manager pods",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                &corev1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
@@ -276,6 +294,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "failed to create new deployment",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
            },
@@ -300,6 +321,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "created new deployment, no agent pods running yet",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                &corev1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
@@ -341,6 +365,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "created new deployment with defaulted paths, no agent pods running yet",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                &corev1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
@@ -382,6 +409,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "update to existing deployment, no running agent pods yet",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                &corev1.Pod{
@@ -424,6 +454,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap missing",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -443,6 +476,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap missing key",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -466,6 +502,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap key has invalid data",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -489,6 +528,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap kubeconfig has no clusters",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -512,6 +554,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap is valid, exec into agent pod fails",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -537,6 +582,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap is valid, exec into agent pod returns bogus certs",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -565,6 +613,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap is valid, exec is cached",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -594,6 +645,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap is valid, exec succeeds",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -620,6 +674,9 @@ func TestAgentController(t *testing.T) {
        },
        {
            name: "deployment exists, configmap is valid, exec succeeds, overridden discovery URL",
            pinnipedObjects: []runtime.Object{
                initialCredentialIssuer,
            },
            kubeObjects: []runtime.Object{
                healthyKubeControllerManagerPod,
                healthyAgentDeployment,
@@ -651,12 +708,13 @@ func TestAgentController(t *testing.T) {
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()

            conciergeClientset := conciergefake.NewSimpleClientset(tt.pinnipedObjects...)
            conciergeInformers := conciergeinformers.NewSharedInformerFactory(conciergeClientset, 0)

            kubeClientset := kubefake.NewSimpleClientset(tt.kubeObjects...)
            if tt.addKubeReactions != nil {
                tt.addKubeReactions(kubeClientset)
            }

            conciergeClientset := conciergefake.NewSimpleClientset()
            kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
            log := testlogger.New(t)

@@ -676,7 +734,7 @@ func TestAgentController(t *testing.T) {
                ServiceAccountName: "test-service-account-name",
                NamePrefix: "pinniped-concierge-kube-cert-agent-",
                ContainerImagePullSecrets: []string{"pinniped-image-pull-secret"},
                CredentialIssuerName: "pinniped-concierge-config",
                CredentialIssuerName: initialCredentialIssuer.Name,
                Labels: map[string]string{"extralabel": "labelvalue"},
                DiscoveryURLOverride: tt.discoveryURLOverride,
            },
@@ -685,6 +743,7 @@ func TestAgentController(t *testing.T) {
            kubeInformers.Apps().V1().Deployments(),
            kubeInformers.Core().V1().Pods(),
            kubeInformers.Core().V1().ConfigMaps(),
            conciergeInformers.Config().V1alpha1().CredentialIssuers(),
            mockExecutor,
            mockDynamicCert,
            fakeClock,
@@ -696,7 +755,7 @@ func TestAgentController(t *testing.T) {
            ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
            defer cancel()

            errorMessages := runControllerUntilQuiet(ctx, t, controller, kubeInformers)
            errorMessages := runControllerUntilQuiet(ctx, t, controller, kubeInformers, conciergeInformers)
            assert.Equal(t, tt.wantDistinctErrors, deduplicate(errorMessages), "unexpected errors")
            assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
@@ -711,10 +770,12 @@ func TestAgentController(t *testing.T) {
            }

            // Assert that the CredentialIssuer is in the expected final state
            credIssuer, err := conciergeClientset.ConfigV1alpha1().CredentialIssuers().Get(ctx, "pinniped-concierge-config", metav1.GetOptions{})
            if tt.wantStrategy != nil {
                credIssuer, err := conciergeClientset.ConfigV1alpha1().CredentialIssuers().Get(ctx, initialCredentialIssuer.Name, metav1.GetOptions{})
                require.NoError(t, err)
                require.Len(t, credIssuer.Status.Strategies, 1, "expected a single strategy in the CredentialIssuer")
                require.Equal(t, tt.wantStrategy, &credIssuer.Status.Strategies[0])
            }
        })
    }
}
@@ -794,7 +855,7 @@ func deduplicate(strings []string) []string {
    return result
}

func runControllerUntilQuiet(ctx context.Context, t *testing.T, controller controllerlib.Controller, informers ...informers.SharedInformerFactory) []string {
func runControllerUntilQuiet(ctx context.Context, t *testing.T, controller controllerlib.Controller, informers ...interface{ Start(<-chan struct{}) }) []string {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
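runControllerUntilQuiet now accepts anything with a Start method, which is why both the Kubernetes and Concierge informer factories can be passed. A minimal illustration of why this structural interface works (fakeFactory stands in for the real factories):

package main

import "fmt"

// starter matches both client-go's informers.SharedInformerFactory and the generated concierge
// SharedInformerFactory, because each exposes Start(stopCh <-chan struct{}).
type starter interface{ Start(<-chan struct{}) }

// startAll starts every factory and reports how many were started; the test helper does the
// same kind of loop before running the controller.
func startAll(stopCh <-chan struct{}, factories ...starter) int {
    for _, f := range factories {
        f.Start(stopCh)
    }
    return len(factories)
}

// fakeFactory is an illustrative stand-in for a real informer factory.
type fakeFactory struct{ started bool }

func (f *fakeFactory) Start(<-chan struct{}) { f.started = true }

func main() {
    stop := make(chan struct{})
    defer close(stop)
    fmt.Println(startAll(stop, &fakeFactory{}, &fakeFactory{})) // 2
}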
@@ -204,6 +204,7 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
    informers.installationNamespaceK8s.Apps().V1().Deployments(),
    informers.installationNamespaceK8s.Core().V1().Pods(),
    informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
    informers.pinniped.Config().V1alpha1().CredentialIssuers(),
    c.DynamicSigningCertProvider,
),
singletonWorker,
@@ -43,23 +43,12 @@ func TestCredentialIssuer(t *testing.T) {
    }
    require.Equal(t, env.ConciergeAppName, actualConfig.Labels["app"])

    // verify owner ref is set
    require.Len(t, actualConfig.OwnerReferences, 1)

    apiService, err := aggregatedClientset.ApiregistrationV1().APIServices().Get(ctx, "v1alpha1.login.concierge."+env.APIGroupSuffix, metav1.GetOptions{})
    require.NoError(t, err)

    // work around stupid behavior of WithoutVersionDecoder.Decode
    apiService.APIVersion, apiService.Kind = apiregistrationv1.SchemeGroupVersion.WithKind("APIService").ToAPIVersionAndKind()

    ref := metav1.OwnerReference{
        APIVersion: apiService.APIVersion,
        Kind: apiService.Kind,
        Name: apiService.Name,
        UID: apiService.UID,
    }
    require.Equal(t, ref, actualConfig.OwnerReferences[0])

    // Verify the cluster strategy status based on what's expected of the test cluster's ability to share signing keys.
    actualStatusStrategies := actualConfigList.Items[0].Status.Strategies