Upon pod startup, update the Status of CredentialIssuerConfig

- Indicate the success or failure of the cluster signing key strategy
- Also introduce the concept of "capabilities" of an integration test
  cluster to allow the integration tests to be run against clusters
  that do or don't allow the borrowing of the cluster signing key
- Tests that are not expected to pass on clusters that lack the
  borrowing of the signing key capability are now ignored by
  calling the new library.SkipUnlessClusterHasCapability test helper
- Rename library.Getenv to library.GetEnv
- Add copyrights where they were missing
This commit is contained in:
Ryan Richard 2020-08-24 18:07:34 -07:00
parent 399e1d2eb8
commit 6e59596285
26 changed files with 376 additions and 127 deletions

View File

@ -1,4 +1,5 @@
#@ load("@ytt:data", "data")
#! Copyright 2020 VMware, Inc.
#! SPDX-License-Identifier: Apache-2.0
#! Example of valid CredentialIssuerConfig object:
#! ---
@ -8,15 +9,17 @@
#! name: credential-issuer-config
#! namespace: integration
#! status:
#! strategies:
#! - type: KubeClusterSigningCertificate
#! lastUpdateTime: 2020-08-21T20:08:18Z
#! status: Error
#! reason: CouldNotFetchKey
#! message: "There was an error getting the signing cert"
#! kubeConfigInfo:
#! server: https://foo
#! certificateAuthorityData: bar
#! strategies:
#! - type: KubeClusterSigningCertificate
#! status: Error
#! reason: CouldNotFetchKey
#! message: "There was an error getting the signing cert"
#! lastUpdateTime: 2020-08-21T20:08:18Z
#@ load("@ytt:data", "data")
---
apiVersion: apiextensions.k8s.io/v1
@ -41,7 +44,7 @@ spec:
type: array
items:
type: object
required: [ type, status,reason, message, lastUpdateTime ]
required: [ type, status, reason, message, lastUpdateTime ]
properties:
type: #! this property is called "type"
type: string

View File

@ -1,3 +1,6 @@
#! Copyright 2020 VMware, Inc.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
---

View File

@ -1,3 +1,6 @@
#! Copyright 2020 VMware, Inc.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#! Give permission to various cluster-scoped objects

View File

@ -1,3 +1,6 @@
#! Copyright 2020 VMware, Inc.
#! SPDX-License-Identifier: Apache-2.0
#@data/values
---

View File

@ -1,5 +1,10 @@
#!/usr/bin/env bash
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
KUBE_ROOT="${ROOT}" # required by `hack/lib/version.sh`

View File

@ -1,5 +1,8 @@
#!/usr/bin/env bash
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

View File

@ -2,7 +2,9 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
"$ROOT/hack/module.sh" unittest

View File

@ -2,7 +2,9 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
"$ROOT/hack/module.sh" tidy

View File

@ -2,7 +2,9 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
"$ROOT/hack/module.sh" lint

View File

@ -9,6 +9,7 @@ import (
"context"
"encoding/base64"
"fmt"
"reflect"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -109,61 +110,90 @@ func (c *publisherController) Sync(ctx controller.Context) error {
server = *c.serverOverride
}
credentialIssuerConfig := crdpinnipedv1alpha1.CredentialIssuerConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: configName,
Namespace: c.namespace,
},
Status: crdpinnipedv1alpha1.CredentialIssuerConfigStatus{
Strategies: []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{},
KubeConfigInfo: &crdpinnipedv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: server,
CertificateAuthorityData: certificateAuthorityData,
},
},
}
if err := c.createOrUpdateCredentialIssuerConfig(ctx.Context, &credentialIssuerConfig); err != nil {
return err
}
return nil
}
func (c *publisherController) createOrUpdateCredentialIssuerConfig(
ctx context.Context,
newCredentialIssuerConfig *crdpinnipedv1alpha1.CredentialIssuerConfig,
) error {
existingCredentialIssuerConfig, err := c.credentialIssuerConfigInformer.
existingCredentialIssuerConfigFromInformerCache, err := c.credentialIssuerConfigInformer.
Lister().
CredentialIssuerConfigs(c.namespace).
Get(newCredentialIssuerConfig.Name)
Get(configName)
notFound = k8serrors.IsNotFound(err)
if err != nil && !notFound {
return fmt.Errorf("could not get credentialissuerconfig: %w", err)
}
updateServerAndCAFunc := func(c *crdpinnipedv1alpha1.CredentialIssuerConfig) {
c.Status.KubeConfigInfo = &crdpinnipedv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: server,
CertificateAuthorityData: certificateAuthorityData,
}
}
err = createOrUpdateCredentialIssuerConfig(
ctx.Context,
existingCredentialIssuerConfigFromInformerCache,
notFound,
configName,
c.namespace,
c.pinnipedClient,
updateServerAndCAFunc)
return err
}
func CreateOrUpdateCredentialIssuerConfig(
ctx context.Context,
credentialIssuerConfigNamespace string,
pinnipedClient pinnipedclientset.Interface,
applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig),
) error {
credentialIssuerConfigName := configName
existingCredentialIssuerConfig, err := pinnipedClient.
CrdV1alpha1().
CredentialIssuerConfigs(credentialIssuerConfigNamespace).
Get(ctx, credentialIssuerConfigName, metav1.GetOptions{})
notFound := k8serrors.IsNotFound(err)
if err != nil && !notFound {
return fmt.Errorf("could not get credentialissuerconfig: %w", err)
}
credentialIssuerConfigsClient := c.pinnipedClient.CrdV1alpha1().CredentialIssuerConfigs(c.namespace)
if notFound {
if _, err := credentialIssuerConfigsClient.Create(
ctx,
newCredentialIssuerConfig,
metav1.CreateOptions{},
); err != nil {
return fmt.Errorf("could not create credentialissuerconfig: %w", err)
}
} else if !equal(existingCredentialIssuerConfig, newCredentialIssuerConfig) {
// Update just the fields we care about.
newServer := newCredentialIssuerConfig.Status.KubeConfigInfo.Server
newCA := newCredentialIssuerConfig.Status.KubeConfigInfo.CertificateAuthorityData
existingCredentialIssuerConfig.Status.KubeConfigInfo.Server = newServer
existingCredentialIssuerConfig.Status.KubeConfigInfo.CertificateAuthorityData = newCA
if _, err := credentialIssuerConfigsClient.Update(
return createOrUpdateCredentialIssuerConfig(
ctx,
existingCredentialIssuerConfig,
metav1.UpdateOptions{},
); err != nil {
notFound,
credentialIssuerConfigName,
credentialIssuerConfigNamespace,
pinnipedClient,
applyUpdatesToCredentialIssuerConfigFunc)
}
func createOrUpdateCredentialIssuerConfig(
ctx context.Context,
existingCredentialIssuerConfig *crdpinnipedv1alpha1.CredentialIssuerConfig,
notFound bool,
credentialIssuerConfigName string,
credentialIssuerConfigNamespace string,
pinnipedClient pinnipedclientset.Interface,
applyUpdatesToCredentialIssuerConfigFunc func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig),
) error {
credentialIssuerConfigsClient := pinnipedClient.CrdV1alpha1().CredentialIssuerConfigs(credentialIssuerConfigNamespace)
if notFound {
// Create it
credentialIssuerConfig := minimalValidCredentialIssuerConfig(credentialIssuerConfigName, credentialIssuerConfigNamespace)
applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
if _, err := credentialIssuerConfigsClient.Create(ctx, credentialIssuerConfig, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("could not create credentialissuerconfig: %w", err)
}
} else {
// Already exists, so check to see if we need to update it
credentialIssuerConfig := existingCredentialIssuerConfig.DeepCopy()
applyUpdatesToCredentialIssuerConfigFunc(credentialIssuerConfig)
if reflect.DeepEqual(existingCredentialIssuerConfig.Status, credentialIssuerConfig.Status) {
// Nothing interesting would change as a result of this update, so skip it
return nil
}
if _, err := credentialIssuerConfigsClient.Update(ctx, credentialIssuerConfig, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("could not update credentialissuerconfig: %w", err)
}
}
@ -171,7 +201,19 @@ func (c *publisherController) createOrUpdateCredentialIssuerConfig(
return nil
}
func equal(a, b *crdpinnipedv1alpha1.CredentialIssuerConfig) bool {
return a.Status.KubeConfigInfo.Server == b.Status.KubeConfigInfo.Server &&
a.Status.KubeConfigInfo.CertificateAuthorityData == b.Status.KubeConfigInfo.CertificateAuthorityData
// minimalValidCredentialIssuerConfig returns a new CredentialIssuerConfig with
// the given name and namespace, an empty (non-nil) strategies list, and no
// kubeconfig info. Callers are expected to apply whatever Status updates they
// need before persisting the object.
func minimalValidCredentialIssuerConfig(
	credentialIssuerConfigName string,
	credentialIssuerConfigNamespace string,
) *crdpinnipedv1alpha1.CredentialIssuerConfig {
	config := crdpinnipedv1alpha1.CredentialIssuerConfig{}
	config.ObjectMeta = metav1.ObjectMeta{
		Name:      credentialIssuerConfigName,
		Namespace: credentialIssuerConfigNamespace,
	}
	// Strategies is a required field per the CRD schema, so initialize it to
	// an empty slice rather than leaving it nil.
	config.Status = crdpinnipedv1alpha1.CredentialIssuerConfigStatus{
		Strategies:     []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{},
		KubeConfigInfo: nil,
	}
	return &config
}

View File

@ -13,19 +13,24 @@ import (
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/plugin/pkg/authenticator/token/webhook"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/suzerain-io/pinniped/internal/apiserver"
"github.com/suzerain-io/pinniped/internal/certauthority/kubecertauthority"
"github.com/suzerain-io/pinniped/internal/controller/issuerconfig"
"github.com/suzerain-io/pinniped/internal/controllermanager"
"github.com/suzerain-io/pinniped/internal/downward"
"github.com/suzerain-io/pinniped/internal/provider"
"github.com/suzerain-io/pinniped/internal/registry/credentialrequest"
crdpinnipedv1alpha1 "github.com/suzerain-io/pinniped/kubernetes/1.19/api/apis/crdpinniped/v1alpha1"
pinnipedv1alpha1 "github.com/suzerain-io/pinniped/kubernetes/1.19/api/apis/pinniped/v1alpha1"
pinnipedclientset "github.com/suzerain-io/pinniped/kubernetes/1.19/client-go/clientset/versioned"
"github.com/suzerain-io/pinniped/pkg/config"
)
@ -99,8 +104,15 @@ func (a *App) runServer(ctx context.Context) error {
return fmt.Errorf("could not load config: %w", err)
}
// Discover in which namespace we are installed.
podInfo, err := downward.Load(a.downwardAPIPath)
if err != nil {
return fmt.Errorf("could not read pod metadata: %w", err)
}
serverInstallationNamespace := podInfo.Namespace
// Load the Kubernetes cluster signing CA.
k8sClusterCA, shutdownCA, err := getClusterCASigner()
k8sClusterCA, shutdownCA, err := getClusterCASigner(ctx, serverInstallationNamespace)
if err != nil {
return err
}
@ -112,13 +124,6 @@ func (a *App) runServer(ctx context.Context) error {
return fmt.Errorf("could not create webhook client: %w", err)
}
// Discover in which namespace we are installed.
podInfo, err := downward.Load(a.downwardAPIPath)
if err != nil {
return fmt.Errorf("could not read pod metadata: %w", err)
}
serverInstallationNamespace := podInfo.Namespace
// This cert provider will provide certs to the API server and will
// be mutated by a controller to keep the certs up to date with what
// is stored in a k8s Secret. Therefore it is also effectively acting as
@ -160,7 +165,7 @@ func (a *App) runServer(ctx context.Context) error {
return server.GenericAPIServer.PrepareRun().Run(ctx.Done())
}
func getClusterCASigner() (*kubecertauthority.CA, kubecertauthority.ShutdownFunc, error) {
func getClusterCASigner(ctx context.Context, serverInstallationNamespace string) (*kubecertauthority.CA, kubecertauthority.ShutdownFunc, error) {
// Load the Kubernetes client configuration.
kubeConfig, err := restclient.InClusterConfig()
if err != nil {
@ -173,6 +178,12 @@ func getClusterCASigner() (*kubecertauthority.CA, kubecertauthority.ShutdownFunc
return nil, nil, fmt.Errorf("could not initialize Kubernetes client: %w", err)
}
// Connect to the pinniped API.
pinnipedClient, err := pinnipedclientset.NewForConfig(kubeConfig)
if err != nil {
return nil, nil, fmt.Errorf("could not initialize pinniped client: %w", err)
}
// Make a clock tick that triggers a periodic refresh.
ticker := time.NewTicker(5 * time.Minute)
@ -182,10 +193,51 @@ func getClusterCASigner() (*kubecertauthority.CA, kubecertauthority.ShutdownFunc
kubecertauthority.NewPodCommandExecutor(kubeConfig, kubeClient),
ticker.C,
)
if err != nil {
ticker.Stop()
updateErr := issuerconfig.CreateOrUpdateCredentialIssuerConfig(
ctx,
serverInstallationNamespace,
pinnipedClient,
func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
{
Type: crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: crdpinnipedv1alpha1.ErrorStrategyStatus,
Reason: crdpinnipedv1alpha1.CouldNotFetchKeyStrategyReason,
Message: err.Error(),
LastUpdateTime: metav1.Now(),
},
}
},
)
klog.Errorf("error performing create or update on CredentialIssuerConfig to add strategy error: %w", updateErr)
return nil, nil, fmt.Errorf("could not load cluster signing CA: %w", err)
}
updateErr := issuerconfig.CreateOrUpdateCredentialIssuerConfig(
ctx,
serverInstallationNamespace,
pinnipedClient,
func(configToUpdate *crdpinnipedv1alpha1.CredentialIssuerConfig) {
configToUpdate.Status.Strategies = []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{
{
Type: crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType,
Status: crdpinnipedv1alpha1.SuccessStrategyStatus,
Reason: crdpinnipedv1alpha1.FetchedKeyStrategyReason,
Message: "Key was fetched successfully",
LastUpdateTime: metav1.Now(),
},
}
},
)
if updateErr != nil {
return nil, nil, fmt.Errorf("error performing create or update on CredentialIssuerConfig to add strategy success: %w", updateErr)
}
return k8sClusterCA, func() { shutdownCA(); ticker.Stop() }, nil
}

View File

@ -7,6 +7,20 @@ package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// StrategyType identifies one of the strategies the server may use to issue
// credentials.
type StrategyType string

// StrategyStatus describes whether a strategy is working.
type StrategyStatus string

// StrategyReason is a machine-readable reason explaining a strategy's status.
type StrategyReason string

const (
	// KubeClusterSigningCertificateStrategyType is the strategy of borrowing
	// the cluster's signing key from the kube API server.
	KubeClusterSigningCertificateStrategyType = StrategyType("KubeClusterSigningCertificate")

	// SuccessStrategyStatus means the strategy is working.
	SuccessStrategyStatus = StrategyStatus("Success")
	// ErrorStrategyStatus means the strategy encountered an error.
	ErrorStrategyStatus = StrategyStatus("Error")

	// CouldNotFetchKeyStrategyReason means the cluster signing key could not
	// be fetched.
	CouldNotFetchKeyStrategyReason = StrategyReason("CouldNotFetchKey")
	// FetchedKeyStrategyReason means the cluster signing key was fetched
	// successfully.
	FetchedKeyStrategyReason = StrategyReason("FetchedKey")
)
type CredentialIssuerConfigStatus struct {
Strategies []CredentialIssuerConfigStrategy `json:"strategies"`
@ -23,9 +37,9 @@ type CredentialIssuerConfigKubeConfigInfo struct {
}
type CredentialIssuerConfigStrategy struct {
Type string `json:"type,omitempty"`
Status string `json:"status,omitempty"`
Reason string `json:"reason,omitempty"`
Type StrategyType `json:"type,omitempty"`
Status StrategyStatus `json:"status,omitempty"`
Reason StrategyReason `json:"reason,omitempty"`
Message string `json:"message,omitempty"`
LastUpdateTime metav1.Time `json:"lastUpdateTime"`
}

View File

@ -0,0 +1,8 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
# Describe the capabilities of the cluster against which the integration tests will run.
capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: false

View File

@ -0,0 +1,8 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
# Describe the capabilities of the cluster against which the integration tests will run.
capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: true

View File

@ -0,0 +1,8 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
# Describe the capabilities of the cluster against which the integration tests will run.
capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: true

View File

@ -4,6 +4,7 @@ go 1.14
require (
github.com/davecgh/go-spew v1.1.1
github.com/ghodss/yaml v1.0.0
github.com/stretchr/testify v1.6.1
github.com/suzerain-io/pinniped v0.0.0-20200819182107-1b9a70d089f4
github.com/suzerain-io/pinniped/kubernetes/1.19/api v0.0.0-00010101000000-000000000000

View File

@ -103,6 +103,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-critic/go-critic v0.5.0 h1:Ic2p5UCl5fX/2WX2w8nroPpPhxRNsNTMlJzsu/uqwnM=
github.com/go-critic/go-critic v0.5.0/go.mod h1:4jeRh3ZAVnRYhuWdOEvwzVqLUpxMSoAT0xZ74JsTPlo=

View File

@ -16,6 +16,7 @@ import (
func TestGetAPIResourceList(t *testing.T) {
library.SkipUnlessIntegration(t)
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
client := library.NewPinnipedClientset(t)
@ -60,7 +61,7 @@ func TestGetAPIResourceList(t *testing.T) {
SingularName: "",
}
expectedLDCAPIResource := metav1.APIResource{
expectedCredentialIssuerConfigResource := metav1.APIResource{
Name: "credentialissuerconfigs",
SingularName: "credentialissuerconfig",
Namespaced: true,
@ -79,8 +80,8 @@ func TestGetAPIResourceList(t *testing.T) {
actualAPIResource := actualCrdPinnipedResources.APIResources[0]
// workaround because its hard to predict the storage version hash (e.g. "t/+v41y+3e4=")
// so just don't worry about comparing that field
expectedLDCAPIResource.StorageVersionHash = actualAPIResource.StorageVersionHash
require.Equal(t, expectedLDCAPIResource, actualAPIResource)
expectedCredentialIssuerConfigResource.StorageVersionHash = actualAPIResource.StorageVersionHash
require.Equal(t, expectedCredentialIssuerConfigResource, actualAPIResource)
}
func findGroup(name string, groups []*metav1.APIGroup) *metav1.APIGroup {

View File

@ -22,6 +22,7 @@ import (
func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
library.SkipUnlessIntegration(t)
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
tests := []struct {
name string
@ -74,7 +75,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
namespaceName := library.Getenv(t, "PINNIPED_NAMESPACE")
namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
kubeClient := library.NewClientset(t)
aggregatedClient := library.NewAggregatedClientset(t)
@ -108,7 +109,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
return err == nil
}
assert.Eventually(t, secretIsRegenerated, 10*time.Second, 250*time.Millisecond)
require.NoError(t, err) // prints out the error in case of failure
require.NoError(t, err) // prints out the error and stops the test in case of failure
regeneratedCACert := secret.Data["caCertificate"]
regeneratedPrivateKey := secret.Data["tlsPrivateKey"]
regeneratedCertChain := secret.Data["tlsCertificateChain"]
@ -125,7 +126,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
return err == nil
}
assert.Eventually(t, aggregatedAPIUpdated, 10*time.Second, 250*time.Millisecond)
require.NoError(t, err) // prints out the error in case of failure
require.NoError(t, err) // prints out the error and stops the test in case of failure
require.Equal(t, regeneratedCACert, apiService.Spec.CABundle)
// Check that we can still make requests to the aggregated API through the kube API server,
@ -147,7 +148,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
// Unfortunately, although our code changes all the certs immediately, it seems to take ~1 minute for
// the API machinery to notice that we updated our serving cert, causing 1 minute of downtime for our endpoint.
assert.Eventually(t, aggregatedAPIWorking, 2*time.Minute, 250*time.Millisecond)
require.NoError(t, err) // prints out the error in case of failure
require.NoError(t, err) // prints out the error and stops the test in case of failure
})
}
}

View File

@ -20,8 +20,9 @@ import (
func TestGetDeployment(t *testing.T) {
library.SkipUnlessIntegration(t)
namespaceName := library.Getenv(t, "PINNIPED_NAMESPACE")
deploymentName := library.Getenv(t, "PINNIPED_APP_NAME")
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
deploymentName := library.GetEnv(t, "PINNIPED_APP_NAME")
client := library.NewClientset(t)

View File

@ -56,7 +56,8 @@ var maskKey = func(s string) string { return strings.ReplaceAll(s, "TESTING KEY"
func TestClient(t *testing.T) {
library.SkipUnlessIntegration(t)
tmcClusterToken := library.Getenv(t, "PINNIPED_TMC_CLUSTER_TOKEN")
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
tmcClusterToken := library.GetEnv(t, "PINNIPED_TMC_CLUSTER_TOKEN")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

View File

@ -11,6 +11,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
@ -19,65 +20,91 @@ import (
"github.com/suzerain-io/pinniped/test/library"
)
func TestSuccessfulCredentialIssuerConfig(t *testing.T) {
func TestCredentialIssuerConfig(t *testing.T) {
library.SkipUnlessIntegration(t)
namespaceName := library.Getenv(t, "PINNIPED_NAMESPACE")
namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
config := library.NewClientConfig(t)
client := library.NewPinnipedClientset(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
config := library.NewClientConfig(t)
expectedLDCStatus := expectedLDCStatus(config)
configList, err := client.
t.Run("test successful CredentialIssuerConfig", func(t *testing.T) {
actualConfigList, err := client.
CrdV1alpha1().
CredentialIssuerConfigs(namespaceName).
List(ctx, metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, configList.Items, 1)
require.Equal(t, expectedLDCStatus, &configList.Items[0].Status)
}
require.Len(t, actualConfigList.Items, 1)
func TestReconcilingCredentialIssuerConfig(t *testing.T) {
library.SkipUnlessIntegration(t)
namespaceName := library.Getenv(t, "PINNIPED_NAMESPACE")
// Verify the published kube config info.
actualStatusKubeConfigInfo := actualConfigList.Items[0].Status.KubeConfigInfo
require.Equal(t, expectedStatusKubeConfigInfo(config), actualStatusKubeConfigInfo)
client := library.NewPinnipedClientset(t)
// Verify the cluster strategy status based on what's expected of the test cluster's ability to share signing keys.
actualStatusStrategies := actualConfigList.Items[0].Status.Strategies
require.Len(t, actualStatusStrategies, 1)
actualStatusStrategy := actualStatusStrategies[0]
require.Equal(t, crdpinnipedv1alpha1.KubeClusterSigningCertificateStrategyType, actualStatusStrategy.Type)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if library.ClusterHasCapability(t, library.ClusterSigningKeyIsAvailable) {
require.Equal(t, crdpinnipedv1alpha1.SuccessStrategyStatus, actualStatusStrategy.Status)
require.Equal(t, crdpinnipedv1alpha1.FetchedKeyStrategyReason, actualStatusStrategy.Reason)
require.Equal(t, "Key was fetched successfully", actualStatusStrategy.Message)
} else {
require.Equal(t, crdpinnipedv1alpha1.ErrorStrategyStatus, actualStatusStrategy.Status)
require.Equal(t, crdpinnipedv1alpha1.CouldNotFetchKeyStrategyReason, actualStatusStrategy.Reason)
require.Contains(t, actualStatusStrategy.Message, "some part of the error message")
}
err := client.
CrdV1alpha1().
CredentialIssuerConfigs(namespaceName).
Delete(ctx, "pinniped-config", metav1.DeleteOptions{})
require.NoError(t, err)
require.WithinDuration(t, time.Now(), actualStatusStrategy.LastUpdateTime.Local(), 10*time.Minute)
})
config := library.NewClientConfig(t)
expectedLDCStatus := expectedLDCStatus(config)
t.Run("reconciling CredentialIssuerConfig", func(t *testing.T) {
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
var actualLDC *crdpinnipedv1alpha1.CredentialIssuerConfig
for i := 0; i < 10; i++ {
actualLDC, err = client.
existingConfig, err := client.
CrdV1alpha1().
CredentialIssuerConfigs(namespaceName).
Get(ctx, "pinniped-config", metav1.GetOptions{})
if err == nil {
break
}
time.Sleep(time.Millisecond * 750)
}
require.NoError(t, err)
require.Equal(t, expectedLDCStatus, &actualLDC.Status)
require.Len(t, existingConfig.Status.Strategies, 1)
initialStrategy := existingConfig.Status.Strategies[0]
// Mutate the existing object. Don't delete it because that would mess up its `Status.Strategies` array,
// since the reconciling controller is not currently responsible for that field.
existingConfig.Status.KubeConfigInfo.Server = "https://junk"
updatedConfig, err := client.
CrdV1alpha1().
CredentialIssuerConfigs(namespaceName).
Update(ctx, existingConfig, metav1.UpdateOptions{})
require.NoError(t, err)
require.Equal(t, "https://junk", updatedConfig.Status.KubeConfigInfo.Server)
// Expect that the object's mutated field is set back to what matches its source of truth.
var actualCredentialIssuerConfig *crdpinnipedv1alpha1.CredentialIssuerConfig
var getConfig = func() bool {
actualCredentialIssuerConfig, err = client.
CrdV1alpha1().
CredentialIssuerConfigs(namespaceName).
Get(ctx, "pinniped-config", metav1.GetOptions{})
return err == nil
}
assert.Eventually(t, getConfig, 5*time.Second, 100*time.Millisecond)
require.NoError(t, err) // prints out the error and stops the test in case of failure
actualStatusKubeConfigInfo := actualCredentialIssuerConfig.Status.KubeConfigInfo
require.Equal(t, expectedStatusKubeConfigInfo(config), actualStatusKubeConfigInfo)
// The strategies should not have changed during reconciliation.
require.Len(t, actualCredentialIssuerConfig.Status.Strategies, 1)
require.Equal(t, initialStrategy, actualCredentialIssuerConfig.Status.Strategies[0])
})
}
func expectedLDCStatus(config *rest.Config) *crdpinnipedv1alpha1.CredentialIssuerConfigStatus {
return &crdpinnipedv1alpha1.CredentialIssuerConfigStatus{
Strategies: []crdpinnipedv1alpha1.CredentialIssuerConfigStrategy{},
KubeConfigInfo: &crdpinnipedv1alpha1.CredentialIssuerConfigKubeConfigInfo{
func expectedStatusKubeConfigInfo(config *rest.Config) *crdpinnipedv1alpha1.CredentialIssuerConfigKubeConfigInfo {
return &crdpinnipedv1alpha1.CredentialIssuerConfigKubeConfigInfo{
Server: config.Host,
CertificateAuthorityData: base64.StdEncoding.EncodeToString(config.TLSClientConfig.CAData),
},
}
}

View File

@ -14,9 +14,8 @@ import (
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -63,7 +62,8 @@ func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient ku
func TestSuccessfulCredentialRequest(t *testing.T) {
library.SkipUnlessIntegration(t)
tmcClusterToken := library.Getenv(t, "PINNIPED_TMC_CLUSTER_TOKEN")
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
tmcClusterToken := library.GetEnv(t, "PINNIPED_TMC_CLUSTER_TOKEN")
response, err := makeRequest(t, v1alpha1.CredentialRequestSpec{
Type: v1alpha1.TokenCredentialType,
@ -121,7 +121,7 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
return err == nil
}
assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
require.NoError(t, err) // prints out the error in case of failure
require.NoError(t, err) // prints out the error and stops the test in case of failure
require.NotEmpty(t, listNamespaceResponse.Items)
})
@ -150,13 +150,15 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
return err == nil
}
assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
require.NoError(t, err) // prints out the error in case of failure
require.NoError(t, err) // prints out the error and stops the test in case of failure
require.NotEmpty(t, listNamespaceResponse.Items)
})
}
func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser(t *testing.T) {
library.SkipUnlessIntegration(t)
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
response, err := makeRequest(t, v1alpha1.CredentialRequestSpec{
Type: v1alpha1.TokenCredentialType,
Token: &v1alpha1.CredentialRequestTokenCredential{Value: "not a good token"},
@ -171,6 +173,8 @@ func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthentic
func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T) {
library.SkipUnlessIntegration(t)
library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
response, err := makeRequest(t, v1alpha1.CredentialRequestSpec{
Type: v1alpha1.TokenCredentialType,
Token: nil,

View File

@ -15,6 +15,7 @@ import (
"github.com/suzerain-io/pinniped/test/library"
)
// Smoke test to see if the kubeconfig works and the cluster is reachable.
func TestGetNodes(t *testing.T) {
library.SkipUnlessIntegration(t)
cmd := exec.Command("kubectl", "get", "nodes")

View File

@ -0,0 +1,53 @@
package library
import (
"io/ioutil"
"os"
"testing"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/require"
)
// TestClusterCapability names an optional feature of the cluster against
// which the integration tests run.
type TestClusterCapability string

const (
	// ClusterSigningKeyIsAvailable means it is possible to borrow the
	// cluster's signing key from the kube API server.
	ClusterSigningKeyIsAvailable = TestClusterCapability("clusterSigningKeyIsAvailable")
)
// capabilitiesConfig is the schema of the cluster capability description
// provided via the PINNIPED_CLUSTER_CAPABILITY_* env vars.
//
// NOTE: this is unmarshalled with github.com/ghodss/yaml, which converts the
// YAML to JSON and then delegates to encoding/json, so the struct tag must be
// a `json` tag — a `yaml` tag is silently ignored by that library and the
// original tag only worked via encoding/json's case-insensitive field-name
// fallback.
type capabilitiesConfig struct {
	Capabilities map[TestClusterCapability]bool `json:"capabilities,omitempty"`
}
// ClusterHasCapability reads the test cluster's capability description, taken
// from either the PINNIPED_CLUSTER_CAPABILITY_YAML env var (inline YAML) or
// the PINNIPED_CLUSTER_CAPABILITY_FILE env var (path to a YAML file), and
// reports whether the named capability is available on the cluster.
//
// It fails the test when neither env var is set, when the file cannot be
// read, when the YAML cannot be parsed, or when the capability was not
// described at all.
func ClusterHasCapability(t *testing.T, capability TestClusterCapability) bool {
	t.Helper()

	capabilitiesDescriptionYAML := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_YAML")
	capabilitiesDescriptionFile := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_FILE")
	require.NotEmptyf(t,
		capabilitiesDescriptionYAML+capabilitiesDescriptionFile,
		"must specify either PINNIPED_CLUSTER_CAPABILITY_YAML or PINNIPED_CLUSTER_CAPABILITY_FILE env var for integration tests",
	)

	// The inline YAML env var takes precedence; otherwise read the file.
	if capabilitiesDescriptionYAML == "" {
		bytes, err := ioutil.ReadFile(capabilitiesDescriptionFile)
		// Check the read error before consuming the file contents (the
		// original converted bytes to a string before checking err).
		require.NoError(t, err)
		capabilitiesDescriptionYAML = string(bytes)
	}

	var capabilities capabilitiesConfig
	err := yaml.Unmarshal([]byte(capabilitiesDescriptionYAML), &capabilities)
	require.NoError(t, err)

	isCapable, capabilityWasDescribed := capabilities.Capabilities[capability]
	require.True(t, capabilityWasDescribed, `the cluster's "%s" capability was not described`, capability)

	return isCapable
}
// SkipUnlessClusterHasCapability skips the calling test unless the test
// cluster was described as having the given capability.
func SkipUnlessClusterHasCapability(t *testing.T, capability TestClusterCapability) {
	t.Helper()
	if ClusterHasCapability(t, capability) {
		return
	}
	t.Skipf(`skipping integration test because cluster lacks the "%s" capability`, capability)
}

View File

@ -12,9 +12,9 @@ import (
"github.com/stretchr/testify/require"
)
// Getenv gets the environment variable with key and asserts that it is not
// GetEnv gets the environment variable with key and asserts that it is not
// empty. It returns the value of the environment variable.
func Getenv(t *testing.T, key string) string {
func GetEnv(t *testing.T, key string) string {
t.Helper()
value := os.Getenv(key)
require.NotEmptyf(t, value, "must specify %s env var for integration tests", key)