Merge pull request #134 from mattmoyer/refactor-test-params
Refactor integration test environment helpers to be more structured.
commit 42e74a02e9
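Every file in this diff follows the same pattern: the per-variable library.GetEnv lookups and the SkipUnlessIntegration / SkipUnlessClusterHasCapability / SkipWhenClusterHasCapability helpers are replaced by one structured accessor on the test library. A minimal sketch of the new call pattern (hypothetical test name; the package name and the test library import path are assumptions, not part of this commit):

package integration // assumed package name for these integration tests

import (
	"testing"

	"go.pinniped.dev/test/library" // assumed import path for the shared test library
)

// Hypothetical test showing the new entry point: one call skips non-integration
// runs, enforces a required cluster capability, and returns the parsed TestEnv.
func TestExample(t *testing.T) {
	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)

	// External parameters now hang off the returned struct instead of raw env vars.
	_ = env.Namespace               // was library.GetEnv(t, "PINNIPED_NAMESPACE")
	_ = env.AppName                 // was library.GetEnv(t, "PINNIPED_APP_NAME")
	_ = env.TestUser.Token          // was library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")
	_ = env.TestUser.ExpectedGroups // was split from PINNIPED_TEST_USER_GROUPS
}

The fields used above (Namespace, AppName, TestUser, and the capability helpers) are all defined in the new TestEnv type added at the bottom of this diff.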
@@ -19,7 +19,7 @@ import (
 )
 
 func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
-	library.SkipUnlessIntegration(t)
+	env := library.IntegrationEnv(t)
 
 	const defaultServingCertResourceName = "pinniped-api-tls-serving-certificate"
 
@@ -74,8 +74,6 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 	for _, test := range tests {
 		test := test
 		t.Run(test.name, func(t *testing.T) {
-			namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
-
 			kubeClient := library.NewClientset(t)
 			aggregatedClient := library.NewAggregatedClientset(t)
 			pinnipedClient := library.NewPinnipedClientset(t)
@@ -85,7 +83,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 			const apiServiceName = "v1alpha1.login.pinniped.dev"
 
 			// Get the initial auto-generated version of the Secret.
-			secret, err := kubeClient.CoreV1().Secrets(namespaceName).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
+			secret, err := kubeClient.CoreV1().Secrets(env.Namespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
 			require.NoError(t, err)
 			initialCACert := secret.Data["caCertificate"]
 			initialPrivateKey := secret.Data["tlsPrivateKey"]
@@ -100,11 +98,11 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 			require.Equal(t, initialCACert, apiService.Spec.CABundle)
 
 			// Force rotation to happen.
-			require.NoError(t, test.forceRotation(ctx, kubeClient, namespaceName))
+			require.NoError(t, test.forceRotation(ctx, kubeClient, env.Namespace))
 
 			// Expect that the Secret comes back right away with newly minted certs.
 			secretIsRegenerated := func() bool {
-				secret, err = kubeClient.CoreV1().Secrets(namespaceName).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
+				secret, err = kubeClient.CoreV1().Secrets(env.Namespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
 				return err == nil
 			}
 			assert.Eventually(t, secretIsRegenerated, 10*time.Second, 250*time.Millisecond)
@@ -135,7 +133,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 			// pod has rotated their cert, but not the other ones sitting behind the service.
 			aggregatedAPIWorking := func() bool {
 				for i := 0; i < 10; i++ {
-					_, err = pinnipedClient.LoginV1alpha1().TokenCredentialRequests(namespaceName).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
+					_, err = pinnipedClient.LoginV1alpha1().TokenCredentialRequests(env.Namespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
 						TypeMeta:   metav1.TypeMeta{},
 						ObjectMeta: metav1.ObjectMeta{},
 						Spec:       loginv1alpha1.TokenCredentialRequestSpec{Token: "not a good token"},
@@ -17,16 +17,13 @@ import (
 )
 
 func TestGetDeployment(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
-	deploymentName := library.GetEnv(t, "PINNIPED_APP_NAME")
-
+	env := library.IntegrationEnv(t)
 	client := library.NewClientset(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
-	appDeployment, err := client.AppsV1().Deployments(namespaceName).Get(ctx, deploymentName, metav1.GetOptions{})
+	appDeployment, err := client.AppsV1().Deployments(env.Namespace).Get(ctx, env.AppName, metav1.GetOptions{})
 	require.NoError(t, err)
 
 	cond := getDeploymentCondition(appDeployment.Status, appsv1.DeploymentAvailable)
@@ -18,14 +18,7 @@ import (
 )
 
 func TestCLI(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
-	token := library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")
-	namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
-	testUsername := library.GetEnv(t, "PINNIPED_TEST_USER_USERNAME")
-	expectedTestUserGroups := strings.Split(
-		strings.ReplaceAll(library.GetEnv(t, "PINNIPED_TEST_USER_GROUPS"), " ", ""), ",",
-	)
+	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
 
 	// Create a test webhook configuration to use with the CLI.
 	ctx, cancelFunc := context.WithTimeout(context.Background(), 4*time.Minute)
@@ -62,20 +55,20 @@ func TestCLI(t *testing.T) {
 	defer cleanupFunc()
 
 	// Run pinniped CLI to get kubeconfig.
-	kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, token, namespaceName, "webhook", idp.Name)
+	kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, env.TestUser.Token, env.Namespace, "webhook", idp.Name)
 
 	// In addition to the client-go based testing below, also try the kubeconfig
 	// with kubectl to validate that it works.
 	adminClient := library.NewClientset(t)
 	t.Run(
 		"access as user with kubectl",
-		accessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, testUsername, namespaceName),
+		accessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, env.TestUser.ExpectedUsername, env.Namespace),
 	)
-	for _, group := range expectedTestUserGroups {
+	for _, group := range env.TestUser.ExpectedGroups {
 		group := group
 		t.Run(
 			"access as group "+group+" with kubectl",
-			accessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, namespaceName),
+			accessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, env.Namespace),
 		)
 	}
 
@@ -83,8 +76,8 @@ func TestCLI(t *testing.T) {
 	kubeClient := library.NewClientsetForKubeConfig(t, kubeConfigYAML)
 
 	// Validate that we can auth to the API via our user.
-	t.Run("access as user with client-go", accessAsUserTest(ctx, adminClient, testUsername, kubeClient))
-	for _, group := range expectedTestUserGroups {
+	t.Run("access as user with client-go", accessAsUserTest(ctx, adminClient, env.TestUser.ExpectedUsername, kubeClient))
+	for _, group := range env.TestUser.ExpectedGroups {
 		group := group
 		t.Run("access as group "+group+" with client-go", accessAsGroupTest(ctx, adminClient, group, kubeClient))
 	}
@@ -55,10 +55,7 @@ var (
 var maskKey = func(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") }
 
 func TestClient(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
-	token := library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")
-	namespace := library.GetEnv(t, "PINNIPED_NAMESPACE")
+	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
@@ -75,7 +72,7 @@ func TestClient(t *testing.T) {
 
 	var resp *clientauthenticationv1beta1.ExecCredential
 	assert.Eventually(t, func() bool {
-		resp, err = client.ExchangeToken(ctx, namespace, idp, token, string(clientConfig.CAData), clientConfig.Host)
+		resp, err = client.ExchangeToken(ctx, env.Namespace, idp, env.TestUser.Token, string(clientConfig.CAData), clientConfig.Host)
 		return err == nil
 	}, 10*time.Second, 500*time.Millisecond)
 	require.NoError(t, err)
@@ -17,9 +17,7 @@ import (
 )
 
 func TestCredentialIssuerConfig(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
-
+	env := library.IntegrationEnv(t)
 	config := library.NewClientConfig(t)
 	client := library.NewPinnipedClientset(t)
 
@@ -29,7 +27,7 @@ func TestCredentialIssuerConfig(t *testing.T) {
 	t.Run("test successful CredentialIssuerConfig", func(t *testing.T) {
 		actualConfigList, err := client.
 			ConfigV1alpha1().
-			CredentialIssuerConfigs(namespaceName).
+			CredentialIssuerConfigs(env.Namespace).
 			List(ctx, metav1.ListOptions{})
 		require.NoError(t, err)
 
@@ -43,7 +41,7 @@ func TestCredentialIssuerConfig(t *testing.T) {
 		actualStatusStrategy := actualStatusStrategies[0]
 		require.Equal(t, configv1alpha1.KubeClusterSigningCertificateStrategyType, actualStatusStrategy.Type)
 
-		if library.ClusterHasCapability(t, library.ClusterSigningKeyIsAvailable) {
+		if env.HasCapability(library.ClusterSigningKeyIsAvailable) {
 			require.Equal(t, configv1alpha1.SuccessStrategyStatus, actualStatusStrategy.Status)
 			require.Equal(t, configv1alpha1.FetchedKeyStrategyReason, actualStatusStrategy.Reason)
 			require.Equal(t, "Key was fetched successfully", actualStatusStrategy.Message)
@@ -7,7 +7,6 @@ import (
 	"context"
 	"crypto/x509"
 	"encoding/pem"
-	"strings"
 	"testing"
 	"time"
 
@@ -40,12 +39,8 @@ func TestUnsuccessfulCredentialRequest(t *testing.T) {
 }
 
 func TestSuccessfulCredentialRequest(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
-	testUsername := library.GetEnv(t, "PINNIPED_TEST_USER_USERNAME")
-	expectedTestUserGroups := strings.Split(
-		strings.ReplaceAll(library.GetEnv(t, "PINNIPED_TEST_USER_GROUPS"), " ", ""), ",",
-	)
+	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
+
 	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
 	defer cancel()
 
@@ -64,8 +59,8 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
 	require.Empty(t, response.Spec)
 	require.Empty(t, response.Status.Credential.Token)
 	require.NotEmpty(t, response.Status.Credential.ClientCertificateData)
-	require.Equal(t, testUsername, getCommonName(t, response.Status.Credential.ClientCertificateData))
-	require.ElementsMatch(t, expectedTestUserGroups, getOrganizations(t, response.Status.Credential.ClientCertificateData))
+	require.Equal(t, env.TestUser.ExpectedUsername, getCommonName(t, response.Status.Credential.ClientCertificateData))
+	require.ElementsMatch(t, env.TestUser.ExpectedGroups, getOrganizations(t, response.Status.Credential.ClientCertificateData))
 	require.NotEmpty(t, response.Status.Credential.ClientKeyData)
 	require.NotNil(t, response.Status.Credential.ExpirationTimestamp)
 	require.InDelta(t, time.Until(response.Status.Credential.ExpirationTimestamp.Time), 1*time.Hour, float64(3*time.Minute))
@@ -82,9 +77,9 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
 
 	t.Run(
 		"access as user",
-		accessAsUserTest(ctx, adminClient, testUsername, clientWithCertFromCredentialRequest),
+		accessAsUserTest(ctx, adminClient, env.TestUser.ExpectedUsername, clientWithCertFromCredentialRequest),
 	)
-	for _, group := range expectedTestUserGroups {
+	for _, group := range env.TestUser.ExpectedGroups {
 		group := group
 		t.Run(
 			"access as group "+group,
@@ -94,8 +89,7 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
 }
 
 func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
+	library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
 
 	response, err := makeRequest(context.Background(), t, loginv1alpha1.TokenCredentialRequestSpec{Token: "not a good token"})
 
@@ -107,8 +101,7 @@ func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthentic
 }
 
 func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
+	library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
 
 	response, err := makeRequest(context.Background(), t, loginv1alpha1.TokenCredentialRequestSpec{Token: ""})
 
@@ -127,8 +120,7 @@ func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T
 }
 
 func TestCredentialRequest_OtherwiseValidRequestWithRealTokenShouldFailWhenTheClusterIsNotCapable(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipWhenClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
+	library.IntegrationEnv(t).WithoutCapability(library.ClusterSigningKeyIsAvailable)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
 	defer cancel()
@@ -146,23 +138,23 @@ func TestCredentialRequest_OtherwiseValidRequestWithRealTokenShouldFailWhenTheCl
 
 func makeRequest(ctx context.Context, t *testing.T, spec loginv1alpha1.TokenCredentialRequestSpec) (*loginv1alpha1.TokenCredentialRequest, error) {
 	t.Helper()
+	env := library.IntegrationEnv(t)
 
 	client := library.NewAnonymousPinnipedClientset(t)
 
 	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
 	defer cancel()
 
-	ns := library.GetEnv(t, "PINNIPED_NAMESPACE")
-	return client.LoginV1alpha1().TokenCredentialRequests(ns).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
+	return client.LoginV1alpha1().TokenCredentialRequests(env.Namespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
 		TypeMeta:   metav1.TypeMeta{},
-		ObjectMeta: metav1.ObjectMeta{Namespace: ns},
+		ObjectMeta: metav1.ObjectMeta{Namespace: env.Namespace},
 		Spec:       spec,
 	}, metav1.CreateOptions{})
 }
 
 func validCredentialRequestSpecWithRealToken(t *testing.T, idp corev1.TypedLocalObjectReference) loginv1alpha1.TokenCredentialRequestSpec {
 	return loginv1alpha1.TokenCredentialRequestSpec{
-		Token:            library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN"),
+		Token:            library.IntegrationEnv(t).TestUser.Token,
 		IdentityProvider: idp,
 	}
 }
@@ -25,9 +25,7 @@ const (
 )
 
 func TestKubeCertAgent(t *testing.T) {
-	library.SkipUnlessIntegration(t)
-	library.SkipUnlessClusterHasCapability(t, library.ClusterSigningKeyIsAvailable)
-	namespaceName := library.GetEnv(t, "PINNIPED_NAMESPACE")
+	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	defer cancel()
@@ -39,7 +37,7 @@ func TestKubeCertAgent(t *testing.T) {
 	// We can pretty safely assert there should be more than 1, since there should be a
 	// kube-cert-agent pod per kube-controller-manager pod, and there should probably be at least
 	// 1 kube-controller-manager for this to be a working kube API.
-	originalAgentPods, err := kubeClient.CoreV1().Pods(namespaceName).List(ctx, metav1.ListOptions{
+	originalAgentPods, err := kubeClient.CoreV1().Pods(env.Namespace).List(ctx, metav1.ListOptions{
 		LabelSelector: kubeCertAgentLabelSelector,
 	})
 	require.NoError(t, err)
@@ -48,7 +46,7 @@ func TestKubeCertAgent(t *testing.T) {
 
 	agentPodsReconciled := func() bool {
 		var currentAgentPods *corev1.PodList
-		currentAgentPods, err = kubeClient.CoreV1().Pods(namespaceName).List(ctx, metav1.ListOptions{
+		currentAgentPods, err = kubeClient.CoreV1().Pods(env.Namespace).List(ctx, metav1.ListOptions{
 			LabelSelector: kubeCertAgentLabelSelector,
 		})
 
@@ -92,7 +90,7 @@ func TestKubeCertAgent(t *testing.T) {
 		updatedAgentPod.Spec.Tolerations,
 		corev1.Toleration{Key: "fake-toleration"},
 	)
-	_, err = kubeClient.CoreV1().Pods(namespaceName).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
+	_, err = kubeClient.CoreV1().Pods(env.Namespace).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
 	require.NoError(t, err)
 
 	// Make sure the original pods come back.
@@ -104,7 +102,7 @@ func TestKubeCertAgent(t *testing.T) {
 	// Delete the first pod. The controller should see it, and flip it back.
 	err = kubeClient.
 		CoreV1().
-		Pods(namespaceName).
+		Pods(env.Namespace).
 		Delete(ctx, originalAgentPods.Items[0].Name, metav1.DeleteOptions{})
 	require.NoError(t, err)
 
@@ -147,12 +147,10 @@ func newAnonymousClientRestConfigWithCertAndKeyAdded(t *testing.T, clientCertifi
 // describes the test IDP within the test namespace.
 func CreateTestWebhookIDP(ctx context.Context, t *testing.T) corev1.TypedLocalObjectReference {
 	t.Helper()
+	testEnv := IntegrationEnv(t)
 
-	namespace := GetEnv(t, "PINNIPED_NAMESPACE")
-	endpoint := GetEnv(t, "PINNIPED_TEST_WEBHOOK_ENDPOINT")
-	caBundle := GetEnv(t, "PINNIPED_TEST_WEBHOOK_CA_BUNDLE")
 	client := NewPinnipedClientset(t)
-	webhooks := client.IDPV1alpha1().WebhookIdentityProviders(namespace)
+	webhooks := client.IDPV1alpha1().WebhookIdentityProviders(testEnv.Namespace)
 
 	createContext, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()
@@ -163,10 +161,7 @@ func CreateTestWebhookIDP(ctx context.Context, t *testing.T) corev1.TypedLocalOb
 			Labels:      map[string]string{"pinniped.dev/test": ""},
 			Annotations: map[string]string{"pinniped.dev/testName": t.Name()},
 		},
-		Spec: idpv1alpha1.WebhookIdentityProviderSpec{
-			Endpoint: endpoint,
-			TLS:      &idpv1alpha1.TLSSpec{CertificateAuthorityData: caBundle},
-		},
+		Spec: testEnv.TestWebhook,
 	}, metav1.CreateOptions{})
 	require.NoError(t, err, "could not create test WebhookIdentityProvider")
 	t.Logf("created test WebhookIdentityProvider %s/%s", idp.Namespace, idp.Name)
@@ -1,63 +0,0 @@
-// Copyright 2020 the Pinniped contributors. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package library
-
-import (
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"sigs.k8s.io/yaml"
-)
-
-type TestClusterCapability string
-
-const (
-	ClusterSigningKeyIsAvailable = TestClusterCapability("clusterSigningKeyIsAvailable")
-)
-
-type capabilitiesConfig struct {
-	Capabilities map[TestClusterCapability]bool `yaml:"capabilities,omitempty"`
-}
-
-func ClusterHasCapability(t *testing.T, capability TestClusterCapability) bool {
-	t.Helper()
-
-	capabilitiesDescriptionYAML := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_YAML")
-	capabilitiesDescriptionFile := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_FILE")
-	require.NotEmptyf(t,
-		capabilitiesDescriptionYAML+capabilitiesDescriptionFile,
-		"must specify either PINNIPED_CLUSTER_CAPABILITY_YAML or PINNIPED_CLUSTER_CAPABILITY_FILE env var for integration tests",
-	)
-
-	if capabilitiesDescriptionYAML == "" {
-		bytes, err := ioutil.ReadFile(capabilitiesDescriptionFile)
-		capabilitiesDescriptionYAML = string(bytes)
-		require.NoError(t, err)
-	}
-
-	var capabilities capabilitiesConfig
-	err := yaml.Unmarshal([]byte(capabilitiesDescriptionYAML), &capabilities)
-	require.NoError(t, err)
-
-	isCapable, capabilityWasDescribed := capabilities.Capabilities[capability]
-	require.True(t, capabilityWasDescribed, `the cluster's "%s" capability was not described`, capability)
-
-	return isCapable
-}
-
-func SkipUnlessClusterHasCapability(t *testing.T, capability TestClusterCapability) {
-	t.Helper()
-	if !ClusterHasCapability(t, capability) {
-		t.Skipf(`skipping integration test because cluster lacks the "%s" capability`, capability)
-	}
-}
-
-func SkipWhenClusterHasCapability(t *testing.T, capability TestClusterCapability) {
-	t.Helper()
-	if ClusterHasCapability(t, capability) {
-		t.Skipf(`skipping integration test because cluster has the "%s" capability`, capability)
-	}
-}
@@ -4,17 +4,97 @@
 package library
 
 import (
+	"io/ioutil"
 	"os"
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/require"
+	"sigs.k8s.io/yaml"
+
+	idpv1alpha1 "go.pinniped.dev/generated/1.19/apis/idp/v1alpha1"
 )
 
-// GetEnv gets the environment variable with key and asserts that it is not
-// empty. It returns the value of the environment variable.
-func GetEnv(t *testing.T, key string) string {
+type TestClusterCapability string
+
+const (
+	ClusterSigningKeyIsAvailable = TestClusterCapability("clusterSigningKeyIsAvailable")
+)
+
+// TestEnv captures all the external parameters consumed by our integration tests.
+type TestEnv struct {
+	t *testing.T
+
+	Namespace    string                                  `json:"namespace"`
+	AppName      string                                  `json:"appName"`
+	Capabilities map[TestClusterCapability]bool          `json:"capabilities"`
+	TestWebhook  idpv1alpha1.WebhookIdentityProviderSpec `json:"testWebhook"`
+	TestUser     struct {
+		Token            string   `json:"token"`
+		ExpectedUsername string   `json:"expectedUsername"`
+		ExpectedGroups   []string `json:"expectedGroups"`
+	} `json:"testUser"`
+}
+
+// IntegrationEnv gets the integration test environment from OS environment variables. This
+// method also implies SkipUnlessIntegration().
+func IntegrationEnv(t *testing.T) *TestEnv {
+	t.Helper()
+	SkipUnlessIntegration(t)
+
+	capabilitiesDescriptionYAML := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_YAML")
+	capabilitiesDescriptionFile := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_FILE")
+	require.NotEmptyf(t,
+		capabilitiesDescriptionYAML+capabilitiesDescriptionFile,
+		"must specify either PINNIPED_CLUSTER_CAPABILITY_YAML or PINNIPED_CLUSTER_CAPABILITY_FILE env var for integration tests",
+	)
+	if capabilitiesDescriptionYAML == "" {
+		bytes, err := ioutil.ReadFile(capabilitiesDescriptionFile)
+		capabilitiesDescriptionYAML = string(bytes)
+		require.NoError(t, err)
+	}
+
+	var result TestEnv
+	err := yaml.Unmarshal([]byte(capabilitiesDescriptionYAML), &result)
+	require.NoErrorf(t, err, "capabilities specification was invalid YAML")
+
+	needEnv := func(key string) string {
 		t.Helper()
 		value := os.Getenv(key)
 		require.NotEmptyf(t, value, "must specify %s env var for integration tests", key)
 		return value
 	}
+
+	result.Namespace = needEnv("PINNIPED_NAMESPACE")
+	result.AppName = needEnv("PINNIPED_APP_NAME")
+	result.TestUser.ExpectedUsername = needEnv("PINNIPED_TEST_USER_USERNAME")
+	result.TestUser.ExpectedGroups = strings.Split(strings.ReplaceAll(needEnv("PINNIPED_TEST_USER_GROUPS"), " ", ""), ",")
+	result.TestUser.Token = needEnv("PINNIPED_TEST_USER_TOKEN")
+	result.TestWebhook.Endpoint = needEnv("PINNIPED_TEST_WEBHOOK_ENDPOINT")
+	result.TestWebhook.TLS = &idpv1alpha1.TLSSpec{CertificateAuthorityData: needEnv("PINNIPED_TEST_WEBHOOK_CA_BUNDLE")}
+	result.t = t
+	return &result
+}
+
+func (e *TestEnv) HasCapability(cap TestClusterCapability) bool {
+	e.t.Helper()
+	isCapable, capabilityWasDescribed := e.Capabilities[cap]
+	require.True(e.t, capabilityWasDescribed, `the cluster's "%s" capability was not described`, cap)
+	return isCapable
+}
+
+func (e *TestEnv) WithCapability(cap TestClusterCapability) *TestEnv {
+	e.t.Helper()
+	if !e.HasCapability(cap) {
+		e.t.Skipf(`skipping integration test because cluster lacks the "%s" capability`, cap)
+	}
+	return e
+}
+
+func (e *TestEnv) WithoutCapability(cap TestClusterCapability) *TestEnv {
+	e.t.Helper()
+	if e.HasCapability(cap) {
+		e.t.Skipf(`skipping integration test because cluster has the "%s" capability`, cap)
+	}
+	return e
+}
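Note on configuration: IntegrationEnv above decodes PINNIPED_CLUSTER_CAPABILITY_YAML (or the file named by PINNIPED_CLUSTER_CAPABILITY_FILE) directly into TestEnv with sigs.k8s.io/yaml, so the capability document only needs a top-level capabilities map; the remaining TestEnv fields come from the individual PINNIPED_* variables. A minimal, hypothetical sketch (not part of this commit) of that document and how it decodes:

package library

import (
	"testing"

	"github.com/stretchr/testify/require"
	"sigs.k8s.io/yaml"
)

// Hypothetical test illustrating the capability document that the
// PINNIPED_CLUSTER_CAPABILITY_YAML / _FILE variables are expected to carry.
func TestCapabilityDocumentShape(t *testing.T) {
	const doc = `
capabilities:
  clusterSigningKeyIsAvailable: true
`
	var env TestEnv
	require.NoError(t, yaml.Unmarshal([]byte(doc), &env))
	require.True(t, env.Capabilities[ClusterSigningKeyIsAvailable])
}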