Rename integration test env variables

- Variables that are specific to the concierge now include "concierge" in their name.
- All variables now start with `PINNIPED_TEST_`, which makes it clear that they are for tests and also keeps them from conflicting with the env vars used by the Pinniped CLI code.

parent b71959961d
commit 72b2d02777
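For orientation, here is a minimal sketch (not part of this commit) of how a test consumes the renamed variables through the shared test library shown in the hunks below; the package name and import path are assumed from the repo layout:

```go
package integration

import (
	"testing"

	"go.pinniped.dev/test/library"
)

// Sketch: library.IntegrationEnv reads the PINNIPED_TEST_* variables,
// skipping the test unless integration tests are enabled and failing
// loudly when a required variable is unset.
func TestEnvConsumerSketch(t *testing.T) {
	env := library.IntegrationEnv(t)
	t.Log(env.ConciergeNamespace, env.ConciergeAppName, env.SupervisorNamespace)
}
```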
@@ -269,22 +269,22 @@ pinniped_cluster_capability_file_content=$(cat "$kind_capabilities_file")

 cat <<EOF >/tmp/integration-test-env
 # The following env vars should be set before running 'go test -v -count 1 ./test/...'
-export PINNIPED_NAMESPACE=${namespace}
-export PINNIPED_APP_NAME=${app_name}
+export PINNIPED_TEST_CONCIERGE_NAMESPACE=${namespace}
+export PINNIPED_TEST_CONCIERGE_APP_NAME=${app_name}
 export PINNIPED_TEST_USER_USERNAME=${test_username}
 export PINNIPED_TEST_USER_GROUPS=${test_groups}
 export PINNIPED_TEST_USER_TOKEN=${test_username}:${test_password}
 export PINNIPED_TEST_WEBHOOK_ENDPOINT=${webhook_url}
 export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=${webhook_ca_bundle}
-export PINNIPED_SUPERVISOR_NAMESPACE=${supervisor_namespace}
-export PINNIPED_SUPERVISOR_APP_NAME=${supervisor_app_name}
+export PINNIPED_TEST_SUPERVISOR_NAMESPACE=${supervisor_namespace}
+export PINNIPED_TEST_SUPERVISOR_APP_NAME=${supervisor_app_name}
 export PINNIPED_TEST_SUPERVISOR_ADDRESS="localhost:12345"

-read -r -d '' PINNIPED_CLUSTER_CAPABILITY_YAML << PINNIPED_CLUSTER_CAPABILITY_YAML_EOF || true
+read -r -d '' PINNIPED_TEST_CLUSTER_CAPABILITY_YAML << PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF || true
 ${pinniped_cluster_capability_file_content}
-PINNIPED_CLUSTER_CAPABILITY_YAML_EOF
+PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF

-export PINNIPED_CLUSTER_CAPABILITY_YAML
+export PINNIPED_TEST_CLUSTER_CAPABILITY_YAML
 EOF

 #
@@ -298,7 +298,7 @@ log_note " cd $pinniped_path"
 log_note ' source /tmp/integration-test-env && go test -v -count 1 ./test/integration'
 log_note
 log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
-log_note " ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
+log_note " ${goland_vars}PINNIPED_TEST_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
 log_note

 if ! tilt_mode; then
@@ -8,7 +8,6 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"strings"
 	"testing"
 	"time"
@@ -26,49 +25,25 @@ func TestCLI(t *testing.T) {

 	idp := library.CreateTestWebhookIDP(ctx, t)

-	// Remove all Pinniped environment variables for the remainder of this test
-	// because some of their names clash with the env vars expected by our
-	// kubectl exec plugin. We would like this test to prove that the exec
-	// plugin receives all of the necessary env vars via the auto-generated
-	// kubeconfig from the Pinniped CLI.
-	initialEnvVars := make(map[string]string)
-	for _, e := range os.Environ() {
-		pair := strings.SplitN(e, "=", 2)
-		name := pair[0]
-		value := pair[1]
-		if strings.HasPrefix(name, "PINNIPED_") {
-			initialEnvVars[name] = value
-			err := os.Unsetenv(name)
-			require.NoError(t, err)
-		}
-	}
-	// Put them back for other tests to use after this one
-	t.Cleanup(func() {
-		for k, v := range initialEnvVars {
-			err := os.Setenv(k, v)
-			require.NoError(t, err)
-		}
-	})
-
 	// Build pinniped CLI.
 	pinnipedExe, cleanupFunc := buildPinnipedCLI(t)
 	defer cleanupFunc()

 	// Run pinniped CLI to get kubeconfig.
-	kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, env.TestUser.Token, env.Namespace, "webhook", idp.Name)
+	kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, env.TestUser.Token, env.ConciergeNamespace, "webhook", idp.Name)

 	// In addition to the client-go based testing below, also try the kubeconfig
 	// with kubectl to validate that it works.
 	adminClient := library.NewClientset(t)
 	t.Run(
 		"access as user with kubectl",
-		library.AccessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, env.TestUser.ExpectedUsername, env.Namespace),
+		library.AccessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, env.TestUser.ExpectedUsername, env.ConciergeNamespace),
 	)
 	for _, group := range env.TestUser.ExpectedGroups {
 		group := group
 		t.Run(
 			"access as group "+group+" with kubectl",
-			library.AccessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, env.Namespace),
+			library.AccessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, env.ConciergeNamespace),
 		)
 	}
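Note that the save/unset/restore block above is deleted outright rather than renamed: the old test variables shared exact names with env vars consumed by the Pinniped CLI's kubectl exec plugin (the clash the deleted comment describes), and the `PINNIPED_TEST_` prefix removes that clash. For reference, a self-contained sketch of the deleted pattern, still handy when a test must shield a subprocess from ambient configuration (the helper name is invented; imports are os, strings, and testify's require, as in the file above):

```go
// saveAndUnsetEnv snapshots and unsets every env var matching the given
// prefix, restoring the originals when the test finishes.
func saveAndUnsetEnv(t *testing.T, prefix string) {
	t.Helper()
	saved := map[string]string{}
	for _, kv := range os.Environ() {
		pair := strings.SplitN(kv, "=", 2)
		if strings.HasPrefix(pair[0], prefix) {
			saved[pair[0]] = pair[1]
			require.NoError(t, os.Unsetenv(pair[0]))
		}
	}
	t.Cleanup(func() { // put them back for other tests in this process
		for name, value := range saved {
			require.NoError(t, os.Setenv(name, value))
		}
	})
}
```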
@@ -72,7 +72,7 @@ func TestClient(t *testing.T) {

 	var resp *clientauthenticationv1beta1.ExecCredential
 	assert.Eventually(t, func() bool {
-		resp, err = client.ExchangeToken(ctx, env.Namespace, idp, env.TestUser.Token, string(clientConfig.CAData), clientConfig.Host)
+		resp, err = client.ExchangeToken(ctx, env.ConciergeNamespace, idp, env.TestUser.Token, string(clientConfig.CAData), clientConfig.Host)
 		return err == nil
 	}, 10*time.Second, 500*time.Millisecond)
 	require.NoError(t, err)
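The wrapper above is testify's standard polling idiom: assert.Eventually re-runs the condition every 500ms for up to 10s, and the trailing require.NoError surfaces the last error if the exchange never succeeded. A runnable illustration of the same idiom, independent of any cluster:

```go
package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// Sketch: the condition is retried every 500ms until it returns true or
// the 10s budget is exhausted; here it flips to true after roughly 1s.
func TestEventuallySketch(t *testing.T) {
	start := time.Now()
	assert.Eventually(t, func() bool {
		return time.Since(start) > time.Second
	}, 10*time.Second, 500*time.Millisecond)
}
```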
@@ -83,7 +83,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 	const apiServiceName = "v1alpha1.login.pinniped.dev"

 	// Get the initial auto-generated version of the Secret.
-	secret, err := kubeClient.CoreV1().Secrets(env.Namespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
+	secret, err := kubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
 	require.NoError(t, err)
 	initialCACert := secret.Data["caCertificate"]
 	initialPrivateKey := secret.Data["tlsPrivateKey"]
@@ -98,11 +98,11 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 	require.Equal(t, initialCACert, apiService.Spec.CABundle)

 	// Force rotation to happen.
-	require.NoError(t, test.forceRotation(ctx, kubeClient, env.Namespace))
+	require.NoError(t, test.forceRotation(ctx, kubeClient, env.ConciergeNamespace))

 	// Expect that the Secret comes back right away with newly minted certs.
 	secretIsRegenerated := func() bool {
-		secret, err = kubeClient.CoreV1().Secrets(env.Namespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
+		secret, err = kubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, defaultServingCertResourceName, metav1.GetOptions{})
 		return err == nil
 	}
 	assert.Eventually(t, secretIsRegenerated, 10*time.Second, 250*time.Millisecond)
@@ -133,7 +133,7 @@ func TestAPIServingCertificateAutoCreationAndRotation(t *testing.T) {
 	// pod has rotated their cert, but not the other ones sitting behind the service.
 	aggregatedAPIWorking := func() bool {
 		for i := 0; i < 10; i++ {
-			_, err = pinnipedClient.LoginV1alpha1().TokenCredentialRequests(env.Namespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
+			_, err = pinnipedClient.LoginV1alpha1().TokenCredentialRequests(env.ConciergeNamespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
 				TypeMeta:   metav1.TypeMeta{},
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec:       loginv1alpha1.TokenCredentialRequestSpec{Token: "not a good token"},
@@ -23,7 +23,7 @@ func TestGetDeployment(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()

-	appDeployment, err := client.AppsV1().Deployments(env.Namespace).Get(ctx, env.AppName, metav1.GetOptions{})
+	appDeployment, err := client.AppsV1().Deployments(env.ConciergeNamespace).Get(ctx, env.ConciergeAppName, metav1.GetOptions{})
 	require.NoError(t, err)

 	cond := getDeploymentCondition(appDeployment.Status, appsv1.DeploymentAvailable)
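getDeploymentCondition is referenced above but defined outside the hunk; a plausible shape, matching the helper kubectl's deployment utilities use for the same job (an assumption, not taken from this diff; appsv1 is k8s.io/api/apps/v1):

```go
// getDeploymentCondition returns the condition with the requested type
// from the deployment's status, or nil if no such condition is present.
func getDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}
```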
@@ -27,7 +27,7 @@ func TestCredentialIssuerConfig(t *testing.T) {
 	t.Run("test successful CredentialIssuerConfig", func(t *testing.T) {
 		actualConfigList, err := client.
 			ConfigV1alpha1().
-			CredentialIssuerConfigs(env.Namespace).
+			CredentialIssuerConfigs(env.ConciergeNamespace).
 			List(ctx, metav1.ListOptions{})
 		require.NoError(t, err)
@@ -145,9 +145,9 @@ func makeRequest(ctx context.Context, t *testing.T, spec loginv1alpha1.TokenCred
 	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
 	defer cancel()

-	return client.LoginV1alpha1().TokenCredentialRequests(env.Namespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
+	return client.LoginV1alpha1().TokenCredentialRequests(env.ConciergeNamespace).Create(ctx, &loginv1alpha1.TokenCredentialRequest{
 		TypeMeta:   metav1.TypeMeta{},
-		ObjectMeta: metav1.ObjectMeta{Namespace: env.Namespace},
+		ObjectMeta: metav1.ObjectMeta{Namespace: env.ConciergeNamespace},
 		Spec:       spec,
 	}, metav1.CreateOptions{})
 }
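For context, a hedged sketch of calling the makeRequest helper above with a deliberately bad token, mirroring the "not a good token" request in the serving-cert hunk; it assumes the API reports authentication failure through the returned object's status rather than through an error:

```go
func TestMakeRequestSketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// A bad token should yield a response without a usable credential.
	resp, err := makeRequest(ctx, t, loginv1alpha1.TokenCredentialRequestSpec{Token: "not a good token"})
	require.NoError(t, err)
	require.Nil(t, resp.Status.Credential)
}
```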
@@ -37,7 +37,7 @@ func TestKubeCertAgent(t *testing.T) {
 	// We can pretty safely assert there should be more than 1, since there should be a
 	// kube-cert-agent pod per kube-controller-manager pod, and there should probably be at least
 	// 1 kube-controller-manager for this to be a working kube API.
-	originalAgentPods, err := kubeClient.CoreV1().Pods(env.Namespace).List(ctx, metav1.ListOptions{
+	originalAgentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
 		LabelSelector: kubeCertAgentLabelSelector,
 	})
 	require.NoError(t, err)
@@ -46,7 +46,7 @@ func TestKubeCertAgent(t *testing.T) {

 	agentPodsReconciled := func() bool {
 		var currentAgentPods *corev1.PodList
-		currentAgentPods, err = kubeClient.CoreV1().Pods(env.Namespace).List(ctx, metav1.ListOptions{
+		currentAgentPods, err = kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
 			LabelSelector: kubeCertAgentLabelSelector,
 		})
@@ -90,7 +90,7 @@ func TestKubeCertAgent(t *testing.T) {
 		updatedAgentPod.Spec.Tolerations,
 		corev1.Toleration{Key: "fake-toleration"},
 	)
-	_, err = kubeClient.CoreV1().Pods(env.Namespace).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
+	_, err = kubeClient.CoreV1().Pods(env.ConciergeNamespace).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
 	require.NoError(t, err)

 	// Make sure the original pods come back.
@@ -102,7 +102,7 @@ func TestKubeCertAgent(t *testing.T) {
 	// Delete the first pod. The controller should see it, and flip it back.
 	err = kubeClient.
 		CoreV1().
-		Pods(env.Namespace).
+		Pods(env.ConciergeNamespace).
 		Delete(ctx, originalAgentPods.Items[0].Name, metav1.DeleteOptions{})
 	require.NoError(t, err)
@@ -113,7 +113,7 @@ func newAnonymousClientRestConfigWithCertAndKeyAdded(t *testing.T, clientCertifi
 	return config
 }

-// CreateTestWebhookIDP creates and returns a test WebhookIdentityProvider in $PINNIPED_NAMESPACE, which will be
+// CreateTestWebhookIDP creates and returns a test WebhookIdentityProvider in $PINNIPED_TEST_CONCIERGE_NAMESPACE, which will be
 // automatically deleted at the end of the current test's lifetime. It returns a corev1.TypedLocalObjectReference which
 // describes the test IDP within the test namespace.
 func CreateTestWebhookIDP(ctx context.Context, t *testing.T) corev1.TypedLocalObjectReference {
@@ -121,7 +121,7 @@ func CreateTestWebhookIDP(ctx context.Context, t *testing.T) corev1.TypedLocalOb
 	testEnv := IntegrationEnv(t)

 	client := NewPinnipedClientset(t)
-	webhooks := client.IDPV1alpha1().WebhookIdentityProviders(testEnv.Namespace)
+	webhooks := client.IDPV1alpha1().WebhookIdentityProviders(testEnv.ConciergeNamespace)

 	createContext, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()
@@ -25,14 +25,15 @@ const (
 type TestEnv struct {
 	t *testing.T

-	Namespace           string                                  `json:"namespace"`
+	ConciergeNamespace  string                                  `json:"conciergeNamespace"`
 	SupervisorNamespace string                                  `json:"supervisorNamespace"`
-	AppName             string                                  `json:"appName"`
+	ConciergeAppName    string                                  `json:"conciergeAppName"`
 	SupervisorAppName   string                                  `json:"supervisorAppName"`
 	Capabilities        map[TestClusterCapability]bool          `json:"capabilities"`
 	TestWebhook         idpv1alpha1.WebhookIdentityProviderSpec `json:"testWebhook"`
 	SupervisorAddress   string                                  `json:"supervisorAddress"`
-	TestUser            struct {
+
+	TestUser struct {
 		Token            string   `json:"token"`
 		ExpectedUsername string   `json:"expectedUsername"`
 		ExpectedGroups   []string `json:"expectedGroups"`
@@ -45,11 +46,11 @@ func IntegrationEnv(t *testing.T) *TestEnv {
 	t.Helper()
 	SkipUnlessIntegration(t)

-	capabilitiesDescriptionYAML := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_YAML")
-	capabilitiesDescriptionFile := os.Getenv("PINNIPED_CLUSTER_CAPABILITY_FILE")
+	capabilitiesDescriptionYAML := os.Getenv("PINNIPED_TEST_CLUSTER_CAPABILITY_YAML")
+	capabilitiesDescriptionFile := os.Getenv("PINNIPED_TEST_CLUSTER_CAPABILITY_FILE")
 	require.NotEmptyf(t,
 		capabilitiesDescriptionYAML+capabilitiesDescriptionFile,
-		"must specify either PINNIPED_CLUSTER_CAPABILITY_YAML or PINNIPED_CLUSTER_CAPABILITY_FILE env var for integration tests",
+		"must specify either PINNIPED_TEST_CLUSTER_CAPABILITY_YAML or PINNIPED_TEST_CLUSTER_CAPABILITY_FILE env var for integration tests",
 	)
 	if capabilitiesDescriptionYAML == "" {
 		bytes, err := ioutil.ReadFile(capabilitiesDescriptionFile)
@@ -68,14 +69,14 @@ func IntegrationEnv(t *testing.T) *TestEnv {
 		return value
 	}

-	result.Namespace = needEnv("PINNIPED_NAMESPACE")
-	result.AppName = needEnv("PINNIPED_APP_NAME")
+	result.ConciergeNamespace = needEnv("PINNIPED_TEST_CONCIERGE_NAMESPACE")
+	result.ConciergeAppName = needEnv("PINNIPED_TEST_CONCIERGE_APP_NAME")
 	result.TestUser.ExpectedUsername = needEnv("PINNIPED_TEST_USER_USERNAME")
 	result.TestUser.ExpectedGroups = strings.Split(strings.ReplaceAll(needEnv("PINNIPED_TEST_USER_GROUPS"), " ", ""), ",")
 	result.TestUser.Token = needEnv("PINNIPED_TEST_USER_TOKEN")
 	result.TestWebhook.Endpoint = needEnv("PINNIPED_TEST_WEBHOOK_ENDPOINT")
-	result.SupervisorNamespace = needEnv("PINNIPED_SUPERVISOR_NAMESPACE")
-	result.SupervisorAppName = needEnv("PINNIPED_SUPERVISOR_APP_NAME")
+	result.SupervisorNamespace = needEnv("PINNIPED_TEST_SUPERVISOR_NAMESPACE")
+	result.SupervisorAppName = needEnv("PINNIPED_TEST_SUPERVISOR_APP_NAME")
 	result.SupervisorAddress = needEnv("PINNIPED_TEST_SUPERVISOR_ADDRESS")
 	result.TestWebhook.TLS = &idpv1alpha1.TLSSpec{CertificateAuthorityData: needEnv("PINNIPED_TEST_WEBHOOK_CA_BUNDLE")}
 	result.t = t
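Only the closing `return value` of the needEnv closure falls inside the hunk; a plausible reconstruction, consistent with the require.NotEmptyf style used for the capability variables above (an assumption, not taken from this diff):

```go
// needEnv fails the test immediately when a required variable is unset,
// rather than letting tests proceed with empty configuration.
needEnv := func(name string) string {
	value := os.Getenv(name)
	require.NotEmptyf(t, value, "must specify %s env var for integration tests", name)
	return value
}
```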