Merge pull request #808 from enj/enj/t/integration_parallel
test/integration: run parallel tests concurrently with serial tests
Commit: d22099ac33
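The change keys off test names: a custom TestMain (added below in test/integration/main_test.go) splits the top-level integration tests into a serial bucket and a parallel bucket based on a _Parallel name suffix, and the two buckets then run concurrently with each other. A minimal sketch of the convention, using hypothetical test names:

	// TestSomething_Parallel lands in the parallel bucket because of its name suffix.
	// It may run at the same time as the serial tests, so it must not mutate state
	// that other tests depend on.
	func TestSomething_Parallel(t *testing.T) {
		_ = testlib.IntegrationEnv(t) // also skips the test under `go test -short`
		// ... test body ...
	}

	// TestSomethingElse has no suffix, so it lands in the serial bucket and never
	// overlaps with the other serial tests.
	func TestSomethingElse(t *testing.T) {
		_ = testlib.IntegrationEnv(t)
		// ... test body ...
	}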
@@ -20,7 +20,7 @@ import (
 )
 
 func TestExampleController(t *testing.T) {
-	testlib.SkipUnlessIntegration(t)
+	_ = testlib.IntegrationEnv(t)
 
 	config := testlib.NewClientConfig(t)
 
@@ -37,7 +37,8 @@ import (
 	"go.pinniped.dev/test/testlib/browsertest"
 )
 
-func TestCLIGetKubeconfigStaticToken(t *testing.T) {
+// safe to run in parallel with serial tests since it only interacts with a test local webhook, see main_test.go.
+func TestCLIGetKubeconfigStaticToken_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
 
 	// Create a test webhook configuration to use with the CLI.
@@ -22,7 +22,8 @@ import (
 	"go.pinniped.dev/test/testlib"
 )
 
-func TestUnsuccessfulCredentialRequest(t *testing.T) {
+// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestUnsuccessfulCredentialRequest_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t).WithCapability(testlib.AnonymousAuthenticationSupported)
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -44,7 +45,8 @@ func TestUnsuccessfulCredentialRequest(t *testing.T) {
 	require.Equal(t, "authentication failed", *response.Status.Message)
 }
 
-func TestSuccessfulCredentialRequest(t *testing.T) {
+// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestSuccessfulCredentialRequest_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
@@ -129,7 +131,8 @@ func TestSuccessfulCredentialRequest(t *testing.T) {
 	}
 }
 
-func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser(t *testing.T) {
+// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthenticateTheUser_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
 
 	// Create a testWebhook so we have a legitimate authenticator to pass to the
@@ -149,7 +152,8 @@ func TestFailedCredentialRequestWhenTheRequestIsValidButTheTokenDoesNotAuthentic
 	require.Equal(t, pointer.StringPtr("authentication failed"), response.Status.Message)
 }
 
-func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken(t *testing.T) {
+// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestCredentialRequest_ShouldFailWhenRequestDoesNotIncludeToken_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
 
 	// Create a testWebhook so we have a legitimate authenticator to pass to the
@@ -93,7 +93,8 @@ func findSuccessfulStrategy(credentialIssuer *conciergev1alpha.CredentialIssuer,
 	return nil
 }
 
-func TestLegacyPodCleaner(t *testing.T) {
+// safe to run in parallel with serial tests since it only interacts with a test local pod, see main_test.go.
+func TestLegacyPodCleaner_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
 	defer cancel()
@@ -15,7 +15,7 @@ import (
 
 // Smoke test to see if the kubeconfig works and the cluster is reachable.
 func TestGetNodes(t *testing.T) {
-	testlib.SkipUnlessIntegration(t)
+	_ = testlib.IntegrationEnv(t)
 	cmd := exec.Command("kubectl", "get", "nodes")
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
@@ -24,7 +24,10 @@ import (
 	"go.pinniped.dev/test/testlib/browsertest"
 )
 
-func TestFormPostHTML(t *testing.T) {
+// safe to run in parallel with serial tests since it only interacts with a test local server, see main_test.go.
+func TestFormPostHTML_Parallel(t *testing.T) {
+	_ = testlib.IntegrationEnv(t)
+
 	// Run a mock callback handler, simulating the one running in the CLI.
 	callbackURL, expectCallback := formpostCallbackServer(t)
 
@@ -24,7 +24,8 @@ import (
 	"go.pinniped.dev/test/testlib"
 )
 
-func TestLDAPSearch(t *testing.T) {
+// safe to run in parallel with serial tests since it only makes read requests to our test LDAP server, see main_test.go.
+func TestLDAPSearch_Parallel(t *testing.T) {
 	// This test does not interact with Kubernetes itself. It is a test of our LDAP client code, and only interacts
 	// with our test OpenLDAP server, which is exposed directly to this test via kubectl port-forward.
 	// Theoretically we should always be able to run this test, but something about the kubectl port forwarding
@@ -27,12 +27,11 @@ import (
 	"go.pinniped.dev/test/testlib"
 )
 
-func TestLeaderElection(t *testing.T) {
+// safe to run in parallel with serial tests since it only interacts with a test local lease, see main_test.go.
+func TestLeaderElection_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t)
 
-	t.Parallel()
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
 	t.Cleanup(cancel)
 
 	leaseName := "leader-election-" + rand.String(5)
@@ -198,14 +197,17 @@ func waitForIdentity(ctx context.Context, t *testing.T, namespace *corev1.Namesp
 	testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
 		lease, err := pickRandomLeaderElectionClient(clients).Kubernetes.CoordinationV1().Leases(namespace.Name).Get(ctx, leaseName, metav1.GetOptions{})
 		if apierrors.IsNotFound(err) {
+			t.Logf("lease %s/%s does not exist", namespace.Name, leaseName)
 			return false, nil
 		}
 		if err != nil {
 			return false, err
 		}
 		out = lease
+		t.Logf("lease %s/%s - current leader identity: %s, valid leader identities: %s",
+			namespace.Name, leaseName, pointer.StringDeref(lease.Spec.HolderIdentity, "<nil>"), identities.List())
 		return lease.Spec.HolderIdentity != nil && identities.Has(*lease.Spec.HolderIdentity), nil
-	}, 5*time.Minute, time.Second)
+	}, 10*time.Minute, 10*time.Second)
 
 	return out
 }
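testlib.RequireEventuallyWithoutError polls the given condition function at the interval until the timeout (loosened here to 10 minutes at 10-second intervals so slower clusters can converge). Roughly equivalent standalone logic, sketched with k8s.io/apimachinery's wait package and assuming client, ctx, ns, and leaseName are in scope:

	// Poll every 10s for up to 10m until some client holds the lease.
	err := wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {
		lease, err := client.CoordinationV1().Leases(ns).Get(ctx, leaseName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // lease not created yet; keep polling
		}
		if err != nil {
			return false, err // any other error aborts the poll
		}
		return lease.Spec.HolderIdentity != nil, nil // done once a leader exists
	})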
@@ -257,7 +259,7 @@ func checkOnlyLeaderCanWrite(ctx context.Context, t *testing.T, namespace *corev
 		}
 		requireEventually.Equal(1, leaders, "did not see leader")
 		requireEventually.Equal(len(clients)-1, nonLeaders, "did not see non-leader")
-	}, time.Minute, time.Second)
+	}, 3*time.Minute, 3*time.Second)
 
 	return lease
 }
@@ -274,7 +276,7 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
 	startTime = *startLease.Spec.AcquireTime
 
 	startLease = startLease.DeepCopy()
-	startLease.Spec.HolderIdentity = pointer.String("some-other-client" + rand.String(5))
+	startLease.Spec.HolderIdentity = pointer.String("some-other-client-" + rand.String(5))
 
 	_, err := pickCurrentLeaderClient(ctx, t, namespace, leaseName, clients).
 		Kubernetes.CoordinationV1().Leases(namespace.Name).Update(ctx, startLease, metav1.UpdateOptions{})
@@ -289,8 +291,6 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
 	require.Greater(t, finalTransitions, startTransitions)
 	require.Greater(t, finalTime.UnixNano(), startTime.UnixNano())
 
-	time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
-
 	return finalLease
 }
 
@@ -307,8 +307,6 @@ func forceRestart(ctx context.Context, t *testing.T, namespace *corev1.Namespace
 	require.Zero(t, *newLease.Spec.LeaseTransitions)
 	require.Greater(t, newLease.Spec.AcquireTime.UnixNano(), startLease.Spec.AcquireTime.UnixNano())
 
-	time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
-
 	return newLease
 }
 
test/integration/main_test.go (new file, 91 lines)
@@ -0,0 +1,91 @@
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package integration
+
+import (
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+	"unsafe"
+
+	"go.pinniped.dev/test/testlib"
+)
+
+func TestMain(m *testing.M) {
+	splitIntegrationTestsIntoBuckets(m)
+	os.Exit(m.Run())
+}
+
+func splitIntegrationTestsIntoBuckets(m *testing.M) {
+	// this is some dark magic to set a private field
+	testsField := reflect.ValueOf(m).Elem().FieldByName("tests")
+	testsPointer := (*[]testing.InternalTest)(unsafe.Pointer(testsField.UnsafeAddr()))
+
+	tests := *testsPointer
+
+	if len(tests) == 0 {
+		return
+	}
+
+	var serialTests, parallelTests, finalTests []testing.InternalTest
+
+	for _, test := range tests {
+		test := test
+
+		// top level integration tests that end with the string _Parallel
+		// indicate that they are safe to run in parallel with
+		// other serial tests (which Go does not let you easily express).
+		// top level tests that want the standard Go behavior of only running
+		// parallel tests with other parallel tests should use the regular
+		// t.Parallel() approach. this has no effect on any subtest.
+		if strings.HasSuffix(test.Name, "_Parallel") {
+			parallelTests = append(parallelTests, test)
+		} else {
+			serialTests = append(serialTests, test)
+		}
+	}
+
+	serialTest := testing.InternalTest{
+		Name: "TestIntegrationSerial",
+		F: func(t *testing.T) {
+			_ = testlib.IntegrationEnv(t) // make sure these tests do not run during unit tests
+			t.Parallel()                  // outer test runs in parallel always
+
+			for _, test := range serialTests {
+				test := test
+				t.Run(test.Name, func(t *testing.T) {
+					test.F(t) // inner serial tests do not run in parallel
+				})
+			}
+		},
+	}
+
+	parallelTest := testing.InternalTest{
+		Name: "TestIntegrationParallel",
+		F: func(t *testing.T) {
+			_ = testlib.IntegrationEnv(t) // make sure these tests do not run during unit tests
+			t.Parallel()                  // outer test runs in parallel always
+
+			for _, test := range parallelTests {
+				test := test
+				t.Run(test.Name, func(t *testing.T) {
+					t.Parallel() // inner parallel tests do run in parallel
+
+					test.F(t)
+				})
+			}
+		},
+	}
+
+	if len(serialTests) > 0 {
+		finalTests = append(finalTests, serialTest)
+	}
+
+	if len(parallelTests) > 0 {
+		finalTests = append(finalTests, parallelTest)
+	}
+
+	*testsPointer = finalTests
+}
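The tests field is unexported on testing.M, so reflect alone cannot assign to it (Value.Set panics on values reached through unexported fields); taking the field's address and writing through a typed pointer via unsafe sidesteps that restriction. A self-contained sketch of the same pattern, using a local struct as a stand-in for testing.M:

	package main

	import (
		"fmt"
		"reflect"
		"unsafe"
	)

	// fakeM stands in for testing.M, whose tests field lives in package
	// testing and is therefore inaccessible to normal code.
	type fakeM struct {
		tests []string
	}

	func main() {
		m := &fakeM{tests: []string{"TestSerial", "TestSomething_Parallel"}}

		// reflect can locate the unexported field, but field.Set(...) would panic here...
		field := reflect.ValueOf(m).Elem().FieldByName("tests")

		// ...so take its address and write through a typed pointer instead.
		ptr := (*[]string)(unsafe.Pointer(field.UnsafeAddr()))
		*ptr = []string{"TestSomething_Parallel"}

		fmt.Println(m.tests) // [TestSomething_Parallel]
	}

This relies on testing.M keeping an unexported field literally named "tests" of type []testing.InternalTest; if a future Go release renames it, FieldByName returns the zero Value and UnsafeAddr panics rather than failing silently.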
@@ -18,7 +18,8 @@ import (
 	"go.pinniped.dev/test/testlib"
 )
 
-func TestSupervisorSecrets(t *testing.T) {
+// safe to run in parallel with serial tests since it only interacts with a test local federation domain, see main_test.go.
+func TestSupervisorSecrets_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t)
 	kubeClient := testlib.NewKubernetesClientset(t)
 	supervisorClient := testlib.NewSupervisorClientset(t)
@@ -19,11 +19,8 @@ import (
 	"go.pinniped.dev/test/testlib"
 )
 
-func TestStorageGarbageCollection(t *testing.T) {
-	// Run this test in parallel with the other integration tests because it does a lot of waiting
-	// and will not impact other tests, or be impacted by other tests, when run in parallel.
-	t.Parallel()
-
+// safe to run in parallel with serial tests since it only interacts with test local secrets, see main_test.go.
+func TestStorageGarbageCollection_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t)
 	client := testlib.NewKubernetesClientset(t)
 	secrets := client.CoreV1().Secrets(env.SupervisorNamespace)
@@ -29,7 +29,8 @@ import (
 	"go.pinniped.dev/test/testlib"
 )
 
-func TestWhoAmI_Kubeadm(t *testing.T) {
+// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestWhoAmI_Kubeadm_Parallel(t *testing.T) {
 	// use the cluster signing key being available as a proxy for this being a kubeadm cluster
 	// we should add more robust logic around skipping clusters based on vendor
 	_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
@@ -60,7 +61,8 @@ func TestWhoAmI_Kubeadm(t *testing.T) {
 	)
 }
 
-func TestWhoAmI_ServiceAccount_Legacy(t *testing.T) {
+// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestWhoAmI_ServiceAccount_Legacy_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -133,7 +135,8 @@ func TestWhoAmI_ServiceAccount_Legacy(t *testing.T) {
 	)
 }
 
-func TestWhoAmI_ServiceAccount_TokenRequest(t *testing.T) {
+// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestWhoAmI_ServiceAccount_TokenRequest_Parallel(t *testing.T) {
 	env := testlib.IntegrationEnv(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -242,7 +245,8 @@ func TestWhoAmI_ServiceAccount_TokenRequest(t *testing.T) {
 	)
 }
 
-func TestWhoAmI_CSR(t *testing.T) {
+// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestWhoAmI_CSR_Parallel(t *testing.T) {
 	// use the cluster signing key being available as a proxy for this not being an EKS cluster
 	// we should add more robust logic around skipping clusters based on vendor
 	_ = testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
@@ -330,7 +334,8 @@ func TestWhoAmI_CSR(t *testing.T) {
 	)
 }
 
-func TestWhoAmI_Anonymous(t *testing.T) {
+// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestWhoAmI_Anonymous_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t).WithCapability(testlib.AnonymousAuthenticationSupported)
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -360,7 +365,8 @@ func TestWhoAmI_Anonymous(t *testing.T) {
 	)
 }
 
-func TestWhoAmI_ImpersonateDirectly(t *testing.T) {
+// whoami requests are non-mutating and safe to run in parallel with serial tests, see main_test.go.
+func TestWhoAmI_ImpersonateDirectly_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
@@ -120,7 +120,7 @@ func IntegrationEnv(t *testing.T) *TestEnv {
 	}
 
 	t.Helper()
-	SkipUnlessIntegration(t)
+	skipUnlessIntegration(t)
 
 	capabilitiesDescriptionYAML := os.Getenv("PINNIPED_TEST_CLUSTER_CAPABILITY_YAML")
 	capabilitiesDescriptionFile := os.Getenv("PINNIPED_TEST_CLUSTER_CAPABILITY_FILE")
@@ -5,8 +5,8 @@ package testlib
 
 import "testing"
 
-// SkipUnlessIntegration skips the current test if `-short` has been passed to `go test`.
-func SkipUnlessIntegration(t *testing.T) {
+// skipUnlessIntegration skips the current test if `-short` has been passed to `go test`.
+func skipUnlessIntegration(t *testing.T) {
 	t.Helper()
 	if testing.Short() {
 		t.Skip("skipping integration test because of '-short' flag")
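With the helper unexported, code outside testlib now reaches the skip behavior through testlib.IntegrationEnv(t), as the hunks above show. The net effect is that `go test -short ./test/integration/...` still skips every integration test, while a plain `go test` run executes the serial bucket and the parallel bucket concurrently with each other.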
|
Loading…
Reference in New Issue
Block a user