From 45f57939af00be6e6d32331b1c854b13a04ee4b0 Mon Sep 17 00:00:00 2001
From: Matt Moyer
Date: Tue, 2 Mar 2021 14:17:27 -0600
Subject: [PATCH 1/2] Make TestGetPinnipedCategory more resilient.

If the test is run immediately after the Concierge is installed, the API
server can still have broken discovery data and return an error on the
first call. This commit adds a retry loop to attempt this first kubectl
command for up to 60s before declaring failure. The subsequent tests
should be covered by this as well since they are not run in parallel.

Signed-off-by: Matt Moyer
---
 test/integration/category_test.go | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/test/integration/category_test.go b/test/integration/category_test.go
index e8edc7bd..42d44582 100644
--- a/test/integration/category_test.go
+++ b/test/integration/category_test.go
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"os/exec"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/require"
 
@@ -20,13 +21,21 @@ func TestGetPinnipedCategory(t *testing.T) {
 	t.Run("category, no special params", func(t *testing.T) {
 		var stdOut, stdErr bytes.Buffer
-		cmd := exec.Command("kubectl", "get", "pinniped", "-A")
-		cmd.Stdout = &stdOut
-		cmd.Stderr = &stdErr
-		err := cmd.Run()
-		require.NoError(t, err, stdErr.String(), stdOut.String())
+		var err error
+		require.Eventuallyf(t, func() bool {
+			cmd := exec.Command("kubectl", "get", "pinniped", "-A")
+			cmd.Stdout = &stdOut
+			cmd.Stderr = &stdErr
+			err = cmd.Run()
+			return err == nil
+		},
+			60*time.Second,
+			1*time.Second,
+			"never ran 'kubectl get pinniped -A' successfully:\n%s\n\n%s",
+			stdErr.String(),
+			stdOut.String(),
+		)
 		require.Empty(t, stdErr.String())
-		require.NotContains(t, stdOut.String(), "MethodNotAllowed")
 		require.Contains(t, stdOut.String(), dotSuffix)
 	})

From df27c2e1fca30972457d7bd4c97d9261ab023f6b Mon Sep 17 00:00:00 2001
From: Matt Moyer
Date: Tue, 2 Mar 2021 15:41:21 -0600
Subject: [PATCH 2/2] Use randomly generated API groups in TestKubeClientOwnerRef.

I think this is another aspect of the test flakes we're trying to fix.
This matters especially for the "Multiple Pinnipeds" test environment
where two copies of the test suite are running concurrently.

Signed-off-by: Matt Moyer
---
 test/integration/kubeclient_test.go | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/test/integration/kubeclient_test.go b/test/integration/kubeclient_test.go
index 6cdaa56d..e5f759c5 100644
--- a/test/integration/kubeclient_test.go
+++ b/test/integration/kubeclient_test.go
@@ -5,6 +5,7 @@ package integration
 import (
 	"context"
+	"fmt"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
@@ -75,15 +76,18 @@ func TestKubeClientOwnerRef(t *testing.T) {
 		UID:  parentSecret.UID,
 	}
 
+	snorlaxAPIGroup := fmt.Sprintf("%s.snorlax.dev", library.RandHex(t, 8))
 	parentAPIService, err := regularAggregationClient.ApiregistrationV1().APIServices().Create(
 		ctx,
 		&apiregistrationv1.APIService{
 			ObjectMeta: metav1.ObjectMeta{
-				Name: "v1.snorlax.dev",
+				Name: "v1." + snorlaxAPIGroup,
+				Labels: map[string]string{"pinniped.dev/test": ""},
+				Annotations: map[string]string{"pinniped.dev/testName": t.Name()},
 			},
 			Spec: apiregistrationv1.APIServiceSpec{
 				Version: "v1",
-				Group: "snorlax.dev",
+				Group: snorlaxAPIGroup,
 				GroupPriorityMinimum: 10_000,
 				VersionPriority: 500,
 			},
@@ -183,16 +187,19 @@
 	})
 
 	// cluster scoped API service should be owned by the other one we created above
+	pandasAPIGroup := fmt.Sprintf("%s.pandas.dev", library.RandHex(t, 8))
 	apiService, err := ownerRefClient.Aggregation.ApiregistrationV1().APIServices().Create(
 		ctx,
 		&apiregistrationv1.APIService{
 			ObjectMeta: metav1.ObjectMeta{
-				Name: "v1.pandas.dev",
+				Name: "v1." + pandasAPIGroup,
 				OwnerReferences: nil, // no owner refs set
+				Labels: map[string]string{"pinniped.dev/test": ""},
+				Annotations: map[string]string{"pinniped.dev/testName": t.Name()},
 			},
 			Spec: apiregistrationv1.APIServiceSpec{
 				Version: "v1",
-				Group: "pandas.dev",
+				Group: pandasAPIGroup,
 				GroupPriorityMinimum: 10_000,
 				VersionPriority: 500,
			},
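Note (not part of the patches above): the second commit relies on library.RandHex(t, 8) from Pinniped's test library to build the random API group names, but that helper is not shown in these diffs. A minimal sketch of what such a helper typically looks like, assuming crypto/rand and testify (the real Pinniped helper may differ in details), is:

package library

import (
	"crypto/rand"
	"encoding/hex"
	"testing"

	"github.com/stretchr/testify/require"
)

// RandHex returns a hex string derived from numBytes of secure random data
// (so 2*numBytes hex characters), failing the test if the random source
// cannot be read. This is an illustrative sketch based on the call sites
// above, not necessarily the actual Pinniped implementation.
func RandHex(t *testing.T, numBytes int) string {
	buf := make([]byte, numBytes)
	_, err := rand.Read(buf)
	require.NoError(t, err)
	return hex.EncodeToString(buf)
}

With that shape, library.RandHex(t, 8) yields a 16-character hex string, so the generated groups look like "1a2b3c4d5e6f7a8b.snorlax.dev" (hypothetical example) and are very unlikely to collide when two copies of the test suite run against the same cluster at once.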