From e2cf9f6b74f4db9336789158aa7340a4cf4de8dd Mon Sep 17 00:00:00 2001
From: Monis Khan
Date: Thu, 26 Aug 2021 08:39:44 -0400
Subject: [PATCH] leader election test: approximate that followers have
 observed change

Instead of blindly waiting long enough for a disruptive change to have
been observed by the old leader and followers, we instead rely on the
approximation that checkOnlyLeaderCanWrite provides - i.e. only a
single actor believes they are the leader.  This does not account for
clients that were in the followers list before and after the
disruptive change, but it serves as a reasonable approximation.

Signed-off-by: Monis Khan
---
 test/integration/leaderelection_test.go | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/test/integration/leaderelection_test.go b/test/integration/leaderelection_test.go
index 2e1fe044..d9da6c57 100644
--- a/test/integration/leaderelection_test.go
+++ b/test/integration/leaderelection_test.go
@@ -31,7 +31,7 @@ import (
 func TestLeaderElection_Parallel(t *testing.T) {
 	_ = testlib.IntegrationEnv(t)
 
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
 	t.Cleanup(cancel)
 
 	leaseName := "leader-election-" + rand.String(5)
@@ -197,14 +197,17 @@ func waitForIdentity(ctx context.Context, t *testing.T, namespace *corev1.Namesp
 	testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
 		lease, err := pickRandomLeaderElectionClient(clients).Kubernetes.CoordinationV1().Leases(namespace.Name).Get(ctx, leaseName, metav1.GetOptions{})
 		if apierrors.IsNotFound(err) {
+			t.Logf("lease %s/%s does not exist", namespace.Name, leaseName)
 			return false, nil
 		}
 		if err != nil {
 			return false, err
 		}
 		out = lease
+		t.Logf("lease %s/%s - current leader identity: %s, valid leader identities: %s",
+			namespace.Name, leaseName, pointer.StringDeref(lease.Spec.HolderIdentity, ""), identities.List())
 		return lease.Spec.HolderIdentity != nil && identities.Has(*lease.Spec.HolderIdentity), nil
-	}, 5*time.Minute, time.Second)
+	}, 10*time.Minute, 10*time.Second)
 
 	return out
 }
@@ -256,7 +259,7 @@ func checkOnlyLeaderCanWrite(ctx context.Context, t *testing.T, namespace *corev
 		}
 		requireEventually.Equal(1, leaders, "did not see leader")
 		requireEventually.Equal(len(clients)-1, nonLeaders, "did not see non-leader")
-	}, time.Minute, time.Second)
+	}, 3*time.Minute, 3*time.Second)
 
 	return lease
 }
@@ -273,7 +276,7 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
 		startTime = *startLease.Spec.AcquireTime
 
 		startLease = startLease.DeepCopy()
-		startLease.Spec.HolderIdentity = pointer.String("some-other-client" + rand.String(5))
+		startLease.Spec.HolderIdentity = pointer.String("some-other-client-" + rand.String(5))
 
 		_, err := pickCurrentLeaderClient(ctx, t, namespace, leaseName, clients).
 			Kubernetes.CoordinationV1().Leases(namespace.Name).Update(ctx, startLease, metav1.UpdateOptions{})
@@ -288,8 +291,6 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
 	require.Greater(t, finalTransitions, startTransitions)
 	require.Greater(t, finalTime.UnixNano(), startTime.UnixNano())
 
-	time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
-
 	return finalLease
 }
 
@@ -306,8 +307,6 @@ func forceRestart(ctx context.Context, t *testing.T, namespace *corev1.Namespace
 	require.Zero(t, *newLease.Spec.LeaseTransitions)
 	require.Greater(t, newLease.Spec.AcquireTime.UnixNano(), startLease.Spec.AcquireTime.UnixNano())
 
-	time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
-
 	return newLease
 }
 
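
The synchronization idea in the commit message - poll until exactly one client claims leadership instead of sleeping for a fixed two minutes - can be sketched roughly as below. This is a simplified, hypothetical illustration, not Pinniped's actual checkOnlyLeaderCanWrite implementation; the names waitForSingleLeader and isLeaderFunc are invented for the sketch.

package sketch

import (
	"context"
	"fmt"
	"time"
)

// isLeaderFunc reports whether one client currently believes it holds the lease.
// In the real test this role is played by a write that only the leader may perform.
type isLeaderFunc func(ctx context.Context) bool

// waitForSingleLeader polls all clients until exactly one claims leadership,
// approximating "everyone has observed the disruptive change" without a fixed sleep.
func waitForSingleLeader(ctx context.Context, clients []isLeaderFunc, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		leaders := 0
		for _, isLeader := range clients {
			if isLeader(ctx) {
				leaders++
			}
		}
		if leaders == 1 {
			return nil // exactly one actor believes it is the leader
		}

		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for a single leader (last count: %d): %w", leaders, ctx.Err())
		case <-ticker.C:
		}
	}
}

As the commit message notes, this is only an approximation: a client that was a follower both before and after the disruptive change is never forced to re-observe anything, but requiring exactly one self-identified leader is a reasonable signal that the change has propagated.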