leader election test: approximate that followers have observed change

Instead of blindly waiting long enough for a disruptive change to be
observed by the old leader and its followers, rely on the
approximation that checkOnlyLeaderCanWrite provides - i.e. that only
a single actor believes it is the leader.  This does not account for
clients that were in the followers list both before and after the
disruptive change, but it serves as a reasonable approximation.

Signed-off-by: Monis Khan <mok@vmware.com>
Monis Khan 2021-08-26 08:39:44 -04:00
parent 74daa1da64
commit e2cf9f6b74
1 changed file with 7 additions and 8 deletions
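
For context, the approximation the commit message refers to boils down to the sketch below. This is a hedged illustration, not the test's actual helper: the name countLeaders is made up, and it assumes each client is wrapped by leader election middleware that rejects writes unless that client currently believes it holds the lease, so a healthy cluster sees exactly one successful writer.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countLeaders is a hypothetical helper, not the test's actual code. It
// assumes writes through non-leader clients are rejected by middleware.
func countLeaders(ctx context.Context, clients []kubernetes.Interface, namespace, leaseName string) (leaders, nonLeaders int, err error) {
	for _, client := range clients {
		lease, err := client.CoordinationV1().Leases(namespace).Get(ctx, leaseName, metav1.GetOptions{})
		if err != nil {
			return 0, 0, err
		}
		// Attempt a no-op update: only the single client that thinks it
		// is the leader should be allowed to write the Lease.
		if _, err := client.CoordinationV1().Leases(namespace).Update(ctx, lease.DeepCopy(), metav1.UpdateOptions{}); err == nil {
			leaders++
		} else {
			nonLeaders++
		}
	}
	return leaders, nonLeaders, nil
}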

@@ -31,7 +31,7 @@ import (
func TestLeaderElection_Parallel(t *testing.T) {
_ = testlib.IntegrationEnv(t)
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
t.Cleanup(cancel)
leaseName := "leader-election-" + rand.String(5)
@@ -197,14 +197,17 @@ func waitForIdentity(ctx context.Context, t *testing.T, namespace *corev1.Namesp
testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
lease, err := pickRandomLeaderElectionClient(clients).Kubernetes.CoordinationV1().Leases(namespace.Name).Get(ctx, leaseName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
t.Logf("lease %s/%s does not exist", namespace.Name, leaseName)
return false, nil
}
if err != nil {
return false, err
}
out = lease
t.Logf("lease %s/%s - current leader identity: %s, valid leader identities: %s",
namespace.Name, leaseName, pointer.StringDeref(lease.Spec.HolderIdentity, "<nil>"), identities.List())
return lease.Spec.HolderIdentity != nil && identities.Has(*lease.Spec.HolderIdentity), nil
- }, 5*time.Minute, time.Second)
+ }, 10*time.Minute, 10*time.Second)
return out
}
@@ -256,7 +259,7 @@ func checkOnlyLeaderCanWrite(ctx context.Context, t *testing.T, namespace *corev
}
requireEventually.Equal(1, leaders, "did not see leader")
requireEventually.Equal(len(clients)-1, nonLeaders, "did not see non-leader")
- }, time.Minute, time.Second)
+ }, 3*time.Minute, 3*time.Second)
return lease
}
@@ -273,7 +276,7 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
startTime = *startLease.Spec.AcquireTime
startLease = startLease.DeepCopy()
- startLease.Spec.HolderIdentity = pointer.String("some-other-client" + rand.String(5))
+ startLease.Spec.HolderIdentity = pointer.String("some-other-client-" + rand.String(5))
_, err := pickCurrentLeaderClient(ctx, t, namespace, leaseName, clients).
Kubernetes.CoordinationV1().Leases(namespace.Name).Update(ctx, startLease, metav1.UpdateOptions{})
@@ -288,8 +291,6 @@ func forceTransition(ctx context.Context, t *testing.T, namespace *corev1.Namesp
require.Greater(t, finalTransitions, startTransitions)
require.Greater(t, finalTime.UnixNano(), startTime.UnixNano())
- time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
return finalLease
}
@@ -306,8 +307,6 @@ func forceRestart(ctx context.Context, t *testing.T, namespace *corev1.Namespace
require.Zero(t, *newLease.Spec.LeaseTransitions)
require.Greater(t, newLease.Spec.AcquireTime.UnixNano(), startLease.Spec.AcquireTime.UnixNano())
- time.Sleep(2 * time.Minute) // need to give clients time to notice this change because leader election is polling based
return newLease
}
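
For background on the two deleted time.Sleep calls: client-go's leader election is polling based, so followers only re-inspect the Lease on their retry interval, which is why the old code waited a fixed two minutes. A minimal sketch of the knobs involved, assuming the standard k8s.io/client-go/tools/leaderelection package; the helper name runElection and the concrete durations are illustrative, not necessarily what this project configures.

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runElection is a hypothetical helper showing why followers take time to
// notice a lease change: they only poll the Lease every RetryPeriod.
func runElection(ctx context.Context, client kubernetes.Interface, namespace, leaseName, identity string) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Namespace: namespace, Name: leaseName},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: identity},
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 60 * time.Second, // how long a lease is honored without renewal
		RenewDeadline: 40 * time.Second, // leader steps down if it cannot renew in time
		RetryPeriod:   10 * time.Second, // polling interval - the source of the observation delay
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* this process is now the leader */ },
			OnStoppedLeading: func() { /* this process lost the lease */ },
		},
	})
}

Rather than waiting a multiple of RetryPeriod, the test now treats "exactly one client can write" as its signal that the disruptive change has propagated, which is both faster and less flaky.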