// Copyright 2021-2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package testlib

import (
	"context"
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"

	"go.pinniped.dev/internal/constable"
)

type (
	// loopTestingT records the failures observed during an iteration of the RequireEventually() loop.
	loopTestingT []assertionFailure

	// assertionFailure is a single error observed during an iteration of the RequireEventually() loop.
	assertionFailure struct {
		format string
		args   []interface{}
	}
)

// loopTestingT implements require.TestingT.
var _ require.TestingT = (*loopTestingT)(nil)

// Errorf is called by the assert.Assertions methods to record an error.
func (e *loopTestingT) Errorf(format string, args ...interface{}) {
	*e = append(*e, assertionFailure{format, args})
}

const errLoopFailNow = constable.Error("failing test now")

// FailNow is called by the require.Assertions methods to force the code to immediately halt. It panics with a
// sentinel value that is recovered by recoverLoopFailNow().
func (e *loopTestingT) FailNow() { panic(errLoopFailNow) }

// recoverLoopFailNow catches the panic raised by FailNow() and ignores it, allowing the FailNow() call to halt the
// current iteration of the check function while letting the retry loop continue.
func recoverLoopFailNow() {
	switch p := recover(); p {
	case nil, errLoopFailNow:
		// Ignore nil (success) and our sentinel value.
		return
	default:
		// Re-panic on any other value.
		panic(p)
	}
}
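
// RequireEventuallyf is a printf-style variant of RequireEventually(): it formats msg and args with fmt.Sprintf
// and passes the result through as the single failure message.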
func RequireEventuallyf(
	t *testing.T,
	f func(requireEventually *require.Assertions),
	waitFor time.Duration,
	tick time.Duration,
	msg string,
	args ...interface{},
) {
	t.Helper()
	RequireEventually(t, f, waitFor, tick, fmt.Sprintf(msg, args...))
}

// RequireEventually is similar to require.Eventually() except that it is thread safe and provides a richer way to
// write per-iteration assertions.
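//
// A hypothetical usage sketch (kubeClient, ctx, namespace, and secretName are assumed to exist in the caller's test):
//
//	RequireEventually(t, func(requireEventually *require.Assertions) {
//		secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
//		requireEventually.NoError(err)
//		requireEventually.NotEmpty(secret.Data["token"])
//	}, time.Minute, time.Second, "expected the Secret to eventually contain a token")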
func RequireEventually(
	t *testing.T,
	f func(requireEventually *require.Assertions),
	waitFor time.Duration,
	tick time.Duration,
	msgAndArgs ...interface{},
) {
	t.Helper()

	// Set up some bookkeeping so we can fail with a nice message if necessary.
	var (
		startTime          = time.Now()
		attempts           int
		mostRecentFailures loopTestingT
	)

	// Run the check until it completes with no assertion failures.
	waitErr := wait.PollImmediate(tick, waitFor, func() (bool, error) {
		t.Helper()
		attempts++

		// Reset the recorded failures on each iteration.
		mostRecentFailures = nil

		// Ignore any panics caused by FailNow() -- they will cause f() to return immediately, but any errors
		// it has already logged will be in mostRecentFailures.
		defer recoverLoopFailNow()

		// Run the per-iteration check, recording any failed assertions into mostRecentFailures.
		f(require.New(&mostRecentFailures))

		// We're only done iterating if no assertions have failed.
		return len(mostRecentFailures) == 0, nil
	})

	// If things eventually completed with no failures/timeouts, we're done.
	if waitErr == nil {
		return
	}

	// Re-assert the most recent set of failures with a nice error log.
	duration := time.Since(startTime).Round(100 * time.Millisecond)
	t.Errorf("failed to complete even after %s (%d attempts): %v", duration, attempts, waitErr)
	for _, failure := range mostRecentFailures {
		t.Errorf(failure.format, failure.args...)
	}

	// Fail the test now with the provided message.
	require.NoError(t, waitErr, msgAndArgs...)
}

// RequireEventuallyWithoutError is similar to require.Eventually() except that it also allows the caller to
// return an error from the condition function. If the condition function returns an error at any
// point, the assertion will immediately fail.
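//
// A hypothetical usage sketch (deployments, deploymentName, and ctx are assumed to exist in the caller's test):
//
//	RequireEventuallyWithoutError(t, func() (bool, error) {
//		deployment, err := deployments.Get(ctx, deploymentName, metav1.GetOptions{})
//		if err != nil {
//			return false, err // returning an error fails the assertion immediately
//		}
//		return deployment.Status.ReadyReplicas > 0, nil // keep polling until a replica is ready
//	}, time.Minute, time.Second, "deployment never became ready")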
func RequireEventuallyWithoutError(
	t *testing.T,
	f func() (bool, error),
	waitFor time.Duration,
	tick time.Duration,
	msgAndArgs ...interface{},
) {
	t.Helper()
	require.NoError(t, wait.PollImmediate(tick, waitFor, f), msgAndArgs...)
}

// RequireNeverWithoutError is similar to require.Never() except that it also allows the caller to
// return an error from the condition function. If the condition function returns an error at any
// point, the assertion will immediately fail.
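//
// A hypothetical usage sketch (pods, podName, and ctx are assumed to exist in the caller's test):
//
//	RequireNeverWithoutError(t, func() (bool, error) {
//		pod, err := pods.Get(ctx, podName, metav1.GetOptions{})
//		if err != nil {
//			return false, err // unexpected errors fail the assertion immediately
//		}
//		return pod.Status.Phase == "Failed", nil // returning true at any point fails the test
//	}, 30*time.Second, time.Second, "the pod should never enter the Failed phase")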
func RequireNeverWithoutError(
	t *testing.T,
	f func() (bool, error),
	waitFor time.Duration,
	tick time.Duration,
	msgAndArgs ...interface{},
) {
	t.Helper()
	err := wait.PollImmediate(tick, waitFor, f)
	isWaitTimeout := errors.Is(err, wait.ErrWaitTimeout)
	if err != nil && !isWaitTimeout {
		require.NoError(t, err, msgAndArgs...) // this will fail and print the right error message
	}
	if err == nil {
		// This prints the same error message that require.Never would print in this case.
		require.Fail(t, "Condition satisfied", msgAndArgs...)
	}
}

// assertNoRestartsDuringTest allows a caller to assert that there were no container restarts for the Pods found in
// the provided namespace with the provided labelSelector during the lifetime of a test.
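//
// It snapshots the restart counts when called and compares them against the counts observed in a t.Cleanup() hook,
// so it should be called near the beginning of a test. A hypothetical call (the namespace and label values are
// made up):
//
//	assertNoRestartsDuringTest(t, "my-app-namespace", "app=my-app")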
func assertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
	t.Helper()
	kubeClient := NewKubernetesClientset(t)

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	previousRestartCounts := getRestartCounts(ctx, t, kubeClient, namespace, labelSelector)

	t.Cleanup(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
		defer cancel()

		currentRestartCounts := getRestartCounts(ctx, t, kubeClient, namespace, labelSelector)

		for key, previousRestartCount := range previousRestartCounts {
			currentRestartCount, ok := currentRestartCounts[key]

			// If the container no longer exists, that's a test failure.
			if !assert.Truef(
				t,
				ok,
				"container %s existed at the beginning of the test, but not at the end",
				key.String(),
			) {
				continue
			}

			// Expect the restart count to be the same as it was before the test.
			assert.Equalf(
				t,
				previousRestartCount,
				currentRestartCount,
				"container %s has a restart count of %d (original count was %d)",
				key.String(),
				currentRestartCount,
				previousRestartCount,
			)
		}
	})
}

type containerRestartKey struct {
	namespace string
	pod       string
	container string
}

func (k containerRestartKey) String() string {
	return fmt.Sprintf("%s/%s/%s", k.namespace, k.pod, k.container)
}

type containerRestartMap map[containerRestartKey]int32
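
// getRestartCounts returns the current restart count of every container in every Pod in the given namespace
// matching the given label selector.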
func getRestartCounts(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, labelSelector string) containerRestartMap {
	t.Helper()

	pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
	require.NoError(t, err)

	restartCounts := make(containerRestartMap)
	for _, pod := range pods.Items {
		for _, container := range pod.Status.ContainerStatuses {
			key := containerRestartKey{
				namespace: pod.Namespace,
				pod:       pod.Name,
				container: container.Name,
			}
			restartCounts[key] = container.RestartCount
		}
	}

	return restartCounts
}