Extend assertNoRestartsDuringTest to dump logs from containers that restarted.
Signed-off-by: Matt Moyer <moyerm@vmware.com>
commit 0dd2b358fb
parent 6520c5a3a1
@@ -13,6 +13,7 @@ import (
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
 )
 
 // RequireEventuallyWithoutError is a wrapper around require.Eventually() that allows the caller to
@@ -33,48 +34,73 @@ func RequireEventuallyWithoutError(
 // provided namespace with the provided labelSelector during the lifetime of a test.
 func assertNoRestartsDuringTest(t *testing.T, namespace, labelSelector string) {
 	t.Helper()
+	kubeClient := NewKubernetesClientset(t)
+	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	defer cancel()
 
-	previousRestartCounts := getRestartCounts(t, namespace, labelSelector)
+	previousRestartCounts := getRestartCounts(ctx, t, kubeClient, namespace, labelSelector)
 
 	t.Cleanup(func() {
-		currentRestartCounts := getRestartCounts(t, namespace, labelSelector)
+		ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+		defer cancel()
+		currentRestartCounts := getRestartCounts(ctx, t, kubeClient, namespace, labelSelector)
 
 		for key, previousRestartCount := range previousRestartCounts {
 			currentRestartCount, ok := currentRestartCounts[key]
-			if assert.Truef(
+
+			// If the container no longer exists, that's a test failure.
+			if !assert.Truef(
 				t,
 				ok,
-				"pod namespace/name/container %s existed at beginning of the test, but not the end",
-				key,
+				"container %s existed at beginning of the test, but not the end",
+				key.String(),
 			) {
-				assert.Equal(
-					t,
-					previousRestartCount,
-					currentRestartCount,
-					"pod namespace/name/container %s has restarted %d times (original count was %d)",
-					key,
-					currentRestartCount,
-					previousRestartCount,
-				)
+				continue
+			}
+
+			// Expect the restart count to be the same as it was before the test.
+			if !assert.Equal(
+				t,
+				previousRestartCount,
+				currentRestartCount,
+				"container %s has restarted %d times (original count was %d)",
+				key.String(),
+				currentRestartCount,
+				previousRestartCount,
+			) {
+				// Attempt to dump the logs from the previous container that crashed.
+				dumpContainerLogs(ctx, t, kubeClient, key.namespace, key.pod, key.container, true)
 			}
 		}
 	})
 }
 
-func getRestartCounts(t *testing.T, namespace, labelSelector string) map[string]int32 {
-	t.Helper()
-
-	kubeClient := NewKubernetesClientset(t)
-	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
-	defer cancel()
+type containerRestartKey struct {
+	namespace string
+	pod       string
+	container string
+}
+
+func (k containerRestartKey) String() string {
+	return fmt.Sprintf("%s/%s/%s", k.namespace, k.pod, k.container)
+}
+
+type containerRestartMap map[containerRestartKey]int32
+
+func getRestartCounts(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, labelSelector string) containerRestartMap {
+	t.Helper()
 
 	pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 	require.NoError(t, err)
 
-	restartCounts := make(map[string]int32)
+	restartCounts := make(containerRestartMap)
 	for _, pod := range pods.Items {
 		for _, container := range pod.Status.ContainerStatuses {
-			key := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Name)
+			key := containerRestartKey{
+				namespace: pod.Namespace,
+				pod:       pod.Name,
+				container: container.Name,
+			}
 			restartCounts[key] = container.RestartCount
 		}
 	}
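Note: the new cleanup path calls dumpContainerLogs, which is defined elsewhere in the test library and is not part of this diff. Purely as a sketch (the package name, the meaning of the trailing bool argument, and the log formatting are assumptions inferred from the call site above, not from this commit), a helper with that shape could be written against client-go's GetLogs API roughly like this:

// Hypothetical sketch only; the real dumpContainerLogs may differ.
package library

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

func dumpContainerLogs(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, pod, container string, previous bool) {
	t.Helper()

	// previous=true requests the logs of the last terminated instance of the
	// container, which is what we want after a crash/restart.
	logs, err := kubeClient.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{
		Container: container,
		Previous:  previous,
	}).DoRaw(ctx)
	if err != nil {
		t.Logf("failed to get logs for container %s/%s/%s: %v", namespace, pod, container, err)
		return
	}
	t.Logf("logs for container %s/%s/%s:\n%s", namespace, pod, container, logs)
}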
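For reference, a test opts into this assertion by calling it once before exercising the system; the t.Cleanup registered inside then runs after the test body finishes. The namespace and label selector below are invented for illustration:

func TestExample(t *testing.T) {
	// Record restart counts now; at cleanup time, fail the test if any matching
	// container has restarted, and dump the crashed container's previous logs.
	assertNoRestartsDuringTest(t, "test-namespace", "app=example-app")

	// ... exercise the system under test ...
}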