test/integration: add "kubectl attach" test to TestImpersonationProxy
Signed-off-by: Andrew Keesler <akeesler@vmware.com>
commit 22ca2da1ff
parent fcd8c585c3
@@ -18,6 +18,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strings"
+	"sync"
 	"testing"
 	"time"
@@ -47,6 +48,30 @@ import (
 	"go.pinniped.dev/test/library"
 )

+// syncBuffer wraps bytes.Buffer with a mutex so we don't have races in our test code.
+type syncBuffer struct {
+	buf bytes.Buffer
+	mu  sync.Mutex
+}
+
+func (sb *syncBuffer) String() string {
+	sb.mu.Lock()
+	defer sb.mu.Unlock()
+	return sb.buf.String()
+}
+
+func (sb *syncBuffer) Read(b []byte) (int, error) {
+	sb.mu.Lock()
+	defer sb.mu.Unlock()
+	return sb.buf.Read(b)
+}
+
+func (sb *syncBuffer) Write(b []byte) (int, error) {
+	sb.mu.Lock()
+	defer sb.mu.Unlock()
+	return sb.buf.Write(b)
+}
+
 // Note that this test supports being run on all of our integration test cluster types:
 // - load balancers not supported, has squid proxy (e.g. kind)
 // - load balancers supported, has squid proxy (e.g. EKS)
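Note (not part of the diff): the mutex matters because exec.Cmd copies the child process's output into the Stdout/Stderr writers from its own internal goroutine while the test polls the same buffer via require.Eventually, so a plain bytes.Buffer would be a data race under the race detector. A minimal standalone sketch of that pattern, using a stand-in shell command rather than kubectl:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
	"sync"
	"time"
)

// syncBuffer is the same idea as in the diff above: a bytes.Buffer guarded by a
// mutex so the goroutine exec.Cmd uses to copy the child's output can write
// while another goroutine reads.
type syncBuffer struct {
	mu  sync.Mutex
	buf bytes.Buffer
}

func (sb *syncBuffer) Write(p []byte) (int, error) {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	return sb.buf.Write(p)
}

func (sb *syncBuffer) String() string {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	return sb.buf.String()
}

func main() {
	var stdout syncBuffer
	cmd := exec.Command("sh", "-c", `sleep 1; echo hello`) // stand-in for a long-running kubectl command
	cmd.Stdout = &stdout                                   // written to from exec's internal copy goroutine

	if err := cmd.Start(); err != nil {
		panic(err)
	}

	// Poll the buffer from this goroutine while the copy goroutine may still be writing to it.
	deadline := time.Now().Add(5 * time.Second)
	for time.Now().Before(deadline) && !strings.Contains(stdout.String(), "hello") {
		time.Sleep(100 * time.Millisecond)
	}
	_ = cmd.Wait()
	fmt.Printf("captured: %q\n", stdout.String())
}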
@@ -471,11 +496,11 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))

 		// func to create kubectl commands with a kubeconfig
-		kubectlCommand := func(timeout context.Context, args ...string) (*exec.Cmd, *bytes.Buffer, *bytes.Buffer) {
+		kubectlCommand := func(timeout context.Context, args ...string) (*exec.Cmd, *syncBuffer, *syncBuffer) {
 			allArgs := append([]string{"--kubeconfig", kubeconfigPath}, args...)
 			//nolint:gosec // we are not performing malicious argument injection against ourselves
 			kubectlCmd := exec.CommandContext(timeout, "kubectl", allArgs...)
-			var stdout, stderr bytes.Buffer
+			var stdout, stderr syncBuffer
 			kubectlCmd.Stdout = &stdout
 			kubectlCmd.Stderr = &stderr
 			kubectlCmd.Env = envVarsWithProxy
@@ -501,44 +526,45 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		pods, err := adminClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{})
 		require.NoError(t, err)
 		require.Greater(t, len(pods.Items), 0)
-		var podName string
+		var conciergePod *corev1.Pod
 		for _, pod := range pods.Items {
+			pod := pod
 			if !strings.Contains(pod.Name, "kube-cert-agent") {
-				podName = pod.Name
+				conciergePod = &pod
 			}
 		}
-		if podName == "" {
+		if conciergePod == nil {
 			t.Error("could not find a concierge pod")
 		}

 		// Try "kubectl exec" through the impersonation proxy.
 		echoString := "hello world"
 		remoteEchoFile := fmt.Sprintf("/tmp/test-impersonation-proxy-echo-file-%d.txt", time.Now().Unix())
-		stdout, _, err := runKubectl("exec", "--namespace", env.ConciergeNamespace, podName, "--", "bash", "-c", fmt.Sprintf(`echo "%s" | tee %s`, echoString, remoteEchoFile))
+		stdout, _, err := runKubectl("exec", "--namespace", env.ConciergeNamespace, conciergePod.Name, "--", "bash", "-c", fmt.Sprintf(`echo "%s" | tee %s`, echoString, remoteEchoFile))
 		require.NoError(t, err, `"kubectl exec" failed`)
 		require.Equal(t, echoString+"\n", stdout)

 		// run the kubectl cp command
 		localEchoFile := filepath.Join(tempDir, filepath.Base(remoteEchoFile))
-		_, _, err = runKubectl("cp", fmt.Sprintf("%s/%s:%s", env.ConciergeNamespace, podName, remoteEchoFile), localEchoFile)
+		_, _, err = runKubectl("cp", fmt.Sprintf("%s/%s:%s", env.ConciergeNamespace, conciergePod.Name, remoteEchoFile), localEchoFile)
 		require.NoError(t, err, `"kubectl cp" failed`)
 		localEchoFileData, err := ioutil.ReadFile(localEchoFile)
 		require.NoError(t, err)
 		require.Equal(t, echoString+"\n", string(localEchoFileData))
 		defer func() {
-			_, _, _ = runKubectl("exec", "--namespace", env.ConciergeNamespace, podName, "--", "rm", remoteEchoFile) // cleanup remote echo file
+			_, _, _ = runKubectl("exec", "--namespace", env.ConciergeNamespace, conciergePod.Name, "--", "rm", remoteEchoFile) // cleanup remote echo file
 		}()

 		// run the kubectl logs command
 		logLinesCount := 10
-		stdout, _, err = runKubectl("logs", "--namespace", env.ConciergeNamespace, podName, fmt.Sprintf("--tail=%d", logLinesCount))
+		stdout, _, err = runKubectl("logs", "--namespace", env.ConciergeNamespace, conciergePod.Name, fmt.Sprintf("--tail=%d", logLinesCount))
 		require.NoError(t, err, `"kubectl logs" failed`)
 		require.Equalf(t, logLinesCount, strings.Count(stdout, "\n"), "wanted %d newlines in kubectl logs output:\n%s", logLinesCount, stdout)

 		// run the kubectl port-forward command
 		timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
 		defer cancelFunc()
-		portForwardCmd, _, stderr := kubectlCommand(timeout, "port-forward", "--namespace", env.ConciergeNamespace, podName, "8443:8443")
+		portForwardCmd, _, stderr := kubectlCommand(timeout, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "8443:8443")
 		portForwardCmd.Env = envVarsWithProxy

 		// start, but don't wait for the command to finish
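Note (not part of the diff): the added `pod := pod` line re-declares the loop variable for each iteration so that `conciergePod = &pod` captures a per-iteration copy. Without it, every iteration takes the address of the single reused range variable (pre-Go 1.22 semantics), so the pointer would end up referring to whichever pod the loop saw last. A standalone illustration with made-up pod names:

package main

import "fmt"

type pod struct{ Name string }

func main() {
	items := []pod{{"pinniped-concierge-xyz"}, {"kube-cert-agent-abc"}}

	// Buggy under pre-Go 1.22 semantics: &p aliases the single reused loop variable,
	// so after the loop it points at the last element, not the matched one.
	var buggy *pod
	for _, p := range items {
		if p.Name == "pinniped-concierge-xyz" {
			buggy = &p
		}
	}

	// Fixed: shadow the loop variable so the pointer refers to a per-iteration copy.
	var fixed *pod
	for _, p := range items {
		p := p
		if p.Name == "pinniped-concierge-xyz" {
			fixed = &p
		}
	}

	// Pre-Go 1.22: prints "kube-cert-agent-abc pinniped-concierge-xyz"; Go 1.22+ makes both correct.
	fmt.Println(buggy.Name, fixed.Name)
}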
@@ -564,6 +590,42 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		}
 		// we expect this to 403, but all we care is that it gets through
 		require.Contains(t, curlStdOut.String(), "\"forbidden: User \\\"system:anonymous\\\" cannot get path \\\"/\\\"\"")
+
+		// run the kubectl attach command
+		namespaceName := createTestNamespace(t, adminClient)
+		attachPod := library.CreatePod(ctx, t, "impersonation-proxy-attach", namespaceName, corev1.PodSpec{
+			Containers: []corev1.Container{
+				{
+					Name:    "impersonation-proxy-attach",
+					Image:   conciergePod.Spec.Containers[0].Image,
+					Command: []string{"bash"},
+					Args:    []string{"-c", `while true; do read VAR; echo "VAR: $VAR"; done`},
+					Stdin:   true,
+				},
+			},
+		})
+		attachCmd, attachStdout, attachStderr := kubectlCommand(timeout, "attach", "--stdin=true", "--namespace", namespaceName, attachPod.Name)
+		attachCmd.Env = envVarsWithProxy
+		attachStdin, err := attachCmd.StdinPipe()
+		require.NoError(t, err)
+
+		// start but don't wait for the attach command
+		err = attachCmd.Start()
+		require.NoError(t, err)
+
+		// write to stdin on the attach process
+		_, err = attachStdin.Write([]byte(echoString + "\n"))
+		require.NoError(t, err)
+
+		// see that we can read stdout and it spits out stdin output back to us
+		wantAttachStdout := fmt.Sprintf("VAR: %s\n", echoString)
+		require.Eventuallyf(t, func() bool { return attachStdout.String() == wantAttachStdout }, time.Second*30, time.Second, `got "kubectl attach" stdout: %q, wanted: %q (stderr: %q)`, attachStdout.String(), wantAttachStdout, attachStderr.String())
+
+		// close stdin and attach process should exit
+		err = attachStdin.Close()
+		require.NoError(t, err)
+		err = attachCmd.Wait()
+		require.NoError(t, err)
 	})

 	t.Run("websocket client", func(t *testing.T) {
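Note (not part of the diff): the attach flow above writes a line into kubectl's stdin, polls the captured stdout until the pod echoes it back as "VAR: ...", then closes stdin, which the test expects to make the kubectl attach process exit. A rough local stand-in for that stdin/stdout round trip, using a bash loop that ends at EOF rather than the pod's `while true` loop:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Local stand-in for the attached container: echo each stdin line back with a "VAR: " prefix.
	cmd := exec.Command("bash", "-c", `while read VAR; do echo "VAR: $VAR"; done`)

	var stdout strings.Builder // safe here: only read after Wait returns
	cmd.Stdout = &stdout

	stdin, err := cmd.StdinPipe()
	if err != nil {
		panic(err)
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	// Write one line, then close stdin; read hits EOF, the loop ends, and the process exits.
	if _, err := stdin.Write([]byte("hello world\n")); err != nil {
		panic(err)
	}
	if err := stdin.Close(); err != nil {
		panic(err)
	}
	if err := cmd.Wait(); err != nil {
		panic(err)
	}

	fmt.Printf("echoed back: %q\n", stdout.String()) // echoed back: "VAR: hello world\n"
}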
@@ -439,6 +439,35 @@ func CreateTokenCredentialRequest(ctx context.Context, t *testing.T, spec v1alph
 	)
 }

+func CreatePod(ctx context.Context, t *testing.T, name, namespace string, spec corev1.PodSpec) *corev1.Pod {
+	t.Helper()
+
+	client := NewKubernetesClientset(t)
+	pods := client.CoreV1().Pods(namespace)
+
+	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	defer cancel()
+
+	created, err := pods.Create(ctx, &corev1.Pod{ObjectMeta: testObjectMeta(t, name), Spec: spec}, metav1.CreateOptions{})
+	require.NoError(t, err)
+	t.Logf("created test Pod %s", created.Name)
+
+	t.Cleanup(func() {
+		t.Logf("cleaning up test Pod %s", created.Name)
+		err := pods.Delete(context.Background(), created.Name, metav1.DeleteOptions{})
+		require.NoError(t, err)
+	})
+
+	var result *corev1.Pod
+	require.Eventuallyf(t, func() bool {
+		var err error
+		result, err = pods.Get(ctx, created.Name, metav1.GetOptions{})
+		require.NoError(t, err)
+		return result.Status.Phase == corev1.PodRunning
+	}, 15*time.Second, 1*time.Second, "expected the Pod to go into phase %s", corev1.PodRunning)
+	return result
+}
+
 func WaitForUserToHaveAccess(t *testing.T, user string, groups []string, shouldHaveAccessTo *authorizationv1.ResourceAttributes) {
 	t.Helper()
 	client := NewKubernetesClientset(t)
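Note (not part of the diff): callers use the new helper the way the attach test above does; CreatePod blocks until the Pod reports phase Running and registers a t.Cleanup that deletes it when the test ends. A hypothetical sketch of such a caller (the function name and PodSpec here are made up for illustration):

package integration

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"go.pinniped.dev/test/library"
)

// startSleepingPod shows how a test might call the new library.CreatePod helper:
// pass a namespace and a PodSpec, get back a Running Pod that is cleaned up
// automatically when the test finishes.
func startSleepingPod(ctx context.Context, t *testing.T, namespace, image string) *corev1.Pod {
	t.Helper()
	return library.CreatePod(ctx, t, "sleeper", namespace, corev1.PodSpec{
		Containers: []corev1.Container{
			{
				Name:    "sleeper",
				Image:   image, // e.g. reuse an image already present on the cluster, as the attach test does
				Command: []string{"sleep"},
				Args:    []string{"3600"},
			},
		},
	})
}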