Refactor kubectl exec test in TestCLI to avoid assuming any RBAC settings

This commit is contained in:
Ryan Richard 2020-09-21 11:40:11 -07:00
parent 49145791cc
commit cfb76a538c
3 changed files with 163 additions and 108 deletions

View File

@ -56,29 +56,35 @@ func TestCLI(t *testing.T) {
defer cleanupFunc() defer cleanupFunc()
// Run pinniped CLI to get kubeconfig. // Run pinniped CLI to get kubeconfig.
kubeConfig := runPinnipedCLI(t, pinnipedExe, token, namespaceName) kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, token, namespaceName)
// In addition to the client-go based testing below, also try the kubeconfig adminClient := library.NewClientset(t)
// with kubectl once just in case it is somehow different.
runKubectlCLI(t, kubeConfig, namespaceName, testUsername)
// Create Kubernetes client with kubeconfig from pinniped CLI.
kubeClient := library.NewClientsetForKubeConfig(t, kubeConfig)
// Validate that we can auth to the API via our user.
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*3) ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*3)
defer cancelFunc() defer cancelFunc()
adminClient := library.NewClientset(t) // In addition to the client-go based testing below, also try the kubeconfig
// with kubectl to validate that it works.
t.Run("access as user", accessAsUserTest(ctx, adminClient, testUsername, kubeClient)) t.Run(
"access as user with kubectl",
accessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, testUsername, namespaceName),
)
for _, group := range expectedTestUserGroups { for _, group := range expectedTestUserGroups {
group := group group := group
t.Run( t.Run(
"access as group "+group, "access as group "+group+" with kubectl",
accessAsGroupTest(ctx, adminClient, group, kubeClient), accessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, namespaceName),
) )
} }
// Create Kubernetes client with kubeconfig from pinniped CLI.
kubeClient := library.NewClientsetForKubeConfig(t, kubeConfigYAML)
// Validate that we can auth to the API via our user.
t.Run("access as user with client-go", accessAsUserTest(ctx, adminClient, testUsername, kubeClient))
for _, group := range expectedTestUserGroups {
group := group
t.Run("access as group "+group+" with client-go", accessAsGroupTest(ctx, adminClient, group, kubeClient))
}
} }
func buildPinnipedCLI(t *testing.T) (string, func()) { func buildPinnipedCLI(t *testing.T) (string, func()) {
@ -115,39 +121,3 @@ func runPinnipedCLI(t *testing.T, pinnipedExe, token, namespaceName string) stri
return string(output) return string(output)
} }
// runKubectlCLI runs `kubectl get pods` with the given kubeconfig and asserts
// that the request is authenticated as username but denied by RBAC (Forbidden).
// It returns kubectl's combined stdout/stderr output.
func runKubectlCLI(t *testing.T, kubeConfig, namespaceName, username string) string {
	t.Helper()

	// Write the kubeconfig to a temp file so kubectl can consume it.
	kubeConfigFile, err := ioutil.TempFile("", "pinniped-generated-kubeconfig-*")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, os.Remove(kubeConfigFile.Name()))
	}()

	_, err = kubeConfigFile.WriteString(kubeConfig)
	require.NoError(t, err)
	require.NoError(t, kubeConfigFile.Close())

	//nolint: gosec // It's okay that we are passing kubeConfigFile.Name() to an exec command here. It was created above.
	cmd := exec.Command(
		"kubectl",
		"get",
		"pods",
		"--kubeconfig", kubeConfigFile.Name(),
		"--namespace", namespaceName,
	)
	output, err := cmd.CombinedOutput()

	// Expect an error because this user has no RBAC permission. However, the
	// error message should state that we had already authenticated as the test user.
	wantOutput := `Error from server (Forbidden): pods is forbidden: User "` +
		username +
		`" cannot list resource "pods" in API group "" in the namespace "` +
		namespaceName +
		`"` + "\n"
	require.EqualError(t, err, "exit status 1")
	require.Equal(t, wantOutput, string(output))

	return string(output)
}

View File

@ -4,9 +4,15 @@ package integration
import ( import (
"context" "context"
"io/ioutil"
"net/http"
"os"
"os/exec"
"testing" "testing"
"time" "time"
"k8s.io/apimachinery/pkg/api/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -27,6 +33,97 @@ func accessAsUserTest(
clientUnderTest kubernetes.Interface, clientUnderTest kubernetes.Interface,
) func(t *testing.T) { ) func(t *testing.T) {
return func(t *testing.T) { return func(t *testing.T) {
addTestClusterUserCanViewEverythingRoleBinding(t, ctx, adminClient, testUsername)
// Use the client which is authenticated as the test user to list namespaces
var listNamespaceResponse *v1.NamespaceList
var err error
var canListNamespaces = func() bool {
listNamespaceResponse, err = clientUnderTest.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
return err == nil
}
assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
require.NoError(t, err) // prints out the error and stops the test in case of failure
require.NotEmpty(t, listNamespaceResponse.Items)
}
}
// accessAsUserWithKubectlTest returns a test which grants testUsername
// cluster-wide view permission and then verifies that the user can list
// namespaces via kubectl using the given kubeconfig.
func accessAsUserWithKubectlTest(
	ctx context.Context,
	adminClient kubernetes.Interface,
	testKubeConfigYAML string,
	testUsername string,
	expectedNamespace string,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterUserCanViewEverythingRoleBinding(t, ctx, adminClient, testUsername)

		// RBAC propagation is not instantaneous, so poll the kubectl call
		// until it succeeds or the timeout elapses.
		var kubectlOutput string
		var lastErr error
		assert.Eventually(t, func() bool {
			kubectlOutput, lastErr = runKubectlGetNamespaces(t, testKubeConfigYAML)
			return lastErr == nil
		}, 3*time.Second, 250*time.Millisecond)
		require.NoError(t, lastErr) // prints out the error and stops the test in case of failure
		require.Contains(t, kubectlOutput, expectedNamespace)
	}
}
// accessAsGroupTest runs a generic test in which a clientUnderTest with membership in group
// testGroup tries to auth to the kube API (i.e., list namespaces).
//
// Use this function if you want to simply validate that a user can auth to the kube API (via
// a group membership) after performing a Pinniped credential exchange.
func accessAsGroupTest(
	ctx context.Context,
	adminClient kubernetes.Interface,
	testGroup string,
	clientUnderTest kubernetes.Interface,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterGroupCanViewEverythingRoleBinding(t, ctx, adminClient, testGroup)

		// RBAC bindings take a moment to propagate, so poll until the list
		// call stops failing.
		var namespaces *v1.NamespaceList
		var lastErr error
		listSucceeds := func() bool {
			namespaces, lastErr = clientUnderTest.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
			return lastErr == nil
		}
		assert.Eventually(t, listSucceeds, 3*time.Second, 250*time.Millisecond)
		require.NoError(t, lastErr) // prints out the error and stops the test in case of failure
		require.NotEmpty(t, namespaces.Items)
	}
}
// accessAsGroupWithKubectlTest returns a test which grants testGroup
// cluster-wide view permission and then verifies that a member of that group
// can list namespaces via kubectl using the given kubeconfig.
func accessAsGroupWithKubectlTest(
	ctx context.Context,
	adminClient kubernetes.Interface,
	testKubeConfigYAML string,
	testGroup string,
	expectedNamespace string,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterGroupCanViewEverythingRoleBinding(t, ctx, adminClient, testGroup)

		// RBAC propagation is not instantaneous, so poll the kubectl call
		// until it succeeds or the timeout elapses.
		var kubectlOutput string
		var lastErr error
		assert.Eventually(t, func() bool {
			kubectlOutput, lastErr = runKubectlGetNamespaces(t, testKubeConfigYAML)
			return lastErr == nil
		}, 3*time.Second, 250*time.Millisecond)
		require.NoError(t, lastErr) // prints out the error and stops the test in case of failure
		require.Contains(t, kubectlOutput, expectedNamespace)
	}
}
func addTestClusterUserCanViewEverythingRoleBinding(t *testing.T, ctx context.Context, adminClient kubernetes.Interface, testUsername string) {
addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{ addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{}, TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -43,32 +140,9 @@ func accessAsUserTest(
Name: "view", Name: "view",
}, },
}) })
// Use the client which is authenticated as the test user to list namespaces
var listNamespaceResponse *v1.NamespaceList
var err error
var canListNamespaces = func() bool {
listNamespaceResponse, err = clientUnderTest.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
return err == nil
}
assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
require.NoError(t, err) // prints out the error and stops the test in case of failure
require.NotEmpty(t, listNamespaceResponse.Items)
}
} }
// accessAsGroupTest runs a generic test in which a clientUnderTest with membership in group func addTestClusterGroupCanViewEverythingRoleBinding(t *testing.T, ctx context.Context, adminClient kubernetes.Interface, testGroup string) {
// testGroup tries to auth to the kube API (i.e., list namespaces).
//
// Use this function if you want to simply validate that a user can auth to the kube API (via
// a group membership) after performing a Pinniped credential exchange.
func accessAsGroupTest(
ctx context.Context,
adminClient kubernetes.Interface,
testGroup string,
clientUnderTest kubernetes.Interface,
) func(t *testing.T) {
return func(t *testing.T) {
addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{ addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{}, TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -85,16 +159,51 @@ func accessAsGroupTest(
Name: "view", Name: "view",
}, },
}) })
}
// Use the client which is authenticated as the test user to list namespaces
var listNamespaceResponse *v1.NamespaceList func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
var err error _, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
var canListNamespaces = func() bool { if err != nil {
listNamespaceResponse, err = clientUnderTest.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) // "404 not found" errors are acceptable, but others would be unexpected
return err == nil statusError, isStatus := err.(*errors.StatusError)
} require.True(t, isStatus)
assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond) require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))
require.NoError(t, err) // prints out the error and stops the test in case of failure
require.NotEmpty(t, listNamespaceResponse.Items) _, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
} require.NoError(t, err)
}
t.Cleanup(func() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
require.NoError(t, err, "Test failed to clean up after itself")
})
}
// runKubectlGetNamespaces invokes `kubectl get namespace` using the provided
// kubeconfig YAML and returns kubectl's combined stdout/stderr along with any
// exec error (non-nil when kubectl exits non-zero).
func runKubectlGetNamespaces(t *testing.T, kubeConfigYAML string) (string, error) {
	kubeConfigFile := writeStringToTempFile(t, "pinniped-generated-kubeconfig-*", kubeConfigYAML)

	//nolint: gosec // It's okay that we are passing kubeConfigFile.Name() to an exec command here. It was created above.
	cmd := exec.Command("kubectl", "get", "namespace", "--kubeconfig", kubeConfigFile.Name())
	output, err := cmd.CombinedOutput()
	return string(output), err
}
func writeStringToTempFile(t *testing.T, filename string, kubeConfigYAML string) *os.File {
t.Helper()
f, err := ioutil.TempFile("", filename)
require.NoError(t, err)
deferMe := func() {
err := os.Remove(f.Name())
require.NoError(t, err)
}
t.Cleanup(deferMe)
_, err = f.WriteString(kubeConfigYAML)
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
return f
} }

View File

@ -7,19 +7,15 @@ import (
"context" "context"
"crypto/x509" "crypto/x509"
"encoding/pem" "encoding/pem"
"net/http"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"go.pinniped.dev/generated/1.19/apis/login/v1alpha1" "go.pinniped.dev/generated/1.19/apis/login/v1alpha1"
"go.pinniped.dev/test/library" "go.pinniped.dev/test/library"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
func TestSuccessfulCredentialRequest(t *testing.T) { func TestSuccessfulCredentialRequest(t *testing.T) {
@ -136,26 +132,6 @@ func validCredentialRequestSpecWithRealToken(t *testing.T) v1alpha1.TokenCredent
return v1alpha1.TokenCredentialRequestSpec{Token: library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")} return v1alpha1.TokenCredentialRequestSpec{Token: library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")}
} }
// addTestClusterRoleBinding ensures that the given ClusterRoleBinding exists
// in the cluster, creating it if it is missing, and registers a t.Cleanup
// which deletes it when the test finishes.
func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
	if err != nil {
		// "404 not found" errors are acceptable (we will create the binding
		// below), but any other error is unexpected. Use the idiomatic
		// apimachinery helper instead of a manual type assertion + code check.
		require.True(t, errors.IsNotFound(err), "unexpected error getting ClusterRoleBinding %q: %v", binding.Name, err)

		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
		require.NoError(t, err)
	}

	t.Cleanup(func() {
		// Use a fresh context because the test's ctx may already be canceled
		// by the time cleanup runs.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		// Declare err locally so this closure does not clobber the err
		// variable captured from the enclosing function.
		err := adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
		require.NoError(t, err, "Test failed to clean up after itself")
	})
}
func stringPtr(s string) *string { func stringPtr(s string) *string {
return &s return &s
} }