Merge remote-tracking branch 'upstream/main' into 1-19-exec-strategy
This commit is contained in:
commit 83920db502
@@ -56,29 +56,35 @@ func TestCLI(t *testing.T) {
	defer cleanupFunc()

	// Run pinniped CLI to get kubeconfig.
	kubeConfig := runPinnipedCLI(t, pinnipedExe, token, namespaceName)
	kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, token, namespaceName)

	// In addition to the client-go based testing below, also try the kubeconfig
	// with kubectl once just in case it is somehow different.
	runKubectlCLI(t, kubeConfig, namespaceName, testUsername)

	// Create Kubernetes client with kubeconfig from pinniped CLI.
	kubeClient := library.NewClientsetForKubeConfig(t, kubeConfig)

	// Validate that we can auth to the API via our user.
	adminClient := library.NewClientset(t)
	ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*3)
	defer cancelFunc()

	adminClient := library.NewClientset(t)

	t.Run("access as user", accessAsUserTest(ctx, adminClient, testUsername, kubeClient))
	// In addition to the client-go based testing below, also try the kubeconfig
	// with kubectl to validate that it works.
	t.Run(
		"access as user with kubectl",
		accessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, testUsername, namespaceName),
	)
	for _, group := range expectedTestUserGroups {
		group := group
		t.Run(
			"access as group "+group,
			accessAsGroupTest(ctx, adminClient, group, kubeClient),
			"access as group "+group+" with kubectl",
			accessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, namespaceName),
		)
	}

	// Create Kubernetes client with kubeconfig from pinniped CLI.
	kubeClient := library.NewClientsetForKubeConfig(t, kubeConfigYAML)

	// Validate that we can auth to the API via our user.
	t.Run("access as user with client-go", accessAsUserTest(ctx, adminClient, testUsername, kubeClient))
	for _, group := range expectedTestUserGroups {
		group := group
		t.Run("access as group "+group+" with client-go", accessAsGroupTest(ctx, adminClient, group, kubeClient))
	}
}

func buildPinnipedCLI(t *testing.T) (string, func()) {
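Aside: the hunk above leans on helpers such as accessAsUserTest returning a func(t *testing.T), so one parameterized test body can be registered under several t.Run names. A minimal, self-contained sketch of that pattern (illustrative only, with hypothetical names, not part of this commit):

	// Sketch: a test helper that returns a subtest body. Only the
	// returned-closure pattern mirrors the diff above.
	package example

	import "testing"

	func accessTest(username string) func(t *testing.T) {
		// The closure captures username, so the same body can be
		// registered under multiple t.Run names with different inputs.
		return func(t *testing.T) {
			if username == "" {
				t.Fatal("username must not be empty")
			}
			t.Logf("would exercise cluster access as %q here", username)
		}
	}

	func TestAccess(t *testing.T) {
		t.Run("access as user with kubectl", accessTest("test-user"))
		t.Run("access as user with client-go", accessTest("test-user"))
	}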
@@ -115,39 +121,3 @@ func runPinnipedCLI(t *testing.T, pinnipedExe, token, namespaceName string) stri

	return string(output)
}

func runKubectlCLI(t *testing.T, kubeConfig, namespaceName, username string) string {
	t.Helper()

	f, err := ioutil.TempFile("", "pinniped-generated-kubeconfig-*")
	require.NoError(t, err)
	defer func() {
		err := os.Remove(f.Name())
		require.NoError(t, err)
	}()
	_, err = f.WriteString(kubeConfig)
	require.NoError(t, err)
	err = f.Close()
	require.NoError(t, err)

	//nolint: gosec // It's okay that we are passing f.Name() to an exec command here. It was created above.
	output, err := exec.Command(
		"kubectl",
		"get",
		"pods",
		"--kubeconfig", f.Name(),
		"--namespace", namespaceName,
	).CombinedOutput()

	// Expect an error because this user has no RBAC permission. However, the
	// error message should state that we had already authenticated as the test user.
	expectedErrorMessage := `Error from server (Forbidden): pods is forbidden: User "` +
		username +
		`" cannot list resource "pods" in API group "" in the namespace "` +
		namespaceName +
		`"` + "\n"
	require.EqualError(t, err, "exit status 1")
	require.Equal(t, expectedErrorMessage, string(output))

	return string(output)
}
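The helper deleted above pinned kubectl's Forbidden message byte-for-byte, which breaks whenever kubectl rewords its output. A looser check, sketched here as a hypothetical alternative in the context of that helper's variables rather than anything this commit does, would pin only the parts that matter:

	// Still require the command to fail, but match only the authenticated
	// identity and the denied verb/resource, not kubectl's full phrasing.
	require.EqualError(t, err, "exit status 1")
	require.Contains(t, string(output), `User "`+username+`"`)
	require.Contains(t, string(output), `cannot list resource "pods"`)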
@@ -4,9 +4,15 @@ package integration

import (
	"context"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
@@ -27,22 +33,7 @@ func accessAsUserTest(
	clientUnderTest kubernetes.Interface,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
			TypeMeta: metav1.TypeMeta{},
			ObjectMeta: metav1.ObjectMeta{
				Name: "integration-test-user-readonly-role-binding",
			},
			Subjects: []rbacv1.Subject{{
				Kind:     rbacv1.UserKind,
				APIGroup: rbacv1.GroupName,
				Name:     testUsername,
			}},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				APIGroup: rbacv1.GroupName,
				Name:     "view",
			},
		})
		addTestClusterUserCanViewEverythingRoleBinding(ctx, t, adminClient, testUsername)

		// Use the client which is authenticated as the test user to list namespaces
		var listNamespaceResponse *v1.NamespaceList
@@ -57,6 +48,30 @@ func accessAsUserTest(
	}
}

func accessAsUserWithKubectlTest(
	ctx context.Context,
	adminClient kubernetes.Interface,
	testKubeConfigYAML string,
	testUsername string,
	expectedNamespace string,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterUserCanViewEverythingRoleBinding(ctx, t, adminClient, testUsername)

		// Use the given kubeconfig with kubectl to list namespaces as the test user
		var kubectlCommandOutput string
		var err error
		var canListNamespaces = func() bool {
			kubectlCommandOutput, err = runKubectlGetNamespaces(t, testKubeConfigYAML)
			return err == nil
		}

		assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
		require.NoError(t, err) // prints out the error and stops the test in case of failure
		require.Contains(t, kubectlCommandOutput, expectedNamespace)
	}
}

// accessAsGroupTest runs a generic test in which a clientUnderTest with membership in group
// testGroup tries to auth to the kube API (i.e., list namespaces).
//
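Worth noting in the helper above: assert.Eventually only records a failure and keeps polling until its timeout, so the code pairs it with require.NoError, which aborts the subtest and surfaces the last kubectl error. A self-contained sketch of that retry idiom, using an invented condition rather than this commit's code:

	package example

	import (
		"errors"
		"testing"
		"time"

		"github.com/stretchr/testify/assert"
		"github.com/stretchr/testify/require"
	)

	func TestEventuallyPattern(t *testing.T) {
		attempts := 0
		var lastErr error
		succeeded := func() bool {
			attempts++
			if attempts < 3 {
				lastErr = errors.New("not ready yet")
				return false
			}
			lastErr = nil
			return true
		}

		// Poll up to 3s at 250ms intervals; assert (not require) so a slow
		// condition does not abort before lastErr can be reported below.
		assert.Eventually(t, succeeded, 3*time.Second, 250*time.Millisecond)
		require.NoError(t, lastErr) // halts the test and prints the last error on failure
	}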
@@ -69,22 +84,7 @@ func accessAsGroupTest(
	clientUnderTest kubernetes.Interface,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
			TypeMeta: metav1.TypeMeta{},
			ObjectMeta: metav1.ObjectMeta{
				Name: "integration-test-group-readonly-role-binding",
			},
			Subjects: []rbacv1.Subject{{
				Kind:     rbacv1.GroupKind,
				APIGroup: rbacv1.GroupName,
				Name:     testGroup,
			}},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				APIGroup: rbacv1.GroupName,
				Name:     "view",
			},
		})
		addTestClusterGroupCanViewEverythingRoleBinding(ctx, t, adminClient, testGroup)

		// Use the client which is authenticated as the test user to list namespaces
		var listNamespaceResponse *v1.NamespaceList
@@ -98,3 +98,112 @@ func accessAsGroupTest(
		require.NotEmpty(t, listNamespaceResponse.Items)
	}
}

func accessAsGroupWithKubectlTest(
	ctx context.Context,
	adminClient kubernetes.Interface,
	testKubeConfigYAML string,
	testGroup string,
	expectedNamespace string,
) func(t *testing.T) {
	return func(t *testing.T) {
		addTestClusterGroupCanViewEverythingRoleBinding(ctx, t, adminClient, testGroup)

		// Use the given kubeconfig with kubectl to list namespaces as the test user
		var kubectlCommandOutput string
		var err error
		var canListNamespaces = func() bool {
			kubectlCommandOutput, err = runKubectlGetNamespaces(t, testKubeConfigYAML)
			return err == nil
		}

		assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
		require.NoError(t, err) // prints out the error and stops the test in case of failure
		require.Contains(t, kubectlCommandOutput, expectedNamespace)
	}
}

func addTestClusterUserCanViewEverythingRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, testUsername string) {
	addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name: "integration-test-user-readonly-role-binding",
		},
		Subjects: []rbacv1.Subject{{
			Kind:     rbacv1.UserKind,
			APIGroup: rbacv1.GroupName,
			Name:     testUsername,
		}},
		RoleRef: rbacv1.RoleRef{
			Kind:     "ClusterRole",
			APIGroup: rbacv1.GroupName,
			Name:     "view",
		},
	})
}

func addTestClusterGroupCanViewEverythingRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, testGroup string) {
	addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name: "integration-test-group-readonly-role-binding",
		},
		Subjects: []rbacv1.Subject{{
			Kind:     rbacv1.GroupKind,
			APIGroup: rbacv1.GroupName,
			Name:     testGroup,
		}},
		RoleRef: rbacv1.RoleRef{
			Kind:     "ClusterRole",
			APIGroup: rbacv1.GroupName,
			Name:     "view",
		},
	})
}

func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
	if err != nil {
		// "404 not found" errors are acceptable, but others would be unexpected
		statusError, isStatus := err.(*errors.StatusError)
		require.True(t, isStatus)
		require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))

		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
		require.NoError(t, err)
	}

	t.Cleanup(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
		require.NoError(t, err, "Test failed to clean up after itself")
	})
}

func runKubectlGetNamespaces(t *testing.T, kubeConfigYAML string) (string, error) {
	f := writeStringToTempFile(t, "pinniped-generated-kubeconfig-*", kubeConfigYAML)

	//nolint: gosec // It's okay that we are passing f.Name() to an exec command here. It was created above.
	output, err := exec.Command(
		"kubectl", "get", "namespace", "--kubeconfig", f.Name(),
	).CombinedOutput()

	return string(output), err
}

func writeStringToTempFile(t *testing.T, filename string, kubeConfigYAML string) *os.File {
	t.Helper()
	f, err := ioutil.TempFile("", filename)
	require.NoError(t, err)
	deferMe := func() {
		err := os.Remove(f.Name())
		require.NoError(t, err)
	}
	t.Cleanup(deferMe)
	_, err = f.WriteString(kubeConfigYAML)
	require.NoError(t, err)
	err = f.Close()
	require.NoError(t, err)
	return f
}
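addTestClusterRoleBinding above tolerates a missing binding by type-asserting *errors.StatusError and comparing the code to http.StatusNotFound. The same apimachinery package also exports errors.IsNotFound, so a hypothetical equivalent of that branch (a sketch, not what this commit does) could read:

	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
	if err != nil {
		// errors.IsNotFound (k8s.io/apimachinery/pkg/api/errors) covers the
		// same "404 is acceptable, anything else is not" case as the manual
		// StatusError unpacking above, without a type assertion.
		require.True(t, errors.IsNotFound(err), "unexpected error: %v", err)

		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
		require.NoError(t, err)
	}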
@@ -7,16 +7,13 @@ import (
	"context"
	"crypto/x509"
	"encoding/pem"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"go.pinniped.dev/generated/1.19/apis/login/v1alpha1"
	"go.pinniped.dev/test/library"
@@ -136,26 +133,6 @@ func validCredentialRequestSpecWithRealToken(t *testing.T) v1alpha1.TokenCredent
	return v1alpha1.TokenCredentialRequestSpec{Token: library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")}
}

func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
	if err != nil {
		// "404 not found" errors are acceptable, but others would be unexpected
		statusError, isStatus := err.(*errors.StatusError)
		require.True(t, isStatus)
		require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))

		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
		require.NoError(t, err)
	}

	t.Cleanup(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
		require.NoError(t, err, "Test failed to clean up after itself")
	})
}

func stringPtr(s string) *string {
	return &s
}
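stringPtr, kept by this hunk, is the usual pre-generics way to take the address of a literal. On Go 1.18 or newer (an assumption here; this code targets an older toolchain), a single generic helper could subsume all such per-type functions:

	// Hypothetical generic replacement for stringPtr and friends (Go 1.18+).
	func ptr[T any](v T) *T {
		return &v
	}

	// Usage: s := ptr("hello"); n := ptr(42)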