Merge remote-tracking branch 'upstream/main' into 1-19-exec-strategy

Commit 83920db502
@@ -56,29 +56,35 @@ func TestCLI(t *testing.T) {
 	defer cleanupFunc()
 
 	// Run pinniped CLI to get kubeconfig.
-	kubeConfig := runPinnipedCLI(t, pinnipedExe, token, namespaceName)
+	kubeConfigYAML := runPinnipedCLI(t, pinnipedExe, token, namespaceName)
 
-	// In addition to the client-go based testing below, also try the kubeconfig
-	// with kubectl once just in case it is somehow different.
-	runKubectlCLI(t, kubeConfig, namespaceName, testUsername)
-
-	// Create Kubernetes client with kubeconfig from pinniped CLI.
-	kubeClient := library.NewClientsetForKubeConfig(t, kubeConfig)
-
-	// Validate that we can auth to the API via our user.
+	adminClient := library.NewClientset(t)
+
 	ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*3)
 	defer cancelFunc()
 
-	adminClient := library.NewClientset(t)
-
-	t.Run("access as user", accessAsUserTest(ctx, adminClient, testUsername, kubeClient))
+	// In addition to the client-go based testing below, also try the kubeconfig
+	// with kubectl to validate that it works.
+	t.Run(
+		"access as user with kubectl",
+		accessAsUserWithKubectlTest(ctx, adminClient, kubeConfigYAML, testUsername, namespaceName),
+	)
 	for _, group := range expectedTestUserGroups {
 		group := group
 		t.Run(
-			"access as group "+group,
-			accessAsGroupTest(ctx, adminClient, group, kubeClient),
+			"access as group "+group+" with kubectl",
+			accessAsGroupWithKubectlTest(ctx, adminClient, kubeConfigYAML, group, namespaceName),
 		)
 	}
 
+	// Create Kubernetes client with kubeconfig from pinniped CLI.
+	kubeClient := library.NewClientsetForKubeConfig(t, kubeConfigYAML)
+
+	// Validate that we can auth to the API via our user.
+	t.Run("access as user with client-go", accessAsUserTest(ctx, adminClient, testUsername, kubeClient))
+	for _, group := range expectedTestUserGroups {
+		group := group
+		t.Run("access as group "+group+" with client-go", accessAsGroupTest(ctx, adminClient, group, kubeClient))
+	}
 }
 
 func buildPinnipedCLI(t *testing.T) (string, func()) {
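Note: the client-go half of this test hinges on library.NewClientsetForKubeConfig from the repo's test library, whose implementation is not part of this diff. As a rough sketch of what such a helper typically looks like (names and error handling assumed, not taken from this commit), it parses the kubeconfig YAML with client-go's clientcmd and builds a typed clientset:

// Hypothetical sketch of a NewClientsetForKubeConfig-style helper; the real
// one lives in go.pinniped.dev/test/library and may differ.
package library

import (
	"testing"

	"github.com/stretchr/testify/require"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func NewClientsetForKubeConfig(t *testing.T, kubeConfigYAML string) kubernetes.Interface {
	t.Helper()

	// Parse the kubeconfig YAML into a rest.Config...
	restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeConfigYAML))
	require.NoError(t, err)

	// ...then build a typed clientset from it.
	clientset, err := kubernetes.NewForConfig(restConfig)
	require.NoError(t, err)
	return clientset
}

The `group := group` re-declarations capture a fresh copy of the loop variable for each subtest closure, the standard pre-Go 1.22 idiom when a range variable escapes into t.Run callbacks.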
@@ -115,39 +121,3 @@ func runPinnipedCLI(t *testing.T, pinnipedExe, token, namespaceName string) string {
 	return string(output)
 }
-
-func runKubectlCLI(t *testing.T, kubeConfig, namespaceName, username string) string {
-	t.Helper()
-
-	f, err := ioutil.TempFile("", "pinniped-generated-kubeconfig-*")
-	require.NoError(t, err)
-	defer func() {
-		err := os.Remove(f.Name())
-		require.NoError(t, err)
-	}()
-	_, err = f.WriteString(kubeConfig)
-	require.NoError(t, err)
-	err = f.Close()
-	require.NoError(t, err)
-
-	//nolint: gosec // It's okay that we are passing f.Name() to an exec command here. It was created above.
-	output, err := exec.Command(
-		"kubectl",
-		"get",
-		"pods",
-		"--kubeconfig", f.Name(),
-		"--namespace", namespaceName,
-	).CombinedOutput()
-
-	// Expect an error because this user has no RBAC permission. However, the
-	// error message should state that we had already authenticated as the test user.
-	expectedErrorMessage := `Error from server (Forbidden): pods is forbidden: User "` +
-		username +
-		`" cannot list resource "pods" in API group "" in the namespace "` +
-		namespaceName +
-		`"` + "\n"
-	require.EqualError(t, err, "exit status 1")
-	require.Equal(t, expectedErrorMessage, string(output))
-
-	return string(output)
-}
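Note on the removed helper's assertions, since the same execution pattern reappears in runKubectlGetNamespaces below: CombinedOutput returns kubectl's stdout and stderr interleaved, and a non-zero exit comes back as an *exec.ExitError whose Error() text is only "exit status 1". That is why the Forbidden message had to be matched against the output rather than the error value. A minimal standalone illustration (the command is an example, not from this commit):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// CombinedOutput captures stdout and stderr together; the returned error
	// carries only the exit status, not the command's diagnostic text.
	output, err := exec.Command("kubectl", "get", "pods").CombinedOutput()

	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		fmt.Printf("exit code %d, output: %s\n", exitErr.ExitCode(), output)
	}
}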
@@ -4,9 +4,15 @@ package integration
 import (
 	"context"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
 	"testing"
 	"time"
 
+	"k8s.io/apimachinery/pkg/api/errors"
+
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
@@ -27,22 +33,7 @@ func accessAsUserTest(
 	clientUnderTest kubernetes.Interface,
 ) func(t *testing.T) {
 	return func(t *testing.T) {
-		addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
-			TypeMeta: metav1.TypeMeta{},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "integration-test-user-readonly-role-binding",
-			},
-			Subjects: []rbacv1.Subject{{
-				Kind:     rbacv1.UserKind,
-				APIGroup: rbacv1.GroupName,
-				Name:     testUsername,
-			}},
-			RoleRef: rbacv1.RoleRef{
-				Kind:     "ClusterRole",
-				APIGroup: rbacv1.GroupName,
-				Name:     "view",
-			},
-		})
+		addTestClusterUserCanViewEverythingRoleBinding(ctx, t, adminClient, testUsername)
 
 		// Use the client which is authenticated as the test user to list namespaces
 		var listNamespaceResponse *v1.NamespaceList
@@ -57,6 +48,30 @@ func accessAsUserTest(
 	}
 }
 
+func accessAsUserWithKubectlTest(
+	ctx context.Context,
+	adminClient kubernetes.Interface,
+	testKubeConfigYAML string,
+	testUsername string,
+	expectedNamespace string,
+) func(t *testing.T) {
+	return func(t *testing.T) {
+		addTestClusterUserCanViewEverythingRoleBinding(ctx, t, adminClient, testUsername)
+
+		// Use the given kubeconfig with kubectl to list namespaces as the test user
+		var kubectlCommandOutput string
+		var err error
+		var canListNamespaces = func() bool {
+			kubectlCommandOutput, err = runKubectlGetNamespaces(t, testKubeConfigYAML)
+			return err == nil
+		}
+
+		assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
+		require.NoError(t, err) // prints out the error and stops the test in case of failure
+		require.Contains(t, kubectlCommandOutput, expectedNamespace)
+	}
+}
+
 // accessAsGroupTest runs a generic test in which a clientUnderTest with membership in group
 // testGroup tries to auth to the kube API (i.e., list namespaces).
 //
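Note: the kubectl subtests poll instead of asserting once, presumably because the freshly issued credential and RBAC binding can take a moment to become effective. assert.Eventually only records a failure and keeps going, so the require.NoError that follows is what actually aborts the test and prints the error from the last attempt. The same retry shape, generalized (the poll target is a stand-in, not from this diff):

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func pollUntilReady(t *testing.T, poll func() (string, error)) string {
	t.Helper()

	var out string
	var err error
	// Retry every 250ms for up to 3s; Eventually stops as soon as poll succeeds.
	assert.Eventually(t, func() bool {
		out, err = poll()
		return err == nil
	}, 3*time.Second, 250*time.Millisecond)
	// assert.Eventually only marks the test as failed; require.NoError halts it
	// and surfaces the error from the final attempt.
	require.NoError(t, err)
	return out
}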
@@ -69,22 +84,7 @@ func accessAsGroupTest(
 	clientUnderTest kubernetes.Interface,
 ) func(t *testing.T) {
 	return func(t *testing.T) {
-		addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
-			TypeMeta: metav1.TypeMeta{},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "integration-test-group-readonly-role-binding",
-			},
-			Subjects: []rbacv1.Subject{{
-				Kind:     rbacv1.GroupKind,
-				APIGroup: rbacv1.GroupName,
-				Name:     testGroup,
-			}},
-			RoleRef: rbacv1.RoleRef{
-				Kind:     "ClusterRole",
-				APIGroup: rbacv1.GroupName,
-				Name:     "view",
-			},
-		})
+		addTestClusterGroupCanViewEverythingRoleBinding(ctx, t, adminClient, testGroup)
 
 		// Use the client which is authenticated as the test user to list namespaces
 		var listNamespaceResponse *v1.NamespaceList
@@ -98,3 +98,112 @@ func accessAsGroupTest(
 		require.NotEmpty(t, listNamespaceResponse.Items)
 	}
 }
+
+func accessAsGroupWithKubectlTest(
+	ctx context.Context,
+	adminClient kubernetes.Interface,
+	testKubeConfigYAML string,
+	testGroup string,
+	expectedNamespace string,
+) func(t *testing.T) {
+	return func(t *testing.T) {
+		addTestClusterGroupCanViewEverythingRoleBinding(ctx, t, adminClient, testGroup)
+
+		// Use the given kubeconfig with kubectl to list namespaces as the test user
+		var kubectlCommandOutput string
+		var err error
+		var canListNamespaces = func() bool {
+			kubectlCommandOutput, err = runKubectlGetNamespaces(t, testKubeConfigYAML)
+			return err == nil
+		}
+
+		assert.Eventually(t, canListNamespaces, 3*time.Second, 250*time.Millisecond)
+		require.NoError(t, err) // prints out the error and stops the test in case of failure
+		require.Contains(t, kubectlCommandOutput, expectedNamespace)
+	}
+}
+
+func addTestClusterUserCanViewEverythingRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, testUsername string) {
+	addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "integration-test-user-readonly-role-binding",
+		},
+		Subjects: []rbacv1.Subject{{
+			Kind:     rbacv1.UserKind,
+			APIGroup: rbacv1.GroupName,
+			Name:     testUsername,
+		}},
+		RoleRef: rbacv1.RoleRef{
+			Kind:     "ClusterRole",
+			APIGroup: rbacv1.GroupName,
+			Name:     "view",
+		},
+	})
+}
+
+func addTestClusterGroupCanViewEverythingRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, testGroup string) {
+	addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "integration-test-group-readonly-role-binding",
+		},
+		Subjects: []rbacv1.Subject{{
+			Kind:     rbacv1.GroupKind,
+			APIGroup: rbacv1.GroupName,
+			Name:     testGroup,
+		}},
+		RoleRef: rbacv1.RoleRef{
+			Kind:     "ClusterRole",
+			APIGroup: rbacv1.GroupName,
+			Name:     "view",
+		},
+	})
+}
+
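Note: the user and group helpers differ only in the binding name and the Subject kind. A single helper parameterized on those two values would also work; a hypothetical merged form (assuming the addTestClusterRoleBinding helper from this diff is in scope, and arguably less readable at call sites):

// Hypothetical merged helper; kind would be rbacv1.UserKind or rbacv1.GroupKind.
func addTestViewerClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, kind, subjectName, bindingName string) {
	addTestClusterRoleBinding(ctx, t, adminClient, &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: bindingName},
		Subjects: []rbacv1.Subject{{
			Kind:     kind,
			APIGroup: rbacv1.GroupName,
			Name:     subjectName,
		}},
		RoleRef: rbacv1.RoleRef{
			Kind:     "ClusterRole",
			APIGroup: rbacv1.GroupName,
			Name:     "view",
		},
	})
}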
+func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
+	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
+	if err != nil {
+		// "404 not found" errors are acceptable, but others would be unexpected
+		statusError, isStatus := err.(*errors.StatusError)
+		require.True(t, isStatus)
+		require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))
+
+		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
+		require.NoError(t, err)
+	}
+
+	t.Cleanup(func() {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
+		require.NoError(t, err, "Test failed to clean up after itself")
+	})
+}
+
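Note: addTestClusterRoleBinding makes the binding setup idempotent (Get, tolerate only a 404, then Create) and registers a t.Cleanup so the binding is deleted even when the test fails. The 404 is detected here by type-asserting *errors.StatusError and comparing the HTTP code; apimachinery also ships a helper for exactly this check, so an equivalent formulation (hypothetical, not what this commit chose) would be:

package integration

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureClusterRoleBinding is a hypothetical variant of addTestClusterRoleBinding
// that uses errors.IsNotFound instead of a manual StatusError type assertion.
// (The t.Cleanup registration is omitted here for brevity.)
func ensureClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
	if err != nil {
		// IsNotFound checks the same "404" condition as the manual assertion.
		require.True(t, errors.IsNotFound(err), "unexpected error: %v", err)
		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
		require.NoError(t, err)
	}
}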
+func runKubectlGetNamespaces(t *testing.T, kubeConfigYAML string) (string, error) {
+	f := writeStringToTempFile(t, "pinniped-generated-kubeconfig-*", kubeConfigYAML)
+
+	//nolint: gosec // It's okay that we are passing f.Name() to an exec command here. It was created above.
+	output, err := exec.Command(
+		"kubectl", "get", "namespace", "--kubeconfig", f.Name(),
+	).CombinedOutput()
+
+	return string(output), err
+}
+
+func writeStringToTempFile(t *testing.T, filename string, kubeConfigYAML string) *os.File {
+	t.Helper()
+	f, err := ioutil.TempFile("", filename)
+	require.NoError(t, err)
+	deferMe := func() {
+		err := os.Remove(f.Name())
+		require.NoError(t, err)
+	}
+	t.Cleanup(deferMe)
+	_, err = f.WriteString(kubeConfigYAML)
+	require.NoError(t, err)
+	err = f.Close()
+	require.NoError(t, err)
+	return f
+}
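Note: writeStringToTempFile pairs ioutil.TempFile with a t.Cleanup that removes the file, which is correct for the Go version in use here. On newer Go (testing.T.TempDir since 1.15, os.WriteFile since 1.16), the manual cleanup disappears entirely because the per-test temp directory is deleted automatically. A hypothetical modernized equivalent:

package integration

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

// Hypothetical variant using t.TempDir; the directory and the file inside it
// are removed automatically when the test and its subtests finish. Note that
// name is a plain file name here (e.g. "kubeconfig.yaml"), not a TempFile-style
// "*" pattern.
func writeStringToTempFilePath(t *testing.T, name string, contents string) string {
	t.Helper()

	path := filepath.Join(t.TempDir(), name)
	err := os.WriteFile(path, []byte(contents), 0600)
	require.NoError(t, err)
	return path
}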
@@ -7,16 +7,13 @@ import (
 	"context"
 	"crypto/x509"
 	"encoding/pem"
-	"net/http"
 	"strings"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
-	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
 
 	"go.pinniped.dev/generated/1.19/apis/login/v1alpha1"
 	"go.pinniped.dev/test/library"
@@ -136,26 +133,6 @@ func validCredentialRequestSpecWithRealToken(t *testing.T) v1alpha1.TokenCredentialRequestSpec {
 	return v1alpha1.TokenCredentialRequestSpec{Token: library.GetEnv(t, "PINNIPED_TEST_USER_TOKEN")}
 }
 
-func addTestClusterRoleBinding(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, binding *rbacv1.ClusterRoleBinding) {
-	_, err := adminClient.RbacV1().ClusterRoleBindings().Get(ctx, binding.Name, metav1.GetOptions{})
-	if err != nil {
-		// "404 not found" errors are acceptable, but others would be unexpected
-		statusError, isStatus := err.(*errors.StatusError)
-		require.True(t, isStatus)
-		require.Equal(t, http.StatusNotFound, int(statusError.Status().Code))
-
-		_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, binding, metav1.CreateOptions{})
-		require.NoError(t, err)
-	}
-
-	t.Cleanup(func() {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		defer cancel()
-		err = adminClient.RbacV1().ClusterRoleBindings().Delete(ctx, binding.Name, metav1.DeleteOptions{})
-		require.NoError(t, err, "Test failed to clean up after itself")
-	})
-}
-
 func stringPtr(s string) *string {
 	return &s
 }