From b564454bab1f6170bfa91ba4d7d9e05ae5e6748c Mon Sep 17 00:00:00 2001
From: Ryan Richard
Date: Thu, 15 Sep 2022 14:58:15 -0700
Subject: [PATCH] Make Pinniped compatible with Kube clusters which have
 enabled PSAs

Where possible, use securityContext settings which will work with the most
restrictive Pod Security Admission policy level (as of Kube 1.25). Where
privileged containers are needed, use the namespace-level
pod-security.kubernetes.io/enforce label to allow them.

Also make similar adjustments to some integration tests so that they pass on
test clusters which enforce restrictive PSA policy levels.
---
 deploy/concierge/deployment.yaml              | 18 +++++++++++++++++-
 .../local-user-authenticator/deployment.yaml  | 13 ++++++++++++-
 deploy/supervisor/deployment.yaml             |  9 +++++++++
 test/deploy/tools/namespace.yaml              |  7 ++++++-
 .../concierge_impersonation_proxy_test.go     |  4 ++++
 test/integration/whoami_test.go               |  4 +++-
 test/testlib/client.go                        | 17 +++++++++++++++--
 7 files changed, 66 insertions(+), 6 deletions(-)

diff --git a/deploy/concierge/deployment.yaml b/deploy/concierge/deployment.yaml
index 06bb8a1b..bc8397cc 100644
--- a/deploy/concierge/deployment.yaml
+++ b/deploy/concierge/deployment.yaml
@@ -12,7 +12,14 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: #@ data.values.namespace
-  labels: #@ labels()
+  labels:
+    _: #@ template.replace(labels())
+    #! When deploying onto a cluster which has PSAs enabled by default for namespaces,
+    #! effectively disable them for this namespace. The kube-cert-agent Deployment's pod
+    #! created by the Concierge in this namespace needs to be able to perform privileged
+    #! actions. The regular Concierge pod containers created by the Deployment below do
+    #! not need special privileges and are marked as such in their securityContext settings.
+    pod-security.kubernetes.io/enforce: privileged
 #@ end
 ---
 apiVersion: v1
@@ -148,6 +155,15 @@ spec:
         imagePullPolicy: IfNotPresent
         securityContext:
           readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop: [ "ALL" ]
+          #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
+          #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
+          #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
+          seccompProfile:
+            type: "RuntimeDefault"
         resources:
           requests:
             cpu: "100m"

diff --git a/deploy/local-user-authenticator/deployment.yaml b/deploy/local-user-authenticator/deployment.yaml
index bb154f81..5098422a 100644
--- a/deploy/local-user-authenticator/deployment.yaml
+++ b/deploy/local-user-authenticator/deployment.yaml
@@ -1,4 +1,4 @@
-#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 #! SPDX-License-Identifier: Apache-2.0

 #@ load("@ytt:data", "data")
@@ -65,6 +65,17 @@ spec:
         imagePullPolicy: IfNotPresent
         command:
         - local-user-authenticator
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop: [ "ALL" ]
+          #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
+          #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
+          #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
+          seccompProfile:
+            type: "RuntimeDefault"
 ---
 apiVersion: v1
 kind: Service
diff --git a/deploy/supervisor/deployment.yaml b/deploy/supervisor/deployment.yaml
index b4c60ec2..25d69a17 100644
--- a/deploy/supervisor/deployment.yaml
+++ b/deploy/supervisor/deployment.yaml
@@ -95,6 +95,15 @@ spec:
         - /etc/config/pinniped.yaml
         securityContext:
           readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop: [ "ALL" ]
+          #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
+          #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
+          #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
+          seccompProfile:
+            type: "RuntimeDefault"
         resources:
           requests:
             cpu: "100m"

diff --git a/test/deploy/tools/namespace.yaml b/test/deploy/tools/namespace.yaml
index 7819ca24..a540f341 100644
--- a/test/deploy/tools/namespace.yaml
+++ b/test/deploy/tools/namespace.yaml
@@ -1,4 +1,4 @@
-#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 #! SPDX-License-Identifier: Apache-2.0

 ---
@@ -6,3 +6,8 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: tools
+  labels:
+    #! When deploying onto a cluster which has PSAs enabled by default for namespaces,
+    #! effectively disable them for this namespace. This namespace is only for integration
+    #! testing helper tools, and should never be deployed in production installs.
+    pod-security.kubernetes.io/enforce: privileged

diff --git a/test/integration/concierge_impersonation_proxy_test.go b/test/integration/concierge_impersonation_proxy_test.go
index d09f7c82..a2617fa0 100644
--- a/test/integration/concierge_impersonation_proxy_test.go
+++ b/test/integration/concierge_impersonation_proxy_test.go
@@ -949,6 +949,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 					Image:           env.ShellContainerImage,
 					ImagePullPolicy: corev1.PullIfNotPresent,
 					Command:         []string{"sh", "-c", "sleep 3600"},
+					// Use a restrictive security context just in case the test cluster has PSAs enabled.
+					SecurityContext: testlib.RestrictiveSecurityContext(),
 				},
 			},
 			ServiceAccountName: saName,
@@ -1090,6 +1092,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 					corev1.ResourceCPU: resource.MustParse("10m"),
 				},
 			},
+			// Use a restrictive security context just in case the test cluster has PSAs enabled.
+			SecurityContext: testlib.RestrictiveSecurityContext(),
 		}}})

 	// Try "kubectl exec" through the impersonation proxy.

diff --git a/test/integration/whoami_test.go b/test/integration/whoami_test.go
index f4aef611..d8eb7de5 100644
--- a/test/integration/whoami_test.go
+++ b/test/integration/whoami_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0

 package integration
@@ -176,6 +176,8 @@ func TestWhoAmI_ServiceAccount_TokenRequest_Parallel(t *testing.T) {
 				Image:           env.ShellContainerImage,
 				ImagePullPolicy: corev1.PullIfNotPresent,
 				Command:         []string{"sh", "-c", "sleep 3600"},
+				// Use a restrictive security context just in case the test cluster has PSAs enabled.
+				SecurityContext: testlib.RestrictiveSecurityContext(),
 			},
 		},
 		ServiceAccountName: sa.Name,
diff --git a/test/testlib/client.go b/test/testlib/client.go
index 481adf90..a4733bb8 100644
--- a/test/testlib/client.go
+++ b/test/testlib/client.go
@@ -18,14 +18,14 @@ import (
 	authorizationv1 "k8s.io/api/authorization/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
-
-	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+	"k8s.io/utils/pointer"

 	auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
 	"go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
@@ -526,6 +526,19 @@ func CreateTokenCredentialRequest(ctx context.Context, t *testing.T, spec v1alph
 	)
 }

+// RestrictiveSecurityContext returns a container SecurityContext which will be allowed by the most
+// restrictive level of Pod Security Admission policy (as of Kube v1.25's policies).
+func RestrictiveSecurityContext() *corev1.SecurityContext {
+	return &corev1.SecurityContext{
+		Capabilities: &corev1.Capabilities{
+			Drop: []corev1.Capability{"ALL"},
+		},
+		RunAsNonRoot:             pointer.Bool(true),
+		AllowPrivilegeEscalation: pointer.Bool(false),
+		SeccompProfile:           &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
+	}
+}
+
 func CreatePod(ctx context.Context, t *testing.T, name, namespace string, spec corev1.PodSpec) *corev1.Pod {
 	t.Helper()