Make Pinniped compatible with Kube clusters which have enabled PSAs
Where possible, use securityContext settings which will work with the most restrictive Pod Security Admission policy level (as of Kube 1.25). Where privileged containers are needed, use the namespace-level `pod-security.kubernetes.io/enforce` label to allow them. Also make similar adjustments to some integration tests so that they pass on test clusters which enforce the restricted PSA level.
commit b564454bab
parent 6b3a2e87c0
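For orientation: Pod Security Admission is configured per namespace through labels on the Namespace object, and each of the enforce, audit, and warn modes can be pinned to one of the privileged, baseline, or restricted levels. A minimal sketch of the enforce label used throughout this commit, with a hypothetical namespace name:

apiVersion: v1
kind: Namespace
metadata:
  name: example-namespace  # hypothetical; the real manifests below template this value
  labels:
    # "privileged" effectively disables PSA checks for pods in this namespace.
    # The other levels are "baseline" and "restricted".
    pod-security.kubernetes.io/enforce: privileged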
@@ -12,7 +12,14 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: #@ data.values.namespace
-  labels: #@ labels()
+  labels:
+    _: #@ template.replace(labels())
+    #! When deploying onto a cluster which has PSAs enabled by default for namespaces,
+    #! effectively disable them for this namespace. The kube-cert-agent Deployment's pod
+    #! created by the Concierge in this namespace needs to be able to perform privileged
+    #! actions. The regular Concierge pod containers created by the Deployment below do
+    #! not need special privileges and are marked as such in their securityContext settings.
+    pod-security.kubernetes.io/enforce: privileged
 #@ end
 ---
 apiVersion: v1
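A note on the ytt idiom above: `template.replace` replaces the placeholder `_:` item with the contents of the map returned by `labels()`, so the templated labels end up alongside the literal PSA label in the rendered manifest. A rough sketch of the rendered result, assuming (hypothetically) that `labels()` yields a single `app` label and that `data.values.namespace` is `pinniped-concierge`:

kind: Namespace
metadata:
  name: pinniped-concierge
  labels:
    app: pinniped-concierge  # from labels(), spliced in by template.replace
    pod-security.kubernetes.io/enforce: privileged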
@@ -148,6 +155,15 @@ spec:
         imagePullPolicy: IfNotPresent
         securityContext:
           readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop: [ "ALL" ]
+          #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
+          #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
+          #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
+          seccompProfile:
+            type: "RuntimeDefault"
         resources:
           requests:
             cpu: "100m"
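For reference, each field added above maps onto a requirement of the restricted PSA level, while `readOnlyRootFilesystem` (already present) is additional hardening that no PSA level requires. A summary sketch, with the corresponding restricted-policy check noted per field:

securityContext:
  readOnlyRootFilesystem: true     # not a PSA requirement; defense in depth
  runAsNonRoot: true               # restricted: containers must not run as root
  allowPrivilegeEscalation: false  # restricted: privilege escalation must be disallowed
  capabilities:
    drop: [ "ALL" ]                # restricted: all capabilities must be dropped
  seccompProfile:
    type: "RuntimeDefault"         # restricted: profile must be RuntimeDefault or Localhost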
@@ -1,4 +1,4 @@
-#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 #! SPDX-License-Identifier: Apache-2.0
 
 #@ load("@ytt:data", "data")
@@ -65,6 +65,17 @@ spec:
         imagePullPolicy: IfNotPresent
         command:
           - local-user-authenticator
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop: [ "ALL" ]
+          #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
+          #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
+          #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
+          seccompProfile:
+            type: "RuntimeDefault"
 ---
 apiVersion: v1
 kind: Service
@@ -95,6 +95,15 @@ spec:
           - /etc/config/pinniped.yaml
         securityContext:
           readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop: [ "ALL" ]
+          #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
+          #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
+          #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
+          seccompProfile:
+            type: "RuntimeDefault"
         resources:
           requests:
             cpu: "100m"
@@ -1,4 +1,4 @@
-#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 #! SPDX-License-Identifier: Apache-2.0
 
 ---
@@ -6,3 +6,8 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: tools
+  labels:
+    # When deploying onto a cluster which has PSAs enabled by default for namespaces,
+    # effectively disable them for this namespace. This namespace is only for integration
+    # testing helper tools, and should never be deployed in production installs.
+    pod-security.kubernetes.io/enforce: privileged
@@ -949,6 +949,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 					Image:           env.ShellContainerImage,
 					ImagePullPolicy: corev1.PullIfNotPresent,
 					Command:         []string{"sh", "-c", "sleep 3600"},
+					// Use a restrictive security context just in case the test cluster has PSAs enabled.
+					SecurityContext: testlib.RestrictiveSecurityContext(),
 				},
 			},
 			ServiceAccountName: saName,
@@ -1090,6 +1092,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 				corev1.ResourceCPU: resource.MustParse("10m"),
 			},
 		},
+		// Use a restrictive security context just in case the test cluster has PSAs enabled.
+		SecurityContext: testlib.RestrictiveSecurityContext(),
 	}}})
 
 	// Try "kubectl exec" through the impersonation proxy.
@@ -1,4 +1,4 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
 package integration
@@ -176,6 +176,8 @@ func TestWhoAmI_ServiceAccount_TokenRequest_Parallel(t *testing.T) {
 					Image:           env.ShellContainerImage,
 					ImagePullPolicy: corev1.PullIfNotPresent,
 					Command:         []string{"sh", "-c", "sleep 3600"},
+					// Use a restrictive security context just in case the test cluster has PSAs enabled.
+					SecurityContext: testlib.RestrictiveSecurityContext(),
 				},
 			},
 			ServiceAccountName: sa.Name,
@@ -18,14 +18,14 @@ import (
 	authorizationv1 "k8s.io/api/authorization/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
-	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+	"k8s.io/utils/pointer"
 
 	auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
 	"go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
@@ -526,6 +526,19 @@ func CreateTokenCredentialRequest(ctx context.Context, t *testing.T, spec v1alph
 	)
 }
 
+// RestrictiveSecurityContext returns a container SecurityContext which will be allowed by the most
+// restrictive level of Pod Security Admission policy (as of Kube v1.25's policies).
+func RestrictiveSecurityContext() *corev1.SecurityContext {
+	return &corev1.SecurityContext{
+		Capabilities: &corev1.Capabilities{
+			Drop: []corev1.Capability{"ALL"},
+		},
+		RunAsNonRoot:             pointer.Bool(true),
+		AllowPrivilegeEscalation: pointer.Bool(false),
+		SeccompProfile:           &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
+	}
+}
+
 func CreatePod(ctx context.Context, t *testing.T, name, namespace string, spec corev1.PodSpec) *corev1.Pod {
 	t.Helper()