Merge pull request #667 from enj/enj/f/impersonator_distinct_sa

impersonator: run as a distinct SA with minimal permissions

Mo Khan 2021-06-11 15:36:28 -04:00 committed by GitHub
commit 87489da316
5 changed files with 315 additions and 11 deletions


@@ -29,6 +29,18 @@ metadata:
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! we need to create this service account before we create the secret
kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount"
secrets: #! make sure the token controller does not create any other secrets
- name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("config")
@@ -134,6 +146,8 @@ spec:
mountPath: /etc/config
- name: podinfo
mountPath: /etc/podinfo
- name: impersonation-proxy
mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount
livenessProbe:
httpGet:
path: /healthz
@@ -156,6 +170,12 @@ spec:
- name: config-volume
configMap:
name: #@ defaultResourceNameWithSuffix("config")
- name: impersonation-proxy
secret:
secretName: #@ defaultResourceNameWithSuffix("impersonation-proxy")
items: #! make sure our pod does not start until the token controller has a chance to populate the secret
- key: token
path: token
- name: podinfo
downwardAPI:
items:
@@ -265,3 +285,16 @@ spec:
loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip
#@ end
annotations: #@ data.values.impersonation_proxy_spec.service.annotations
---
apiVersion: v1
kind: Secret
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! wait until the SA exists to create this secret so that the token controller does not delete it
#! we have this secret at the end so that kubectl will create the service account first
kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount"
kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
type: kubernetes.io/service-account-token
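Taken together, the kapp change-group/change-rule annotations and the pod's secret volume above encode an ordering guarantee: the ServiceAccount must exist before this Secret (so the token controller does not delete it), and the pod cannot start until the token controller has populated the Secret's token key, because the volume mounts that specific item. Below is a minimal sketch of that readiness condition, assuming a stock client-go setup; the helper name is hypothetical and not part of this change.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForServiceAccountToken polls until the token controller has written the
// "token" key into the service-account-token Secret, which is effectively the
// same condition kubelet waits on before it can mount the secret volume and
// start the impersonation proxy pod.
func waitForServiceAccountToken(ctx context.Context, client kubernetes.Interface, namespace, secretName string) error {
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		secret, err := client.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
		if err != nil {
			return false, nil // the Secret may not exist yet; keep polling
		}
		return len(secret.Data["token"]) > 0, nil // done once the token controller fills in the key
	})
}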


@@ -28,12 +28,6 @@ rules:
resources: [ securitycontextconstraints ]
verbs: [ use ]
resourceNames: [ nonroot ]
- apiGroups: [ "" ]
resources: [ "users", "groups", "serviceaccounts" ]
verbs: [ "impersonate" ]
- apiGroups: [ "authentication.k8s.io" ]
resources: [ "*" ] #! What we really want is userextras/* but the RBAC authorizer only supports */subresource, not resource/*
verbs: [ "impersonate" ]
- apiGroups: [ "" ]
resources: [ nodes ]
verbs: [ list ]
@@ -64,6 +58,35 @@ roleRef:
name: #@ defaultResourceNameWithSuffix("aggregated-api-server")
apiGroup: rbac.authorization.k8s.io
#! Give minimal permissions to the impersonation proxy service account
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
labels: #@ labels()
rules:
- apiGroups: [ "" ]
resources: [ "users", "groups", "serviceaccounts" ]
verbs: [ "impersonate" ]
- apiGroups: [ "authentication.k8s.io" ]
resources: [ "*" ] #! What we really want is userextras/* but the RBAC authorizer only supports */subresource, not resource/*
verbs: [ "impersonate" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
labels: #@ labels()
subjects:
- kind: ServiceAccount
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
roleRef:
kind: ClusterRole
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
apiGroup: rbac.authorization.k8s.io
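Because the impersonate verbs now live only on this dedicated ClusterRole and binding, the grant is easy to verify from the outside. Here is a hedged sketch, not part of this PR and with an illustrative helper name, that asks the API server via a SubjectAccessReview whether the new service account may impersonate users:

package example

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	"k8s.io/client-go/kubernetes"
)

// canImpersonateUsers asks the API server whether the impersonation proxy SA
// was actually granted the "impersonate" verb on "users" by the binding above.
func canImpersonateUsers(ctx context.Context, client kubernetes.Interface, namespace, saName string) (bool, error) {
	sar := &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User: serviceaccount.MakeUsername(namespace, saName), // e.g. system:serviceaccount:<ns>:<name>
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:     "impersonate",
				Resource: "users",
			},
		},
	}
	resp, err := client.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{})
	if err != nil {
		return false, fmt.Errorf("SubjectAccessReview failed: %w", err)
	}
	return resp.Status.Allowed, nil
}

The rough shell equivalent is kubectl auth can-i impersonate users --as=system:serviceaccount:<namespace>:<name>.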
#! Give permission to the kube-cert-agent Pod to run privileged.
---
apiVersion: rbac.authorization.k8s.io/v1


@@ -11,6 +11,7 @@ import (
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"strings"
"time"
@@ -102,12 +103,12 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
// along with the Kube API server's CA.
// Note: any changes to the Authentication stack need to be kept in sync with any assumptions made
// by getTransportForUser, especially if we ever update the TCR API to start returning bearer tokens.
- kubeClient, err := kubeclient.New(clientOpts...)
+ kubeClientUnsafeForProxying, err := kubeclient.New(clientOpts...)
if err != nil {
return nil, err
}
kubeClientCA, err := dynamiccertificates.NewDynamicCAFromConfigMapController(
"client-ca", metav1.NamespaceSystem, "extension-apiserver-authentication", "client-ca-file", kubeClient.Kubernetes,
"client-ca", metav1.NamespaceSystem, "extension-apiserver-authentication", "client-ca-file", kubeClientUnsafeForProxying.Kubernetes,
)
if err != nil {
return nil, err
@@ -137,7 +138,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
// Loopback authentication to this server does not really make sense since we just proxy everything to
// the Kube API server, thus we replace loopback connection config with one that does direct connections to
// the Kube API server. Loopback config is mainly used by post start hooks, so this is mostly future proofing.
- serverConfig.LoopbackClientConfig = rest.CopyConfig(kubeClient.ProtoConfig) // assume proto is safe (hooks can override)
+ serverConfig.LoopbackClientConfig = rest.CopyConfig(kubeClientUnsafeForProxying.ProtoConfig) // assume proto is safe (hooks can override)
// Remove the bearer token so our authorizer does not get stomped on by AuthorizeClientBearerToken.
// See sanity checks at the end of this function.
serverConfig.LoopbackClientConfig.BearerToken = ""
@@ -152,9 +153,15 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
sets.NewString("attach", "exec", "proxy", "log", "portforward"),
)
// use the custom impersonation proxy service account credentials when reverse proxying to the API server
kubeClientForProxy, err := getReverseProxyClient(clientOpts)
if err != nil {
return nil, fmt.Errorf("failed to build reverse proxy client: %w", err)
}
// Assume proto config is safe because transport level configs do not use rest.ContentConfig.
// Thus if we are interacting with actual APIs, they should be using pre-built clients.
- impersonationProxyFunc, err := newImpersonationReverseProxyFunc(rest.CopyConfig(kubeClient.ProtoConfig))
+ impersonationProxyFunc, err := newImpersonationReverseProxyFunc(rest.CopyConfig(kubeClientForProxy.ProtoConfig))
if err != nil {
return nil, err
}
@@ -196,7 +203,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
serverConfig.AuditBackend = &auditfake.Backend{}
// Probe the API server to figure out if anonymous auth is enabled.
- anonymousAuthEnabled, err := isAnonymousAuthEnabled(kubeClient.JSONConfig)
+ anonymousAuthEnabled, err := isAnonymousAuthEnabled(kubeClientUnsafeForProxying.JSONConfig)
if err != nil {
return nil, fmt.Errorf("could not detect if anonymous authentication is enabled: %w", err)
}
@@ -315,6 +322,31 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
return result, nil
}
func getReverseProxyClient(clientOpts []kubeclient.Option) (*kubeclient.Client, error) {
// just use the overrides given during unit tests
if len(clientOpts) != 0 {
return kubeclient.New(clientOpts...)
}
// this is the magic path where the impersonation proxy SA token is mounted
const tokenFile = "/var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount/token" //nolint:gosec // this is not a credential
// make sure the token file we need exists before trying to use it
if _, err := os.Stat(tokenFile); err != nil {
return nil, err
}
// build an in cluster config that uses the impersonation proxy token file
impersonationProxyRestConfig, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
impersonationProxyRestConfig = rest.AnonymousClientConfig(impersonationProxyRestConfig)
impersonationProxyRestConfig.BearerTokenFile = tokenFile
return kubeclient.New(kubeclient.WithConfig(impersonationProxyRestConfig))
}
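getReverseProxyClient supplies only the credential half of the story; the impersonation headers are attached elsewhere (newImpersonationReverseProxyFunc is not shown in this diff). As a rough sketch of how the two halves combine, using client-go's stock transport helpers and hypothetical names: the outbound connection authenticates as the impersonation proxy SA while an Impersonate-User header carries the identity of the caller.

package example

import (
	"net/http/httputil"
	"net/url"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/transport"
)

// newUserProxy builds a reverse proxy that authenticates to the API server as
// the impersonation proxy service account (e.g. via the rest.Config produced
// by getReverseProxyClient) while impersonating the end user whose request is
// being proxied. This is a sketch of the idea, not the actual implementation.
func newUserProxy(saConfig *rest.Config, apiServerURL *url.URL, username string, groups []string) (*httputil.ReverseProxy, error) {
	rt, err := rest.TransportFor(saConfig) // the SA's bearer token lives in this transport
	if err != nil {
		return nil, err
	}
	proxy := httputil.NewSingleHostReverseProxy(apiServerURL)
	proxy.Transport = transport.NewImpersonatingRoundTripper(
		transport.ImpersonationConfig{UserName: username, Groups: groups}, // sets Impersonate-User / Impersonate-Group headers
		rt,
	)
	return proxy, nil
}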
func isAnonymousAuthEnabled(config *rest.Config) (bool, error) {
anonymousConfig := rest.AnonymousClientConfig(config)


@@ -1372,6 +1372,76 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's complicated.
})
})
t.Run("assert correct impersonator service account is being used", func(t *testing.T) {
impersonationProxyNodesClient := impersonationProxyKubeClient(t).CoreV1().Nodes() // pick some resource the test user cannot access
crbClient := adminClient.RbacV1().ClusterRoleBindings()
impersonationProxyName := env.ConciergeAppName + "-impersonation-proxy"
saFullName := serviceaccount.MakeUsername(env.ConciergeNamespace, impersonationProxyName)
// sanity check default expected error message
_, err := impersonationProxyNodesClient.List(ctx, metav1.ListOptions{})
require.True(t, k8serrors.IsForbidden(err), library.Sdump(err))
require.EqualError(t, err, `nodes is forbidden: User "`+env.TestUser.ExpectedUsername+`" cannot list resource "nodes" in API group "" at the cluster scope`)
// remove the impersonation proxy SA's permissions
crb, err := crbClient.Get(ctx, impersonationProxyName, metav1.GetOptions{})
require.NoError(t, err)
// sanity check the subject
require.Len(t, crb.Subjects, 1)
sub := crb.Subjects[0].DeepCopy()
require.Equal(t, &rbacv1.Subject{
Kind: "ServiceAccount",
APIGroup: "",
Name: impersonationProxyName,
Namespace: env.ConciergeNamespace,
}, sub)
crb.Subjects = nil
_, err = crbClient.Update(ctx, crb, metav1.UpdateOptions{})
require.NoError(t, err)
// make sure to put the permissions back at the end
t.Cleanup(func() {
crbEnd, errEnd := crbClient.Get(ctx, impersonationProxyName, metav1.GetOptions{})
require.NoError(t, errEnd)
crbEnd.Subjects = []rbacv1.Subject{*sub}
_, errUpdate := crbClient.Update(ctx, crbEnd, metav1.UpdateOptions{})
require.NoError(t, errUpdate)
library.WaitForUserToHaveAccess(t, saFullName, nil, &authorizationv1.ResourceAttributes{
Verb: "impersonate",
Resource: "users",
})
})
// assert that the impersonation proxy stops working when we remove its permissions
library.RequireEventuallyWithoutError(t, func() (bool, error) {
_, errList := impersonationProxyNodesClient.List(ctx, metav1.ListOptions{})
if errList == nil {
return false, fmt.Errorf("unexpected nil error for test user node list")
}
if !k8serrors.IsForbidden(errList) {
return false, fmt.Errorf("unexpected error for test user node list: %w", errList)
}
switch errList.Error() {
case `nodes is forbidden: User "` + env.TestUser.ExpectedUsername + `" cannot list resource "nodes" in API group "" at the cluster scope`:
t.Log("waiting for impersonation proxy service account to lose impersonate permissions")
return false, nil // RBAC change has not rolled out yet
case `users "` + env.TestUser.ExpectedUsername + `" is forbidden: User "` + saFullName +
`" cannot impersonate resource "users" in API group "" at the cluster scope`:
return true, nil // expected RBAC error
default:
return false, fmt.Errorf("unexpected forbidden error for test user node list: %w", errList)
}
}, time.Minute, time.Second)
})
t.Run("adding an annotation reconciles the LoadBalancer service", func(t *testing.T) {
if !(impersonatorShouldHaveStartedAutomaticallyByDefault && clusterSupportsLoadBalancers) {
t.Skip("only running when the cluster is meant to be using LoadBalancer services")


@@ -0,0 +1,146 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package integration
import (
"context"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
v1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
"k8s.io/client-go/rest"
"go.pinniped.dev/test/library"
)
func TestServiceAccountPermissions(t *testing.T) {
// TODO: update this test to check the permissions of all service accounts
// For now it just checks the permissions of the impersonation proxy SA
env := library.IntegrationEnv(t)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
// impersonate the SA since it is easier than fetching a token and lets us control the group memberships
config := rest.CopyConfig(library.NewClientConfig(t))
config.Impersonate = rest.ImpersonationConfig{
UserName: serviceaccount.MakeUsername(env.ConciergeNamespace, env.ConciergeAppName+"-impersonation-proxy"),
// avoid permissions assigned to system:serviceaccounts by explicitly impersonating system:serviceaccounts:<namespace>
// as not all clusters will have the system:service-account-issuer-discovery binding
// system:authenticated is required for us to create selfsubjectrulesreviews
// TODO remove this once we stop supporting Kube clusters before v1.19
Groups: []string{serviceaccount.MakeNamespaceGroupName(env.ConciergeNamespace), user.AllAuthenticated},
}
ssrrClient := library.NewKubeclient(t, config).Kubernetes.AuthorizationV1().SelfSubjectRulesReviews()
// the impersonation proxy SA has the same permissions for all checks because it should only be authorized via cluster role bindings
expectedResourceRules := []authorizationv1.ResourceRule{
// system:basic-user is bound to system:authenticated by default
{Verbs: []string{"create"}, APIGroups: []string{"authorization.k8s.io"}, Resources: []string{"selfsubjectaccessreviews", "selfsubjectrulesreviews"}},
// the expected impersonation permissions
{Verbs: []string{"impersonate"}, APIGroups: []string{""}, Resources: []string{"users", "groups", "serviceaccounts"}},
{Verbs: []string{"impersonate"}, APIGroups: []string{"authentication.k8s.io"}, Resources: []string{"*"}},
// we bind these to system:authenticated
{Verbs: []string{"create", "list"}, APIGroups: []string{"login.concierge." + env.APIGroupSuffix}, Resources: []string{"tokencredentialrequests"}},
{Verbs: []string{"create", "list"}, APIGroups: []string{"identity.concierge." + env.APIGroupSuffix}, Resources: []string{"whoamirequests"}},
}
if otherPinnipedGroupSuffix := getOtherPinnipedGroupSuffix(t); len(otherPinnipedGroupSuffix) > 0 {
expectedResourceRules = append(expectedResourceRules,
// we bind these to system:authenticated in the other instance of pinniped
authorizationv1.ResourceRule{Verbs: []string{"create", "list"}, APIGroups: []string{"login.concierge." + otherPinnipedGroupSuffix}, Resources: []string{"tokencredentialrequests"}},
authorizationv1.ResourceRule{Verbs: []string{"create", "list"}, APIGroups: []string{"identity.concierge." + otherPinnipedGroupSuffix}, Resources: []string{"whoamirequests"}},
)
}
expectedNonResourceRules := []authorizationv1.NonResourceRule{
// system:public-info-viewer is bound to system:authenticated and system:unauthenticated by default
{Verbs: []string{"get"}, NonResourceURLs: []string{"/healthz", "/livez", "/readyz", "/version", "/version/"}},
// system:discovery is bound to system:authenticated by default
{Verbs: []string{"get"}, NonResourceURLs: []string{"/api", "/api/*", "/apis", "/apis/*",
"/healthz", "/livez", "/openapi", "/openapi/*", "/readyz", "/version", "/version/",
}},
}
// check permissions in concierge namespace
testPermissionsInNamespace(ctx, t, ssrrClient, env.ConciergeNamespace, expectedResourceRules, expectedNonResourceRules)
// check permissions in supervisor namespace
testPermissionsInNamespace(ctx, t, ssrrClient, env.SupervisorNamespace, expectedResourceRules, expectedNonResourceRules)
// check permissions in kube-system namespace
testPermissionsInNamespace(ctx, t, ssrrClient, metav1.NamespaceSystem, expectedResourceRules, expectedNonResourceRules)
// check permissions in kube-public namespace
testPermissionsInNamespace(ctx, t, ssrrClient, metav1.NamespacePublic, expectedResourceRules, expectedNonResourceRules)
// check permissions in default namespace
testPermissionsInNamespace(ctx, t, ssrrClient, metav1.NamespaceDefault, expectedResourceRules, expectedNonResourceRules)
// we fake a cluster scoped selfsubjectrulesreviews check by picking a nonsense namespace
testPermissionsInNamespace(ctx, t, ssrrClient, "some-namespace-invalid-name||pandas-are-the-best", expectedResourceRules, expectedNonResourceRules)
}
func testPermissionsInNamespace(ctx context.Context, t *testing.T, ssrrClient v1.SelfSubjectRulesReviewInterface, namespace string,
expectedResourceRules []authorizationv1.ResourceRule, expectedNonResourceRules []authorizationv1.NonResourceRule) {
t.Helper()
ssrr, err := ssrrClient.Create(ctx, &authorizationv1.SelfSubjectRulesReview{
Spec: authorizationv1.SelfSubjectRulesReviewSpec{Namespace: namespace},
}, metav1.CreateOptions{})
assert.NoError(t, err)
assert.ElementsMatch(t, expectedResourceRules, ssrr.Status.ResourceRules)
assert.ElementsMatch(t, expectedNonResourceRules, ssrr.Status.NonResourceRules)
}
func getOtherPinnipedGroupSuffix(t *testing.T) string {
t.Helper()
env := library.IntegrationEnv(t)
var resources []*metav1.APIResourceList
library.RequireEventuallyWithoutError(t, func() (bool, error) {
// we need a complete discovery listing for the check we are trying to make below
// loop since tests like TestAPIServingCertificateAutoCreationAndRotation can break discovery
_, r, err := library.NewKubernetesClientset(t).Discovery().ServerGroupsAndResources()
if err != nil {
t.Logf("retrying due to partial discovery failure: %v", err)
return false, nil
}
resources = r
return true, nil
}, 3*time.Minute, time.Second)
var otherPinnipedGroupSuffix string
for _, resource := range resources {
gv, err := schema.ParseGroupVersion(resource.GroupVersion)
require.NoError(t, err)
for _, apiResource := range resource.APIResources {
if apiResource.Name == "tokencredentialrequests" && gv.Group != "login.concierge."+env.APIGroupSuffix {
require.Empty(t, otherPinnipedGroupSuffix, "only expected at most one other instance of pinniped")
otherPinnipedGroupSuffix = strings.TrimPrefix(gv.Group, "login.concierge.")
}
}
}
return otherPinnipedGroupSuffix
}