Merge branch 'main' into initial_ldap
commit b3b108500a
@@ -43,7 +43,7 @@ To learn more, see [architecture](https://pinniped.dev/docs/background/architect

 ## Getting started with Pinniped

-Care to kick the tires? It's easy to [install and try Pinniped](https://pinniped.dev/docs/demo/).
+Care to kick the tires? It's easy to [install and try Pinniped](https://pinniped.dev/docs/).

 ## Community meetings

@@ -47,7 +47,7 @@ rules:
   - apiGroups:
       - #@ pinnipedDevAPIGroupWithPrefix("config.concierge")
     resources: [ credentialissuers/status ]
-    verbs: [get, patch, update]
+    verbs: [ get, patch, update ]
   - apiGroups:
       - #@ pinnipedDevAPIGroupWithPrefix("authentication.concierge")
     resources: [ jwtauthenticators, webhookauthenticators ]
@@ -82,16 +82,25 @@ rules:
   - apiGroups: [ "" ]
     resources: [ secrets ]
    verbs: [ create, get, list, patch, update, watch, delete ]
-  #! We need to be able to CRUD pods in our namespace so we can reconcile the kube-cert-agent pods.
+  #! We need to be able to watch pods in our namespace so we can find the kube-cert-agent pods.
   - apiGroups: [ "" ]
     resources: [ pods ]
-    verbs: [ create, get, list, patch, update, watch, delete ]
+    verbs: [ get, list, watch ]
   #! We need to be able to exec into pods in our namespace so we can grab the API server's private key
   - apiGroups: [ "" ]
     resources: [ pods/exec ]
     verbs: [ create ]
+  #! We need to be able to delete pods in our namespace so we can clean up legacy kube-cert-agent pods.
+  - apiGroups: [ "" ]
+    resources: [ pods ]
+    verbs: [ delete ]
+  #! We need to be able to create and update deployments in our namespace so we can manage the kube-cert-agent Deployment.
   - apiGroups: [ apps ]
-    resources: [ replicasets,deployments ]
+    resources: [ deployments ]
+    verbs: [ create, get, list, patch, update, watch ]
+  #! We need to be able to get replicasets so we can form the correct owner references on our generated objects.
+  - apiGroups: [ apps ]
+    resources: [ replicasets ]
     verbs: [ get ]
   - apiGroups: [ "" ]
     resources: [ configmaps ]
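For context on the new `get` rule for replicasets: owner references make generated objects get garbage collected along with their owner. A minimal sketch of that pattern, assuming client-go and using purely illustrative object names (this code is not from the commit):

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Pretend this ReplicaSet was fetched via a "get" on the apps/v1 replicasets resource.
	rs := &appsv1.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{Name: "pinniped-concierge-abc123", UID: "1234"},
	}

	// A generated object would carry this owner reference back to the ReplicaSet,
	// so deleting the ReplicaSet cleans up the generated object too.
	ref := metav1.NewControllerRef(rs, appsv1.SchemeGroupVersion.WithKind("ReplicaSet"))
	fmt.Println(ref.Kind, ref.Name, ref.UID)
}
```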
@@ -187,7 +187,8 @@ registry_repo_tag="${registry_repo}:${tag}"
 if [[ "$do_build" == "yes" ]]; then
   # Rebuild the code
   log_note "Docker building the app..."
-  docker build . --tag "$registry_repo_tag"
+  # DOCKER_BUILDKIT=1 is optional on MacOS but required on linux.
+  DOCKER_BUILDKIT=1 docker build . --tag "$registry_repo_tag"
 fi

 # Load it into the cluster
@@ -300,8 +301,9 @@ popd >/dev/null

 #
 # Download the test CA bundle that was generated in the Dex pod.
+# Note that this returns a base64 encoded value.
 #
-test_ca_bundle_pem="$(kubectl get secrets -n tools certs -o go-template='{{index .data "ca.pem" | base64decode}}' | base64)"
+test_ca_bundle_pem="$(kubectl get secrets -n tools certs -o go-template='{{index .data "ca.pem"}}')"

 #
 # Create the environment file.
hack/prepare-webhook-on-kind.sh (new executable file, 37 lines)
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Copyright 2021 the Pinniped contributors. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# This script deploys a WebhookAuthenticator to use for manual testing. It
+# assumes that you have run hack/prepare-for-integration-tests.sh while pointed
+# at the current cluster.
+#
+
+set -euo pipefail
+
+# Change working directory to the top of the repo.
+ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$ROOT"
+
+# Read the env vars output by hack/prepare-for-integration-tests.sh.
+source /tmp/integration-test-env
+
+# Create WebhookAuthenticator.
+cat <<EOF | kubectl apply -f - 1>&2
+kind: WebhookAuthenticator
+apiVersion: authentication.concierge.pinniped.dev/v1alpha1
+metadata:
+  name: my-webhook
+spec:
+  endpoint: ${PINNIPED_TEST_WEBHOOK_ENDPOINT}
+  tls:
+    certificateAuthorityData: ${PINNIPED_TEST_WEBHOOK_CA_BUNDLE}
+EOF
+
+# Use the CLI to get a kubeconfig that will use this WebhookAuthenticator.
+go build -o /tmp/pinniped ./cmd/pinniped
+/tmp/pinniped get kubeconfig --static-token "$PINNIPED_TEST_USER_TOKEN" >/tmp/kubeconfig-with-webhook-auth.yaml
+
+echo "export KUBECONFIG=/tmp/kubeconfig-with-webhook-auth.yaml"
internal/concierge/impersonator/doc.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package impersonator implements an HTTP server that reverse proxies all requests
+to the Kubernetes API server with impersonation headers set to match the calling
+user. Since impersonation cannot be disabled, this allows us to dynamically
+configure authentication on any cluster, even the cloud hosted ones.
+
+The specifics of how it is implemented are of interest. The most novel detail
+about the implementation is that we use the "front-end" of the aggregated API
+server logic, mainly the DefaultBuildHandlerChain func, to handle how incoming
+requests are authenticated, authorized, etc. The "back-end" of the proxy is a
+reverse proxy that impersonates the user (instead of serving REST APIs).
+
+In terms of authentication, we aim to handle every type of authentication that
+the Kubernetes API server supports by delegating most of the checks to it. We
+also honor client certs from a CA that is specific to the impersonation proxy.
+This approach allows clients to use the Token Credential Request API even when
+we do not have the cluster's signing key.
+
+In terms of authorization, we rely mostly on the Kubernetes API server. Since we
+impersonate the user, the proxied request will be authorized against that user.
+Thus for all regular REST verbs, we perform no authorization checks.
+
+Nested impersonation is handled by performing the same authorization checks the
+Kubernetes API server would (we get this mostly for free by using the aggregated
+API server code). We preserve the original user in the reserved extra key
+original-user-info.impersonation-proxy.concierge.pinniped.dev as a JSON blob of
+the authenticationv1.UserInfo struct. This is necessary to make sure that the
+Kubernetes audit log contains all three identities (original user, impersonated
+user and the impersonation proxy's service account). Capturing the original
+user information requires that we enable the auditing stack (WithImpersonation
+only shares this information with the audit stack). To keep things simple,
+we use the fake audit backend at the Metadata level for all requests. This
+guarantees that we always have an audit event on every request.
+
+For all normal requests, we only use http/2.0 when proxying to the API server.
+For upgrade requests, we only use http/1.1 since these always go from http/1.1
+to either websockets or SPDY.
+*/
+package impersonator
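The reserved extra key described in this doc comment is a plain JSON encoding of authenticationv1.UserInfo, so an audit-log consumer could decode it along these lines. This is a hypothetical sketch, not code from the commit; the sample value is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	// The reserved extra key, exactly as named in the doc comment above.
	const key = "original-user-info.impersonation-proxy.concierge.pinniped.dev"

	// A value shaped like the one the proxy attaches during nested impersonation.
	extra := map[string][]string{
		key: {`{"username":"test-admin","groups":["system:masters","system:authenticated"]}`},
	}

	var original authenticationv1.UserInfo
	if err := json.Unmarshal([]byte(extra[key][0]), &original); err != nil {
		panic(err)
	}
	fmt.Println(original.Username, original.Groups) // the original (pre-impersonation) identity
}
```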
@@ -4,11 +4,14 @@
 package impersonator

 import (
+	"context"
+	"encoding/json"
 	"fmt"
 	"net"
 	"net/http"
 	"net/http/httputil"
 	"net/url"
+	"regexp"
 	"strings"
 	"time"

@@ -21,6 +24,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/httpstream"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/sets"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/apiserver/pkg/audit/policy"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
 	"k8s.io/apiserver/pkg/endpoints/filterlatency"
@@ -31,6 +36,7 @@ import (
 	"k8s.io/apiserver/pkg/server/dynamiccertificates"
 	"k8s.io/apiserver/pkg/server/filters"
 	genericoptions "k8s.io/apiserver/pkg/server/options"
+	auditfake "k8s.io/apiserver/plugin/pkg/audit/fake"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/transport"

@@ -100,7 +106,6 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
 	if err != nil {
 		return nil, err
 	}
-	recommendedOptions.Authentication.ClientCert.ClientCA = "---irrelevant-but-needs-to-be-non-empty---" // drop when we pick up https://github.com/kubernetes/kubernetes/pull/100055
 	recommendedOptions.Authentication.ClientCert.CAContentProvider = dynamiccertificates.NewUnionCAContentProvider(
 		impersonationProxySignerCA, kubeClientCA,
 	)
@@ -163,35 +168,55 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
 		}))
 		handler = filterlatency.TrackStarted(handler, "impersonationproxy")

+		handler = filterlatency.TrackCompleted(handler)
+		handler = deleteKnownImpersonationHeaders(handler)
+		handler = filterlatency.TrackStarted(handler, "deleteimpersonationheaders")
+
 		// The standard Kube handler chain (authn, authz, impersonation, audit, etc).
 		// See the genericapiserver.DefaultBuildHandlerChain func for details.
 		handler = defaultBuildHandlerChainFunc(handler, c)

 		// Always set security headers so browsers do the right thing.
+		handler = filterlatency.TrackCompleted(handler)
 		handler = securityheader.Wrap(handler)
+		handler = filterlatency.TrackStarted(handler, "securityheaders")

 		return handler
 	}

-	// Overwrite the delegating authorizer with one that only cares about impersonation.
-	// Empty string is disallowed because request info has had bugs in the past where it would leave it empty.
-	disallowedVerbs := sets.NewString("", "impersonate")
-	noImpersonationAuthorizer := &comparableAuthorizer{
-		AuthorizerFunc: func(a authorizer.Attributes) (authorizer.Decision, string, error) {
-			// Supporting impersonation is not hard, it would just require a bunch of testing
-			// and configuring the audit layer (to preserve the caller) which we can do later.
-			// We would also want to delete the incoming impersonation headers
-			// instead of overwriting the delegating authorizer, we would
-			// actually use it to make the impersonation authorization checks.
-			if disallowedVerbs.Has(a.GetVerb()) {
-				return authorizer.DecisionDeny, "impersonation is not allowed or invalid verb", nil
-			}
-
-			return authorizer.DecisionAllow, "deferring authorization to kube API server", nil
+	// wire up a fake audit backend at the metadata level so we can preserve the original user during nested impersonation
+	// TODO: wire up the real std out logging audit backend based on plog log level
+	serverConfig.AuditPolicyChecker = policy.FakeChecker(auditinternal.LevelMetadata, nil)
+	serverConfig.AuditBackend = &auditfake.Backend{}
+
+	delegatingAuthorizer := serverConfig.Authorization.Authorizer
+	nestedImpersonationAuthorizer := &comparableAuthorizer{
+		authorizerFunc: func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+			switch a.GetVerb() {
+			case "":
+				// Empty string is disallowed because request info has had bugs in the past where it would leave it empty.
+				return authorizer.DecisionDeny, "invalid verb", nil
+			case "create",
+				"update",
+				"delete",
+				"deletecollection",
+				"get",
+				"list",
+				"watch",
+				"patch",
+				"proxy":
+				// we know these verbs are from the request info parsing which is safe to delegate to KAS
+				return authorizer.DecisionAllow, "deferring standard verb authorization to kube API server", nil
+			default:
+				// assume everything else is internal SAR checks that we need to run against the requesting user
+				// because when KAS does the check, it may run the check against our service account and not the
+				// requesting user. This also handles the impersonate verb to allow for nested impersonation.
+				return delegatingAuthorizer.Authorize(ctx, a)
+			}
 		},
 	}
 	// Set our custom authorizer before calling Complete(), which will use it.
-	serverConfig.Authorization.Authorizer = noImpersonationAuthorizer
+	serverConfig.Authorization.Authorizer = nestedImpersonationAuthorizer

 	impersonationProxyServer, err := serverConfig.Complete().New("impersonation-proxy", genericapiserver.NewEmptyDelegate())
 	if err != nil {
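For context, the audit wiring above works because the standard handler chain stores an audit event in each request's context, where later code (like the proxy's director) can read it back. A standalone sketch of that contract, using the same k8s.io/apiserver helpers this code and its tests use; the sample values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	// The handler chain records who authenticated, at the Metadata audit level.
	ae := &auditinternal.Event{
		Level: auditinternal.LevelMetadata,
		User:  authenticationv1.UserInfo{Username: "test-admin"},
	}
	ctx := request.WithAuditEvent(context.Background(), ae)

	// Later code retrieves the event to learn the original (pre-impersonation) user.
	if got := request.AuditEventFrom(ctx); got != nil {
		fmt.Println(got.User.Username) // prints: test-admin
	}
}
```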
@@ -201,7 +226,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
 	preparedRun := impersonationProxyServer.PrepareRun()

 	// Sanity check. Make sure that our custom authorizer is still in place and did not get changed or wrapped.
-	if preparedRun.Authorizer != noImpersonationAuthorizer {
+	if preparedRun.Authorizer != nestedImpersonationAuthorizer {
 		return nil, constable.Error("invalid mutation of impersonation authorizer detected")
 	}

@@ -225,9 +250,44 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
 	return result, nil
 }

+func deleteKnownImpersonationHeaders(delegate http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// remove known impersonation headers while avoiding mutation of input request
+		// unknown future impersonation headers will still get caught by our later checks
+		if ensureNoImpersonationHeaders(r) != nil {
+			r = r.Clone(r.Context())
+
+			impersonationHeaders := []string{
+				transport.ImpersonateUserHeader,
+				transport.ImpersonateGroupHeader,
+			}
+
+			for k := range r.Header {
+				if !strings.HasPrefix(k, transport.ImpersonateUserExtraHeaderPrefix) {
+					continue
+				}
+				impersonationHeaders = append(impersonationHeaders, k)
+			}
+
+			for _, header := range impersonationHeaders {
+				r.Header.Del(header) // delay mutation until the end when we are done iterating over the map
+			}
+		}
+
+		delegate.ServeHTTP(w, r)
+	})
+}
+
 // No-op wrapping around AuthorizerFunc to allow for comparisons.
 type comparableAuthorizer struct {
-	authorizer.AuthorizerFunc
+	authorizerFunc
+}
+
+// TODO: delete when we pick up https://github.com/kubernetes/kubernetes/pull/100963
+type authorizerFunc func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error)
+
+func (f authorizerFunc) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+	return f(ctx, a)
 }

 func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapiserver.Config) http.Handler, error) {
@@ -258,7 +318,7 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
 		}

 		if err := ensureNoImpersonationHeaders(r); err != nil {
-			plog.Error("noImpersonationAuthorizer logic did not prevent nested impersonation but it is always supposed to do so",
+			plog.Error("unknown impersonation header seen",
 				err,
 				"url", r.URL.String(),
 				"method", r.Method,
@@ -277,6 +337,16 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
 			return
 		}

+		ae := request.AuditEventFrom(r.Context())
+		if ae == nil {
+			plog.Warning("aggregated API server logic did not set audit event but it is always supposed to do so",
+				"url", r.URL.String(),
+				"method", r.Method,
+			)
+			newInternalErrResponse(w, r, c.Serializer, "invalid audit event")
+			return
+		}
+
 		// KAS only supports upgrades via http/1.1 to websockets/SPDY (upgrades never use http/2.0)
 		// Thus we default to using http/2.0 when the request is not an upgrade, otherwise we use http/1.1
 		baseRT := http2RoundTripper
@@ -285,7 +355,7 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
 			baseRT = http1RoundTripper
 		}

-		rt, err := getTransportForUser(userInfo, baseRT)
+		rt, err := getTransportForUser(userInfo, baseRT, ae)
 		if err != nil {
 			plog.WarningErr("rejecting request as we cannot act as the current user", err,
 				"url", r.URL.String(),
@@ -332,6 +402,9 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi

 func ensureNoImpersonationHeaders(r *http.Request) error {
 	for key := range r.Header {
+		// even though we have unit tests that try to cover this case, it is hard to tell if Go does
+		// client side canonicalization on encode, server side canonicalization on decode, or both
+		key := http.CanonicalHeaderKey(key)
 		if strings.HasPrefix(key, "Impersonate") {
 			return fmt.Errorf("%q header already exists", key)
 		}
@@ -340,12 +413,17 @@ func ensureNoImpersonationHeaders(r *http.Request) error {
 	return nil
 }

-func getTransportForUser(userInfo user.Info, delegate http.RoundTripper) (http.RoundTripper, error) {
+func getTransportForUser(userInfo user.Info, delegate http.RoundTripper, ae *auditinternal.Event) (http.RoundTripper, error) {
 	if len(userInfo.GetUID()) == 0 {
+		extra, err := buildExtra(userInfo.GetExtra(), ae)
+		if err != nil {
+			return nil, err
+		}
+
 		impersonateConfig := transport.ImpersonationConfig{
 			UserName: userInfo.GetName(),
 			Groups:   userInfo.GetGroups(),
-			Extra:    userInfo.GetExtra(),
+			Extra:    extra,
 		}
 		// transport.NewImpersonatingRoundTripper clones the request before setting headers
 		// thus it will not accidentally mutate the input request (see http.Handler docs)
@@ -365,6 +443,44 @@ func getTransportForUser(userInfo user.Info, delegate http.RoundTripper) (http.R
 	return nil, constable.Error("unexpected uid")
 }

+func buildExtra(extra map[string][]string, ae *auditinternal.Event) (map[string][]string, error) {
+	const reservedImpersonationProxySuffix = ".impersonation-proxy.concierge.pinniped.dev"
+
+	// always validate that the extra is something we support regardless of nested impersonation
+	for k := range extra {
+		if !extraKeyRegexp.MatchString(k) {
+			return nil, fmt.Errorf("disallowed extra key seen: %s", k)
+		}
+
+		if strings.HasSuffix(k, reservedImpersonationProxySuffix) {
+			return nil, fmt.Errorf("disallowed extra key with reserved prefix seen: %s", k)
+		}
+	}
+
+	if ae.ImpersonatedUser == nil {
+		return extra, nil // just return the given extra since nested impersonation is not being used
+	}
+
+	// avoid mutating input map, preallocate new map to store original user info
+	out := make(map[string][]string, len(extra)+1)
+
+	for k, v := range extra {
+		out[k] = v // shallow copy of slice since we are not going to mutate it
+	}
+
+	origUserInfoJSON, err := json.Marshal(ae.User)
+	if err != nil {
+		return nil, err
+	}
+
+	out["original-user-info"+reservedImpersonationProxySuffix] = []string{string(origUserInfoJSON)}
+
+	return out, nil
+}
+
+// extraKeyRegexp is a very conservative regex to handle impersonation's extra key fidelity limitations such as casing and escaping.
+var extraKeyRegexp = regexp.MustCompile(`^[a-z0-9/\-._]+$`)
+
 func newInternalErrResponse(w http.ResponseWriter, r *http.Request, s runtime.NegotiatedSerializer, msg string) {
 	newStatusErrResponse(w, r, s, apierrors.NewInternalError(constable.Error(msg)))
 }
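buildExtra's output is ultimately handed to client-go's transport.NewImpersonatingRoundTripper, which is what produces the percent-escaped Impersonate-Extra-* headers asserted in the tests below. A small runnable sketch of that behavior; the header-dumping RoundTripper and sample values are illustrative, not from this commit:

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

// headerDump captures the outgoing request headers instead of sending them anywhere.
type headerDump struct{}

func (headerDump) RoundTrip(r *http.Request) (*http.Response, error) {
	for k, v := range r.Header {
		fmt.Println(k, v)
	}
	return &http.Response{StatusCode: http.StatusOK, Body: http.NoBody}, nil
}

func main() {
	rt := transport.NewImpersonatingRoundTripper(transport.ImpersonationConfig{
		UserName: "username@company.com",
		Groups:   []string{"system:authenticated"},
		Extra:    map[string][]string{"iam.gke.io/user-assertion": {"ABC"}},
	}, headerDump{})

	req, _ := http.NewRequest(http.MethodGet, "https://kubernetes.default.svc", nil)
	_, _ = rt.RoundTrip(req)
	// Prints, among others: Impersonate-Extra-Iam.gke.io%2fuser-Assertion [ABC]
}
```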
@@ -15,11 +15,13 @@ import (
 	"time"

 	"github.com/stretchr/testify/require"
+	authenticationv1 "k8s.io/api/authentication/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/util/httpstream"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/features"
@@ -39,8 +41,6 @@ import (
 )

 func TestImpersonator(t *testing.T) {
-	const port = 9444
-
 	ca, err := certauthority.New("ca", time.Hour)
 	require.NoError(t, err)
 	caKey, err := ca.PrivateKeyToPEM()
@@ -58,13 +58,7 @@ func TestImpersonator(t *testing.T) {
 	unrelatedCA, err := certauthority.New("ca", time.Hour)
 	require.NoError(t, err)

-	// Punch out just enough stuff to make New actually run without error.
-	recOpts := func(options *genericoptions.RecommendedOptions) {
-		options.Authentication.RemoteKubeConfigFileOptional = true
-		options.Authorization.RemoteKubeConfigFileOptional = true
-		options.CoreAPI = nil
-		options.Admission = nil
-	}
+	// turn off this code path for all tests because it does not handle the config we remove correctly
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIPriorityAndFairness, false)()

 	tests := []struct {
@@ -140,7 +134,7 @@ func TestImpersonator(t *testing.T) {
 			clientCert:                         newClientCert(t, ca, "test-username2", []string{"test-group3", "test-group4"}),
 			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
 			clientMutateHeaders: func(header http.Header) {
-				header.Add("x-FORWARDED-for", "example.com")
+				header["x-FORWARDED-for"] = append(header["x-FORWARDED-for"], "example.com")
 			},
 			wantKubeAPIServerRequestHeaders: http.Header{
 				"Impersonate-User": {"test-username2"},
@@ -189,20 +183,128 @@ func TestImpersonator(t *testing.T) {
 			wantError: "Unauthorized",
 		},
 		{
-			name:                  "double impersonation is not allowed by regular users",
+			name:                  "nested impersonation by regular users calls delegating authorizer",
 			clientCert:            newClientCert(t, ca, "test-username", []string{"test-group1", "test-group2"}),
 			clientImpersonateUser: rest.ImpersonationConfig{UserName: "some-other-username"},
 			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
+			// this fails because the delegating authorizer in this test only allows system:masters and fails everything else
 			wantError: `users "some-other-username" is forbidden: User "test-username" ` +
-				`cannot impersonate resource "users" in API group "" at the cluster scope: impersonation is not allowed or invalid verb`,
+				`cannot impersonate resource "users" in API group "" at the cluster scope`,
 		},
 		{
-			name:                  "double impersonation is not allowed by admin users",
+			name:       "nested impersonation by admin users calls delegating authorizer",
+			clientCert: newClientCert(t, ca, "test-admin", []string{"system:masters", "test-group2"}),
+			clientImpersonateUser: rest.ImpersonationConfig{
+				UserName: "fire",
+				Groups:   []string{"elements"},
+				Extra: map[string][]string{
+					"colors": {"red", "orange", "blue"},
+
+					// gke
+					"iam.gke.io/user-assertion":       {"good", "stuff"},
+					"user-assertion.cloud.google.com": {"smaller", "things"},
+
+					// openshift
+					"scopes.authorization.openshift.io": {"user:info", "user:full", "user:check-access"},
+
+					// openstack
+					"alpha.kubernetes.io/identity/roles":            {"a-role1", "a-role2"},
+					"alpha.kubernetes.io/identity/project/id":       {"a-project-id"},
+					"alpha.kubernetes.io/identity/project/name":     {"a-project-name"},
+					"alpha.kubernetes.io/identity/user/domain/id":   {"a-domain-id"},
+					"alpha.kubernetes.io/identity/user/domain/name": {"a-domain-name"},
+				},
+			},
+			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
+			wantKubeAPIServerRequestHeaders: http.Header{
+				"Impersonate-User":         {"fire"},
+				"Impersonate-Group":        {"elements", "system:authenticated"},
+				"Impersonate-Extra-Colors": {"red", "orange", "blue"},
+				"Impersonate-Extra-Iam.gke.io%2fuser-Assertion":       {"good", "stuff"},
+				"Impersonate-Extra-User-Assertion.cloud.google.com":   {"smaller", "things"},
+				"Impersonate-Extra-Scopes.authorization.openshift.io": {"user:info", "user:full", "user:check-access"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2froles":                {"a-role1", "a-role2"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fproject%2fid":         {"a-project-id"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fproject%2fname":       {"a-project-name"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fuser%2fdomain%2fid":   {"a-domain-id"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fuser%2fdomain%2fname": {"a-domain-name"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev": {`{"username":"test-admin","groups":["test-group2","system:masters","system:authenticated"]}`},
+				"Authorization":   {"Bearer some-service-account-token"},
+				"User-Agent":      {"test-agent"},
+				"Accept":          {"application/vnd.kubernetes.protobuf,application/json"},
+				"Accept-Encoding": {"gzip"},
+				"X-Forwarded-For": {"127.0.0.1"},
+			},
+		},
+		{
+			name:                  "nested impersonation by admin users cannot impersonate UID",
 			clientCert:            newClientCert(t, ca, "test-admin", []string{"system:masters", "test-group2"}),
 			clientImpersonateUser: rest.ImpersonationConfig{UserName: "some-other-username"},
+			clientMutateHeaders: func(header http.Header) {
+				header["Impersonate-Uid"] = []string{"root"}
+			},
 			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
-			wantError: `users "some-other-username" is forbidden: User "test-admin" ` +
-				`cannot impersonate resource "users" in API group "" at the cluster scope: impersonation is not allowed or invalid verb`,
+			wantError:                          "Internal error occurred: invalid impersonation",
+		},
+		{
+			name:                  "nested impersonation by admin users cannot impersonate UID header canonicalization",
+			clientCert:            newClientCert(t, ca, "test-admin", []string{"system:masters", "test-group2"}),
+			clientImpersonateUser: rest.ImpersonationConfig{UserName: "some-other-username"},
+			clientMutateHeaders: func(header http.Header) {
+				header["imPerSoNaTE-uid"] = []string{"magic"}
+			},
+			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
+			wantError:                          "Internal error occurred: invalid impersonation",
+		},
+		{
+			name:       "nested impersonation by admin users cannot use reserved key",
+			clientCert: newClientCert(t, ca, "test-admin", []string{"system:masters", "test-group2"}),
+			clientImpersonateUser: rest.ImpersonationConfig{
+				UserName: "other-user-to-impersonate",
+				Groups:   []string{"other-peeps"},
+				Extra: map[string][]string{
+					"key": {"good"},
+					"something.impersonation-proxy.concierge.pinniped.dev": {"bad data"},
+				},
+			},
+			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
+			wantError:                          "Internal error occurred: unimplemented functionality - unable to act as current user",
+		},
+		{
+			name:       "nested impersonation by admin users cannot use invalid key",
+			clientCert: newClientCert(t, ca, "test-admin", []string{"system:masters", "test-group2"}),
+			clientImpersonateUser: rest.ImpersonationConfig{
+				UserName: "panda",
+				Groups:   []string{"other-peeps"},
+				Extra: map[string][]string{
+					"party~~time": {"danger"},
+				},
+			},
+			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
+			wantError:                          "Internal error occurred: unimplemented functionality - unable to act as current user",
+		},
+		{
+			name:       "nested impersonation by admin users can use uppercase key because impersonation is lossy",
+			clientCert: newClientCert(t, ca, "test-admin", []string{"system:masters", "test-group2"}),
+			clientImpersonateUser: rest.ImpersonationConfig{
+				UserName: "panda",
+				Groups:   []string{"other-peeps"},
+				Extra: map[string][]string{
+					"ROAR": {"tiger"}, // by the time our code sees this key, it is lowercased to "roar"
+				},
+			},
+			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
+			wantKubeAPIServerRequestHeaders: http.Header{
+				"Impersonate-User":       {"panda"},
+				"Impersonate-Group":      {"other-peeps", "system:authenticated"},
+				"Impersonate-Extra-Roar": {"tiger"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev": {`{"username":"test-admin","groups":["test-group2","system:masters","system:authenticated"]}`},
+				"Authorization":   {"Bearer some-service-account-token"},
+				"User-Agent":      {"test-agent"},
+				"Accept":          {"application/vnd.kubernetes.protobuf,application/json"},
+				"Accept-Encoding": {"gzip"},
+				"X-Forwarded-For": {"127.0.0.1"},
+			},
 		},
 		{
 			name: "no bearer token file in Kube API server client config",
@@ -212,17 +314,17 @@ func TestImpersonator(t *testing.T) {
 			name:       "header canonicalization user header",
 			clientCert: newClientCert(t, ca, "test-username", []string{"test-group1", "test-group2"}),
 			clientMutateHeaders: func(header http.Header) {
-				header.Set("imPerSonaTE-USer", "PANDA")
+				header["imPerSonaTE-USer"] = []string{"PANDA"}
 			},
 			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
 			wantError: `users "PANDA" is forbidden: User "test-username" ` +
-				`cannot impersonate resource "users" in API group "" at the cluster scope: impersonation is not allowed or invalid verb`,
+				`cannot impersonate resource "users" in API group "" at the cluster scope`,
 		},
 		{
 			name:       "header canonicalization future UID header",
 			clientCert: newClientCert(t, ca, "test-username", []string{"test-group1", "test-group2"}),
 			clientMutateHeaders: func(header http.Header) {
-				header.Set("imPerSonaTE-uid", "007")
+				header["imPerSonaTE-uid"] = []string{"007"}
 			},
 			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
 			wantError: "Internal error occurred: invalid impersonation",
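These tests now assign into the header map directly because http.Header.Set canonicalizes its key, which would silently rewrite a deliberately non-canonical header like "imPerSonaTE-uid" before it ever left the test client. A quick standalone sketch of the difference (illustrative only):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("imPerSonaTE-uid", "007") // stored under the canonical key
	fmt.Println(h)                  // map[Impersonate-Uid:[007]]

	h = http.Header{}
	h["imPerSonaTE-uid"] = []string{"007"} // exact, non-canonical key preserved
	fmt.Println(h)                         // map[imPerSonaTE-uid:[007]]
}
```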
@@ -231,7 +333,7 @@ func TestImpersonator(t *testing.T) {
 			name:       "future UID header",
 			clientCert: newClientCert(t, ca, "test-username", []string{"test-group1", "test-group2"}),
 			clientMutateHeaders: func(header http.Header) {
-				header.Set("Impersonate-Uid", "008")
+				header["Impersonate-Uid"] = []string{"008"}
 			},
 			kubeAPIServerClientBearerTokenFile: "required-to-be-set",
 			wantError: "Internal error occurred: invalid impersonation",
@@ -239,8 +341,14 @@ func TestImpersonator(t *testing.T) {
 	}
 	for _, tt := range tests {
 		tt := tt
-		// This is a serial test because the production code binds to the port.
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			// we need to create this listener ourselves because the API server
+			// code treats (port == 0 && listener == nil) to mean "do nothing"
+			listener, port, err := genericoptions.CreateListener("", "127.0.0.1:0", net.ListenConfig{})
+			require.NoError(t, err)
+
 			// After failing to start and after shutdown, the impersonator port should be available again.
 			defer requireCanBindToPort(t, port)

@@ -293,8 +401,17 @@ func TestImpersonator(t *testing.T) {
 			}
 			clientOpts := []kubeclient.Option{kubeclient.WithConfig(&testKubeAPIServerKubeconfig)}

-			// Create an impersonator.
-			runner, constructionErr := newInternal(port, certKeyContent, caContent, clientOpts, recOpts)
+			// Punch out just enough stuff to make New actually run without error.
+			recOpts := func(options *genericoptions.RecommendedOptions) {
+				options.Authentication.RemoteKubeConfigFileOptional = true
+				options.Authorization.RemoteKubeConfigFileOptional = true
+				options.CoreAPI = nil
+				options.Admission = nil
+				options.SecureServing.Listener = listener // use our listener with the dynamic port
+			}
+
+			// Create an impersonator. Use an invalid port number to make sure our listener override works.
+			runner, constructionErr := newInternal(-1000, certKeyContent, caContent, clientOpts, recOpts)
 			if len(tt.wantConstructionError) > 0 {
 				require.EqualError(t, constructionErr, tt.wantConstructionError)
 				require.Nil(t, runner)
@@ -383,20 +500,30 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
 	}

 	validURL, _ := url.Parse("http://pinniped.dev/blah")
-	newRequest := func(h http.Header, userInfo user.Info) *http.Request {
+	newRequest := func(h http.Header, userInfo user.Info, event *auditinternal.Event) *http.Request {
 		ctx := context.Background()

 		if userInfo != nil {
 			ctx = request.WithUser(ctx, userInfo)
 		}
-		r, err := http.NewRequestWithContext(ctx, http.MethodGet, validURL.String(), nil)
-		require.NoError(t, err)
-		r.Header = h
+
+		ae := &auditinternal.Event{Level: auditinternal.LevelMetadata}
+		if event != nil {
+			ae = event
+		}
+		ctx = request.WithAuditEvent(ctx, ae)
+
 		reqInfo := &request.RequestInfo{
 			IsResourceRequest: false,
 			Path:              validURL.Path,
 			Verb:              "get",
 		}
-		r = r.WithContext(request.WithRequestInfo(ctx, reqInfo))
+		ctx = request.WithRequestInfo(ctx, reqInfo)
+
+		r, err := http.NewRequestWithContext(ctx, http.MethodGet, validURL.String(), nil)
+		require.NoError(t, err)
+		r.Header = h
+
 		return r
 	}
@@ -436,43 +563,123 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
 		},
 		{
 			name:           "Impersonate-User header already in request",
-			request:        newRequest(map[string][]string{"Impersonate-User": {"some-user"}}, nil),
+			request:        newRequest(map[string][]string{"Impersonate-User": {"some-user"}}, nil, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
 		{
 			name:           "Impersonate-Group header already in request",
-			request:        newRequest(map[string][]string{"Impersonate-Group": {"some-group"}}, nil),
+			request:        newRequest(map[string][]string{"Impersonate-Group": {"some-group"}}, nil, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
 		{
 			name:           "Impersonate-Extra header already in request",
-			request:        newRequest(map[string][]string{"Impersonate-Extra-something": {"something"}}, nil),
+			request:        newRequest(map[string][]string{"Impersonate-Extra-something": {"something"}}, nil, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
 		{
 			name:           "Impersonate-* header already in request",
-			request:        newRequest(map[string][]string{"Impersonate-Something": {"some-newfangled-impersonate-header"}}, nil),
+			request:        newRequest(map[string][]string{"Impersonate-Something": {"some-newfangled-impersonate-header"}}, nil, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid impersonation","reason":"InternalError","details":{"causes":[{"message":"invalid impersonation"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
 		{
 			name:           "unexpected authorization header",
-			request:        newRequest(map[string][]string{"Authorization": {"panda"}}, nil),
+			request:        newRequest(map[string][]string{"Authorization": {"panda"}}, nil, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid authorization header","reason":"InternalError","details":{"causes":[{"message":"invalid authorization header"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
 		{
 			name:           "missing user",
-			request:        newRequest(map[string][]string{}, nil),
+			request:        newRequest(map[string][]string{}, nil, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid user","reason":"InternalError","details":{"causes":[{"message":"invalid user"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
 		{
 			name:           "unexpected UID",
-			request:        newRequest(map[string][]string{}, &user.DefaultInfo{UID: "007"}),
+			request:        newRequest(map[string][]string{}, &user.DefaultInfo{UID: "007"}, nil),
+			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
+			wantHTTPStatus: http.StatusInternalServerError,
+		},
+		{
+			name: "authenticated user but missing audit event",
+			request: func() *http.Request {
+				req := newRequest(map[string][]string{
+					"User-Agent":   {"test-user-agent"},
+					"Connection":   {"Upgrade"},
+					"Upgrade":      {"some-upgrade"},
+					"Other-Header": {"test-header-value-1"},
+				}, &user.DefaultInfo{
+					Name:   testUser,
+					Groups: testGroups,
+					Extra:  testExtra,
+				}, nil)
+				ctx := request.WithAuditEvent(req.Context(), nil)
+				req = req.WithContext(ctx)
+				return req
+			}(),
+			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: invalid audit event","reason":"InternalError","details":{"causes":[{"message":"invalid audit event"}]},"code":500}` + "\n",
+			wantHTTPStatus: http.StatusInternalServerError,
+		},
+		{
+			name: "authenticated user with upper case extra",
+			request: newRequest(map[string][]string{
+				"User-Agent":     {"test-user-agent"},
+				"Connection":     {"Upgrade"},
+				"Upgrade":        {"some-upgrade"},
+				"Content-Type":   {"some-type"},
+				"Content-Length": {"some-length"},
+				"Other-Header":   {"test-header-value-1"},
+			}, &user.DefaultInfo{
+				Name:   testUser,
+				Groups: testGroups,
+				Extra: map[string][]string{
+					"valid-key":   {"valid-value"},
+					"Invalid-key": {"still-valid-value"},
+				},
+			}, nil),
+			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
+			wantHTTPStatus: http.StatusInternalServerError,
+		},
+		{
+			name: "authenticated user with upper case extra across multiple lines",
+			request: newRequest(map[string][]string{
+				"User-Agent":     {"test-user-agent"},
+				"Connection":     {"Upgrade"},
+				"Upgrade":        {"some-upgrade"},
+				"Content-Type":   {"some-type"},
+				"Content-Length": {"some-length"},
+				"Other-Header":   {"test-header-value-1"},
+			}, &user.DefaultInfo{
+				Name:   testUser,
+				Groups: testGroups,
+				Extra: map[string][]string{
+					"valid-key":               {"valid-value"},
+					"valid-data\nInvalid-key": {"still-valid-value"},
+				},
+			}, nil),
+			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
+			wantHTTPStatus: http.StatusInternalServerError,
+		},
+		{
+			name: "authenticated user with reserved extra key",
+			request: newRequest(map[string][]string{
+				"User-Agent":     {"test-user-agent"},
+				"Connection":     {"Upgrade"},
+				"Upgrade":        {"some-upgrade"},
+				"Content-Type":   {"some-type"},
+				"Content-Length": {"some-length"},
+				"Other-Header":   {"test-header-value-1"},
+			}, &user.DefaultInfo{
+				Name:   testUser,
+				Groups: testGroups,
+				Extra: map[string][]string{
+					"valid-key": {"valid-value"},
+					"foo.impersonation-proxy.concierge.pinniped.dev": {"still-valid-value"},
+				},
+			}, nil),
 			wantHTTPBody:   `{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"Internal error occurred: unimplemented functionality - unable to act as current user","reason":"InternalError","details":{"causes":[{"message":"unimplemented functionality - unable to act as current user"}]},"code":500}` + "\n",
 			wantHTTPStatus: http.StatusInternalServerError,
 		},
@@ -492,7 +699,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
 				Name:   testUser,
 				Groups: testGroups,
 				Extra:  testExtra,
-			}),
+			}, nil),
 			wantKubeAPIServerRequestHeaders: map[string][]string{
 				"Authorization":             {"Bearer some-service-account-token"},
 				"Impersonate-Extra-Extra-1": {"some", "extra", "stuff"},
@ -510,6 +717,318 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
|
|||||||
wantHTTPBody: "successful proxied response",
|
wantHTTPBody: "successful proxied response",
|
||||||
wantHTTPStatus: http.StatusOK,
|
wantHTTPStatus: http.StatusOK,
|
||||||
},
|
},
|
||||||
+		{
+			name: "authenticated gke user",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name:   "username@company.com",
+				Groups: []string{"system:authenticated"},
+				Extra: map[string][]string{
+					// make sure we can handle these keys
+					"iam.gke.io/user-assertion":       {"ABC"},
+					"user-assertion.cloud.google.com": {"XYZ"},
+				},
+			}, nil),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization": {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Iam.gke.io%2fuser-Assertion":      {"ABC"},
+				"Impersonate-Extra-User-Assertion.cloud.google.com": {"XYZ"},
+				"Impersonate-Group": {"system:authenticated"},
+				"Impersonate-User":  {"username@company.com"},
+				"User-Agent":        {"test-user-agent"},
+				"Accept":            {"some-accepted-format"},
+				"Accept-Encoding":   {"some-accepted-encoding"},
+				"Connection":        {"Upgrade"},
+				"Upgrade":           {"some-upgrade"},
+				"Content-Type":      {"some-type"},
+				"Other-Header":      {"test-header-value-1"},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
+		{
+			name: "authenticated openshift/openstack user",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name: "kube:admin",
+				// both of these auth stacks set UID but we cannot handle it today
+				// UID: "user-id",
+				Groups: []string{"system:cluster-admins", "system:authenticated"},
+				Extra: map[string][]string{
+					// openshift
+					"scopes.authorization.openshift.io": {"user:info", "user:full"},
+
+					// openstack
+					"alpha.kubernetes.io/identity/roles":            {"role1", "role2"},
+					"alpha.kubernetes.io/identity/project/id":       {"project-id"},
+					"alpha.kubernetes.io/identity/project/name":     {"project-name"},
+					"alpha.kubernetes.io/identity/user/domain/id":   {"domain-id"},
+					"alpha.kubernetes.io/identity/user/domain/name": {"domain-name"},
+				},
+			}, nil),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization": {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Scopes.authorization.openshift.io":                     {"user:info", "user:full"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2froles":                {"role1", "role2"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fproject%2fid":         {"project-id"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fproject%2fname":       {"project-name"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fuser%2fdomain%2fid":   {"domain-id"},
+				"Impersonate-Extra-Alpha.kubernetes.io%2fidentity%2fuser%2fdomain%2fname": {"domain-name"},
+				"Impersonate-Group": {"system:cluster-admins", "system:authenticated"},
+				"Impersonate-User":  {"kube:admin"},
+				"User-Agent":        {"test-user-agent"},
+				"Accept":            {"some-accepted-format"},
+				"Accept-Encoding":   {"some-accepted-encoding"},
+				"Connection":        {"Upgrade"},
+				"Upgrade":           {"some-upgrade"},
+				"Content-Type":      {"some-type"},
+				"Other-Header":      {"test-header-value-1"},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
+		{
+			name: "authenticated user with almost reserved key",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name:   "username@company.com",
+				Groups: []string{"system:authenticated"},
+				Extra: map[string][]string{
+					"foo.iimpersonation-proxy.concierge.pinniped.dev": {"still-valid-value"},
+				},
+			}, nil),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization": {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Foo.iimpersonation-Proxy.concierge.pinniped.dev": {"still-valid-value"},
+				"Impersonate-Group": {"system:authenticated"},
+				"Impersonate-User":  {"username@company.com"},
+				"User-Agent":        {"test-user-agent"},
+				"Accept":            {"some-accepted-format"},
+				"Accept-Encoding":   {"some-accepted-encoding"},
+				"Connection":        {"Upgrade"},
+				"Upgrade":           {"some-upgrade"},
+				"Content-Type":      {"some-type"},
+				"Other-Header":      {"test-header-value-1"},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
+		{
+			name: "authenticated user with almost reserved key and nested impersonation",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name:   "username@company.com",
+				Groups: []string{"system:authenticated"},
+				Extra: map[string][]string{
+					"original-user-info.impersonation-proxyy.concierge.pinniped.dev": {"log confusion stuff here"},
+				},
+			},
+				&auditinternal.Event{
+					User: authenticationv1.UserInfo{
+						Username: "panda",
+						UID:      "0x001",
+						Groups:   []string{"bears", "friends"},
+						Extra: map[string]authenticationv1.ExtraValue{
+							"original-user-info.impersonation-proxy.concierge.pinniped.dev": {"this is allowed"},
+						},
+					},
+					ImpersonatedUser: &authenticationv1.UserInfo{},
+				},
+			),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization": {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxyy.concierge.pinniped.dev": {"log confusion stuff here"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev":  {`{"username":"panda","uid":"0x001","groups":["bears","friends"],"extra":{"original-user-info.impersonation-proxy.concierge.pinniped.dev":["this is allowed"]}}`},
+				"Impersonate-Group": {"system:authenticated"},
+				"Impersonate-User":  {"username@company.com"},
+				"User-Agent":        {"test-user-agent"},
+				"Accept":            {"some-accepted-format"},
+				"Accept-Encoding":   {"some-accepted-encoding"},
+				"Connection":        {"Upgrade"},
+				"Upgrade":           {"some-upgrade"},
+				"Content-Type":      {"some-type"},
+				"Other-Header":      {"test-header-value-1"},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
+		{
+			name: "authenticated user with nested impersonation",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name:   testUser,
+				Groups: testGroups,
+				Extra:  testExtra,
+			},
+				&auditinternal.Event{
+					User: authenticationv1.UserInfo{
+						Username: "panda",
+						UID:      "0x001",
+						Groups:   []string{"bears", "friends"},
+						Extra: map[string]authenticationv1.ExtraValue{
+							"assertion": {"sha", "md5"},
+							"req-id":    {"0123"},
+						},
+					},
+					ImpersonatedUser: &authenticationv1.UserInfo{},
+				},
+			),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization":             {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Extra-1": {"some", "extra", "stuff"},
+				"Impersonate-Extra-Extra-2": {"some", "more", "extra", "stuff"},
+				"Impersonate-Group":         {"test-group-1", "test-group-2"},
+				"Impersonate-User":          {"test-user"},
+				"User-Agent":                {"test-user-agent"},
+				"Accept":                    {"some-accepted-format"},
+				"Accept-Encoding":           {"some-accepted-encoding"},
+				"Connection":                {"Upgrade"},
+				"Upgrade":                   {"some-upgrade"},
+				"Content-Type":              {"some-type"},
+				"Other-Header":              {"test-header-value-1"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev": {`{"username":"panda","uid":"0x001","groups":["bears","friends"],"extra":{"assertion":["sha","md5"],"req-id":["0123"]}}`},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
+		{
+			name: "authenticated gke user with nested impersonation",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name:   testUser,
+				Groups: testGroups,
+				Extra:  testExtra,
+			},
+				&auditinternal.Event{
+					User: authenticationv1.UserInfo{
+						Username: "username@company.com",
+						Groups:   []string{"system:authenticated"},
+						Extra: map[string]authenticationv1.ExtraValue{
+							// make sure we can handle these keys
+							"iam.gke.io/user-assertion":       {"ABC"},
+							"user-assertion.cloud.google.com": {"999"},
+						},
+					},
+					ImpersonatedUser: &authenticationv1.UserInfo{},
+				},
+			),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization":             {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Extra-1": {"some", "extra", "stuff"},
+				"Impersonate-Extra-Extra-2": {"some", "more", "extra", "stuff"},
+				"Impersonate-Group":         {"test-group-1", "test-group-2"},
+				"Impersonate-User":          {"test-user"},
+				"User-Agent":                {"test-user-agent"},
+				"Accept":                    {"some-accepted-format"},
+				"Accept-Encoding":           {"some-accepted-encoding"},
+				"Connection":                {"Upgrade"},
+				"Upgrade":                   {"some-upgrade"},
+				"Content-Type":              {"some-type"},
+				"Other-Header":              {"test-header-value-1"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev": {`{"username":"username@company.com","groups":["system:authenticated"],"extra":{"iam.gke.io/user-assertion":["ABC"],"user-assertion.cloud.google.com":["999"]}}`},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
+		{
+			name: "authenticated user with nested impersonation of gke user",
+			request: newRequest(map[string][]string{
+				"User-Agent":      {"test-user-agent"},
+				"Accept":          {"some-accepted-format"},
+				"Accept-Encoding": {"some-accepted-encoding"},
+				"Connection":      {"Upgrade"}, // the value "Upgrade" is handled in a special way by `httputil.NewSingleHostReverseProxy`
+				"Upgrade":         {"some-upgrade"},
+				"Content-Type":    {"some-type"},
+				"Content-Length":  {"some-length"},
+				"Other-Header":    {"test-header-value-1"}, // this header will be passed through
+			}, &user.DefaultInfo{
+				Name:   "username@company.com",
+				Groups: []string{"system:authenticated"},
+				Extra: map[string][]string{
+					// make sure we can handle these keys
+					"iam.gke.io/user-assertion":       {"DEF"},
+					"user-assertion.cloud.google.com": {"XYZ"},
+				},
+			},
+				&auditinternal.Event{
+					User: authenticationv1.UserInfo{
+						Username: "panda",
+						UID:      "0x001",
+						Groups:   []string{"bears", "friends"},
+						Extra: map[string]authenticationv1.ExtraValue{
+							"assertion": {"sha", "md5"},
+							"req-id":    {"0123"},
+						},
+					},
+					ImpersonatedUser: &authenticationv1.UserInfo{},
+				},
+			),
+			wantKubeAPIServerRequestHeaders: map[string][]string{
+				"Authorization": {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Iam.gke.io%2fuser-Assertion":      {"DEF"},
+				"Impersonate-Extra-User-Assertion.cloud.google.com": {"XYZ"},
+				"Impersonate-Group": {"system:authenticated"},
+				"Impersonate-User":  {"username@company.com"},
+				"User-Agent":        {"test-user-agent"},
+				"Accept":            {"some-accepted-format"},
+				"Accept-Encoding":   {"some-accepted-encoding"},
+				"Connection":        {"Upgrade"},
+				"Upgrade":           {"some-upgrade"},
+				"Content-Type":      {"some-type"},
+				"Other-Header":      {"test-header-value-1"},
+				"Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev": {`{"username":"panda","uid":"0x001","groups":["bears","friends"],"extra":{"assertion":["sha","md5"],"req-id":["0123"]}}`},
+			},
+			wantHTTPBody:   "successful proxied response",
+			wantHTTPStatus: http.StatusOK,
+		},
 		{
 			name: "user is authenticated but the kube API request returns an error",
 			request: newRequest(map[string][]string{
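The odd-looking expected header names in the cases above, such as `Impersonate-Extra-Iam.gke.io%2fuser-Assertion`, come from two mechanical steps: characters that are illegal in an HTTP header name, like `/`, are percent-escaped, and Go then canonicalizes the resulting name, uppercasing the first letter of each hyphen-separated segment and lowercasing the rest. A small sketch of that interaction; `url.QueryEscape` stands in here for the stricter header-key escaping that client-go's impersonation round tripper actually performs:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	h := http.Header{}
	key := "iam.gke.io/user-assertion"
	// http.Header.Add canonicalizes the header name, which also lowercases the
	// hex digits of the %2F escape, producing the mixed-case form in the tests.
	h.Add("Impersonate-Extra-"+url.QueryEscape(key), "ABC")
	for name, vals := range h {
		fmt.Println(name, vals) // Impersonate-Extra-Iam.gke.io%2fuser-Assertion [ABC]
	}
}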
@@ -518,7 +1037,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
 				Name:   testUser,
 				Groups: testGroups,
 				Extra:  testExtra,
-			}),
+			}, nil),
 			kubeAPIServerStatusCode: http.StatusNotFound,
 			wantKubeAPIServerRequestHeaders: map[string][]string{
 				"Accept-Encoding": {"gzip"}, // because the rest client used in this test does not disable compression
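For the nested impersonation cases, the expected `Impersonate-Extra-Original-User-Info.impersonation-Proxy.concierge.pinniped.dev` value is the original caller's identity serialized as JSON. This self-contained sketch reproduces the exact value asserted above for the "panda" user; it is a demonstration of the encoding, not the proxy's code:

package main

import (
	"encoding/json"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	original := authenticationv1.UserInfo{
		Username: "panda",
		UID:      "0x001",
		Groups:   []string{"bears", "friends"},
		Extra: map[string]authenticationv1.ExtraValue{
			"assertion": {"sha", "md5"},
			"req-id":    {"0123"},
		},
	}
	blob, err := json.Marshal(original)
	if err != nil {
		panic(err)
	}
	// Prints the same JSON the tests expect:
	// {"username":"panda","uid":"0x001","groups":["bears","friends"],"extra":{"assertion":["sha","md5"],"req-id":["0123"]}}
	fmt.Println(string(blob))
}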
@@ -623,6 +1142,7 @@ type clientCert struct {
 }
 
 func newClientCert(t *testing.T, ca *certauthority.CA, username string, groups []string) *clientCert {
+	t.Helper()
 	certPEM, keyPEM, err := ca.IssueClientCertPEM(username, groups, time.Hour)
 	require.NoError(t, err)
 	return &clientCert{
@@ -632,7 +1152,113 @@ func newClientCert(t *testing.T, ca *certauthority.CA, username string, groups [
 }
 
 func requireCanBindToPort(t *testing.T, port int) {
+	t.Helper()
 	ln, _, listenErr := genericoptions.CreateListener("", "0.0.0.0:"+strconv.Itoa(port), net.ListenConfig{})
 	require.NoError(t, listenErr)
 	require.NoError(t, ln.Close())
 }
+
+func Test_deleteKnownImpersonationHeaders(t *testing.T) {
+	tests := []struct {
+		name          string
+		headers, want http.Header
+	}{
+		{
+			name: "no impersonation",
+			headers: map[string][]string{
+				"a":               {"b"},
+				"Accept-Encoding": {"gzip"},
+				"User-Agent":      {"test-user-agent"},
+			},
+			want: map[string][]string{
+				"a":               {"b"},
+				"Accept-Encoding": {"gzip"},
+				"User-Agent":      {"test-user-agent"},
+			},
+		},
+		{
+			name: "impersonate user header is dropped",
+			headers: map[string][]string{
+				"a":                {"b"},
+				"Impersonate-User": {"panda"},
+				"Accept-Encoding":  {"gzip"},
+				"User-Agent":       {"test-user-agent"},
+			},
+			want: map[string][]string{
+				"a":               {"b"},
+				"Accept-Encoding": {"gzip"},
+				"User-Agent":      {"test-user-agent"},
+			},
+		},
+		{
+			name: "all known impersonate headers are dropped",
+			headers: map[string][]string{
+				"Accept-Encoding":           {"gzip"},
+				"Authorization":             {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Extra-1": {"some", "extra", "stuff"},
+				"Impersonate-Extra-Extra-2": {"some", "more", "extra", "stuff"},
+				"Impersonate-Group":         {"test-group-1", "test-group-2"},
+				"Impersonate-User":          {"test-user"},
+				"User-Agent":                {"test-user-agent"},
+			},
+			want: map[string][]string{
+				"Accept-Encoding": {"gzip"},
+				"Authorization":   {"Bearer some-service-account-token"},
+				"User-Agent":      {"test-user-agent"},
+			},
+		},
+		{
+			name: "future UID header is not dropped",
+			headers: map[string][]string{
+				"Accept-Encoding":           {"gzip"},
+				"Authorization":             {"Bearer some-service-account-token"},
+				"Impersonate-Extra-Extra-1": {"some", "extra", "stuff"},
+				"Impersonate-Extra-Extra-2": {"some", "more", "extra", "stuff"},
+				"Impersonate-Group":         {"test-group-1", "test-group-2"},
+				"Impersonate-User":          {"test-user"},
+				"Impersonate-Uid":           {"008"},
+				"User-Agent":                {"test-user-agent"},
+			},
+			want: map[string][]string{
+				"Accept-Encoding": {"gzip"},
+				"Authorization":   {"Bearer some-service-account-token"},
+				"User-Agent":      {"test-user-agent"},
+				"Impersonate-Uid": {"008"},
+			},
+		},
+		{
+			name: "future UID header is not dropped, no other headers",
+			headers: map[string][]string{
+				"Impersonate-Uid": {"009"},
+			},
+			want: map[string][]string{
+				"Impersonate-Uid": {"009"},
+			},
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			inputReq := (&http.Request{Header: tt.headers}).WithContext(context.Background())
+			inputReqCopy := inputReq.Clone(inputReq.Context())
+
+			delegate := http.HandlerFunc(func(w http.ResponseWriter, outputReq *http.Request) {
+				require.Nil(t, w)
+
+				// assert only headers mutated
+				outputReqCopy := outputReq.Clone(outputReq.Context())
+				outputReqCopy.Header = tt.headers
+				require.Equal(t, inputReqCopy, outputReqCopy)
+
+				require.Equal(t, tt.want, outputReq.Header)
+
+				if ensureNoImpersonationHeaders(inputReq) == nil {
+					require.True(t, inputReq == outputReq, "expect req to passed through when no modification needed")
+				}
+			})
+
+			deleteKnownImpersonationHeaders(delegate).ServeHTTP(nil, inputReq)
+			require.Equal(t, inputReqCopy, inputReq) // assert no mutation occurred
+		})
+	}
+}
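Read together, these cases pin down the contract of `deleteKnownImpersonationHeaders`: strip the impersonation headers the proxy knows how to rebuild (user, group, and extras), leave the not-yet-supported `Impersonate-Uid` untouched, never mutate the caller's request, and hand the request through unchanged when there is nothing to delete. A minimal sketch of a middleware with that contract, assuming those semantics from the tests; the real implementation lives in the impersonator package and may differ:

package main

import (
	"net/http"
	"strings"
)

func isKnownImpersonationHeader(key string) bool {
	return key == "Impersonate-User" || key == "Impersonate-Group" ||
		strings.HasPrefix(key, "Impersonate-Extra-")
}

func deleteKnownImpersonationHeaders(delegate http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Only clone when a known header is present, so an untouched request
		// is passed through as the exact same object (as the test asserts).
		needsClean := false
		for key := range r.Header {
			if isKnownImpersonationHeader(key) {
				needsClean = true
				break
			}
		}
		if needsClean {
			r = r.Clone(r.Context())
			for key := range r.Header {
				if isKnownImpersonationHeader(key) {
					r.Header.Del(key) // deleting during range is safe for Go maps
				}
			}
		}
		delegate.ServeHTTP(w, r)
	})
}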
@@ -16,6 +16,7 @@ import (
 	"fmt"
 	"net/http"
 	"net/http/httptest"
+	"strings"
 	"testing"
 	"time"
 
@@ -25,6 +26,7 @@ import (
 	"gopkg.in/square/go-jose.v2/jwt"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authentication/user"
 
@@ -345,15 +347,6 @@ func TestController(t *testing.T) {
 				return // end of test unless we wanted to run tests on the resulting authenticator from the cache
 			}
-
-			// The implementation of AuthenticateToken() that we use waits 10 seconds after creation to
-			// perform OIDC discovery. Therefore, the JWTAuthenticator is not functional for the first 10
-			// seconds. We sleep for 13 seconds in this unit test to give a little bit of cushion to that 10
-			// second delay.
-			//
-			// We should get rid of this 10 second delay. See
-			// https://github.com/vmware-tanzu/pinniped/issues/260.
-			time.Sleep(time.Second * 13)
 
 			// We expected the cache to have an entry, so pull that entry from the cache and test it.
 			expectedCacheKey := authncache.Key{
 				APIGroup: auth1alpha1.GroupName,
@@ -428,7 +421,17 @@ func TestController(t *testing.T) {
 				tt.wantUsernameClaim,
 				username,
 			)
-			rsp, authenticated, err := cachedAuthenticator.AuthenticateToken(context.Background(), jwt)
+
+			// Loop for a while here to allow the underlying OIDC authenticator to initialize itself asynchronously.
+			var (
+				rsp           *authenticator.Response
+				authenticated bool
+				err           error
+			)
+			_ = wait.PollImmediate(10*time.Millisecond, 5*time.Second, func() (bool, error) {
+				rsp, authenticated, err = cachedAuthenticator.AuthenticateToken(context.Background(), jwt)
+				return !isNotInitialized(err), nil
+			})
 			if test.wantErrorRegexp != "" {
 				require.Error(t, err)
 				require.Regexp(t, test.wantErrorRegexp, err.Error())
@@ -443,6 +446,12 @@ func TestController(t *testing.T) {
 	}
 }
+
+// isNotInitialized checks if the error is the internally-defined "oidc: authenticator not initialized" error from
+// the underlying OIDC authenticator, which is initialized asynchronously.
+func isNotInitialized(err error) bool {
+	return err != nil && strings.Contains(err.Error(), "authenticator not initialized")
+}
 
 func testTableForAuthenticateTokenTests(
 	t *testing.T,
 	goodRSASigningKey *rsa.PrivateKey,
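The hunks above replace a fixed 13-second sleep with a bounded poll, which is both faster and less flaky: the test retries `AuthenticateToken` every 10ms until the authenticator stops reporting that it is uninitialized. The same pattern in isolation, using `wait.PollImmediate` from `k8s.io/apimachinery` with an illustrative condition:

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// PollImmediate runs the condition right away, then every interval until it
	// returns true (or errors), or until the timeout elapses.
	err := wait.PollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // not ready yet; keep polling
		}
		return true, nil // condition met; stop polling
	})
	if errors.Is(err, wait.ErrWaitTimeout) {
		fmt.Println("timed out waiting for condition")
		return
	}
	fmt.Printf("condition met after %d attempts\n", attempts)
}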
@@ -1,219 +0,0 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package kubecertagent
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/spf13/pflag"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
-	corev1informers "k8s.io/client-go/informers/core/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/util/retry"
-	"k8s.io/klog/v2"
-
-	pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
-	pinnipedcontroller "go.pinniped.dev/internal/controller"
-	"go.pinniped.dev/internal/controller/issuerconfig"
-	"go.pinniped.dev/internal/controllerlib"
-	"go.pinniped.dev/internal/plog"
-)
-
-// These constants are the default values for the kube-controller-manager flags. If the flags are
-// not properly set on the kube-controller-manager process, then we will fallback to using these.
-const (
-	k8sAPIServerCACertPEMDefaultPath = "/etc/kubernetes/ca/ca.pem"
-	k8sAPIServerCAKeyPEMDefaultPath  = "/etc/kubernetes/ca/ca.key"
-)
-
-type annotaterController struct {
-	agentPodConfig                 *AgentPodConfig
-	credentialIssuerLocationConfig *CredentialIssuerLocationConfig
-	credentialIssuerLabels         map[string]string
-	clock                          clock.Clock
-	k8sClient                      kubernetes.Interface
-	pinnipedAPIClient              pinnipedclientset.Interface
-	kubeSystemPodInformer          corev1informers.PodInformer
-	agentPodInformer               corev1informers.PodInformer
-}
-
-// NewAnnotaterController returns a controller that updates agent pods with the path to the kube
-// API's certificate and key.
-//
-// This controller will add annotations to agent pods with the best-guess paths to the kube API's
-// certificate and key.
-//
-// It also is tasked with updating the CredentialIssuer, located via the provided
-// credentialIssuerLocationConfig, with any errors that it encounters.
-func NewAnnotaterController(
-	agentPodConfig *AgentPodConfig,
-	credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
-	credentialIssuerLabels map[string]string,
-	clock clock.Clock,
-	k8sClient kubernetes.Interface,
-	pinnipedAPIClient pinnipedclientset.Interface,
-	kubeSystemPodInformer corev1informers.PodInformer,
-	agentPodInformer corev1informers.PodInformer,
-	withInformer pinnipedcontroller.WithInformerOptionFunc,
-) controllerlib.Controller {
-	return controllerlib.New(
-		controllerlib.Config{
-			Name: "kube-cert-agent-annotater-controller",
-			Syncer: &annotaterController{
-				agentPodConfig:                 agentPodConfig,
-				credentialIssuerLocationConfig: credentialIssuerLocationConfig,
-				credentialIssuerLabels:         credentialIssuerLabels,
-				clock:                          clock,
-				k8sClient:                      k8sClient,
-				pinnipedAPIClient:              pinnipedAPIClient,
-				kubeSystemPodInformer:          kubeSystemPodInformer,
-				agentPodInformer:               agentPodInformer,
-			},
-		},
-		withInformer(
-			kubeSystemPodInformer,
-			pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
-			controllerlib.InformerOption{},
-		),
-		withInformer(
-			agentPodInformer,
-			pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
-			controllerlib.InformerOption{},
-		),
-	)
-}
-
-// Sync implements controllerlib.Syncer.
-func (c *annotaterController) Sync(ctx controllerlib.Context) error {
-	agentPods, err := c.agentPodInformer.
-		Lister().
-		Pods(c.agentPodConfig.Namespace).
-		List(c.agentPodConfig.AgentSelector())
-	if err != nil {
-		return fmt.Errorf("informer cannot list agent pods: %w", err)
-	}
-
-	for _, agentPod := range agentPods {
-		controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
-		if err != nil {
-			return err
-		}
-		if controllerManagerPod == nil {
-			// The deleter will clean this orphaned agent.
-			continue
-		}
-
-		certPath := getContainerArgByName(
-			controllerManagerPod,
-			"cluster-signing-cert-file",
-			k8sAPIServerCACertPEMDefaultPath,
-		)
-		keyPath := getContainerArgByName(
-			controllerManagerPod,
-			"cluster-signing-key-file",
-			k8sAPIServerCAKeyPEMDefaultPath,
-		)
-		if err := c.maybeUpdateAgentPod(
-			ctx.Context,
-			agentPod.Name,
-			agentPod.Namespace,
-			certPath,
-			keyPath,
-		); err != nil {
-			err = fmt.Errorf("cannot update agent pod: %w", err)
-			strategyResultUpdateErr := issuerconfig.UpdateStrategy(
-				ctx.Context,
-				c.credentialIssuerLocationConfig.Name,
-				c.credentialIssuerLabels,
-				c.pinnipedAPIClient,
-				strategyError(c.clock, err),
-			)
-			if strategyResultUpdateErr != nil {
-				// If the CI update fails, then we probably want to try again. This controller will get
-				// called again because of the pod create failure, so just try the CI update again then.
-				klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuer")
-			}
-
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *annotaterController) maybeUpdateAgentPod(
-	ctx context.Context,
-	name string,
-	namespace string,
-	certPath string,
-	keyPath string,
-) error {
-	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
-		agentPod, err := c.k8sClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
-		if err != nil {
-			return err
-		}
-
-		if agentPod.Annotations[agentPodCertPathAnnotationKey] != certPath ||
-			agentPod.Annotations[agentPodKeyPathAnnotationKey] != keyPath {
-			if err := c.reallyUpdateAgentPod(
-				ctx,
-				agentPod,
-				certPath,
-				keyPath,
-			); err != nil {
-				return err
-			}
-		}
-
-		return nil
-	})
-}
-
-func (c *annotaterController) reallyUpdateAgentPod(
-	ctx context.Context,
-	agentPod *corev1.Pod,
-	certPath string,
-	keyPath string,
-) error {
-	// Create a deep copy of the agent pod since it is coming straight from the cache.
-	updatedAgentPod := agentPod.DeepCopy()
-	if updatedAgentPod.Annotations == nil {
-		updatedAgentPod.Annotations = make(map[string]string)
-	}
-	updatedAgentPod.Annotations[agentPodCertPathAnnotationKey] = certPath
-	updatedAgentPod.Annotations[agentPodKeyPathAnnotationKey] = keyPath
-
-	plog.Debug(
-		"updating agent pod annotations",
-		"pod",
-		klog.KObj(updatedAgentPod),
-		"certPath",
-		certPath,
-		"keyPath",
-		keyPath,
-	)
-	_, err := c.k8sClient.
-		CoreV1().
-		Pods(agentPod.Namespace).
-		Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
-	return err
-}
-
-func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
-	for _, container := range pod.Spec.Containers {
-		flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
-		flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
-		var val string
-		flagset.StringVar(&val, name, "", "")
-		_ = flagset.Parse(append(container.Command, container.Args...))
-		if val != "" {
-			return val
-		}
-	}
-	return fallbackValue
-}
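Before its deletion, `getContainerArgByName` used a pflag idiom that is handy beyond this controller: register only the flag you care about, whitelist unknown flags, and parse someone else's full command line. A standalone sketch of the same technique; the command line below is illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func argByName(args []string, name, fallback string) string {
	flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
	// Ignore every flag except the one we registered, instead of erroring out.
	flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
	var val string
	flagset.StringVar(&val, name, "", "")
	_ = flagset.Parse(args)
	if val == "" {
		return fallback
	}
	return val
}

func main() {
	cmd := []string{
		"kube-controller-manager",
		"--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt",
		"--use-service-account-credentials=true",
	}
	// Prints /etc/kubernetes/pki/ca.crt; falls back to the default when absent.
	fmt.Println(argByName(cmd, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem"))
}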
@@ -1,727 +0,0 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package kubecertagent
-
-import (
-	"context"
-	"errors"
-	"testing"
-	"time"
-
-	"github.com/sclevine/spec"
-	"github.com/sclevine/spec/report"
-	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
-	kubeinformers "k8s.io/client-go/informers"
-	corev1informers "k8s.io/client-go/informers/core/v1"
-	kubernetesfake "k8s.io/client-go/kubernetes/fake"
-	coretesting "k8s.io/client-go/testing"
-
-	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
-	pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
-	"go.pinniped.dev/internal/controllerlib"
-	"go.pinniped.dev/internal/testutil"
-)
-
-func TestAnnotaterControllerFilter(t *testing.T) {
-	defineSharedKubecertagentFilterSpecs(
-		t,
-		"AnnotaterControllerFilter",
-		func(
-			agentPodConfig *AgentPodConfig,
-			_ *CredentialIssuerLocationConfig,
-			kubeSystemPodInformer corev1informers.PodInformer,
-			agentPodInformer corev1informers.PodInformer,
-			observableWithInformerOption *testutil.ObservableWithInformerOption,
-		) {
-			_ = NewAnnotaterController(
-				agentPodConfig,
-				nil, // credentialIssuerLabels, shouldn't matter
-				nil, // credentialIssuerLocationConfig, shouldn't matter
-				nil, // clock, shouldn't matter
-				nil, // k8sClient, shouldn't matter
-				nil, // pinnipedClient, shouldn't matter
-				kubeSystemPodInformer,
-				agentPodInformer,
-				observableWithInformerOption.WithInformer,
-			)
-		},
-	)
-}
-
-func TestAnnotaterControllerSync(t *testing.T) {
-	spec.Run(t, "AnnotaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
-		const kubeSystemNamespace = "kube-system"
-		const agentPodNamespace = "agent-pod-namespace"
-		const defaultKubeControllerManagerClusterSigningCertFileFlagValue = "/etc/kubernetes/ca/ca.pem"
-		const defaultKubeControllerManagerClusterSigningKeyFileFlagValue = "/etc/kubernetes/ca/ca.key"
-		const credentialIssuerResourceName = "ci-resource-name"
-
-		const (
-			certPath           = "some-cert-path"
-			certPathAnnotation = "kube-cert-agent.pinniped.dev/cert-path"
-
-			keyPath           = "some-key-path"
-			keyPathAnnotation = "kube-cert-agent.pinniped.dev/key-path"
-		)
-
-		var r *require.Assertions
-
-		var subject controllerlib.Controller
-		var kubeAPIClient *kubernetesfake.Clientset
-		var kubeSystemInformerClient *kubernetesfake.Clientset
-		var kubeSystemInformers kubeinformers.SharedInformerFactory
-		var agentInformerClient *kubernetesfake.Clientset
-		var agentInformers kubeinformers.SharedInformerFactory
-		var pinnipedAPIClient *pinnipedfake.Clientset
-		var cancelContext context.Context
-		var cancelContextCancelFunc context.CancelFunc
-		var syncContext *controllerlib.Context
-		var controllerManagerPod, agentPod *corev1.Pod
-		var podsGVR schema.GroupVersionResource
-		var credentialIssuerGVR schema.GroupVersionResource
-		var frozenNow time.Time
-		var credentialIssuerLabels map[string]string
-
-		// Defer starting the informers until the last possible moment so that the
-		// nested Before's can keep adding things to the informer caches.
-		var startInformersAndController = func() {
-			// Set this at the last second to allow for injection of server override.
-			subject = NewAnnotaterController(
-				&AgentPodConfig{
-					Namespace:      agentPodNamespace,
-					ContainerImage: "some-agent-image",
-					PodNamePrefix:  "some-agent-name-",
-					AdditionalLabels: map[string]string{
-						"myLabelKey1": "myLabelValue1",
-						"myLabelKey2": "myLabelValue2",
-					},
-				},
-				&CredentialIssuerLocationConfig{
-					Name: credentialIssuerResourceName,
-				},
-				credentialIssuerLabels,
-				clock.NewFakeClock(frozenNow),
-				kubeAPIClient,
-				pinnipedAPIClient,
-				kubeSystemInformers.Core().V1().Pods(),
-				agentInformers.Core().V1().Pods(),
-				controllerlib.WithInformer,
-			)
-
-			// Set this at the last second to support calling subject.Name().
-			syncContext = &controllerlib.Context{
-				Context: cancelContext,
-				Name:    subject.Name(),
-				Key: controllerlib.Key{
-					Namespace: kubeSystemNamespace,
-					Name:      "should-not-matter",
-				},
-			}
-
-			// Must start informers before calling TestRunSynchronously()
-			kubeSystemInformers.Start(cancelContext.Done())
-			agentInformers.Start(cancelContext.Done())
-			controllerlib.TestRunSynchronously(t, subject)
-		}
-
-		it.Before(func() {
-			r = require.New(t)
-
-			kubeAPIClient = kubernetesfake.NewSimpleClientset()
-
-			kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
-			kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)
-
-			agentInformerClient = kubernetesfake.NewSimpleClientset()
-			agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)
-
-			pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
-
-			cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
-
-			controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
-				kubeSystemNamespace, agentPodNamespace, certPath, keyPath,
-			)
-
-			podsGVR = schema.GroupVersionResource{
-				Group:    corev1.SchemeGroupVersion.Group,
-				Version:  corev1.SchemeGroupVersion.Version,
-				Resource: "pods",
-			}
-
-			credentialIssuerGVR = schema.GroupVersionResource{
-				Group:    configv1alpha1.GroupName,
-				Version:  configv1alpha1.SchemeGroupVersion.Version,
-				Resource: "credentialissuers",
-			}
-
-			frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
-
-			// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
-			// logic on this thing.
-			ignorablePod := corev1.Pod{}
-			ignorablePod.Name = "some-ignorable-pod"
-			r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
-			r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
-			r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
-		})
-
-		it.After(func() {
-			cancelContextCancelFunc()
-		})
-
-		when("there is an agent pod without annotations set", func() {
-			it.Before(func() {
-				r.NoError(agentInformerClient.Tracker().Add(agentPod))
-				r.NoError(kubeAPIClient.Tracker().Add(agentPod))
-			})
-
-			when("there is a matching controller manager pod", func() {
-				it.Before(func() {
-					r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
-					r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
-				})
-
-				it("updates the annotations according to the controller manager pod", func() {
-					startInformersAndController()
-					r.NoError(controllerlib.TestSync(t, subject, *syncContext))
-
-					updatedAgentPod := agentPod.DeepCopy()
-					updatedAgentPod.Annotations[certPathAnnotation] = certPath
-					updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
-
-					r.Equal(
-						[]coretesting.Action{
-							coretesting.NewGetAction(
-								podsGVR,
-								agentPodNamespace,
-								updatedAgentPod.Name,
-							),
-							coretesting.NewUpdateAction(
-								podsGVR,
-								agentPodNamespace,
-								updatedAgentPod,
-							),
-						},
-						kubeAPIClient.Actions(),
-					)
-				})
-
-				when("updating the agent pod fails", func() {
-					it.Before(func() {
-						kubeAPIClient.PrependReactor(
-							"update",
-							"pods",
-							func(_ coretesting.Action) (bool, runtime.Object, error) {
-								return true, nil, errors.New("some update error")
-							},
-						)
-					})
-
-					it("returns the error", func() {
-						startInformersAndController()
-						err := controllerlib.TestSync(t, subject, *syncContext)
-						r.EqualError(err, "cannot update agent pod: some update error")
-					})
-
-					when("there is already a CredentialIssuer", func() {
-						var initialCredentialIssuer *configv1alpha1.CredentialIssuer
-
-						it.Before(func() {
-							initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
-								TypeMeta: metav1.TypeMeta{},
-								ObjectMeta: metav1.ObjectMeta{
-									Name: credentialIssuerResourceName,
-								},
-								Status: configv1alpha1.CredentialIssuerStatus{
-									Strategies: []configv1alpha1.CredentialIssuerStrategy{},
-								},
-							}
-							r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
-						})
-
-						it("updates the CredentialIssuer status with the error", func() {
-							startInformersAndController()
-							err := controllerlib.TestSync(t, subject, *syncContext)
-
-							expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
-							expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
-								{
-									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
-									Status:         configv1alpha1.ErrorStrategyStatus,
-									Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
-									Message:        "cannot update agent pod: some update error",
-									LastUpdateTime: metav1.NewTime(frozenNow),
-								},
-							}
-							expectedGetAction := coretesting.NewRootGetAction(
-								credentialIssuerGVR,
-								credentialIssuerResourceName,
-							)
-							expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
-								credentialIssuerGVR,
-								"status",
-								expectedCredentialIssuer,
-							)
-
-							r.EqualError(err, "cannot update agent pod: some update error")
-							r.Equal(
-								[]coretesting.Action{
-									expectedGetAction,
-									expectedUpdateAction,
-								},
-								pinnipedAPIClient.Actions(),
-							)
-						})
-
-						when("updating the CredentialIssuer fails", func() {
-							it.Before(func() {
-								pinnipedAPIClient.PrependReactor(
-									"update",
-									"credentialissuers",
-									func(_ coretesting.Action) (bool, runtime.Object, error) {
-										return true, nil, errors.New("some update error")
-									},
-								)
-							})
-
-							it("returns the original pod update error so the controller gets scheduled again", func() {
-								startInformersAndController()
-								err := controllerlib.TestSync(t, subject, *syncContext)
-								r.EqualError(err, "cannot update agent pod: some update error")
-							})
-						})
-					})
-
-					when("there is not already a CredentialIssuer", func() {
-						it.Before(func() {
-							credentialIssuerLabels = map[string]string{"foo": "bar"}
-						})
-
-						it("creates the CredentialIssuer status with the error", func() {
-							startInformersAndController()
-							err := controllerlib.TestSync(t, subject, *syncContext)
-
-							expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
-								TypeMeta: metav1.TypeMeta{},
-								ObjectMeta: metav1.ObjectMeta{
-									Name:   credentialIssuerResourceName,
-									Labels: map[string]string{"foo": "bar"},
-								},
-							}
-
-							expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
-								TypeMeta: metav1.TypeMeta{},
-								ObjectMeta: metav1.ObjectMeta{
-									Name:   credentialIssuerResourceName,
-									Labels: map[string]string{"foo": "bar"},
-								},
-								Status: configv1alpha1.CredentialIssuerStatus{
-									Strategies: []configv1alpha1.CredentialIssuerStrategy{
-										{
-											Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
-											Status:         configv1alpha1.ErrorStrategyStatus,
-											Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
-											Message:        "cannot update agent pod: some update error",
-											LastUpdateTime: metav1.NewTime(frozenNow),
-										},
-									},
-								},
-							}
-							expectedGetAction := coretesting.NewRootGetAction(
-								credentialIssuerGVR,
-								credentialIssuerResourceName,
-							)
-							expectedCreateAction := coretesting.NewRootCreateAction(
-								credentialIssuerGVR,
-								expectedCreateCredentialIssuer,
-							)
-							expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
-								credentialIssuerGVR,
-								"status",
-								expectedCredentialIssuer,
-							)
-
-							r.EqualError(err, "cannot update agent pod: some update error")
-							r.Equal(
-								[]coretesting.Action{
-									expectedGetAction,
-									expectedCreateAction,
-									expectedUpdateAction,
-								},
-								pinnipedAPIClient.Actions(),
-							)
-						})
-					})
-				})
-			})
-
when("there is a controller manager pod with CLI flag values separated by spaces", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
|
||||||
"kube-controller-manager",
|
|
||||||
"--cluster-signing-cert-file", certPath,
|
|
||||||
"--cluster-signing-key-file", keyPath,
|
|
||||||
}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the annotations according to the controller manager pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a controller manager pod with no CLI flags", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
|
||||||
"kube-controller-manager",
|
|
||||||
}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the annotations with the default values", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a controller manager pod with unparsable CLI flags", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
|
||||||
"kube-controller-manager",
|
|
||||||
"--cluster-signing-cert-file-blah", certPath,
|
|
||||||
"--cluster-signing-key-file-blah", keyPath,
|
|
||||||
}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the annotations with the default values", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a controller manager pod with unparsable cert CLI flag", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
|
||||||
"kube-controller-manager",
|
|
||||||
"--cluster-signing-cert-file-blah", certPath,
|
|
||||||
"--cluster-signing-key-file", keyPath,
|
|
||||||
}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the key annotation with the default cert flag value", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = defaultKubeControllerManagerClusterSigningCertFileFlagValue
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a controller manager pod with unparsable key CLI flag", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Containers[0].Command = []string{
|
|
||||||
"kube-controller-manager",
|
|
||||||
"--cluster-signing-cert-file", certPath,
|
|
||||||
"--cluster-signing-key-file-blah", keyPath,
|
|
||||||
}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the cert annotation with the default key flag value", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = defaultKubeControllerManagerClusterSigningKeyFileFlagValue
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a non-matching controller manager pod via uid", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.UID = "some-other-controller-manager-uid"
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing; the deleter will delete this pod to trigger resync", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a non-matching controller manager pod via name", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Name = "some-other-controller-manager-name"
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing; the deleter will delete this pod to trigger resync", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod without annotations set which does not have the configured additional labels", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
delete(agentPod.ObjectMeta.Labels, "myLabelKey1")
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching controller manager pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the annotations according to the controller manager pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod with correct annotations set", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
agentPod.Annotations = make(map[string]string)
|
|
||||||
agentPod.Annotations[certPathAnnotation] = certPath
|
|
||||||
agentPod.Annotations[keyPathAnnotation] = keyPath
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching controller manager pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing since the pod is up to date", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod with the wrong cert annotation", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
agentPod.Annotations[certPathAnnotation] = "wrong"
|
|
||||||
agentPod.Annotations[keyPathAnnotation] = keyPath
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching controller manager pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the agent with the correct cert annotation", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[certPathAnnotation] = certPath
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is an agent pod with the wrong key annotation", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
agentPod.Annotations[certPathAnnotation] = certPath
|
|
||||||
agentPod.Annotations[keyPathAnnotation] = "key"
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching controller manager pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the agent with the correct key annotation", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Annotations[keyPathAnnotation] = keyPath
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewGetAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod.Name,
|
|
||||||
),
|
|
||||||
coretesting.NewUpdateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
updatedAgentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
|
||||||
}
|
|
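The deleted test files above and below all use the same sclevine/spec harness. As a point of reference, here is a minimal, self-contained sketch of that pattern; the suite name and the assertion are illustrative only, everything else mirrors calls that appear verbatim in these files:

package example

import (
	"testing"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
	"github.com/stretchr/testify/require"
)

func TestExample(t *testing.T) {
	spec.Run(t, "Example", func(t *testing.T, when spec.G, it spec.S) {
		var r *require.Assertions

		// Runs before every `it` in scope, so nested Before's can keep
		// layering setup on top of this one.
		it.Before(func() {
			r = require.New(t)
		})

		when("some precondition holds", func() {
			it("asserts something", func() {
				r.Equal(1, 1)
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}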
@ -1,185 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/clock"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
	"go.pinniped.dev/internal/constable"
	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controller/issuerconfig"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/plog"
)

type createrController struct {
	agentPodConfig                 *AgentPodConfig
	credentialIssuerLocationConfig *CredentialIssuerLocationConfig
	credentialIssuerLabels         map[string]string
	clock                          clock.Clock
	k8sClient                      kubernetes.Interface
	pinnipedAPIClient              pinnipedclientset.Interface
	kubeSystemPodInformer          corev1informers.PodInformer
	agentPodInformer               corev1informers.PodInformer
}

// NewCreaterController returns a controller that creates new kube-cert-agent pods for every known
// kube-controller-manager pod.
//
// It is also tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewCreaterController(
	agentPodConfig *AgentPodConfig,
	credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
	credentialIssuerLabels map[string]string,
	clock clock.Clock,
	k8sClient kubernetes.Interface,
	pinnipedAPIClient pinnipedclientset.Interface,
	kubeSystemPodInformer corev1informers.PodInformer,
	agentPodInformer corev1informers.PodInformer,
	withInformer pinnipedcontroller.WithInformerOptionFunc,
	withInitialEvent pinnipedcontroller.WithInitialEventOptionFunc,
) controllerlib.Controller {
	return controllerlib.New(
		controllerlib.Config{
			//nolint: misspell
			Name: "kube-cert-agent-creater-controller",
			Syncer: &createrController{
				agentPodConfig:                 agentPodConfig,
				credentialIssuerLocationConfig: credentialIssuerLocationConfig,
				credentialIssuerLabels:         credentialIssuerLabels,
				clock:                          clock,
				k8sClient:                      k8sClient,
				pinnipedAPIClient:              pinnipedAPIClient,
				kubeSystemPodInformer:          kubeSystemPodInformer,
				agentPodInformer:               agentPodInformer,
			},
		},
		withInformer(
			kubeSystemPodInformer,
			pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
			controllerlib.InformerOption{},
		),
		withInformer(
			agentPodInformer,
			pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
			controllerlib.InformerOption{},
		),
		// Be sure to run at least once, even if there are no controller manager pods, so that the
		// CI gets updated. We should be able to pass an empty key since we don't use the key in the
		// sync (we sync the world).
		withInitialEvent(controllerlib.Key{}),
	)
}

// Sync implements controllerlib.Syncer.
func (c *createrController) Sync(ctx controllerlib.Context) error {
	controllerManagerSelector, err := labels.Parse("component=kube-controller-manager")
	if err != nil {
		return fmt.Errorf("cannot create controller manager selector: %w", err)
	}

	controllerManagerPods, err := c.kubeSystemPodInformer.Lister().List(controllerManagerSelector)
	if err != nil {
		return fmt.Errorf("informer cannot list controller manager pods: %w", err)
	}

	if len(controllerManagerPods) == 0 {
		// If there are no controller manager pods, we alert the user that we can't find the keypair via
		// the CredentialIssuer.
		return issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, constable.Error("did not find kube-controller-manager pod(s)")),
		)
	}

	for _, controllerManagerPod := range controllerManagerPods {
		agentPod, err := findAgentPodForSpecificControllerManagerPod(
			controllerManagerPod,
			c.kubeSystemPodInformer,
			c.agentPodInformer,
			c.agentPodConfig.AgentSelector(),
		)
		if err != nil {
			return err
		}
		if agentPod == nil {
			agentPod = c.agentPodConfig.newAgentPod(controllerManagerPod)

			plog.Debug(
				"creating agent pod",
				"pod",
				klog.KObj(agentPod),
				"controller",
				klog.KObj(controllerManagerPod),
			)
			_, err := c.k8sClient.CoreV1().
				Pods(c.agentPodConfig.Namespace).
				Create(ctx.Context, agentPod, metav1.CreateOptions{})
			if err != nil {
				err = fmt.Errorf("cannot create agent pod: %w", err)
				strategyResultUpdateErr := issuerconfig.UpdateStrategy(
					ctx.Context,
					c.credentialIssuerLocationConfig.Name,
					c.credentialIssuerLabels,
					c.pinnipedAPIClient,
					strategyError(c.clock, err),
				)
				if strategyResultUpdateErr != nil {
					// If the CI update fails, then we probably want to try again. This controller will get
					// called again because of the pod create failure, so just try the CI update again then.
					klog.ErrorS(strategyResultUpdateErr, "could not create or update CredentialIssuer")
				}

				return err
			}
		}

		// The deleter controller handles the case where the expected fields do not match in the agent pod.
	}

	return nil
}

func findAgentPodForSpecificControllerManagerPod(
	controllerManagerPod *corev1.Pod,
	kubeSystemPodInformer corev1informers.PodInformer,
	agentPodInformer corev1informers.PodInformer,
	agentSelector labels.Selector,
) (*corev1.Pod, error) {
	agentPods, err := agentPodInformer.
		Lister().
		List(agentSelector)
	if err != nil {
		return nil, fmt.Errorf("informer cannot list agent pods: %w", err)
	}

	for _, maybeAgentPod := range agentPods {
		maybeControllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(
			maybeAgentPod,
			kubeSystemPodInformer,
		)
		if err != nil {
			return nil, err
		}
		if maybeControllerManagerPod != nil &&
			maybeControllerManagerPod.UID == controllerManagerPod.UID {
			return maybeAgentPod, nil
		}
	}

	return nil, nil
}
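For context, the constructor above gets wired together with real clients and informers in roughly the way the tests below do it with fakes. A hypothetical helper, where the namespace, image, pod name prefix, and CredentialIssuer name are placeholder assumptions rather than values from this commit:

// newExampleCreaterController is a hypothetical wiring sketch, mirroring the
// construction done with fake clients in the tests below.
func newExampleCreaterController(
	k8sClient kubernetes.Interface,
	pinnipedAPIClient pinnipedclientset.Interface,
	kubeSystemPods corev1informers.PodInformer,
	agentPods corev1informers.PodInformer,
) controllerlib.Controller {
	return NewCreaterController(
		&AgentPodConfig{
			Namespace:      "pinniped-concierge",       // assumption: install namespace
			ContainerImage: "example.com/agent:latest", // assumption: agent image
			PodNamePrefix:  "kube-cert-agent-",         // assumption: pod name prefix
		},
		&CredentialIssuerLocationConfig{Name: "example-credential-issuer"}, // assumption
		map[string]string{}, // no extra CredentialIssuer labels
		clock.RealClock{},   // real wall clock instead of the tests' fake clock
		k8sClient,
		pinnipedAPIClient,
		kubeSystemPods, // informer on kube-system pods
		agentPods,      // informer on agent namespace pods
		controllerlib.WithInformer,
		controllerlib.WithInitialEvent,
	)
}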
@ -1,623 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/clock"
	kubeinformers "k8s.io/client-go/informers"
	corev1informers "k8s.io/client-go/informers/core/v1"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/testutil"
)

func TestCreaterControllerFilter(t *testing.T) {
	defineSharedKubecertagentFilterSpecs(
		t,
		"CreaterControllerFilter",
		func(
			agentPodConfig *AgentPodConfig,
			credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
			kubeSystemPodInformer corev1informers.PodInformer,
			agentPodInformer corev1informers.PodInformer,
			observableWithInformerOption *testutil.ObservableWithInformerOption,
		) {
			_ = NewCreaterController(
				agentPodConfig,
				credentialIssuerLocationConfig,
				map[string]string{},
				nil, // clock, shouldn't matter
				nil, // k8sClient, shouldn't matter
				nil, // pinnipedAPIClient, shouldn't matter
				kubeSystemPodInformer,
				agentPodInformer,
				observableWithInformerOption.WithInformer,
				controllerlib.WithInitialEvent,
			)
		},
	)
}

func TestCreaterControllerInitialEvent(t *testing.T) {
	kubeSystemInformerClient := kubernetesfake.NewSimpleClientset()
	kubeSystemInformers := kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)

	agentInformerClient := kubernetesfake.NewSimpleClientset()
	agentInformers := kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)

	observableWithInitialEventOption := testutil.NewObservableWithInitialEventOption()

	_ = NewCreaterController(
		nil, // agentPodConfig, shouldn't matter
		nil, // credentialIssuerLocationConfig, shouldn't matter
		map[string]string{},
		nil, // clock, shouldn't matter
		nil, // k8sClient, shouldn't matter
		nil, // pinnipedAPIClient, shouldn't matter
		kubeSystemInformers.Core().V1().Pods(),
		agentInformers.Core().V1().Pods(),
		controllerlib.WithInformer,
		observableWithInitialEventOption.WithInitialEvent,
	)
	require.Equal(t, &controllerlib.Key{}, observableWithInitialEventOption.GetInitialEventKey())
}

func TestCreaterControllerSync(t *testing.T) {
	spec.Run(t, "CreaterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
		const kubeSystemNamespace = "kube-system"
		const agentPodNamespace = "agent-pod-namespace"
		const credentialIssuerResourceName = "ci-resource-name"

		var r *require.Assertions

		var subject controllerlib.Controller
		var kubeAPIClient *kubernetesfake.Clientset
		var kubeSystemInformerClient *kubernetesfake.Clientset
		var kubeSystemInformers kubeinformers.SharedInformerFactory
		var agentInformerClient *kubernetesfake.Clientset
		var agentInformers kubeinformers.SharedInformerFactory
		var pinnipedAPIClient *pinnipedfake.Clientset
		var cancelContext context.Context
		var cancelContextCancelFunc context.CancelFunc
		var syncContext *controllerlib.Context
		var controllerManagerPod, agentPod *corev1.Pod
		var podsGVR schema.GroupVersionResource
		var credentialIssuerGVR schema.GroupVersionResource
		var frozenNow time.Time

		// Defer starting the informers until the last possible moment so that the
		// nested Before's can keep adding things to the informer caches.
		var startInformersAndController = func() {
			// Set this at the last second to allow for injection of server override.
			subject = NewCreaterController(
				&AgentPodConfig{
					Namespace:                 agentPodNamespace,
					ContainerImage:            "some-agent-image",
					PodNamePrefix:             "some-agent-name-",
					ContainerImagePullSecrets: []string{"some-image-pull-secret"},
					AdditionalLabels: map[string]string{
						"myLabelKey1": "myLabelValue1",
						"myLabelKey2": "myLabelValue2",
					},
				},
				&CredentialIssuerLocationConfig{
					Name: credentialIssuerResourceName,
				},
				map[string]string{
					"myLabelKey1": "myLabelValue1",
					"myLabelKey2": "myLabelValue2",
				},
				clock.NewFakeClock(frozenNow),
				kubeAPIClient,
				pinnipedAPIClient,
				kubeSystemInformers.Core().V1().Pods(),
				agentInformers.Core().V1().Pods(),
				controllerlib.WithInformer,
				controllerlib.WithInitialEvent,
			)

			// Set this at the last second to support calling subject.Name().
			syncContext = &controllerlib.Context{
				Context: cancelContext,
				Name:    subject.Name(),
				Key: controllerlib.Key{
					Namespace: kubeSystemNamespace,
					Name:      "should-not-matter",
				},
			}

			// Must start informers before calling TestRunSynchronously()
			kubeSystemInformers.Start(cancelContext.Done())
			agentInformers.Start(cancelContext.Done())
			controllerlib.TestRunSynchronously(t, subject)
		}

		it.Before(func() {
			r = require.New(t)

			kubeAPIClient = kubernetesfake.NewSimpleClientset()

			kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
			kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)

			agentInformerClient = kubernetesfake.NewSimpleClientset()
			agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)

			pinnipedAPIClient = pinnipedfake.NewSimpleClientset()

			cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())

			controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
				kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
			)

			podsGVR = schema.GroupVersionResource{
				Group:    corev1.SchemeGroupVersion.Group,
				Version:  corev1.SchemeGroupVersion.Version,
				Resource: "pods",
			}

			credentialIssuerGVR = schema.GroupVersionResource{
				Group:    configv1alpha1.GroupName,
				Version:  configv1alpha1.SchemeGroupVersion.Version,
				Resource: "credentialissuers",
			}

			frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)

			// Add a pod into the test that doesn't matter to make sure we don't accidentally trigger any
			// logic on this thing.
			ignorablePod := corev1.Pod{}
			ignorablePod.Name = "some-ignorable-pod"
			r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
			r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))

			// Add another valid agent pod to make sure our logic works for just the pod we care about.
			otherAgentPod := agentPod.DeepCopy()
			otherAgentPod.Name = "some-other-agent"
			otherAgentPod.Annotations = map[string]string{
				"kube-cert-agent.pinniped.dev/controller-manager-name": "some-other-controller-manager-name",
				"kube-cert-agent.pinniped.dev/controller-manager-uid":  "some-other-controller-manager-uid",
			}
			r.NoError(agentInformerClient.Tracker().Add(otherAgentPod))
			r.NoError(kubeAPIClient.Tracker().Add(otherAgentPod))
		})

		it.After(func() {
			cancelContextCancelFunc()
		})

when("there is a controller manager pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching agent pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Empty(kubeAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching agent pod that is missing some of the configured additional labels", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
nonMatchingAgentPod := agentPod.DeepCopy()
|
|
||||||
delete(nonMatchingAgentPod.ObjectMeta.Labels, "myLabelKey1")
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing because the deleter controller is responsible for deleting it", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Empty(kubeAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a non-matching agent pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
nonMatchingAgentPod := agentPod.DeepCopy()
|
|
||||||
nonMatchingAgentPod.Name = "some-agent-name-85da432e"
|
|
||||||
nonMatchingAgentPod.Annotations[controllerManagerUIDAnnotationKey] = "some-non-matching-uid"
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(nonMatchingAgentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(nonMatchingAgentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("creates a matching agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewCreateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
agentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is no matching agent pod", func() {
|
|
||||||
it("creates a matching agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
coretesting.NewCreateAction(
|
|
||||||
podsGVR,
|
|
||||||
agentPodNamespace,
|
|
||||||
agentPod,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
kubeAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
when("creating the matching agent pod fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
kubeAPIClient.PrependReactor(
|
|
||||||
"create",
|
|
||||||
"pods",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some create error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is already a CredentialIssuer", func() {
|
|
||||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
|
||||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: "cannot create agent pod: some create error",
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
credentialIssuerResourceName,
|
|
||||||
)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
"status",
|
|
||||||
expectedCredentialIssuer,
|
|
||||||
)
|
|
||||||
|
|
||||||
r.EqualError(err, "cannot create agent pod: some create error")
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
expectedGetAction,
|
|
||||||
expectedUpdateAction,
|
|
||||||
},
|
|
||||||
pinnipedAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the CredentialIssuer operation fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
pinnipedAPIClient.PrependReactor(
|
|
||||||
"update",
|
|
||||||
"credentialissuers",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some update error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
it("still returns the pod create error, since the controller will get rescheduled", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
r.EqualError(err, "cannot create agent pod: some create error")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is not already a CredentialIssuer", func() {
|
|
||||||
it("returns an error and updates the CredentialIssuer status", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
Labels: map[string]string{
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
Labels: map[string]string{
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: "cannot create agent pod: some create error",
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
credentialIssuerResourceName,
|
|
||||||
)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
expectedCreateCredentialIssuer,
|
|
||||||
)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
"status",
|
|
||||||
expectedCredentialIssuer,
|
|
||||||
)
|
|
||||||
|
|
||||||
r.EqualError(err, "cannot create agent pod: some create error")
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
expectedGetAction,
|
|
||||||
expectedCreateAction,
|
|
||||||
expectedUpdateAction,
|
|
||||||
},
|
|
||||||
pinnipedAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is no controller manager pod", func() {
|
|
||||||
when("there is already a CredentialIssuer", func() {
|
|
||||||
var initialCredentialIssuer *configv1alpha1.CredentialIssuer
|
|
||||||
|
|
||||||
it.Before(func() {
|
|
||||||
initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("updates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
r.NoError(controllerlib.TestSync(t, subject, *syncContext))
|
|
||||||
|
|
||||||
expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
|
|
||||||
expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: "did not find kube-controller-manager pod(s)",
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
credentialIssuerResourceName,
|
|
||||||
)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
"status",
|
|
||||||
expectedCredentialIssuer,
|
|
||||||
)
|
|
||||||
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
expectedGetAction,
|
|
||||||
expectedUpdateAction,
|
|
||||||
},
|
|
||||||
pinnipedAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
when("when updating the CredentialIssuer fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
pinnipedAPIClient.PrependReactor(
|
|
||||||
"update",
|
|
||||||
"credentialissuers",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some update error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
r.EqualError(err, "could not create or update credentialissuer: some update error")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("when getting the CredentialIssuer fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
pinnipedAPIClient.PrependReactor(
|
|
||||||
"get",
|
|
||||||
"credentialissuers",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some get error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
r.EqualError(err, "could not create or update credentialissuer: get failed: some get error")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is not already a CredentialIssuer", func() {
|
|
||||||
it("creates the CredentialIssuer status saying that controller manager pods couldn't be found", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
Labels: map[string]string{
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
|
|
||||||
TypeMeta: metav1.TypeMeta{},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: credentialIssuerResourceName,
|
|
||||||
Labels: map[string]string{
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Status: configv1alpha1.CredentialIssuerStatus{
|
|
||||||
Strategies: []configv1alpha1.CredentialIssuerStrategy{
|
|
||||||
{
|
|
||||||
Type: configv1alpha1.KubeClusterSigningCertificateStrategyType,
|
|
||||||
Status: configv1alpha1.ErrorStrategyStatus,
|
|
||||||
Reason: configv1alpha1.CouldNotFetchKeyStrategyReason,
|
|
||||||
Message: "did not find kube-controller-manager pod(s)",
|
|
||||||
LastUpdateTime: metav1.NewTime(frozenNow),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedGetAction := coretesting.NewRootGetAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
credentialIssuerResourceName,
|
|
||||||
)
|
|
||||||
expectedCreateAction := coretesting.NewRootCreateAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
expectedCreateCredentialIssuer,
|
|
||||||
)
|
|
||||||
expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(
|
|
||||||
credentialIssuerGVR,
|
|
||||||
"status",
|
|
||||||
expectedCredentialIssuer,
|
|
||||||
)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Equal(
|
|
||||||
[]coretesting.Action{
|
|
||||||
expectedGetAction,
|
|
||||||
expectedCreateAction,
|
|
||||||
expectedUpdateAction,
|
|
||||||
},
|
|
||||||
pinnipedAPIClient.Actions(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
when("when creating the CredentialIssuer fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
pinnipedAPIClient.PrependReactor(
|
|
||||||
"create",
|
|
||||||
"credentialissuers",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some create error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
r.EqualError(err, "could not create or update credentialissuer: create failed: some create error")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("when getting the CredentialIssuer fails", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
pinnipedAPIClient.PrependReactor(
|
|
||||||
"get",
|
|
||||||
"credentialissuers",
|
|
||||||
func(_ coretesting.Action) (bool, runtime.Object, error) {
|
|
||||||
return true, nil, errors.New("some get error")
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
it("returns an error", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
r.EqualError(err, "could not create or update credentialissuer: get failed: some get error")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
|
||||||
}
|
|
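The error-path cases above all rely on client-go's fake clientset reactors, which intercept API verbs before the default object-tracker logic runs. A minimal standalone sketch of that mechanism; the namespace, pod name, and error text are illustrative:

package main

import (
	"context"
	"errors"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"
)

func main() {
	client := kubernetesfake.NewSimpleClientset()

	// Fail every pod create, exactly as the tests above do.
	client.PrependReactor(
		"create",
		"pods",
		func(_ coretesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("some create error")
		},
	)

	_, err := client.CoreV1().Pods("agent-pod-namespace").Create(
		context.Background(),
		&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "some-pod"}},
		metav1.CreateOptions{},
	)
	fmt.Println(err) // prints "some create error"
}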
@ -1,87 +0,0 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/plog"
)

type deleterController struct {
	agentPodConfig        *AgentPodConfig
	k8sClient             kubernetes.Interface
	kubeSystemPodInformer corev1informers.PodInformer
	agentPodInformer      corev1informers.PodInformer
}

// NewDeleterController returns a controller that deletes any kube-cert-agent pods that are out of
// sync with the known kube-controller-manager pods.
func NewDeleterController(
	agentPodConfig *AgentPodConfig,
	k8sClient kubernetes.Interface,
	kubeSystemPodInformer corev1informers.PodInformer,
	agentPodInformer corev1informers.PodInformer,
	withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
	return controllerlib.New(
		controllerlib.Config{
			Name: "kube-cert-agent-deleter-controller",
			Syncer: &deleterController{
				agentPodConfig:        agentPodConfig,
				k8sClient:             k8sClient,
				kubeSystemPodInformer: kubeSystemPodInformer,
				agentPodInformer:      agentPodInformer,
			},
		},
		withInformer(
			kubeSystemPodInformer,
			pinnipedcontroller.SimpleFilterWithSingletonQueue(isControllerManagerPod),
			controllerlib.InformerOption{},
		),
		withInformer(
			agentPodInformer,
			pinnipedcontroller.SimpleFilterWithSingletonQueue(isAgentPod),
			controllerlib.InformerOption{},
		),
	)
}

// Sync implements controllerlib.Syncer.
func (c *deleterController) Sync(ctx controllerlib.Context) error {
	agentPods, err := c.agentPodInformer.
		Lister().
		Pods(c.agentPodConfig.Namespace).
		List(c.agentPodConfig.AgentSelector())
	if err != nil {
		return fmt.Errorf("informer cannot list agent pods: %w", err)
	}

	for _, agentPod := range agentPods {
		controllerManagerPod, err := findControllerManagerPodForSpecificAgentPod(agentPod, c.kubeSystemPodInformer)
		if err != nil {
			return err
		}
		if controllerManagerPod == nil ||
			!isAgentPodUpToDate(agentPod, c.agentPodConfig.newAgentPod(controllerManagerPod)) {
			plog.Debug("deleting agent pod", "pod", klog.KObj(agentPod))
			err := c.k8sClient.
				CoreV1().
				Pods(agentPod.Namespace).
				Delete(ctx.Context, agentPod.Name, metav1.DeleteOptions{})
			if err != nil {
				return fmt.Errorf("cannot delete agent pod: %w", err)
			}
		}
	}

	return nil
}
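The isAgentPodUpToDate helper called above is not part of this diff. Based on the fields exercised by the deleter tests below, it plausibly compares the live agent pod against a freshly templated one along these lines; this is a hedged approximation, not the actual implementation:

// isAgentPodUpToDateSketch is a hypothetical approximation of isAgentPodUpToDate.
// It assumes exactly one container, as the agent pod template has, and treats
// extra labels on the live pod as acceptable (the tests below require that).
func isAgentPodUpToDateSketch(actual, expected *corev1.Pod) bool {
	// Every expected label must be present with the expected value; labels that
	// someone else added are tolerated.
	for k, v := range expected.Labels {
		if actual.Labels[k] != v {
			return false
		}
	}
	return equality.Semantic.DeepEqual(actual.Spec.Containers[0].Name, expected.Spec.Containers[0].Name) &&
		equality.Semantic.DeepEqual(actual.Spec.Containers[0].Image, expected.Spec.Containers[0].Image) &&
		equality.Semantic.DeepEqual(actual.Spec.Containers[0].Command, expected.Spec.Containers[0].Command) &&
		equality.Semantic.DeepEqual(actual.Spec.Containers[0].VolumeMounts, expected.Spec.Containers[0].VolumeMounts) &&
		equality.Semantic.DeepEqual(actual.Spec.Volumes, expected.Spec.Volumes) &&
		equality.Semantic.DeepEqual(actual.Spec.RestartPolicy, expected.Spec.RestartPolicy) &&
		equality.Semantic.DeepEqual(actual.Spec.NodeSelector, expected.Spec.NodeSelector) &&
		equality.Semantic.DeepEqual(actual.Spec.NodeName, expected.Spec.NodeName) &&
		equality.Semantic.DeepEqual(actual.Spec.Tolerations, expected.Spec.Tolerations) &&
		equality.Semantic.DeepEqual(actual.Spec.AutomountServiceAccountToken, expected.Spec.AutomountServiceAccountToken) &&
		equality.Semantic.DeepEqual(actual.Spec.SecurityContext, expected.Spec.SecurityContext)
}

(equality.Semantic comes from k8s.io/apimachinery/pkg/api/equality; corev1 is k8s.io/api/core/v1.)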
@ -1,506 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"context"
	"testing"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kubeinformers "k8s.io/client-go/informers"
	corev1informers "k8s.io/client-go/informers/core/v1"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"

	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/testutil"
)

func TestDeleterControllerFilter(t *testing.T) {
	defineSharedKubecertagentFilterSpecs(
		t,
		"DeleterControllerFilter",
		func(
			agentPodConfig *AgentPodConfig,
			_ *CredentialIssuerLocationConfig,
			kubeSystemPodInformer corev1informers.PodInformer,
			agentPodInformer corev1informers.PodInformer,
			observableWithInformerOption *testutil.ObservableWithInformerOption,
		) {
			_ = NewDeleterController(
				agentPodConfig,
				nil, // k8sClient, shouldn't matter
				kubeSystemPodInformer,
				agentPodInformer,
				observableWithInformerOption.WithInformer,
			)
		},
	)
}

func TestDeleterControllerSync(t *testing.T) {
	spec.Run(t, "DeleterControllerSync", func(t *testing.T, when spec.G, it spec.S) {
		const kubeSystemNamespace = "kube-system"
		const agentPodNamespace = "agent-pod-namespace"

		var r *require.Assertions

		var subject controllerlib.Controller
		var kubeAPIClient *kubernetesfake.Clientset
		var kubeSystemInformerClient *kubernetesfake.Clientset
		var kubeSystemInformers kubeinformers.SharedInformerFactory
		var agentInformerClient *kubernetesfake.Clientset
		var agentInformers kubeinformers.SharedInformerFactory
		var cancelContext context.Context
		var cancelContextCancelFunc context.CancelFunc
		var syncContext *controllerlib.Context
		var controllerManagerPod, agentPod *corev1.Pod
		var podsGVR schema.GroupVersionResource

		// Defer starting the informers until the last possible moment so that the
		// nested Before's can keep adding things to the informer caches.
		var startInformersAndController = func() {
			// Set this at the last second to allow for injection of server override.
			subject = NewDeleterController(
				&AgentPodConfig{
					Namespace:      agentPodNamespace,
					ContainerImage: "some-agent-image",
					PodNamePrefix:  "some-agent-name-",
					AdditionalLabels: map[string]string{
						"myLabelKey1": "myLabelValue1",
						"myLabelKey2": "myLabelValue2",
					},
				},
				kubeAPIClient,
				kubeSystemInformers.Core().V1().Pods(),
				agentInformers.Core().V1().Pods(),
				controllerlib.WithInformer,
			)

			// Set this at the last second to support calling subject.Name().
			syncContext = &controllerlib.Context{
				Context: cancelContext,
				Name:    subject.Name(),
				Key: controllerlib.Key{
					Namespace: kubeSystemNamespace,
					Name:      "should-not-matter",
				},
			}

			// Must start informers before calling TestRunSynchronously()
			kubeSystemInformers.Start(cancelContext.Done())
			agentInformers.Start(cancelContext.Done())
			controllerlib.TestRunSynchronously(t, subject)
		}

		var requireAgentPodWasDeleted = func() {
			r.Equal(
				[]coretesting.Action{coretesting.NewDeleteAction(podsGVR, agentPodNamespace, agentPod.Name)},
				kubeAPIClient.Actions(),
			)
		}

		it.Before(func() {
			r = require.New(t)

			cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())

			kubeAPIClient = kubernetesfake.NewSimpleClientset()

			kubeSystemInformerClient = kubernetesfake.NewSimpleClientset()
			kubeSystemInformers = kubeinformers.NewSharedInformerFactory(kubeSystemInformerClient, 0)

			agentInformerClient = kubernetesfake.NewSimpleClientset()
			agentInformers = kubeinformers.NewSharedInformerFactory(agentInformerClient, 0)

			controllerManagerPod, agentPod = exampleControllerManagerAndAgentPods(
				kubeSystemNamespace, agentPodNamespace, "ignored for this test", "ignored for this test",
			)

			podsGVR = schema.GroupVersionResource{
				Group:    corev1.SchemeGroupVersion.Group,
				Version:  corev1.SchemeGroupVersion.Version,
				Resource: "pods",
			}

			// Add a pod into the test that doesn't matter to make sure we don't accidentally
			// trigger any logic on this thing.
			ignorablePod := corev1.Pod{}
			ignorablePod.Name = "some-ignorable-pod"
			r.NoError(kubeSystemInformerClient.Tracker().Add(&ignorablePod))
			r.NoError(agentInformerClient.Tracker().Add(&ignorablePod))
			r.NoError(kubeAPIClient.Tracker().Add(&ignorablePod))
		})

		it.After(func() {
			cancelContextCancelFunc()
		})

when("there is an agent pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(agentInformerClient.Tracker().Add(agentPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(agentPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a matching controller manager pod", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does nothing", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Empty(kubeAPIClient.Actions())
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the controller manager via volume mounts", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{Name: "some-other-volume-mount"}}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the controller manager via volumes", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Volumes = []corev1.Volume{{Name: "some-other-volume"}}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the controller manager via node selector", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.NodeSelector = map[string]string{
|
|
||||||
"some-other-node-selector-key": "some-other-node-selector-value",
|
|
||||||
}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the controller manager via node name", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.NodeName = "some-other-node-name"
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the controller manager via tolerations", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Spec.Tolerations = []corev1.Toleration{{Key: "some-other-toleration-key"}}
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, controllerManagerPod, controllerManagerPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync via restart policy", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Spec.RestartPolicy = corev1.RestartPolicyAlways
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync via automount service account token", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
t := true
|
|
||||||
updatedAgentPod.Spec.AutomountServiceAccountToken = &t
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via name", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Spec.Containers[0].Name = "some-new-name"
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via image", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Spec.Containers[0].Image = "new-image"
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via runAsUser", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
notRoot := int64(1234)
|
|
||||||
updatedAgentPod.Spec.SecurityContext.RunAsUser = ¬Root
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via runAsGroup", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
notRoot := int64(1234)
|
|
||||||
updatedAgentPod.Spec.SecurityContext.RunAsGroup = ¬Root
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via having a nil SecurityContext", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Spec.SecurityContext = nil
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via labels", func() {
|
|
||||||
when("an additional label's value was changed", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.ObjectMeta.Labels = map[string]string{
|
|
||||||
"kube-cert-agent.pinniped.dev": "true",
|
|
||||||
// the value of a label is wrong so the pod should be deleted so it can get recreated with the new labels
|
|
||||||
"myLabelKey1": "myLabelValue1-outdated-value",
|
|
||||||
"myLabelKey2": "myLabelValue2-outdated-value",
|
|
||||||
}
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("an additional custom label was added since the agent pod was created", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.ObjectMeta.Labels = map[string]string{
|
|
||||||
"kube-cert-agent.pinniped.dev": "true",
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
// "myLabelKey2" is missing so the pod should be deleted so it can get recreated with the new labels
|
|
||||||
}
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod has extra labels that seem unrelated to the additional labels", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.ObjectMeta.Labels = map[string]string{
|
|
||||||
"kube-cert-agent.pinniped.dev": "true",
|
|
||||||
"myLabelKey1": "myLabelValue1",
|
|
||||||
"myLabelKey2": "myLabelValue2",
|
|
||||||
"extra-label": "not-related-to-the-sepcified-additional-labels",
|
|
||||||
}
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("does not delete the agent pod because someone else might have put those labels on it", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Empty(kubeAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("the agent pod is out of sync with the template via command", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
updatedAgentPod := agentPod.DeepCopy()
|
|
||||||
updatedAgentPod.Spec.Containers[0].Command = []string{"some", "new", "command"}
|
|
||||||
r.NoError(agentInformerClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Update(podsGVR, updatedAgentPod, updatedAgentPod.Namespace))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a non-matching controller manager pod via uid", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.UID = "some-other-controller-manager-uid"
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is a non-matching controller manager pod via name", func() {
|
|
||||||
it.Before(func() {
|
|
||||||
controllerManagerPod.Name = "some-other-controller-manager-name"
|
|
||||||
r.NoError(kubeSystemInformerClient.Tracker().Add(controllerManagerPod))
|
|
||||||
r.NoError(kubeAPIClient.Tracker().Add(controllerManagerPod))
|
|
||||||
})
|
|
||||||
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is no matching controller manager pod", func() {
|
|
||||||
it("deletes the agent pod", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
requireAgentPodWasDeleted()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
when("there is no agent pod", func() {
|
|
||||||
it("does nothing", func() {
|
|
||||||
startInformersAndController()
|
|
||||||
err := controllerlib.TestSync(t, subject, *syncContext)
|
|
||||||
|
|
||||||
r.NoError(err)
|
|
||||||
r.Empty(kubeAPIClient.Actions())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}, spec.Parallel(), spec.Report(report.Terminal{}))
|
|
||||||
}
|
|
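
// The cases above all reduce to one question: does the live agent pod still
// match the pod that would be generated from the template? The deleted
// controller's actual comparison logic is not shown in this part of the diff,
// so the sketch below is illustrative only, with a hypothetical helper name;
// it checks just the fields the tests above exercise (corev1 and apiequality
// refer to the same k8s.io imports used throughout these files).
func agentPodLooksUpToDateSketch(actual, desired *corev1.Pod) bool {
	// Image, command, and security context must match the template exactly.
	if actual.Spec.Containers[0].Image != desired.Spec.Containers[0].Image {
		return false
	}
	if !apiequality.Semantic.DeepEqual(actual.Spec.Containers[0].Command, desired.Spec.Containers[0].Command) {
		return false
	}
	if !apiequality.Semantic.DeepEqual(actual.Spec.SecurityContext, desired.Spec.SecurityContext) {
		return false
	}
	// Every desired label must be present with the desired value, but extra
	// unrelated labels are tolerated (someone else might have added them).
	for k, v := range desired.Labels {
		if actual.Labels[k] != v {
			return false
		}
	}
	return true
}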
@ -1,232 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"encoding/base64"
	"fmt"

	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/errors"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/tools/clientcmd"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	pinnipedclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controller/issuerconfig"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/dynamiccert"
)

const (
	ClusterInfoNamespace    = "kube-public"
	clusterInfoName         = "cluster-info"
	clusterInfoConfigMapKey = "kubeconfig"
)

type execerController struct {
	credentialIssuerLocationConfig *CredentialIssuerLocationConfig
	credentialIssuerLabels         map[string]string
	discoveryURLOverride           *string
	dynamicCertProvider            dynamiccert.Private
	podCommandExecutor             PodCommandExecutor
	clock                          clock.Clock
	pinnipedAPIClient              pinnipedclientset.Interface
	agentPodInformer               corev1informers.PodInformer
	configMapInformer              corev1informers.ConfigMapInformer
}

// NewExecerController returns a controllerlib.Controller that listens for agent pods with proper
// cert/key path annotations and execs into them to get the cert/key material. It sets the retrieved
// key material in a provided dynamicCertProvider.
//
// It also is tasked with updating the CredentialIssuer, located via the provided
// credentialIssuerLocationConfig, with any errors that it encounters.
func NewExecerController(
	credentialIssuerLocationConfig *CredentialIssuerLocationConfig,
	credentialIssuerLabels map[string]string,
	discoveryURLOverride *string,
	dynamicCertProvider dynamiccert.Private,
	podCommandExecutor PodCommandExecutor,
	pinnipedAPIClient pinnipedclientset.Interface,
	clock clock.Clock,
	agentPodInformer corev1informers.PodInformer,
	configMapInformer corev1informers.ConfigMapInformer,
	withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
	return controllerlib.New(
		controllerlib.Config{
			Name: "kube-cert-agent-execer-controller",
			Syncer: &execerController{
				credentialIssuerLocationConfig: credentialIssuerLocationConfig,
				credentialIssuerLabels:         credentialIssuerLabels,
				discoveryURLOverride:           discoveryURLOverride,
				dynamicCertProvider:            dynamicCertProvider,
				podCommandExecutor:             podCommandExecutor,
				pinnipedAPIClient:              pinnipedAPIClient,
				clock:                          clock,
				agentPodInformer:               agentPodInformer,
				configMapInformer:              configMapInformer,
			},
		},
		withInformer(
			agentPodInformer,
			pinnipedcontroller.SimpleFilter(isAgentPod, nil), // nil parent func is fine because each event is distinct
			controllerlib.InformerOption{},
		),
		withInformer(
			configMapInformer,
			pinnipedcontroller.NameAndNamespaceExactMatchFilterFactory(clusterInfoName, ClusterInfoNamespace),
			controllerlib.InformerOption{},
		),
	)
}

func (c *execerController) Sync(ctx controllerlib.Context) error {
	maybeAgentPod, err := c.agentPodInformer.Lister().Pods(ctx.Key.Namespace).Get(ctx.Key.Name)
	notFound := k8serrors.IsNotFound(err)
	if err != nil && !notFound {
		return fmt.Errorf("failed to get %s/%s pod: %w", ctx.Key.Namespace, ctx.Key.Name, err)
	}
	if notFound {
		// The pod in question does not exist, so it was probably deleted
		return nil
	}

	certPath, keyPath := c.getKeypairFilePaths(maybeAgentPod)
	if certPath == "" || keyPath == "" {
		// The annotator controller has not annotated this agent pod yet, or it is not an agent pod at all
		return nil
	}
	agentPod := maybeAgentPod

	if agentPod.Status.Phase != v1.PodRunning {
		// Seems to be an agent pod, but it is not ready yet
		return nil
	}

	certPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", certPath)
	if err != nil {
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, err),
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	keyPEM, err := c.podCommandExecutor.Exec(agentPod.Namespace, agentPod.Name, "cat", keyPath)
	if err != nil {
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, err),
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
		err = fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			strategyError(c.clock, err),
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	apiInfo, err := c.getTokenCredentialRequestAPIInfo()
	if err != nil {
		strategyResultUpdateErr := issuerconfig.UpdateStrategy(
			ctx.Context,
			c.credentialIssuerLocationConfig.Name,
			c.credentialIssuerLabels,
			c.pinnipedAPIClient,
			configv1alpha1.CredentialIssuerStrategy{
				Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
				Status:         configv1alpha1.ErrorStrategyStatus,
				Reason:         configv1alpha1.CouldNotGetClusterInfoStrategyReason,
				Message:        err.Error(),
				LastUpdateTime: metav1.NewTime(c.clock.Now()),
			},
		)
		return newAggregate(err, strategyResultUpdateErr)
	}

	return issuerconfig.UpdateStrategy(
		ctx.Context,
		c.credentialIssuerLocationConfig.Name,
		c.credentialIssuerLabels,
		c.pinnipedAPIClient,
		configv1alpha1.CredentialIssuerStrategy{
			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
			Status:         configv1alpha1.SuccessStrategyStatus,
			Reason:         configv1alpha1.FetchedKeyStrategyReason,
			Message:        "Key was fetched successfully",
			LastUpdateTime: metav1.NewTime(c.clock.Now()),
			Frontend: &configv1alpha1.CredentialIssuerFrontend{
				Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
				TokenCredentialRequestAPIInfo: apiInfo,
			},
		},
	)
}
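
// Sync above calls a strategyError helper that is defined in another file of
// this package, so it does not appear in this hunk. Below is a plausible
// sketch of that helper, under the assumption that exec failures map to the
// CouldNotFetchKeyStrategyReason that this package's tests expect; the real
// definition may differ in detail.
func strategyErrorSketch(clock clock.Clock, err error) configv1alpha1.CredentialIssuerStrategy {
	return configv1alpha1.CredentialIssuerStrategy{
		Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
		Status:         configv1alpha1.ErrorStrategyStatus,
		Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
		Message:        err.Error(),
		LastUpdateTime: metav1.NewTime(clock.Now()),
	}
}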

func (c *execerController) getTokenCredentialRequestAPIInfo() (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
	configMap, err := c.configMapInformer.
		Lister().
		ConfigMaps(ClusterInfoNamespace).
		Get(clusterInfoName)
	if err != nil {
		return nil, fmt.Errorf("failed to get %s configmap: %w", clusterInfoName, err)
	}

	kubeConfigYAML, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
	if !kubeConfigPresent {
		return nil, fmt.Errorf("failed to get %s key from %s configmap", clusterInfoConfigMapKey, clusterInfoName)
	}

	kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
	if err != nil {
		return nil, fmt.Errorf("failed to load data from %s key in %s configmap", clusterInfoConfigMapKey, clusterInfoName)
	}

	for _, v := range kubeconfig.Clusters {
		result := &configv1alpha1.TokenCredentialRequestAPIInfo{
			Server:                   v.Server,
			CertificateAuthorityData: base64.StdEncoding.EncodeToString(v.CertificateAuthorityData),
		}
		if c.discoveryURLOverride != nil {
			result.Server = *c.discoveryURLOverride
		}
		return result, nil
	}
	return nil, fmt.Errorf("kubeconfig in %s key in %s configmap did not contain any clusters", clusterInfoConfigMapKey, clusterInfoName)
}

func (c *execerController) getKeypairFilePaths(pod *v1.Pod) (string, string) {
	annotations := pod.Annotations
	if annotations == nil {
		annotations = make(map[string]string)
	}

	certPath := annotations[agentPodCertPathAnnotationKey]
	keyPath := annotations[agentPodKeyPathAnnotationKey]

	return certPath, keyPath
}

func newAggregate(errs ...error) error {
	return errors.NewAggregate(errs)
}
@ -1,733 +0,0 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package kubecertagent

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"testing"
	"time"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/clock"
	kubeinformers "k8s.io/client-go/informers"
	kubernetesfake "k8s.io/client-go/kubernetes/fake"
	coretesting "k8s.io/client-go/testing"

	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	pinnipedfake "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/fake"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/dynamiccert"
	"go.pinniped.dev/internal/here"
	"go.pinniped.dev/internal/testutil"
)

func TestExecerControllerOptions(t *testing.T) {
	spec.Run(t, "options", func(t *testing.T, when spec.G, it spec.S) {
		var r *require.Assertions
		var observableWithInformerOption *testutil.ObservableWithInformerOption
		var agentPodInformerFilter controllerlib.Filter

		whateverPod := &corev1.Pod{}

		it.Before(func() {
			r = require.New(t)
			observableWithInformerOption = testutil.NewObservableWithInformerOption()
			informerFactory := kubeinformers.NewSharedInformerFactory(nil, 0)
			agentPodsInformer := informerFactory.Core().V1().Pods()
			configMapsInformer := informerFactory.Core().V1().ConfigMaps()
			_ = NewExecerController(
				&CredentialIssuerLocationConfig{
					Name: "ignored by this test",
				},
				nil, // credentialIssuerLabels, not needed for this test
				nil, // discoveryURLOverride, not needed for this test
				nil, // dynamicCertProvider, not needed for this test
				nil, // podCommandExecutor, not needed for this test
				nil, // pinnipedAPIClient, not needed for this test
				nil, // clock, not needed for this test
				agentPodsInformer,
				configMapsInformer,
				observableWithInformerOption.WithInformer,
			)
			agentPodInformerFilter = observableWithInformerOption.GetFilterForInformer(agentPodsInformer)
		})

		when("the change is happening in the agent's namespace", func() {
			when("a pod with all agent labels is added/updated/deleted", func() {
				it("returns true", func() {
					pod := &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"kube-cert-agent.pinniped.dev": "true",
							},
						},
					}

					r.True(agentPodInformerFilter.Add(pod))
					r.True(agentPodInformerFilter.Update(whateverPod, pod))
					r.True(agentPodInformerFilter.Update(pod, whateverPod))
					r.True(agentPodInformerFilter.Delete(pod))
				})
			})

			when("a pod missing the agent label is added/updated/deleted", func() {
				it("returns false", func() {
					pod := &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"some-other-label-key": "some-other-label-value",
							},
						},
					}

					r.False(agentPodInformerFilter.Add(pod))
					r.False(agentPodInformerFilter.Update(whateverPod, pod))
					r.False(agentPodInformerFilter.Update(pod, whateverPod))
					r.False(agentPodInformerFilter.Delete(pod))
				})
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}

type fakePodExecutor struct {
	r *require.Assertions

	resultsToReturn []string
	errorsToReturn  []error

	calledWithPodName        []string
	calledWithPodNamespace   []string
	calledWithCommandAndArgs [][]string

	callCount int
}

func (s *fakePodExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
	s.calledWithPodNamespace = append(s.calledWithPodNamespace, podNamespace)
	s.calledWithPodName = append(s.calledWithPodName, podName)
	s.calledWithCommandAndArgs = append(s.calledWithCommandAndArgs, commandAndArgs)
	s.r.Less(s.callCount, len(s.resultsToReturn), "unexpected extra invocation of fakePodExecutor")
	result := s.resultsToReturn[s.callCount]
	var err error
	if s.errorsToReturn != nil {
		s.r.Less(s.callCount, len(s.errorsToReturn), "unexpected extra invocation of fakePodExecutor")
		err = s.errorsToReturn[s.callCount]
	}
	s.callCount++
	if err != nil {
		return "", err
	}
	return result, nil
}
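
// A compact illustration (not part of the original file; all values are
// placeholders) of the fake's contract as implemented above: the i-th call to
// Exec consumes index i of both resultsToReturn and errorsToReturn, which is
// how the specs below script a success followed by a failure, or vice versa.
func TestFakePodExecutorScriptingSketch(t *testing.T) {
	fake := &fakePodExecutor{r: require.New(t)}
	fake.resultsToReturn = []string{"fake-cert-pem", ""}
	fake.errorsToReturn = []error{nil, errors.New("exec failed")}

	// First scripted call succeeds and returns the first result.
	result, err := fake.Exec("some-namespace", "some-pod", "cat", "/some/cert/path")
	require.NoError(t, err)
	require.Equal(t, "fake-cert-pem", result)

	// Second scripted call returns the second error instead of a result.
	_, err = fake.Exec("some-namespace", "some-pod", "cat", "/some/key/path")
	require.EqualError(t, err, "exec failed")
}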
func TestManagerControllerSync(t *testing.T) {
	name := t.Name()
	spec.Run(t, "Sync", func(t *testing.T, when spec.G, it spec.S) {
		const agentPodNamespace = "some-namespace"
		const agentPodName = "some-agent-pod-name-123"
		const certPathAnnotationName = "kube-cert-agent.pinniped.dev/cert-path"
		const keyPathAnnotationName = "kube-cert-agent.pinniped.dev/key-path"
		const fakeCertPath = "/some/cert/path"
		const fakeKeyPath = "/some/key/path"
		const credentialIssuerResourceName = "ci-resource-name"

		var r *require.Assertions

		var subject controllerlib.Controller
		var cancelContext context.Context
		var cancelContextCancelFunc context.CancelFunc
		var syncContext *controllerlib.Context
		var pinnipedAPIClient *pinnipedfake.Clientset
		var kubeInformerFactory kubeinformers.SharedInformerFactory
		var kubeClientset *kubernetesfake.Clientset
		var fakeExecutor *fakePodExecutor
		var credentialIssuerLabels map[string]string
		var discoveryURLOverride *string
		var dynamicCertProvider dynamiccert.Provider
		var fakeCertPEM, fakeKeyPEM string
		var credentialIssuerGVR schema.GroupVersionResource
		var frozenNow time.Time
		var defaultDynamicCertProviderCert string
		var defaultDynamicCertProviderKey string

		// Defer starting the informers until the last possible moment so that the
		// nested Before's can keep adding things to the informer caches.
		var startInformersAndController = func() {
			// Set this at the last second to allow for injection of server override.
			subject = NewExecerController(
				&CredentialIssuerLocationConfig{
					Name: credentialIssuerResourceName,
				},
				credentialIssuerLabels,
				discoveryURLOverride,
				dynamicCertProvider,
				fakeExecutor,
				pinnipedAPIClient,
				clock.NewFakeClock(frozenNow),
				kubeInformerFactory.Core().V1().Pods(),
				kubeInformerFactory.Core().V1().ConfigMaps(),
				controllerlib.WithInformer,
			)

			// Set this at the last second to support calling subject.Name().
			syncContext = &controllerlib.Context{
				Context: cancelContext,
				Name:    subject.Name(),
				Key: controllerlib.Key{
					Namespace: agentPodNamespace,
					Name:      agentPodName,
				},
			}

			// Must start informers before calling TestRunSynchronously()
			kubeInformerFactory.Start(cancelContext.Done())
			controllerlib.TestRunSynchronously(t, subject)
		}

		var newAgentPod = func(agentPodName string, hasCertPathAnnotations bool) *corev1.Pod {
			pod := &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      agentPodName,
					Namespace: agentPodNamespace,
					Labels: map[string]string{
						"some-label-key": "some-label-value",
					},
				},
			}
			if hasCertPathAnnotations {
				pod.Annotations = map[string]string{
					certPathAnnotationName: fakeCertPath,
					keyPathAnnotationName:  fakeKeyPath,
				}
			}
			return pod
		}

		var requireDynamicCertProviderHasDefaultValues = func() {
			actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
			r.Equal(defaultDynamicCertProviderCert, string(actualCertPEM))
			r.Equal(defaultDynamicCertProviderKey, string(actualKeyPEM))
		}

		var requireNoExternalActionsTaken = func() {
			r.Empty(pinnipedAPIClient.Actions())
			r.Zero(fakeExecutor.callCount)
			requireDynamicCertProviderHasDefaultValues()
		}

		it.Before(func() {
			r = require.New(t)

			crt, key, err := testutil.CreateCertificate(
				time.Now().Add(-time.Hour),
				time.Now().Add(time.Hour),
			)
			require.NoError(t, err)
			defaultDynamicCertProviderCert = string(crt)
			defaultDynamicCertProviderKey = string(key)

			cancelContext, cancelContextCancelFunc = context.WithCancel(context.Background())
			pinnipedAPIClient = pinnipedfake.NewSimpleClientset()
			kubeClientset = kubernetesfake.NewSimpleClientset()
			kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeClientset, 0)
			fakeExecutor = &fakePodExecutor{r: r}
			frozenNow = time.Date(2020, time.September, 23, 7, 42, 0, 0, time.Local)
			dynamicCertProvider = dynamiccert.NewCA(name)
			err = dynamicCertProvider.SetCertKeyContent([]byte(defaultDynamicCertProviderCert), []byte(defaultDynamicCertProviderKey))
			r.NoError(err)

			loadFile := func(filename string) string {
				bytes, err := ioutil.ReadFile(filename)
				r.NoError(err)
				return string(bytes)
			}
			fakeCertPEM = loadFile("./testdata/test.crt")
			fakeKeyPEM = loadFile("./testdata/test.key")

			credentialIssuerGVR = schema.GroupVersionResource{
				Group:    configv1alpha1.GroupName,
				Version:  configv1alpha1.SchemeGroupVersion.Version,
				Resource: "credentialissuers",
			}
		})

		it.After(func() {
			cancelContextCancelFunc()
		})

		when("there are not yet any agent pods or they were deleted", func() {
			it.Before(func() {
				unrelatedPod := &corev1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "some other pod",
						Namespace: agentPodNamespace,
					},
				}
				r.NoError(kubeClientset.Tracker().Add(unrelatedPod))
				startInformersAndController()
			})

			it("does nothing", func() {
				r.NoError(controllerlib.TestSync(t, subject, *syncContext))
				requireNoExternalActionsTaken()
			})
		})

		when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is not yet annotated by the annotater controller", func() {
			it.Before(func() {
				agentPod := newAgentPod(agentPodName, false)
				r.NoError(kubeClientset.Tracker().Add(agentPod))
				startInformersAndController()
			})

			it("does nothing", func() {
				r.NoError(controllerlib.TestSync(t, subject, *syncContext))
				requireNoExternalActionsTaken()
			})
		})

		when("there is an agent pod, as determined by its labels matching the agent pod template labels, and it was annotated by the annotater controller, but it is not Running", func() {
			it.Before(func() {
				agentPod := newAgentPod(agentPodName, true)
				agentPod.Status.Phase = corev1.PodPending // not Running
				r.NoError(kubeClientset.Tracker().Add(agentPod))
				startInformersAndController()
			})

			it("does nothing", func() {
				r.NoError(controllerlib.TestSync(t, subject, *syncContext))
				requireNoExternalActionsTaken()
			})
		})

		when("there is an agent pod, as determined by its labels matching the agent pod template labels, which is already annotated by the annotater controller, and it is Running", func() {
			it.Before(func() {
				targetAgentPod := newAgentPod(agentPodName, true)
				targetAgentPod.Status.Phase = corev1.PodRunning
				anotherAgentPod := newAgentPod("some-other-agent-pod-which-is-not-the-context-of-this-sync", true)
				r.NoError(kubeClientset.Tracker().Add(targetAgentPod))
				r.NoError(kubeClientset.Tracker().Add(anotherAgentPod))
			})

			when("the resulting pod execs will succeed", func() {
				it.Before(func() {
					fakeExecutor.resultsToReturn = []string{fakeCertPEM, fakeKeyPEM}
				})

				when("the cluster-info ConfigMap is not found", func() {
					it("returns an error and updates the strategy with an error", func() {
						startInformersAndController()
						r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to get cluster-info configmap: configmap "cluster-info" not found`)

						expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
							TypeMeta: metav1.TypeMeta{},
							ObjectMeta: metav1.ObjectMeta{
								Name: credentialIssuerResourceName,
							},
						}

						expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
							TypeMeta: metav1.TypeMeta{},
							ObjectMeta: metav1.ObjectMeta{
								Name: credentialIssuerResourceName,
							},
							Status: configv1alpha1.CredentialIssuerStatus{
								Strategies: []configv1alpha1.CredentialIssuerStrategy{
									{
										Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
										Status:         configv1alpha1.ErrorStrategyStatus,
										Reason:         configv1alpha1.CouldNotGetClusterInfoStrategyReason,
										Message:        `failed to get cluster-info configmap: configmap "cluster-info" not found`,
										LastUpdateTime: metav1.NewTime(frozenNow),
									},
								},
							},
						}
						expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
						expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
						expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
						r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
					})
				})

				when("the cluster-info ConfigMap is missing a key", func() {
					it.Before(func() {
						r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
							ObjectMeta: metav1.ObjectMeta{
								Namespace: ClusterInfoNamespace,
								Name:      clusterInfoName,
							},
							Data: map[string]string{"uninteresting-key": "uninteresting-value"},
						}))
					})
					it("returns an error", func() {
						startInformersAndController()
						r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to get kubeconfig key from cluster-info configmap`)
					})
				})

				when("the cluster-info ConfigMap contains invalid YAML", func() {
					it.Before(func() {
						r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
							ObjectMeta: metav1.ObjectMeta{
								Namespace: ClusterInfoNamespace,
								Name:      clusterInfoName,
							},
							Data: map[string]string{"kubeconfig": "invalid-yaml"},
						}))
					})
					it("returns an error", func() {
						startInformersAndController()
						r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `failed to load data from kubeconfig key in cluster-info configmap`)
					})
				})

				when("the cluster-info ConfigMap contains an empty list of clusters", func() {
					it.Before(func() {
						r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
							ObjectMeta: metav1.ObjectMeta{
								Namespace: ClusterInfoNamespace,
								Name:      clusterInfoName,
							},
							Data: map[string]string{
								"kubeconfig": here.Doc(`
									kind: Config
									apiVersion: v1
									clusters: []
								`),
								"uninteresting-key": "uninteresting-value",
							},
						}))
					})
					it("returns an error", func() {
						startInformersAndController()
						r.EqualError(controllerlib.TestSync(t, subject, *syncContext), `kubeconfig in kubeconfig key in cluster-info configmap did not contain any clusters`)
					})
				})

				when("the cluster-info ConfigMap is valid", func() {
					it.Before(func() {
						const caData = "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=" // "some-certificate-authority-data" base64 encoded
						const kubeServerURL = "https://some-server"
						r.NoError(kubeClientset.Tracker().Add(&corev1.ConfigMap{
							ObjectMeta: metav1.ObjectMeta{
								Namespace: ClusterInfoNamespace,
								Name:      clusterInfoName,
							},
							Data: map[string]string{
								"kubeconfig": here.Docf(`
									kind: Config
									apiVersion: v1
									clusters:
									- name: ""
									  cluster:
									    certificate-authority-data: "%s"
									    server: "%s"`,
									caData, kubeServerURL),
								"uninteresting-key": "uninteresting-value",
							},
						}))
					})

					it("execs to the agent pod to get the keys and updates the dynamic certificates provider with the new certs", func() {
						startInformersAndController()
						r.NoError(controllerlib.TestSync(t, subject, *syncContext))

						r.Equal(2, fakeExecutor.callCount)

						r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[0])
						r.Equal(agentPodName, fakeExecutor.calledWithPodName[0])
						r.Equal([]string{"cat", fakeCertPath}, fakeExecutor.calledWithCommandAndArgs[0])

						r.Equal(agentPodNamespace, fakeExecutor.calledWithPodNamespace[1])
						r.Equal(agentPodName, fakeExecutor.calledWithPodName[1])
						r.Equal([]string{"cat", fakeKeyPath}, fakeExecutor.calledWithCommandAndArgs[1])

						actualCertPEM, actualKeyPEM := dynamicCertProvider.CurrentCertKeyContent()
						r.Equal(fakeCertPEM, string(actualCertPEM))
						r.Equal(fakeKeyPEM, string(actualKeyPEM))
					})

					when("there is already a CredentialIssuer", func() {
						var initialCredentialIssuer *configv1alpha1.CredentialIssuer

						it.Before(func() {
							initialCredentialIssuer = &configv1alpha1.CredentialIssuer{
								TypeMeta: metav1.TypeMeta{},
								ObjectMeta: metav1.ObjectMeta{
									Name: credentialIssuerResourceName,
								},
								Status: configv1alpha1.CredentialIssuerStatus{
									Strategies: []configv1alpha1.CredentialIssuerStrategy{},
								},
							}
							r.NoError(pinnipedAPIClient.Tracker().Add(initialCredentialIssuer))
						})

						it("also updates the existing CredentialIssuer status field", func() {
							startInformersAndController()
							r.NoError(controllerlib.TestSync(t, subject, *syncContext))

							// The first update to the CredentialIssuer will set the strategy entry
							expectedCredentialIssuer := initialCredentialIssuer.DeepCopy()
							expectedCredentialIssuer.Status.Strategies = []configv1alpha1.CredentialIssuerStrategy{
								{
									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
									Status:         configv1alpha1.SuccessStrategyStatus,
									Reason:         configv1alpha1.FetchedKeyStrategyReason,
									Message:        "Key was fetched successfully",
									LastUpdateTime: metav1.NewTime(frozenNow),
									Frontend: &configv1alpha1.CredentialIssuerFrontend{
										Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
										TokenCredentialRequestAPIInfo: &configv1alpha1.TokenCredentialRequestAPIInfo{
											Server:                   "https://some-server",
											CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
										},
									},
								},
							}
							expectedCredentialIssuer.Status.KubeConfigInfo = &configv1alpha1.CredentialIssuerKubeConfigInfo{
								Server:                   "https://some-server",
								CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
							}
							expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
							expectedCreateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
							r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction}, pinnipedAPIClient.Actions())
						})

						when("updating the CredentialIssuer fails", func() {
							it.Before(func() {
								pinnipedAPIClient.PrependReactor(
									"update",
									"credentialissuers",
									func(_ coretesting.Action) (bool, runtime.Object, error) {
										return true, nil, errors.New("some update error")
									},
								)
							})

							it("returns an error", func() {
								startInformersAndController()
								err := controllerlib.TestSync(t, subject, *syncContext)
								r.EqualError(err, "could not create or update credentialissuer: some update error")
							})
						})
					})

					when("there is not already a CredentialIssuer", func() {
						it.Before(func() {
							server := "https://overridden-server-url.example.com"
							discoveryURLOverride = &server
							credentialIssuerLabels = map[string]string{"foo": "bar"}
							startInformersAndController()
						})

						it("also creates the CredentialIssuer with the appropriate status field and labels", func() {
							r.NoError(controllerlib.TestSync(t, subject, *syncContext))

							expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
								TypeMeta: metav1.TypeMeta{},
								ObjectMeta: metav1.ObjectMeta{
									Name:   credentialIssuerResourceName,
									Labels: map[string]string{"foo": "bar"},
								},
							}

							expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
								TypeMeta: metav1.TypeMeta{},
								ObjectMeta: metav1.ObjectMeta{
									Name:   credentialIssuerResourceName,
									Labels: map[string]string{"foo": "bar"},
								},
								Status: configv1alpha1.CredentialIssuerStatus{
									Strategies: []configv1alpha1.CredentialIssuerStrategy{
										{
											Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
											Status:         configv1alpha1.SuccessStrategyStatus,
											Reason:         configv1alpha1.FetchedKeyStrategyReason,
											Message:        "Key was fetched successfully",
											LastUpdateTime: metav1.NewTime(frozenNow),
											Frontend: &configv1alpha1.CredentialIssuerFrontend{
												Type: configv1alpha1.TokenCredentialRequestAPIFrontendType,
												TokenCredentialRequestAPIInfo: &configv1alpha1.TokenCredentialRequestAPIInfo{
													Server:                   "https://overridden-server-url.example.com",
													CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
												},
											},
										},
									},
									KubeConfigInfo: &configv1alpha1.CredentialIssuerKubeConfigInfo{
										Server:                   "https://overridden-server-url.example.com",
										CertificateAuthorityData: "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=",
									},
								},
							}
							expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
							expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
							expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
							r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
						})
					})
				})
			})

			when("the first resulting pod exec will fail", func() {
				var podExecErrorMessage string

				it.Before(func() {
					podExecErrorMessage = "some pod exec error message"
					fakeExecutor.errorsToReturn = []error{fmt.Errorf(podExecErrorMessage), nil}
					fakeExecutor.resultsToReturn = []string{"", fakeKeyPEM}
					startInformersAndController()
				})

				it("does not update the dynamic certificates provider", func() {
					r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
					requireDynamicCertProviderHasDefaultValues()
				})

				it("creates or updates the CredentialIssuer status field with an error", func() {
					r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)

					expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
					}

					expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
						Status: configv1alpha1.CredentialIssuerStatus{
							Strategies: []configv1alpha1.CredentialIssuerStrategy{
								{
									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
									Status:         configv1alpha1.ErrorStrategyStatus,
									Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
									Message:        podExecErrorMessage,
									LastUpdateTime: metav1.NewTime(frozenNow),
								},
							},
						},
					}
					expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
					expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
					expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
					r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
				})
			})

			when("the second resulting pod exec will fail", func() {
				var podExecErrorMessage string

				it.Before(func() {
					podExecErrorMessage = "some pod exec error message"
					fakeExecutor.errorsToReturn = []error{nil, fmt.Errorf(podExecErrorMessage)}
					fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
					startInformersAndController()
				})

				it("does not update the dynamic certificates provider", func() {
					r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)
					requireDynamicCertProviderHasDefaultValues()
				})

				it("creates or updates the CredentialIssuer status field with an error", func() {
					r.EqualError(controllerlib.TestSync(t, subject, *syncContext), podExecErrorMessage)

					expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
					}

					expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
						Status: configv1alpha1.CredentialIssuerStatus{
							Strategies: []configv1alpha1.CredentialIssuerStrategy{
								{
									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
									Status:         configv1alpha1.ErrorStrategyStatus,
									Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
									Message:        podExecErrorMessage,
									LastUpdateTime: metav1.NewTime(frozenNow),
								},
							},
						},
					}
					expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
					expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
					expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
					r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
				})
			})

			when("the third resulting pod exec has invalid key data", func() {
				var keyParseErrorMessage string

				it.Before(func() {
					keyParseErrorMessage = "failed to set signing cert/key content from agent pod some-namespace/some-agent-pod-name-123: TestManagerControllerSync: attempt to set invalid key pair: tls: failed to find any PEM data in key input"
					fakeExecutor.errorsToReturn = []error{nil, nil}
					fakeExecutor.resultsToReturn = []string{fakeCertPEM, ""}
					startInformersAndController()
				})

				it("does not update the dynamic certificates provider", func() {
					r.EqualError(controllerlib.TestSync(t, subject, *syncContext), keyParseErrorMessage)
					requireDynamicCertProviderHasDefaultValues()
				})

				it("creates or updates the CredentialIssuer status field with an error", func() {
					r.EqualError(controllerlib.TestSync(t, subject, *syncContext), keyParseErrorMessage)

					expectedCreateCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
					}

					expectedCredentialIssuer := &configv1alpha1.CredentialIssuer{
						TypeMeta: metav1.TypeMeta{},
						ObjectMeta: metav1.ObjectMeta{
							Name: credentialIssuerResourceName,
						},
						Status: configv1alpha1.CredentialIssuerStatus{
							Strategies: []configv1alpha1.CredentialIssuerStrategy{
								{
									Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
									Status:         configv1alpha1.ErrorStrategyStatus,
									Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
									Message:        keyParseErrorMessage,
									LastUpdateTime: metav1.NewTime(frozenNow),
								},
							},
						},
					}
					expectedGetAction := coretesting.NewRootGetAction(credentialIssuerGVR, credentialIssuerResourceName)
					expectedCreateAction := coretesting.NewRootCreateAction(credentialIssuerGVR, expectedCreateCredentialIssuer)
					expectedUpdateAction := coretesting.NewRootUpdateSubresourceAction(credentialIssuerGVR, "status", expectedCredentialIssuer)
					r.Equal([]coretesting.Action{expectedGetAction, expectedCreateAction, expectedUpdateAction}, pinnipedAPIClient.Actions())
				})
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}
@ -1,130 +1,461 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
-// Package kubecertagent provides controllers that ensure a set of pods (the kube-cert-agent), is
-// colocated with the Kubernetes controller manager so that Pinniped can access its signing keys.
-//
-// Note: the controllers use a filter that accepts all pods that look like the controller manager or
-// an agent pod, across any add/update/delete event. Each of the controllers only care about a
-// subset of these events in reality, but the liberal filter implementation serves as an MVP.
+// Package kubecertagent provides controllers that ensure a pod (the kube-cert-agent), is
+// co-located with the Kubernetes controller manager so that Pinniped can access its signing keys.
 package kubecertagent
 
 import (
-	"encoding/hex"
+	"context"
+	"encoding/base64"
 	"fmt"
-	"hash/fnv"
+	"strings"
+	"time"
 
+	"github.com/go-logr/logr"
+	"github.com/spf13/pflag"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/cache"
 	"k8s.io/apimachinery/pkg/util/clock"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	appsv1informers "k8s.io/client-go/informers/apps/v1"
 	corev1informers "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/klogr"
+	"k8s.io/utils/pointer"
 
 	configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
-	"go.pinniped.dev/internal/plog"
+	pinnipedcontroller "go.pinniped.dev/internal/controller"
+	"go.pinniped.dev/internal/controller/issuerconfig"
+	"go.pinniped.dev/internal/controllerlib"
+	"go.pinniped.dev/internal/dynamiccert"
+	"go.pinniped.dev/internal/kubeclient"
 )
 
 const (
 	// ControllerManagerNamespace is the assumed namespace of the kube-controller-manager pod(s).
 	ControllerManagerNamespace = "kube-system"
 
-	// controllerManagerNameAnnotationKey is used to store an agent pod's parent's name, i.e., the
-	// name of the controller manager pod with which it is supposed to be in sync.
-	controllerManagerNameAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-name"
-	// controllerManagerUIDAnnotationKey is used to store an agent pod's parent's UID, i.e., the UID
-	// of the controller manager pod with which it is supposed to be in sync.
-	controllerManagerUIDAnnotationKey = "kube-cert-agent.pinniped.dev/controller-manager-uid"
-
 	// agentPodLabelKey is used to identify which pods are created by the kube-cert-agent
 	// controllers.
 	agentPodLabelKey = "kube-cert-agent.pinniped.dev"
-	agentPodLabelValue = "true"
+	agentPodLabelValue = "v2"
 
-	// agentPodCertPathAnnotationKey is the annotation that the kube-cert-agent pod will use
-	// to communicate the in-pod path to the kube API's certificate.
-	agentPodCertPathAnnotationKey = "kube-cert-agent.pinniped.dev/cert-path"
-
-	// agentPodKeyPathAnnotationKey is the annotation that the kube-cert-agent pod will use
-	// to communicate the in-pod path to the kube API's key.
-	agentPodKeyPathAnnotationKey = "kube-cert-agent.pinniped.dev/key-path"
+	ClusterInfoNamespace = "kube-public"
+	clusterInfoName = "cluster-info"
+	clusterInfoConfigMapKey = "kubeconfig"
 )
 
-type AgentPodConfig struct {
-	// The namespace in which agent pods will be created.
+// AgentConfig is the configuration for the kube-cert-agent controller.
+type AgentConfig struct {
+	// Namespace in which agent pods will be created.
 	Namespace string
 
-	// The container image used for the agent pods.
+	// ContainerImage specifies the container image used for the agent pods.
 	ContainerImage string
 
-	// The name prefix for each of the agent pods.
-	PodNamePrefix string
+	// NamePrefix will be prefixed to all agent pod names.
+	NamePrefix string
 
 	// ContainerImagePullSecrets is a list of names of Kubernetes Secret objects that will be used as
 	// ImagePullSecrets on the kube-cert-agent pods.
 	ContainerImagePullSecrets []string
 
-	// Additional labels that should be added to every agent pod during creation.
-	AdditionalLabels map[string]string
+	// CredentialIssuerName specifies the CredentialIssuer to be created/updated.
+	CredentialIssuerName string
+
+	// Labels to be applied to the CredentialIssuer and agent pods.
+	Labels map[string]string
+
+	// DiscoveryURLOverride is the Kubernetes server endpoint to report in the CredentialIssuer, overriding any
+	// value discovered in the kube-public/cluster-info ConfigMap.
+	DiscoveryURLOverride *string
 }
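
// An illustrative construction of the new AgentConfig (not part of this
// commit; every value is a placeholder). NamePrefix feeds the deploymentName()
// helper shown below, which trims the trailing "-", so this config would
// manage a Deployment named "pinniped-kube-cert-agent":
//
//	cfg := AgentConfig{
//		Namespace:            "some-namespace",
//		ContainerImage:       "example.com/some-repo/some-image:latest",
//		NamePrefix:           "pinniped-kube-cert-agent-",
//		CredentialIssuerName: "some-credential-issuer",
//		Labels:               map[string]string{"myLabelKey1": "myLabelValue1"},
//	}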
|
|
||||||
-type CredentialIssuerLocationConfig struct {
-	// The resource name for the CredentialIssuer to be created/updated.
-	Name string
-}
-
-func (c *AgentPodConfig) Labels() map[string]string {
-	allLabels := map[string]string{
-		agentPodLabelKey: agentPodLabelValue,
-	}
-	for k, v := range c.AdditionalLabels {
+func (a *AgentConfig) agentLabels() map[string]string {
+	allLabels := map[string]string{agentPodLabelKey: agentPodLabelValue}
+	for k, v := range a.Labels {
 		allLabels[k] = v
 	}
 	return allLabels
 }

-func (c *AgentPodConfig) AgentSelector() labels.Selector {
-	return labels.SelectorFromSet(map[string]string{agentPodLabelKey: agentPodLabelValue})
+func (a *AgentConfig) deploymentName() string {
+	return strings.TrimSuffix(a.NamePrefix, "-")
 }
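The Deployment name is derived from the configured pod name prefix by trimming any trailing hyphen, and the agent label set is the fixed `kube-cert-agent.pinniped.dev: v2` pair merged with the operator-configured extra labels. A minimal standalone sketch of that derivation (the prefix and extra label values below are hypothetical examples, not taken from the diff):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values; the real ones come from the Concierge configuration.
	namePrefix := "pinniped-concierge-kube-cert-agent-"
	extraLabels := map[string]string{"mylabel": "myvalue"}

	// Deployment name: the pod name prefix with any trailing "-" removed.
	fmt.Println(strings.TrimSuffix(namePrefix, "-")) // pinniped-concierge-kube-cert-agent

	// Agent labels: the fixed agent label merged with the configured extras.
	labels := map[string]string{"kube-cert-agent.pinniped.dev": "v2"}
	for k, v := range extraLabels {
		labels[k] = v
	}
	fmt.Println(labels)
}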
-func (c *AgentPodConfig) newAgentPod(controllerManagerPod *corev1.Pod) *corev1.Pod {
-	terminateImmediately := int64(0)
-	rootID := int64(0)
-	f := false
-	falsePtr := &f
-
-	imagePullSecrets := []corev1.LocalObjectReference{}
-	for _, imagePullSecret := range c.ContainerImagePullSecrets {
-		imagePullSecrets = append(
-			imagePullSecrets,
-			corev1.LocalObjectReference{
-				Name: imagePullSecret,
-			},
-		)
-	}
+type agentController struct {
+	cfg                  AgentConfig
+	client               *kubeclient.Client
+	kubeSystemPods       corev1informers.PodInformer
+	agentDeployments     appsv1informers.DeploymentInformer
+	agentPods            corev1informers.PodInformer
+	kubePublicConfigMaps corev1informers.ConfigMapInformer
+	executor             PodCommandExecutor
+	dynamicCertProvider  dynamiccert.Private
+	clock                clock.Clock
+	log                  logr.Logger
+	execCache            *cache.Expiring
+}
+
+var (
+	// controllerManagerLabels are the Kubernetes labels we expect on the kube-controller-manager Pod.
+	controllerManagerLabels = labels.SelectorFromSet(map[string]string{ //nolint: gochecknoglobals
+		"component": "kube-controller-manager",
+	})
+
+	// agentLabels are the Kubernetes labels we always expect on the kube-cert-agent pods.
+	agentLabels = labels.SelectorFromSet(map[string]string{ //nolint: gochecknoglobals
+		agentPodLabelKey: agentPodLabelValue,
+	})
+)
+
+// NewAgentController returns a controller that manages the kube-cert-agent Deployment. It also is tasked with updating
+// the CredentialIssuer with any errors that it encounters.
+func NewAgentController(
+	cfg AgentConfig,
+	client *kubeclient.Client,
+	kubeSystemPods corev1informers.PodInformer,
+	agentDeployments appsv1informers.DeploymentInformer,
+	agentPods corev1informers.PodInformer,
+	kubePublicConfigMaps corev1informers.ConfigMapInformer,
+	dynamicCertProvider dynamiccert.Private,
+) controllerlib.Controller {
+	return newAgentController(
+		cfg,
+		client,
+		kubeSystemPods,
+		agentDeployments,
+		agentPods,
+		kubePublicConfigMaps,
+		NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
+		dynamicCertProvider,
+		&clock.RealClock{},
+		cache.NewExpiring(),
+		klogr.New(),
+	)
+}
+
+func newAgentController(
+	cfg AgentConfig,
+	client *kubeclient.Client,
+	kubeSystemPods corev1informers.PodInformer,
+	agentDeployments appsv1informers.DeploymentInformer,
+	agentPods corev1informers.PodInformer,
+	kubePublicConfigMaps corev1informers.ConfigMapInformer,
+	podCommandExecutor PodCommandExecutor,
+	dynamicCertProvider dynamiccert.Private,
+	clock clock.Clock,
+	execCache *cache.Expiring,
+	log logr.Logger,
+	options ...controllerlib.Option,
+) controllerlib.Controller {
+	return controllerlib.New(
+		controllerlib.Config{
+			Name: "kube-cert-agent-controller",
+			Syncer: &agentController{
+				cfg:                  cfg,
+				client:               client,
+				kubeSystemPods:       kubeSystemPods,
+				agentDeployments:     agentDeployments,
+				agentPods:            agentPods,
+				kubePublicConfigMaps: kubePublicConfigMaps,
+				executor:             podCommandExecutor,
+				dynamicCertProvider:  dynamicCertProvider,
+				clock:                clock,
+				log:                  log.WithName("kube-cert-agent-controller"),
+				execCache:            execCache,
+			},
+		},
+		append([]controllerlib.Option{
+			controllerlib.WithInformer(
+				kubeSystemPods,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return controllerManagerLabels.Matches(labels.Set(obj.GetLabels()))
+				}),
+				controllerlib.InformerOption{},
+			),
+			controllerlib.WithInformer(
+				agentDeployments,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return obj.GetNamespace() == cfg.Namespace && obj.GetName() == cfg.deploymentName()
+				}),
+				controllerlib.InformerOption{},
+			),
+			controllerlib.WithInformer(
+				agentPods,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return agentLabels.Matches(labels.Set(obj.GetLabels()))
+				}),
+				controllerlib.InformerOption{},
+			),
+			controllerlib.WithInformer(
+				kubePublicConfigMaps,
+				pinnipedcontroller.SimpleFilterWithSingletonQueue(func(obj metav1.Object) bool {
+					return obj.GetNamespace() == ClusterInfoNamespace && obj.GetName() == clusterInfoName
+				}),
+				controllerlib.InformerOption{},
+			),
+			// Be sure to run at least once, so the CredentialIssuer is updated even if there are no controller
+			// manager pods. We should be able to pass an empty key since we don't use the key in the sync (we sync
+			// the world).
+			controllerlib.WithInitialEvent(controllerlib.Key{}),
+		}, options...)...,
+	)
+}
-	return &corev1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      fmt.Sprintf("%s%s", c.PodNamePrefix, hash(controllerManagerPod)),
-			Namespace: c.Namespace,
-			Labels:    c.Labels(),
-			Annotations: map[string]string{
-				controllerManagerNameAnnotationKey: controllerManagerPod.Name,
-				controllerManagerUIDAnnotationKey:  string(controllerManagerPod.UID),
-			},
-		},
+// Sync implements controllerlib.Syncer.
+func (c *agentController) Sync(ctx controllerlib.Context) error {
+	// Find the latest healthy kube-controller-manager Pod in kube-system.
+	controllerManagerPods, err := c.kubeSystemPods.Lister().Pods(ControllerManagerNamespace).List(controllerManagerLabels)
+	if err != nil {
+		err := fmt.Errorf("could not list controller manager pods: %w", err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+	newestControllerManager := newestRunningPod(controllerManagerPods)
+
+	// If there are no healthy controller manager pods, we alert the user that we can't find the keypair via
+	// the CredentialIssuer.
+	if newestControllerManager == nil {
+		err := fmt.Errorf("could not find a healthy kube-controller-manager pod (%s)", pluralize(controllerManagerPods))
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	if err := c.createOrUpdateDeployment(ctx, newestControllerManager); err != nil {
+		err := fmt.Errorf("could not ensure agent deployment: %w", err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	// Find the latest healthy agent Pod in our namespace.
+	agentPods, err := c.agentPods.Lister().Pods(c.cfg.Namespace).List(agentLabels)
+	if err != nil {
+		err := fmt.Errorf("could not list agent pods: %w", err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+	newestAgentPod := newestRunningPod(agentPods)
+
+	// If there are no healthy agent pods, we alert the user that we can't find the keypair via
+	// the CredentialIssuer.
+	if newestAgentPod == nil {
+		err := fmt.Errorf("could not find a healthy agent pod (%s)", pluralize(agentPods))
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	// Load the Kubernetes API info from the kube-public/cluster-info ConfigMap.
+	configMap, err := c.kubePublicConfigMaps.Lister().ConfigMaps(ClusterInfoNamespace).Get(clusterInfoName)
+	if err != nil {
+		err := fmt.Errorf("failed to get %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
+	}
+
+	apiInfo, err := c.extractAPIInfo(configMap)
+	if err != nil {
+		err := fmt.Errorf("could not extract Kubernetes API endpoint info from %s/%s configmap: %w", ClusterInfoNamespace, clusterInfoName, err)
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotGetClusterInfoStrategyReason)
+	}
+
+	// Load the certificate and key from the agent pod into our in-memory signer.
+	if err := c.loadSigningKey(newestAgentPod); err != nil {
+		return c.failStrategyAndErr(ctx.Context, err, configv1alpha1.CouldNotFetchKeyStrategyReason)
+	}
+
+	// Set the CredentialIssuer strategy to successful.
+	return issuerconfig.UpdateStrategy(
+		ctx.Context,
+		c.cfg.CredentialIssuerName,
+		c.cfg.Labels,
+		c.client.PinnipedConcierge,
+		configv1alpha1.CredentialIssuerStrategy{
+			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+			Status:         configv1alpha1.SuccessStrategyStatus,
+			Reason:         configv1alpha1.FetchedKeyStrategyReason,
+			Message:        "key was fetched successfully",
+			LastUpdateTime: metav1.NewTime(c.clock.Now()),
+			Frontend: &configv1alpha1.CredentialIssuerFrontend{
+				Type:                          configv1alpha1.TokenCredentialRequestAPIFrontendType,
+				TokenCredentialRequestAPIInfo: apiInfo,
+			},
+		},
+	)
+}
+func (c *agentController) loadSigningKey(agentPod *corev1.Pod) error {
+	// If we remember successfully loading the key from this pod recently, we can skip this step and return immediately.
+	if _, exists := c.execCache.Get(agentPod.UID); exists {
+		return nil
+	}
+
+	// Exec into the agent pod and cat out the certificate and the key.
+	combinedPEM, err := c.executor.Exec(
+		agentPod.Namespace, agentPod.Name,
+		"sh", "-c", "cat ${CERT_PATH}; echo; echo; cat ${KEY_PATH}",
+	)
+	if err != nil {
+		return fmt.Errorf("could not exec into agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
+	}
+
+	// Split up the output by looking for the block of newlines.
+	var certPEM, keyPEM string
+	if parts := strings.Split(combinedPEM, "\n\n\n"); len(parts) == 2 {
+		certPEM, keyPEM = parts[0], parts[1]
+	}
+
+	// Load the certificate and key into the dynamic signer.
+	if err := c.dynamicCertProvider.SetCertKeyContent([]byte(certPEM), []byte(keyPEM)); err != nil {
+		return fmt.Errorf("failed to set signing cert/key content from agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
+	}
+
+	// Remember that we've successfully loaded the key from this pod so we can skip the exec+load if nothing has changed.
+	c.execCache.Set(agentPod.UID, struct{}{}, 15*time.Minute)
+	return nil
+}
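The exec output contains the certificate and the key concatenated with two blank lines between them (the `echo; echo` in the shell command), so the loader splits on a triple newline. A standalone sketch of that split, using placeholder stand-ins for the real PEM blocks:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Placeholder content standing in for the PEM blocks read out of the agent pod.
	combinedPEM := "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n\n\n-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----"

	// "cat $CERT_PATH; echo; echo; cat $KEY_PATH" leaves exactly one "\n\n\n" separator.
	if parts := strings.Split(combinedPEM, "\n\n\n"); len(parts) == 2 {
		fmt.Println("cert:", parts[0])
		fmt.Println("key: ", parts[1])
	}
}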
+func (c *agentController) createOrUpdateDeployment(ctx controllerlib.Context, newestControllerManager *corev1.Pod) error {
+	// Build the expected Deployment based on the kube-controller-manager Pod as a template.
+	expectedDeployment := c.newAgentDeployment(newestControllerManager)
+
+	// Try to get the existing Deployment, if it exists.
+	existingDeployment, err := c.agentDeployments.Lister().Deployments(expectedDeployment.Namespace).Get(expectedDeployment.Name)
+	notFound := k8serrors.IsNotFound(err)
+	if err != nil && !notFound {
+		return fmt.Errorf("could not get deployments: %w", err)
+	}
+
+	log := c.log.WithValues(
+		"deployment", klog.KObj(expectedDeployment),
+		"templatePod", klog.KObj(newestControllerManager),
+	)
+
+	// If the Deployment did not exist, create it and be done.
+	if notFound {
+		log.Info("creating new deployment")
+		_, err := c.client.Kubernetes.AppsV1().Deployments(expectedDeployment.Namespace).Create(ctx.Context, expectedDeployment, metav1.CreateOptions{})
+		return err
+	}
+
+	// Otherwise update the spec of the Deployment to match our desired state.
+	updatedDeployment := existingDeployment.DeepCopy()
+	updatedDeployment.Spec = expectedDeployment.Spec
+	updatedDeployment.ObjectMeta = mergeLabelsAndAnnotations(updatedDeployment.ObjectMeta, expectedDeployment.ObjectMeta)
+
+	// If the existing Deployment already matches our desired spec, we're done.
+	if apiequality.Semantic.DeepDerivative(updatedDeployment, existingDeployment) {
+		return nil
+	}
+
+	log.Info("updating existing deployment")
+	_, err = c.client.Kubernetes.AppsV1().Deployments(updatedDeployment.Namespace).Update(ctx.Context, updatedDeployment, metav1.UpdateOptions{})
+	return err
+}
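Note the use of `apiequality.Semantic.DeepDerivative` above: unlike `DeepEqual`, it treats zero-valued fields in the first argument as matching anything in the second, which avoids spurious updates when the API server has defaulted fields the controller never set. A rough standalone illustration of the difference (assuming the k8s.io/apimachinery and k8s.io/api modules):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

func main() {
	desired := corev1.Container{Name: "sleeper"} // ImagePullPolicy left unset
	actual := corev1.Container{Name: "sleeper", ImagePullPolicy: corev1.PullIfNotPresent}

	// DeepDerivative ignores the fields that "desired" leaves at their zero value.
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, actual)) // true
	fmt.Println(apiequality.Semantic.DeepEqual(desired, actual))      // false
}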
+func (c *agentController) failStrategyAndErr(ctx context.Context, err error, reason configv1alpha1.StrategyReason) error {
+	return utilerrors.NewAggregate([]error{err, issuerconfig.UpdateStrategy(
+		ctx,
+		c.cfg.CredentialIssuerName,
+		c.cfg.Labels,
+		c.client.PinnipedConcierge,
+		configv1alpha1.CredentialIssuerStrategy{
+			Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
+			Status:         configv1alpha1.ErrorStrategyStatus,
+			Reason:         reason,
+			Message:        err.Error(),
+			LastUpdateTime: metav1.NewTime(c.clock.Now()),
+		},
+	)})
+}
+
+func (c *agentController) extractAPIInfo(configMap *corev1.ConfigMap) (*configv1alpha1.TokenCredentialRequestAPIInfo, error) {
+	kubeConfigYAML, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
+	if !kubeConfigPresent {
+		return nil, fmt.Errorf("missing %q key", clusterInfoConfigMapKey)
+	}
+
+	kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
+	if err != nil {
+		// We purposefully don't wrap "err" here because it's very verbose.
+		return nil, fmt.Errorf("key %q does not contain a valid kubeconfig", clusterInfoConfigMapKey)
+	}
+
+	for _, v := range kubeconfig.Clusters {
+		result := &configv1alpha1.TokenCredentialRequestAPIInfo{
+			Server:                   v.Server,
+			CertificateAuthorityData: base64.StdEncoding.EncodeToString(v.CertificateAuthorityData),
+		}
+		if c.cfg.DiscoveryURLOverride != nil {
+			result.Server = *c.cfg.DiscoveryURLOverride
+		}
+		return result, nil
+	}
+	return nil, fmt.Errorf("kubeconfig in key %q does not contain any clusters", clusterInfoConfigMapKey)
+}
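The cluster-info ConfigMap stores a kubeconfig under its `kubeconfig` key; `clientcmd.Load` parses it, and the first (typically only) cluster entry supplies the server URL and CA bundle. A standalone sketch with an inline kubeconfig (the server address is a placeholder):

package main

import (
	"encoding/base64"
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// A minimal kubeconfig like the one published in kube-public/cluster-info.
	kubeConfigYAML := `
apiVersion: v1
kind: Config
clusters:
- name: ""
  cluster:
    server: https://203.0.113.10:6443
`
	kubeconfig, err := clientcmd.Load([]byte(kubeConfigYAML))
	if err != nil {
		panic(err)
	}
	for _, cluster := range kubeconfig.Clusters {
		fmt.Println("server:", cluster.Server)
		fmt.Println("ca:", base64.StdEncoding.EncodeToString(cluster.CertificateAuthorityData))
	}
}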
+// newestRunningPod takes a list of pods and returns the newest one with status.phase == "Running".
+func newestRunningPod(pods []*corev1.Pod) *corev1.Pod {
+	// Compare two pods based on creation timestamp, breaking ties by name.
+	newer := func(a, b *corev1.Pod) bool {
+		if a.CreationTimestamp.Time.Equal(b.CreationTimestamp.Time) {
+			return a.Name < b.Name
+		}
+		return a.CreationTimestamp.After(b.CreationTimestamp.Time)
+	}
+
+	var result *corev1.Pod
+	for _, pod := range pods {
+		if pod.Status.Phase == corev1.PodRunning && (result == nil || newer(pod, result)) {
+			result = pod
+		}
+	}
+	return result
+}
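A test-style sketch exercising the selection rule (newest Running pod wins; non-Running pods are skipped); this would live alongside the helper in the kubecertagent package, and the pod names are made up for illustration:

package kubecertagent

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestNewestRunningPodSketch(t *testing.T) {
	pod := func(name string, age time.Duration, phase corev1.PodPhase) *corev1.Pod {
		return &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: name, CreationTimestamp: metav1.NewTime(time.Now().Add(-age))},
			Status:     corev1.PodStatus{Phase: phase},
		}
	}
	pods := []*corev1.Pod{
		pod("newest-but-pending", time.Minute, corev1.PodPending), // ignored: not Running
		pod("older-running", time.Hour, corev1.PodRunning),
		pod("newer-running", 10*time.Minute, corev1.PodRunning), // selected
	}
	require.Equal(t, "newer-running", newestRunningPod(pods).Name)
}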
+func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *appsv1.Deployment {
+	var volumeMounts []corev1.VolumeMount
+	if len(controllerManagerPod.Spec.Containers) > 0 {
+		volumeMounts = controllerManagerPod.Spec.Containers[0].VolumeMounts
+	}
+
+	var imagePullSecrets []corev1.LocalObjectReference
+	if len(c.cfg.ContainerImagePullSecrets) > 0 {
+		imagePullSecrets = make([]corev1.LocalObjectReference, 0, len(c.cfg.ContainerImagePullSecrets))
+		for _, name := range c.cfg.ContainerImagePullSecrets {
+			imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{Name: name})
+		}
+	}
+
+	return &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      c.cfg.deploymentName(),
+			Namespace: c.cfg.Namespace,
+			Labels:    c.cfg.Labels,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: pointer.Int32Ptr(1),
+			Selector: metav1.SetAsLabelSelector(c.cfg.agentLabels()),
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: c.cfg.agentLabels(),
+				},
 				Spec: corev1.PodSpec{
-					TerminationGracePeriodSeconds: &terminateImmediately,
+					TerminationGracePeriodSeconds: pointer.Int64Ptr(0),
 					ImagePullSecrets:              imagePullSecrets,
 					Containers: []corev1.Container{
 						{
 							Name:            "sleeper",
-							Image:           c.ContainerImage,
+							Image:           c.cfg.ContainerImage,
 							ImagePullPolicy: corev1.PullIfNotPresent,
 							Command:         []string{"/bin/sleep", "infinity"},
-							VolumeMounts:    controllerManagerPod.Spec.Containers[0].VolumeMounts,
+							VolumeMounts:    volumeMounts,
+							Env: []corev1.EnvVar{
+								{Name: "CERT_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-cert-file", "/etc/kubernetes/ca/ca.pem")},
+								{Name: "KEY_PATH", Value: getContainerArgByName(controllerManagerPod, "cluster-signing-key-file", "/etc/kubernetes/ca/ca.key")},
+							},
 							Resources: corev1.ResourceRequirements{
 								Limits: corev1.ResourceList{
 									corev1.ResourceMemory: resource.MustParse("16Mi"),
@@ -138,159 +469,60 @@ func (c *AgentPodConfig) newAgentPod(controllerManagerPod *corev1.Pod) *corev1.P
 								},
 							},
 						},
 					},
 					Volumes:       controllerManagerPod.Spec.Volumes,
-					RestartPolicy: corev1.RestartPolicyNever,
+					RestartPolicy: corev1.RestartPolicyAlways,
 					NodeSelector:  controllerManagerPod.Spec.NodeSelector,
-					AutomountServiceAccountToken: falsePtr,
+					AutomountServiceAccountToken: pointer.BoolPtr(false),
 					NodeName:    controllerManagerPod.Spec.NodeName,
 					Tolerations: controllerManagerPod.Spec.Tolerations,
 					// We need to run the agent pod as root since the file permissions
 					// on the cluster keypair usually restricts access to only root.
 					SecurityContext: &corev1.PodSecurityContext{
-						RunAsUser:  &rootID,
-						RunAsGroup: &rootID,
+						RunAsUser:  pointer.Int64Ptr(0),
+						RunAsGroup: pointer.Int64Ptr(0),
 					},
 				},
+			},
+
+			// Setting MinReadySeconds prevents the agent pods from being churned too quickly by the deployments controller.
+			MinReadySeconds: 10,
+		},
 	}
 }
-func isAgentPodUpToDate(actualAgentPod, expectedAgentPod *corev1.Pod) bool {
-	requiredLabelsAllPresentWithCorrectValues := true
-	actualLabels := actualAgentPod.ObjectMeta.Labels
-	for expectedLabelKey, expectedLabelValue := range expectedAgentPod.ObjectMeta.Labels {
-		if actualLabels[expectedLabelKey] != expectedLabelValue {
-			requiredLabelsAllPresentWithCorrectValues = false
-			break
-		}
-	}
-
-	if actualAgentPod.Spec.SecurityContext == nil {
-		return false
-	}
-
-	return requiredLabelsAllPresentWithCorrectValues &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].VolumeMounts,
-			expectedAgentPod.Spec.Containers[0].VolumeMounts,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].Name,
-			expectedAgentPod.Spec.Containers[0].Name,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].Image,
-			expectedAgentPod.Spec.Containers[0].Image,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Containers[0].Command,
-			expectedAgentPod.Spec.Containers[0].Command,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Volumes,
-			expectedAgentPod.Spec.Volumes,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.RestartPolicy,
-			expectedAgentPod.Spec.RestartPolicy,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.NodeSelector,
-			expectedAgentPod.Spec.NodeSelector,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.AutomountServiceAccountToken,
-			expectedAgentPod.Spec.AutomountServiceAccountToken,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.NodeName,
-			expectedAgentPod.Spec.NodeName,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.Tolerations,
-			expectedAgentPod.Spec.Tolerations,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.SecurityContext.RunAsUser,
-			expectedAgentPod.Spec.SecurityContext.RunAsUser,
-		) &&
-		equality.Semantic.DeepEqual(
-			actualAgentPod.Spec.SecurityContext.RunAsGroup,
-			expectedAgentPod.Spec.SecurityContext.RunAsGroup,
-		)
-}
+func mergeLabelsAndAnnotations(existing metav1.ObjectMeta, desired metav1.ObjectMeta) metav1.ObjectMeta {
+	result := existing.DeepCopy()
+	for k, v := range desired.Labels {
+		if result.Labels == nil {
+			result.Labels = map[string]string{}
+		}
+		result.Labels[k] = v
+	}
+	for k, v := range desired.Annotations {
+		if result.Annotations == nil {
+			result.Annotations = map[string]string{}
+		}
+		result.Annotations[k] = v
+	}
+	return *result
+}
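The merge keeps any labels or annotations that other actors have added to the live object while forcing the controller's desired ones on top. A test-style sketch of that behavior (placed in the kubecertagent package; the label keys are illustrative):

package kubecertagent

import (
	"testing"

	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestMergeLabelsAndAnnotationsSketch(t *testing.T) {
	existing := metav1.ObjectMeta{Labels: map[string]string{"added-by-someone-else": "kept"}}
	desired := metav1.ObjectMeta{Labels: map[string]string{"mylabel": "myvalue"}}

	result := mergeLabelsAndAnnotations(existing, desired)
	require.Equal(t, "kept", result.Labels["added-by-someone-else"]) // preserved
	require.Equal(t, "myvalue", result.Labels["mylabel"])            // enforced
}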
-func isControllerManagerPod(obj metav1.Object) bool {
-	pod, ok := obj.(*corev1.Pod)
-	if !ok {
-		return false
-	}
-
-	if pod.Labels == nil {
-		return false
-	}
-
-	component, ok := pod.Labels["component"]
-	if !ok || component != "kube-controller-manager" {
-		return false
-	}
-
-	if pod.Status.Phase != corev1.PodRunning {
-		return false
-	}
-
-	return true
-}
-
-func isAgentPod(obj metav1.Object) bool {
-	value, foundLabel := obj.GetLabels()[agentPodLabelKey]
-	return foundLabel && value == agentPodLabelValue
-}
-
-func findControllerManagerPodForSpecificAgentPod(
-	agentPod *corev1.Pod,
-	kubeSystemPodInformer corev1informers.PodInformer,
-) (*corev1.Pod, error) {
-	name, ok := agentPod.Annotations[controllerManagerNameAnnotationKey]
-	if !ok {
-		plog.Debug("agent pod missing parent name annotation", "pod", agentPod.Name)
-		return nil, nil
-	}
-
-	uid, ok := agentPod.Annotations[controllerManagerUIDAnnotationKey]
-	if !ok {
-		plog.Debug("agent pod missing parent uid annotation", "pod", agentPod.Name)
-		return nil, nil
-	}
-
-	maybeControllerManagerPod, err := kubeSystemPodInformer.
-		Lister().
-		Pods(ControllerManagerNamespace).
-		Get(name)
-	notFound := k8serrors.IsNotFound(err)
-	if err != nil && !notFound {
-		return nil, fmt.Errorf("cannot get controller pod: %w", err)
-	} else if notFound ||
-		maybeControllerManagerPod == nil ||
-		string(maybeControllerManagerPod.UID) != uid {
-		return nil, nil
-	}
-
-	return maybeControllerManagerPod, nil
-}
-
-func strategyError(clock clock.Clock, err error) configv1alpha1.CredentialIssuerStrategy {
-	return configv1alpha1.CredentialIssuerStrategy{
-		Type:           configv1alpha1.KubeClusterSigningCertificateStrategyType,
-		Status:         configv1alpha1.ErrorStrategyStatus,
-		Reason:         configv1alpha1.CouldNotFetchKeyStrategyReason,
-		Message:        err.Error(),
-		LastUpdateTime: metav1.NewTime(clock.Now()),
-	}
-}
-
-func hash(controllerManagerPod *corev1.Pod) string {
-	// FNV should be faster than SHA, and we don't care about hash-reversibility here, and Kubernetes
-	// uses FNV for their pod templates, so should be good enough for us?
-	h := fnv.New32a()
-	_, _ = h.Write([]byte(controllerManagerPod.UID)) // Never returns an error, per godoc.
-	return hex.EncodeToString(h.Sum([]byte{}))
-}
+func getContainerArgByName(pod *corev1.Pod, name, fallbackValue string) string {
+	for _, container := range pod.Spec.Containers {
+		flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
+		flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
+		var val string
+		flagset.StringVar(&val, name, "", "")
+		_ = flagset.Parse(append(container.Command, container.Args...))
+		if val != "" {
+			return val
+		}
+	}
+	return fallbackValue
+}
+
+func pluralize(pods []*corev1.Pod) string {
+	if len(pods) == 1 {
+		return "1 candidate"
+	}
+	return fmt.Sprintf("%d candidates", len(pods))
+}
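The CERT_PATH/KEY_PATH values come from re-parsing the kube-controller-manager's own command line with pflag, tolerating all the flags the helper does not care about. A standalone sketch of that trick (the command line below is a pared-down, hypothetical stand-in for a real kube-controller-manager invocation):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	args := []string{
		"kube-controller-manager",
		"--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt",
		"--some-unrelated-flag=whatever",
	}

	// Parse only the flag we want, silently skipping everything else.
	flagset := pflag.NewFlagSet("", pflag.ContinueOnError)
	flagset.ParseErrorsWhitelist = pflag.ParseErrorsWhitelist{UnknownFlags: true}
	var val string
	flagset.StringVar(&val, "cluster-signing-cert-file", "", "")
	_ = flagset.Parse(args)
	fmt.Println(val) // /etc/kubernetes/pki/ca.crt
}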
(File diff suppressed because it is too large.)

internal/controller/kubecertagent/legacypodcleaner.go (new file, 63 lines)
@@ -0,0 +1,63 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package kubecertagent
+
+import (
+	"fmt"
+
+	"github.com/go-logr/logr"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	"k8s.io/klog/v2"
+
+	pinnipedcontroller "go.pinniped.dev/internal/controller"
+	"go.pinniped.dev/internal/controllerlib"
+	"go.pinniped.dev/internal/kubeclient"
+)
+
+// NewLegacyPodCleanerController returns a controller that cleans up legacy kube-cert-agent Pods created by Pinniped v0.7.0 and below.
+func NewLegacyPodCleanerController(
+	cfg AgentConfig,
+	client *kubeclient.Client,
+	agentPods corev1informers.PodInformer,
+	log logr.Logger,
+	options ...controllerlib.Option,
+) controllerlib.Controller {
+	// legacyAgentLabels are the Kubernetes labels we previously added to agent pods (the new value is "v2").
+	// We also expect these pods to have the "extra" labels configured on the Concierge.
+	legacyAgentLabels := map[string]string{"kube-cert-agent.pinniped.dev": "true"}
+	for k, v := range cfg.Labels {
+		legacyAgentLabels[k] = v
+	}
+	legacyAgentSelector := labels.SelectorFromSet(legacyAgentLabels)
+
+	log = log.WithName("legacy-pod-cleaner-controller")
+
+	return controllerlib.New(
+		controllerlib.Config{
+			Name: "legacy-pod-cleaner-controller",
+			Syncer: controllerlib.SyncFunc(func(ctx controllerlib.Context) error {
+				if err := client.Kubernetes.CoreV1().Pods(ctx.Key.Namespace).Delete(ctx.Context, ctx.Key.Name, metav1.DeleteOptions{}); err != nil {
+					if k8serrors.IsNotFound(err) {
+						return nil
+					}
+					return fmt.Errorf("could not delete legacy agent pod: %w", err)
+				}
+				log.Info("deleted legacy kube-cert-agent pod", "pod", klog.KRef(ctx.Key.Namespace, ctx.Key.Name))
+				return nil
+			}),
+		},
+		append([]controllerlib.Option{
+			controllerlib.WithInformer(
+				agentPods,
+				pinnipedcontroller.SimpleFilter(func(obj metav1.Object) bool {
+					return obj.GetNamespace() == cfg.Namespace && legacyAgentSelector.Matches(labels.Set(obj.GetLabels()))
+				}, nil),
+				controllerlib.InformerOption{},
+			),
+		}, options...)...,
+	)
+}
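The cleaner matches pods by the legacy label value plus the operator-configured extra labels, so v2 agent pods (and pods missing the extras) are left alone. A standalone sketch of that selector logic (the extra label is a made-up example):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	legacySelector := labels.SelectorFromSet(labels.Set{
		"kube-cert-agent.pinniped.dev": "true", // legacy value; new pods use "v2"
		"extralabel":                   "labelvalue",
	})

	legacyPod := labels.Set{"kube-cert-agent.pinniped.dev": "true", "extralabel": "labelvalue"}
	v2Pod := labels.Set{"kube-cert-agent.pinniped.dev": "v2", "extralabel": "labelvalue"}

	fmt.Println(legacySelector.Matches(legacyPod)) // true: will be deleted
	fmt.Println(legacySelector.Matches(v2Pod))     // false: left alone
}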
internal/controller/kubecertagent/legacypodcleaner_test.go (new file, 145 lines)
@@ -0,0 +1,145 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package kubecertagent
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
+	kubefake "k8s.io/client-go/kubernetes/fake"
+	coretesting "k8s.io/client-go/testing"
+
+	"go.pinniped.dev/internal/controllerlib"
+	"go.pinniped.dev/internal/kubeclient"
+	"go.pinniped.dev/internal/testutil/testlogger"
+)
+
+func TestLegacyPodCleanerController(t *testing.T) {
+	t.Parallel()
+
+	legacyAgentPodWithoutExtraLabel := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "concierge",
+			Name:      "pinniped-concierge-kube-cert-agent-without-extra-label",
+			Labels:    map[string]string{"kube-cert-agent.pinniped.dev": "true"},
+		},
+		Spec:   corev1.PodSpec{},
+		Status: corev1.PodStatus{Phase: corev1.PodRunning},
+	}
+
+	legacyAgentPodWithExtraLabel := legacyAgentPodWithoutExtraLabel.DeepCopy()
+	legacyAgentPodWithExtraLabel.Name = "pinniped-concierge-kube-cert-agent-with-extra-label"
+	legacyAgentPodWithExtraLabel.Labels["extralabel"] = "labelvalue"
+	legacyAgentPodWithExtraLabel.Labels["anotherextralabel"] = "labelvalue"
+
+	nonLegacyAgentPod := legacyAgentPodWithExtraLabel.DeepCopy()
+	nonLegacyAgentPod.Name = "pinniped-concierge-kube-cert-agent-not-legacy"
+	nonLegacyAgentPod.Labels["kube-cert-agent.pinniped.dev"] = "v2"
+
+	tests := []struct {
+		name               string
+		kubeObjects        []runtime.Object
+		addKubeReactions   func(*kubefake.Clientset)
+		wantDistinctErrors []string
+		wantDistinctLogs   []string
+		wantActions        []coretesting.Action
+	}{
+		{
+			name:        "no pods",
+			wantActions: []coretesting.Action{},
+		},
+		{
+			name: "mix of pods",
+			kubeObjects: []runtime.Object{
+				legacyAgentPodWithoutExtraLabel, // should not be deleted (missing extra label)
+				legacyAgentPodWithExtraLabel,    // should be deleted
+				nonLegacyAgentPod,               // should not be deleted (missing legacy agent label)
+			},
+			wantDistinctErrors: []string{""},
+			wantDistinctLogs: []string{
+				`legacy-pod-cleaner-controller "level"=0 "msg"="deleted legacy kube-cert-agent pod" "pod"={"name":"pinniped-concierge-kube-cert-agent-with-extra-label","namespace":"concierge"}`,
+			},
+			wantActions: []coretesting.Action{ // the first delete triggers the informer again, but the second invocation triggers a Not Found
+				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+			},
+		},
+		{
+			name: "fail to delete",
+			kubeObjects: []runtime.Object{
+				legacyAgentPodWithoutExtraLabel, // should not be deleted (missing extra label)
+				legacyAgentPodWithExtraLabel,    // should be deleted
+				nonLegacyAgentPod,               // should not be deleted (missing legacy agent label)
+			},
+			addKubeReactions: func(clientset *kubefake.Clientset) {
+				clientset.PrependReactor("delete", "*", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, fmt.Errorf("some delete error")
+				})
+			},
+			wantDistinctErrors: []string{
+				"could not delete legacy agent pod: some delete error",
+			},
+			wantActions: []coretesting.Action{
+				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+			},
+		},
+		{
+			name: "fail to delete because of not found error",
+			kubeObjects: []runtime.Object{
+				legacyAgentPodWithoutExtraLabel, // should not be deleted (missing extra label)
+				legacyAgentPodWithExtraLabel,    // should be deleted
+				nonLegacyAgentPod,               // should not be deleted (missing legacy agent label)
+			},
+			addKubeReactions: func(clientset *kubefake.Clientset) {
+				clientset.PrependReactor("delete", "*", func(action coretesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, k8serrors.NewNotFound(action.GetResource().GroupResource(), "")
+				})
+			},
+			wantDistinctErrors: []string{""},
+			wantActions: []coretesting.Action{
+				coretesting.NewDeleteAction(corev1.Resource("pods").WithVersion("v1"), "concierge", legacyAgentPodWithExtraLabel.Name),
+			},
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			kubeClientset := kubefake.NewSimpleClientset(tt.kubeObjects...)
+			if tt.addKubeReactions != nil {
+				tt.addKubeReactions(kubeClientset)
+			}
+			kubeInformers := informers.NewSharedInformerFactory(kubeClientset, 0)
+			log := testlogger.New(t)
+			controller := NewLegacyPodCleanerController(
+				AgentConfig{
+					Namespace: "concierge",
+					Labels:    map[string]string{"extralabel": "labelvalue"},
+				},
+				&kubeclient.Client{Kubernetes: kubeClientset},
+				kubeInformers.Core().V1().Pods(),
+				log,
+				controllerlib.WithMaxRetries(1),
+			)
+
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			defer cancel()
+
+			errorMessages := runControllerUntilQuiet(ctx, t, controller, kubeInformers)
+			assert.Equal(t, tt.wantDistinctErrors, deduplicate(errorMessages), "unexpected errors")
+			assert.Equal(t, tt.wantDistinctLogs, deduplicate(log.Lines()), "unexpected logs")
+			assert.Equal(t, tt.wantActions, kubeClientset.Actions()[2:], "unexpected actions")
+		})
+	}
+}
|
7
internal/controller/kubecertagent/mocks/generate.go
Normal file
7
internal/controller/kubecertagent/mocks/generate.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package mocks
|
||||||
|
|
||||||
|
//go:generate go run -v github.com/golang/mock/mockgen -destination=mockpodcommandexecutor.go -package=mocks -copyright_file=../../../../hack/header.txt go.pinniped.dev/internal/controller/kubecertagent PodCommandExecutor
|
||||||
|
//go:generate go run -v github.com/golang/mock/mockgen -destination=mockdynamiccert.go -package=mocks -copyright_file=../../../../hack/header.txt -mock_names Private=MockDynamicCertPrivate go.pinniped.dev/internal/dynamiccert Private
|
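To regenerate these mocks after changing the PodCommandExecutor or dynamiccert.Private interfaces, run `go generate` against this package (for example, `go generate ./internal/controller/kubecertagent/mocks`); the directives above invoke mockgen with the repository's standard copyright header.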
internal/controller/kubecertagent/mocks/mockdynamiccert.go (new file, 132 lines)
@@ -0,0 +1,132 @@
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: go.pinniped.dev/internal/dynamiccert (interfaces: Private)
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	reflect "reflect"
+
+	gomock "github.com/golang/mock/gomock"
+	dynamiccertificates "k8s.io/apiserver/pkg/server/dynamiccertificates"
+)
+
+// MockDynamicCertPrivate is a mock of Private interface.
+type MockDynamicCertPrivate struct {
+	ctrl     *gomock.Controller
+	recorder *MockDynamicCertPrivateMockRecorder
+}
+
+// MockDynamicCertPrivateMockRecorder is the mock recorder for MockDynamicCertPrivate.
+type MockDynamicCertPrivateMockRecorder struct {
+	mock *MockDynamicCertPrivate
+}
+
+// NewMockDynamicCertPrivate creates a new mock instance.
+func NewMockDynamicCertPrivate(ctrl *gomock.Controller) *MockDynamicCertPrivate {
+	mock := &MockDynamicCertPrivate{ctrl: ctrl}
+	mock.recorder = &MockDynamicCertPrivateMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockDynamicCertPrivate) EXPECT() *MockDynamicCertPrivateMockRecorder {
+	return m.recorder
+}
+
+// AddListener mocks base method.
+func (m *MockDynamicCertPrivate) AddListener(arg0 dynamiccertificates.Listener) {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "AddListener", arg0)
+}
+
+// AddListener indicates an expected call of AddListener.
+func (mr *MockDynamicCertPrivateMockRecorder) AddListener(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddListener", reflect.TypeOf((*MockDynamicCertPrivate)(nil).AddListener), arg0)
+}
+
+// CurrentCertKeyContent mocks base method.
+func (m *MockDynamicCertPrivate) CurrentCertKeyContent() ([]byte, []byte) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CurrentCertKeyContent")
+	ret0, _ := ret[0].([]byte)
+	ret1, _ := ret[1].([]byte)
+	return ret0, ret1
+}
+
+// CurrentCertKeyContent indicates an expected call of CurrentCertKeyContent.
+func (mr *MockDynamicCertPrivateMockRecorder) CurrentCertKeyContent() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentCertKeyContent", reflect.TypeOf((*MockDynamicCertPrivate)(nil).CurrentCertKeyContent))
+}
+
+// Name mocks base method.
+func (m *MockDynamicCertPrivate) Name() string {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Name")
+	ret0, _ := ret[0].(string)
+	return ret0
+}
+
+// Name indicates an expected call of Name.
+func (mr *MockDynamicCertPrivateMockRecorder) Name() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockDynamicCertPrivate)(nil).Name))
+}
+
+// Run mocks base method.
+func (m *MockDynamicCertPrivate) Run(arg0 int, arg1 <-chan struct{}) {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "Run", arg0, arg1)
+}
+
+// Run indicates an expected call of Run.
+func (mr *MockDynamicCertPrivateMockRecorder) Run(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockDynamicCertPrivate)(nil).Run), arg0, arg1)
+}
+
+// RunOnce mocks base method.
+func (m *MockDynamicCertPrivate) RunOnce() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RunOnce")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// RunOnce indicates an expected call of RunOnce.
+func (mr *MockDynamicCertPrivateMockRecorder) RunOnce() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunOnce", reflect.TypeOf((*MockDynamicCertPrivate)(nil).RunOnce))
+}
+
+// SetCertKeyContent mocks base method.
+func (m *MockDynamicCertPrivate) SetCertKeyContent(arg0, arg1 []byte) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SetCertKeyContent", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// SetCertKeyContent indicates an expected call of SetCertKeyContent.
+func (mr *MockDynamicCertPrivateMockRecorder) SetCertKeyContent(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCertKeyContent", reflect.TypeOf((*MockDynamicCertPrivate)(nil).SetCertKeyContent), arg0, arg1)
+}
+
+// UnsetCertKeyContent mocks base method.
+func (m *MockDynamicCertPrivate) UnsetCertKeyContent() {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "UnsetCertKeyContent")
+}
+
+// UnsetCertKeyContent indicates an expected call of UnsetCertKeyContent.
+func (mr *MockDynamicCertPrivateMockRecorder) UnsetCertKeyContent() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnsetCertKeyContent", reflect.TypeOf((*MockDynamicCertPrivate)(nil).UnsetCertKeyContent))
+}
internal/controller/kubecertagent/mocks/mockpodcommandexecutor.go (new file, 58 lines)
@@ -0,0 +1,58 @@
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+// Code generated by MockGen. DO NOT EDIT.
+// Source: go.pinniped.dev/internal/controller/kubecertagent (interfaces: PodCommandExecutor)
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+	reflect "reflect"
+
+	gomock "github.com/golang/mock/gomock"
+)
+
+// MockPodCommandExecutor is a mock of PodCommandExecutor interface.
+type MockPodCommandExecutor struct {
+	ctrl     *gomock.Controller
+	recorder *MockPodCommandExecutorMockRecorder
+}
+
+// MockPodCommandExecutorMockRecorder is the mock recorder for MockPodCommandExecutor.
+type MockPodCommandExecutorMockRecorder struct {
+	mock *MockPodCommandExecutor
+}
+
+// NewMockPodCommandExecutor creates a new mock instance.
+func NewMockPodCommandExecutor(ctrl *gomock.Controller) *MockPodCommandExecutor {
+	mock := &MockPodCommandExecutor{ctrl: ctrl}
+	mock.recorder = &MockPodCommandExecutorMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockPodCommandExecutor) EXPECT() *MockPodCommandExecutorMockRecorder {
+	return m.recorder
+}
+
+// Exec mocks base method.
+func (m *MockPodCommandExecutor) Exec(arg0, arg1 string, arg2 ...string) (string, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{arg0, arg1}
+	for _, a := range arg2 {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "Exec", varargs...)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Exec indicates an expected call of Exec.
+func (mr *MockPodCommandExecutorMockRecorder) Exec(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{arg0, arg1}, arg2...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockPodCommandExecutor)(nil).Exec), varargs...)
+}
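A test-style sketch (assuming the gomock library) of how this generated mock might be wired up to stub the exec call without a real cluster; the namespace, pod name, and return value below are hypothetical:

package kubecertagent_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"go.pinniped.dev/internal/controller/kubecertagent/mocks"
)

func TestMockPodCommandExecutorSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	executor := mocks.NewMockPodCommandExecutor(ctrl)
	executor.EXPECT().
		Exec("concierge", "some-agent-pod", "sh", "-c", gomock.Any()).
		Return("fake-cert\n\n\nfake-key", nil)

	out, err := executor.Exec("concierge", "some-agent-pod", "sh", "-c", "cat ...")
	require.NoError(t, err)
	require.Equal(t, "fake-cert\n\n\nfake-key", out)
}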
@@ -30,6 +30,7 @@ func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.
 }

 func (s *kubeClientPodCommandExecutor) Exec(podNamespace string, podName string, commandAndArgs ...string) (string, error) {
+	// TODO: see if we can add a timeout or make this cancelable somehow
 	request := s.kubeClient.
 		CoreV1().
 		RESTClient().
@@ -119,16 +119,14 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 	// Create informers. Don't forget to make sure they get started in the function returned below.
 	informers := createInformers(c.ServerInstallationInfo.Namespace, client.Kubernetes, client.PinnipedConcierge)

-	agentPodConfig := &kubecertagent.AgentPodConfig{
+	// Configuration for the kubecertagent controllers created below.
+	agentConfig := kubecertagent.AgentConfig{
 		Namespace:                 c.ServerInstallationInfo.Namespace,
 		ContainerImage:            *c.KubeCertAgentConfig.Image,
-		PodNamePrefix:             *c.KubeCertAgentConfig.NamePrefix,
+		NamePrefix:                *c.KubeCertAgentConfig.NamePrefix,
 		ContainerImagePullSecrets: c.KubeCertAgentConfig.ImagePullSecrets,
-		AdditionalLabels:          c.Labels,
-	}
-	credentialIssuerLocationConfig := &kubecertagent.CredentialIssuerLocationConfig{
-		Name: c.NamesConfig.CredentialIssuer,
+		Labels:                    c.Labels,
+		CredentialIssuerName:      c.NamesConfig.CredentialIssuer,
+		DiscoveryURLOverride:      c.DiscoveryURLOverride,
 	}

 	// Create controller manager.
@@ -195,64 +193,31 @@ func PrepareControllers(c *Config) (func(ctx context.Context), error) {
 			),
 			singletonWorker,
 		).
-		// Kube cert agent controllers are responsible for finding the cluster's signing keys and keeping them
+		// The kube-cert-agent controller is responsible for finding the cluster's signing keys and keeping them
 		// up to date in memory, as well as reporting status on this cluster integration strategy.
 		WithController(
-			kubecertagent.NewCreaterController(
-				agentPodConfig,
-				credentialIssuerLocationConfig,
-				c.Labels,
-				clock.RealClock{},
-				client.Kubernetes,
-				client.PinnipedConcierge,
-				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
-				informers.installationNamespaceK8s.Core().V1().Pods(),
-				controllerlib.WithInformer,
-				controllerlib.WithInitialEvent,
-			),
-			singletonWorker,
-		).
-		WithController(
-			kubecertagent.NewAnnotaterController(
-				agentPodConfig,
-				credentialIssuerLocationConfig,
-				c.Labels,
-				clock.RealClock{},
-				client.Kubernetes,
-				client.PinnipedConcierge,
-				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
-				informers.installationNamespaceK8s.Core().V1().Pods(),
-				controllerlib.WithInformer,
-			),
-			singletonWorker,
-		).
-		WithController(
-			kubecertagent.NewExecerController(
-				credentialIssuerLocationConfig,
-				c.Labels,
-				c.DiscoveryURLOverride,
-				c.DynamicSigningCertProvider,
-				kubecertagent.NewPodCommandExecutor(client.JSONConfig, client.Kubernetes),
-				client.PinnipedConcierge,
-				clock.RealClock{},
+			kubecertagent.NewAgentController(
+				agentConfig,
+				client,
+				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
+				informers.installationNamespaceK8s.Apps().V1().Deployments(),
 				informers.installationNamespaceK8s.Core().V1().Pods(),
 				informers.kubePublicNamespaceK8s.Core().V1().ConfigMaps(),
-				controllerlib.WithInformer,
+				c.DynamicSigningCertProvider,
 			),
 			singletonWorker,
 		).
+		// The kube-cert-agent legacy pod cleaner controller is responsible for cleaning up pods that were deployed by
+		// versions of Pinniped prior to v0.7.0. If we stop supporting upgrades from v0.7.0, we can safely remove this.
 		WithController(
-			kubecertagent.NewDeleterController(
-				agentPodConfig,
-				client.Kubernetes,
-				informers.kubeSystemNamespaceK8s.Core().V1().Pods(),
+			kubecertagent.NewLegacyPodCleanerController(
+				agentConfig,
+				client,
 				informers.installationNamespaceK8s.Core().V1().Pods(),
-				controllerlib.WithInformer,
+				klogr.New(),
			),
 			singletonWorker,
 		).

 		// The cache filler/cleaner controllers are responsible for keeping an in-memory representation of active
 		// authenticators up to date.
 		WithController(
@@ -6,7 +6,6 @@ package kubeclient
 import (
 	"bytes"
 	"encoding/hex"
-	"fmt"
 	"net/url"

 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -32,39 +31,17 @@ func defaultServerUrlFor(config *restclient.Config) (*url.URL, string, error) {
 	return restclient.DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS)
 }

-// truncateBody was copied from k8s.io/client-go/rest/request.go
-// ...except i changed klog invocations to analogous plog invocations
-//
-// truncateBody decides if the body should be truncated, based on the glog Verbosity.
-func truncateBody(body string) string {
-	max := 0
-	switch {
-	case plog.Enabled(plog.LevelAll):
-		return body
-	case plog.Enabled(plog.LevelTrace):
-		max = 10240
-	case plog.Enabled(plog.LevelDebug):
-		max = 1024
-	}
-
-	if len(body) <= max {
-		return body
-	}
-
-	return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
-}
-
 // glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
 // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
 // whether the body is printable.
 func glogBody(prefix string, body []byte) {
-	if plog.Enabled(plog.LevelDebug) {
+	if plog.Enabled(plog.LevelAll) {
 		if bytes.IndexFunc(body, func(r rune) bool {
 			return r < 0x0a
 		}) != -1 {
-			plog.Debug(prefix, "body", truncateBody(hex.Dump(body)))
+			plog.Debug(prefix, "body", hex.Dump(body))
 		} else {
-			plog.Debug(prefix, "body", truncateBody(string(body)))
+			plog.Debug(prefix, "body", string(body))
 		}
 	}
 }
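The net effect of this change appears to be that request/response bodies are now logged only at the most verbose (`all`) log level, and always in full; the old behavior logged possibly truncated bodies at the more common `debug` level, which was noisier while still being lossy.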
@@ -65,7 +65,10 @@ func requireCleanKubectlStderr(t *testing.T, stderr string) {
 		if strings.Contains(line, "Throttling request took") {
 			continue
 		}
+		if strings.Contains(line, "due to client-side throttling, not priority and fairness") {
+			continue
+		}
-		require.Failf(t, "unexpected kubectl stderr", "kubectl produced unexpected stderr output:\n%s\n\n", stderr)
+		require.Failf(t, "unexpected kubectl stderr", "kubectl produced unexpected stderr:\n%s\n\n", stderr)
 		return
 	}
 }
@ -80,7 +80,7 @@ func TestCredentialIssuer(t *testing.T) {
|
|||||||
if env.HasCapability(library.ClusterSigningKeyIsAvailable) {
|
if env.HasCapability(library.ClusterSigningKeyIsAvailable) {
|
||||||
require.Equal(t, configv1alpha1.SuccessStrategyStatus, actualStatusStrategy.Status)
|
require.Equal(t, configv1alpha1.SuccessStrategyStatus, actualStatusStrategy.Status)
|
||||||
require.Equal(t, configv1alpha1.FetchedKeyStrategyReason, actualStatusStrategy.Reason)
|
require.Equal(t, configv1alpha1.FetchedKeyStrategyReason, actualStatusStrategy.Reason)
|
||||||
require.Equal(t, "Key was fetched successfully", actualStatusStrategy.Message)
|
require.Equal(t, "key was fetched successfully", actualStatusStrategy.Message)
|
||||||
require.NotNil(t, actualStatusStrategy.Frontend)
|
require.NotNil(t, actualStatusStrategy.Frontend)
|
||||||
require.Equal(t, configv1alpha1.TokenCredentialRequestAPIFrontendType, actualStatusStrategy.Frontend.Type)
|
require.Equal(t, configv1alpha1.TokenCredentialRequestAPIFrontendType, actualStatusStrategy.Frontend.Type)
|
||||||
expectedTokenRequestAPIInfo := configv1alpha1.TokenCredentialRequestAPIInfo{
|
expectedTokenRequestAPIInfo := configv1alpha1.TokenCredentialRequestAPIInfo{
|
||||||
@ -111,10 +111,7 @@ func TestCredentialIssuer(t *testing.T) {
|
|||||||
} else {
|
} else {
|
||||||
require.Equal(t, configv1alpha1.ErrorStrategyStatus, actualStatusStrategy.Status)
|
require.Equal(t, configv1alpha1.ErrorStrategyStatus, actualStatusStrategy.Status)
|
||||||
require.Equal(t, configv1alpha1.CouldNotFetchKeyStrategyReason, actualStatusStrategy.Reason)
|
require.Equal(t, configv1alpha1.CouldNotFetchKeyStrategyReason, actualStatusStrategy.Reason)
|
||||||
require.Contains(t, actualStatusStrategy.Message, "did not find kube-controller-manager pod(s)")
|
require.Contains(t, actualStatusStrategy.Message, "could not find a healthy kube-controller-manager pod (0 candidates)")
|
||||||
// For now, don't verify the kube config info because its not available on GKE. We'll need to address
|
|
||||||
// this somehow once we starting supporting those cluster types.
|
|
||||||
// Require `nil` to remind us to address this later for other types of clusters where it is available.
|
|
||||||
require.Nil(t, actualStatusKubeConfigInfo)
|
require.Nil(t, actualStatusKubeConfigInfo)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -26,7 +26,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/net/http2"
-	v1 "k8s.io/api/authorization/v1"
+	authenticationv1 "k8s.io/api/authentication/v1"
+	authorizationv1 "k8s.io/api/authorization/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -34,9 +35,14 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	k8sinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/transport"
+	"k8s.io/client-go/util/keyutil"
 	"sigs.k8s.io/yaml"

 	conciergev1alpha "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
@@ -44,6 +50,7 @@ import (
 	loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
 	pinnipedconciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
 	"go.pinniped.dev/internal/concierge/impersonator"
+	"go.pinniped.dev/internal/httputil/roundtripper"
 	"go.pinniped.dev/internal/kubeclient"
 	"go.pinniped.dev/internal/testutil"
 	"go.pinniped.dev/test/library"
@@ -102,23 +109,13 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	// The address of the ClusterIP service that points at the impersonation proxy's port (used when there is no load balancer).
 	proxyServiceEndpoint := fmt.Sprintf("%s-proxy.%s.svc.cluster.local", env.ConciergeAppName, env.ConciergeNamespace)

-	newImpersonationProxyClientWithCredentials := func(credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
-		kubeconfig := impersonationProxyRestConfig(credentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
-		if !clusterSupportsLoadBalancers {
-			// Only if there is no possibility to send traffic through a load balancer, then send the traffic through the Squid proxy.
-			// Prefer to go through a load balancer because that's how the impersonator is intended to be used in the real world.
-			kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
-		}
-		return library.NewKubeclient(t, kubeconfig)
-	}
-
-	newAnonymousImpersonationProxyClient := func(impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
-		emptyCredentials := &loginv1alpha1.ClusterCredential{}
-		return newImpersonationProxyClientWithCredentials(emptyCredentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
-	}
-
-	var mostRecentTokenCredentialRequestResponse *loginv1alpha1.TokenCredentialRequest
+	var (
+		mostRecentTokenCredentialRequestResponse     *loginv1alpha1.TokenCredentialRequest
+		mostRecentTokenCredentialRequestResponseLock sync.Mutex
+	)
+
 	refreshCredential := func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte) *loginv1alpha1.ClusterCredential {
+		mostRecentTokenCredentialRequestResponseLock.Lock()
+		defer mostRecentTokenCredentialRequestResponseLock.Unlock()
 		if mostRecentTokenCredentialRequestResponse == nil || credentialAlmostExpired(t, mostRecentTokenCredentialRequestResponse) {
 			var err error
 			// Make a TokenCredentialRequest. This can either return a cert signed by the Kube API server's CA (e.g. on kind)
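The mutex added above exists because later hunks mark the subtests t.Parallel(), which makes the cached TokenCredentialRequest response shared mutable state. A minimal sketch of the same refresh-under-lock pattern, with placeholder names and types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// credentialCache refreshes a shared value at most once per expiry window and
// is safe to call from parallel subtests. Names are illustrative placeholders.
type credentialCache struct {
	mu      sync.Mutex
	value   string
	expires time.Time
}

func (c *credentialCache) get(refresh func() string) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.value == "" || time.Now().After(c.expires) {
		c.value = refresh()
		c.expires = time.Now().Add(4 * time.Minute)
	}
	return c.value
}

func main() {
	cache := &credentialCache{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(cache.get(func() string { return "fresh-credential" }))
		}()
	}
	wg.Wait()
}
```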
@@ -132,7 +129,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			// what would normally happen when a user is using a kubeconfig where the server is the impersonation proxy,
 			// so it more closely simulates the normal use case, and also because we want this to work on AKS clusters
 			// which do not allow anonymous requests.
-			client := newAnonymousImpersonationProxyClient(impersonationProxyURL, impersonationProxyCACertPEM, "").PinnipedConcierge
+			client := newAnonymousImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge
 			require.Eventually(t, func() bool {
 				mostRecentTokenCredentialRequestResponse, err = createTokenCredentialRequest(credentialRequestSpecWithWorkingCredentials, client)
 				if err != nil {
@@ -154,19 +151,6 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		return mostRecentTokenCredentialRequestResponse.Status.Credential
 	}

-	impersonationProxyViaSquidKubeClientWithoutCredential := func() kubernetes.Interface {
-		proxyURL := "https://" + proxyServiceEndpoint
-		kubeconfig := impersonationProxyRestConfig(&loginv1alpha1.ClusterCredential{}, proxyURL, nil, "")
-		kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
-		return library.NewKubeclient(t, kubeconfig).Kubernetes
-	}
-
-	newImpersonationProxyClient := func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte, doubleImpersonateUser string) *kubeclient.Client {
-		refreshedCredentials := refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM).DeepCopy()
-		refreshedCredentials.Token = "not a valid token" // demonstrates that client certs take precedence over tokens by setting both on the requests
-		return newImpersonationProxyClientWithCredentials(refreshedCredentials, impersonationProxyURL, impersonationProxyCACertPEM, doubleImpersonateUser)
-	}
-
 	oldConfigMap, err := adminClient.CoreV1().ConfigMaps(env.ConciergeNamespace).Get(ctx, impersonationProxyConfigMapName(env), metav1.GetOptions{})
 	if !k8serrors.IsNotFound(err) {
 		require.NoError(t, err) // other errors aside from NotFound are unexpected
@@ -175,7 +159,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	}
 	// At the end of the test, clean up the ConfigMap.
 	t.Cleanup(func() {
-		ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute)
+		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 		defer cancel()

 		// Delete any version that was created by this test.
@@ -249,7 +233,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	}, 10*time.Second, 500*time.Millisecond)

 	// Check that we can't use the impersonation proxy to execute kubectl commands yet.
-	_, err = impersonationProxyViaSquidKubeClientWithoutCredential().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+	_, err = impersonationProxyViaSquidKubeClientWithoutCredential(t, proxyServiceEndpoint).CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	isErr, message := isServiceUnavailableViaSquidError(err, proxyServiceEndpoint)
 	require.Truef(t, isErr, "wanted error %q to be service unavailable via squid error, but: %s", err, message)

@@ -279,7 +263,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	// so we don't have to keep repeating them.
 	// This client performs TLS checks, so it also provides test coverage that the impersonation proxy server is generating TLS certs correctly.
 	impersonationProxyKubeClient := func(t *testing.T) kubernetes.Interface {
-		return newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, "").Kubernetes
+		return newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, nil, refreshCredential).Kubernetes
 	}

 	t.Run("positive tests", func(t *testing.T) {
@@ -289,7 +273,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "edit"},
 		)
 		// Wait for the above RBAC rule to take effect.
-		library.WaitForUserToHaveAccess(t, env.TestUser.ExpectedUsername, []string{}, &v1.ResourceAttributes{
+		library.WaitForUserToHaveAccess(t, env.TestUser.ExpectedUsername, []string{}, &authorizationv1.ResourceAttributes{
 			Verb: "get", Group: "", Version: "v1", Resource: "namespaces",
 		})

@@ -323,12 +307,13 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		}

 		t.Run("kubectl port-forward and keeping the connection open for over a minute (non-idle)", func(t *testing.T) {
+			t.Parallel()
 			kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)

 			// Run the kubectl port-forward command.
 			timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
 			defer cancelFunc()
-			portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "8443:8443")
+			portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "10443:8443")
 			portForwardCmd.Env = envVarsWithProxy

 			// Start, but don't wait for the command to finish.
@@ -348,7 +333,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			defer cancelFunc()
 			startTime := time.Now()
 			for time.Now().Before(startTime.Add(70 * time.Second)) {
-				curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:8443") // -sS turns off the progressbar but still prints errors
+				curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443") // -sS turns off the progressbar but still prints errors
 				curlCmd.Stdout = &curlStdOut
 				curlCmd.Stderr = &curlStdErr
 				curlErr := curlCmd.Run()
@@ -364,7 +349,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			// curl the endpoint once more, once 70 seconds has elapsed, to make sure the connection is still open.
 			timeout, cancelFunc = context.WithTimeout(ctx, 30*time.Second)
 			defer cancelFunc()
-			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:8443") // -sS turns off the progressbar but still prints errors
+			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443") // -sS turns off the progressbar but still prints errors
 			curlCmd.Stdout = &curlStdOut
 			curlCmd.Stderr = &curlStdErr
 			curlErr := curlCmd.Run()
@@ -376,16 +361,17 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			}
 			// We expect this to 403, but all we care is that it gets through.
 			require.NoError(t, curlErr)
-			require.Contains(t, curlStdOut.String(), "\"forbidden: User \\\"system:anonymous\\\" cannot get path \\\"/\\\"\"")
+			require.Contains(t, curlStdOut.String(), `"forbidden: User \"system:anonymous\" cannot get path \"/\""`)
 		})

 		t.Run("kubectl port-forward and keeping the connection open for over a minute (idle)", func(t *testing.T) {
+			t.Parallel()
 			kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)

 			// Run the kubectl port-forward command.
 			timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
 			defer cancelFunc()
-			portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "8443:8443")
+			portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "10444:8443")
 			portForwardCmd.Env = envVarsWithProxy

 			// Start, but don't wait for the command to finish.
@@ -401,7 +387,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl

 			timeout, cancelFunc = context.WithTimeout(ctx, 2*time.Minute)
 			defer cancelFunc()
-			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:8443") // -sS turns off the progressbar but still prints errors
+			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10444") // -sS turns off the progressbar but still prints errors
 			var curlStdOut, curlStdErr bytes.Buffer
 			curlCmd.Stdout = &curlStdOut
 			curlCmd.Stderr = &curlStdErr
@@ -413,10 +399,11 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			}
 			// We expect this to 403, but all we care is that it gets through.
 			require.NoError(t, err)
-			require.Contains(t, curlStdOut.String(), "\"forbidden: User \\\"system:anonymous\\\" cannot get path \\\"/\\\"\"")
+			require.Contains(t, curlStdOut.String(), `"forbidden: User \"system:anonymous\" cannot get path \"/\""`)
 		})

 		t.Run("using and watching all the basic verbs", func(t *testing.T) {
+			t.Parallel()
 			// Create a namespace, because it will be easier to exercise "deletecollection" if we have a namespace.
 			namespaceName := createTestNamespace(t, adminClient)

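Note that the local ports move from 8443 to 10443 and 10444 so that the two port-forward subtests, now running under t.Parallel(), do not collide on the same listener. An alternative sketch that reserves a free port dynamically instead of hardcoding distinct ones (this is not what the test does; it is a common technique under the stated assumption that a brief close-then-reuse race is acceptable):

```go
package main

import (
	"fmt"
	"net"
)

// freeLocalPort asks the kernel for an unused TCP port by listening on port 0
// and then releasing it. Another process could grab the port between Close and
// reuse, which is one reason tests sometimes hardcode distinct ports instead.
func freeLocalPort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freeLocalPort()
	if err != nil {
		panic(err)
	}
	fmt.Printf("kubectl port-forward could use %d:8443\n", port)
}
```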
@@ -536,10 +523,12 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			require.Len(t, listResult.Items, 0)
 		})

-		t.Run("double impersonation as a regular user is blocked", func(t *testing.T) {
+		t.Run("nested impersonation as a regular user is allowed if they have enough RBAC permissions", func(t *testing.T) {
+			t.Parallel()
 			// Make a client which will send requests through the impersonation proxy and will also add
 			// impersonate headers to the request.
-			doubleImpersonationKubeClient := newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, "other-user-to-impersonate").Kubernetes
+			nestedImpersonationClient := newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM,
+				&rest.ImpersonationConfig{UserName: "other-user-to-impersonate"}, refreshCredential)

 			// Check that we can get some resource through the impersonation proxy without any impersonation headers on the request.
 			// We could use any resource for this, but we happen to know that this one should exist.
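The renamed test now passes a full *rest.ImpersonationConfig rather than a bare username string. With client-go, setting Impersonate on a rest.Config makes the transport add Impersonate-User, Impersonate-Group, and Impersonate-Extra-* headers to every request. A minimal sketch (the kubeconfig path is a placeholder):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load any kubeconfig; the path here is illustrative.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/example-kubeconfig")
	if err != nil {
		panic(err)
	}
	// Ask client-go to impersonate another identity on every request.
	config.Impersonate = rest.ImpersonationConfig{
		UserName: "other-user-to-impersonate",
		Groups:   []string{"other-group-1", "other-group-2"},
		Extra:    map[string][]string{"this-key": {"to this value"}},
	}
	fmt.Printf("requests will carry impersonation headers for %q\n", config.Impersonate.UserName)
}
```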
@@ -548,68 +537,236 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl

 			// Now we'll see what happens when we add an impersonation header to the request. This should generate a
 			// request similar to the one above, except that it will also have an impersonation header.
-			_, err = doubleImpersonationKubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
-			// Double impersonation is not supported yet, so we should get an error.
+			_, err = nestedImpersonationClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
+			// this user is not allowed to impersonate other users
+			require.True(t, k8serrors.IsForbidden(err), err)
 			require.EqualError(t, err, fmt.Sprintf(
 				`users "other-user-to-impersonate" is forbidden: `+
-					`User "%s" cannot impersonate resource "users" in API group "" at the cluster scope: `+
-					`impersonation is not allowed or invalid verb`,
+					`User "%s" cannot impersonate resource "users" in API group "" at the cluster scope`,
 				env.TestUser.ExpectedUsername))
-		})

-		// This is a separate test from the above double impersonation test because the cluster admin user gets special
-		// authorization treatment from the Kube API server code that we are using, and we want to ensure that we are blocking
-		// double impersonation even for the cluster admin.
-		t.Run("double impersonation as a cluster admin user is blocked", func(t *testing.T) {
-			// Copy the admin credentials from the admin kubeconfig.
-			adminClientRestConfig := library.NewClientConfig(t)
-
-			if adminClientRestConfig.BearerToken == "" && adminClientRestConfig.CertData == nil && adminClientRestConfig.KeyData == nil {
-				t.Skip("The admin kubeconfig does not include credentials, so skipping this test.")
-			}
-
-			clusterAdminCredentials := &loginv1alpha1.ClusterCredential{
-				Token:                 adminClientRestConfig.BearerToken,
-				ClientCertificateData: string(adminClientRestConfig.CertData),
-				ClientKeyData:         string(adminClientRestConfig.KeyData),
-			}
-
-			// Make a client using the admin credentials which will send requests through the impersonation proxy
-			// and will also add impersonate headers to the request.
-			doubleImpersonationKubeClient := newImpersonationProxyClientWithCredentials(
-				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, "other-user-to-impersonate",
-			).Kubernetes
-
-			_, err := doubleImpersonationKubeClient.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
-			// Double impersonation is not supported yet, so we should get an error.
-			require.Error(t, err)
-			require.Regexp(t,
-				`users "other-user-to-impersonate" is forbidden: `+
-					`User ".*" cannot impersonate resource "users" in API group "" at the cluster scope: `+
-					`impersonation is not allowed or invalid verb`,
-				err.Error(),
-			)
-		})
-
-		t.Run("WhoAmIRequests and different kinds of authentication through the impersonation proxy", func(t *testing.T) {
-			// Test using the TokenCredentialRequest for authentication.
-			impersonationProxyPinnipedConciergeClient := newImpersonationProxyClient(t,
-				impersonationProxyURL, impersonationProxyCACertPEM, "",
-			).PinnipedConcierge
-			whoAmI, err := impersonationProxyPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
+			// impersonate the GC service account instead which can read anything (the binding to edit allows this)
+			nestedImpersonationClientAsSA := newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM,
+				&rest.ImpersonationConfig{UserName: "system:serviceaccount:kube-system:generic-garbage-collector"}, refreshCredential)
+
+			_, err = nestedImpersonationClientAsSA.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
+			require.NoError(t, err)
+
+			expectedGroups := make([]string, 0, len(env.TestUser.ExpectedGroups)+1) // make sure we do not mutate env.TestUser.ExpectedGroups
+			expectedGroups = append(expectedGroups, env.TestUser.ExpectedGroups...)
+			expectedGroups = append(expectedGroups, "system:authenticated")
+			expectedOriginalUserInfo := authenticationv1.UserInfo{
+				Username: env.TestUser.ExpectedUsername,
+				Groups:   expectedGroups,
+			}
+			expectedOriginalUserInfoJSON, err := json.Marshal(expectedOriginalUserInfo)
+			require.NoError(t, err)
+
+			// check that we impersonated the correct user and that the original user is retained in the extra
+			whoAmI, err := nestedImpersonationClientAsSA.PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
 				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
 			require.NoError(t, err)
+			require.Equal(t,
+				expectedWhoAmIRequestResponse(
+					"system:serviceaccount:kube-system:generic-garbage-collector",
+					[]string{"system:serviceaccounts", "system:serviceaccounts:kube-system", "system:authenticated"},
+					map[string]identityv1alpha1.ExtraValue{
+						"original-user-info.impersonation-proxy.concierge.pinniped.dev": {string(expectedOriginalUserInfoJSON)},
+					},
+				),
+				whoAmI,
+			)
+
+			_, err = newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM,
+				&rest.ImpersonationConfig{
+					UserName: "system:serviceaccount:kube-system:generic-garbage-collector",
+					Extra: map[string][]string{
+						"some-fancy-key": {"with a dangerous value"},
+					},
+				},
+				refreshCredential).PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
+				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
+			// this user should not be able to impersonate extra
+			require.True(t, k8serrors.IsForbidden(err), err)
+			require.EqualError(t, err, fmt.Sprintf(
+				`userextras.authentication.k8s.io "with a dangerous value" is forbidden: `+
+					`User "%s" cannot impersonate resource "userextras/some-fancy-key" in API group "authentication.k8s.io" at the cluster scope`,
+				env.TestUser.ExpectedUsername))
+		})
+
+		t.Run("nested impersonation as a cluster admin user is allowed", func(t *testing.T) {
+			t.Parallel()
+			// Copy the admin credentials from the admin kubeconfig.
+			adminClientRestConfig := library.NewClientConfig(t)
+			clusterAdminCredentials := getCredForConfig(t, adminClientRestConfig)
+
+			// figure out who the admin user is
+			whoAmIAdmin, err := newImpersonationProxyClientWithCredentials(t,
+				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil).
+				PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
+				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
+			require.NoError(t, err)
+
+			expectedExtra := make(map[string]authenticationv1.ExtraValue, len(whoAmIAdmin.Status.KubernetesUserInfo.User.Extra))
+			for k, v := range whoAmIAdmin.Status.KubernetesUserInfo.User.Extra {
+				expectedExtra[k] = authenticationv1.ExtraValue(v)
+			}
+			expectedOriginalUserInfo := authenticationv1.UserInfo{
+				Username: whoAmIAdmin.Status.KubernetesUserInfo.User.Username,
+				// The WhoAmI API is lossy so this will fail when the admin user actually does have a UID
+				UID:    whoAmIAdmin.Status.KubernetesUserInfo.User.UID,
+				Groups: whoAmIAdmin.Status.KubernetesUserInfo.User.Groups,
+				Extra:  expectedExtra,
+			}
+			expectedOriginalUserInfoJSON, err := json.Marshal(expectedOriginalUserInfo)
+			require.NoError(t, err)
+
+			// Make a client using the admin credentials which will send requests through the impersonation proxy
+			// and will also add impersonate headers to the request.
+			nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
+				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM,
+				&rest.ImpersonationConfig{
+					UserName: "other-user-to-impersonate",
+					Groups:   []string{"other-group-1", "other-group-2"},
+					Extra: map[string][]string{
+						"this-key": {"to this value"},
+					},
+				},
+			)
+
+			_, err = nestedImpersonationClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
+			// the impersonated user lacks the RBAC to perform this call
+			require.True(t, k8serrors.IsForbidden(err), err)
+			require.EqualError(t, err, fmt.Sprintf(
+				`secrets "%s" is forbidden: User "other-user-to-impersonate" cannot get resource "secrets" in API group "" in the namespace "%s"`,
+				impersonationProxyTLSSecretName(env), env.ConciergeNamespace,
+			))
+
+			// check that we impersonated the correct user and that the original user is retained in the extra
+			whoAmI, err := nestedImpersonationClient.PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
+				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
+			require.NoError(t, err)
+			require.Equal(t,
+				expectedWhoAmIRequestResponse(
+					"other-user-to-impersonate",
+					[]string{"other-group-1", "other-group-2", "system:authenticated"},
+					map[string]identityv1alpha1.ExtraValue{
+						"this-key": {"to this value"},
+						"original-user-info.impersonation-proxy.concierge.pinniped.dev": {string(expectedOriginalUserInfoJSON)},
+					},
+				),
+				whoAmI,
+			)
+		})
+
+		t.Run("nested impersonation as a cluster admin fails on reserved key", func(t *testing.T) {
+			t.Parallel()
+			adminClientRestConfig := library.NewClientConfig(t)
+			clusterAdminCredentials := getCredForConfig(t, adminClientRestConfig)
+
+			nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
+				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM,
+				&rest.ImpersonationConfig{
+					UserName: "other-user-to-impersonate",
+					Groups:   []string{"other-group-1", "other-group-2"},
+					Extra: map[string][]string{
+						"this-good-key": {"to this good value"},
+						"something.impersonation-proxy.concierge.pinniped.dev": {"super sneaky value"},
+					},
+				},
+			)
+
+			_, err := nestedImpersonationClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
+			require.EqualError(t, err, "Internal error occurred: unimplemented functionality - unable to act as current user")
+			require.True(t, k8serrors.IsInternalError(err), err)
+			require.Equal(t, &k8serrors.StatusError{
+				ErrStatus: metav1.Status{
+					Status: metav1.StatusFailure,
+					Code:   http.StatusInternalServerError,
+					Reason: metav1.StatusReasonInternalError,
+					Details: &metav1.StatusDetails{
+						Causes: []metav1.StatusCause{
+							{
+								Message: "unimplemented functionality - unable to act as current user",
+							},
+						},
+					},
+					Message: "Internal error occurred: unimplemented functionality - unable to act as current user",
+				},
+			}, err)
+		})
+
+		// this works because impersonation cannot set UID and thus the final user info the proxy sees has no UID
+		t.Run("nested impersonation as a service account is allowed if it has enough RBAC permissions", func(t *testing.T) {
+			t.Parallel()
+			namespaceName := createTestNamespace(t, adminClient)
+			saName, saToken, saUID := createServiceAccountToken(ctx, t, adminClient, namespaceName)
+			nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
+				&loginv1alpha1.ClusterCredential{Token: saToken}, impersonationProxyURL, impersonationProxyCACertPEM,
+				&rest.ImpersonationConfig{UserName: "system:serviceaccount:kube-system:root-ca-cert-publisher"}).PinnipedConcierge
+			_, err := nestedImpersonationClient.IdentityV1alpha1().WhoAmIRequests().
+				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
+			// this SA is not yet allowed to impersonate SAs
+			require.True(t, k8serrors.IsForbidden(err), err)
+			require.EqualError(t, err, fmt.Sprintf(
+				`serviceaccounts "root-ca-cert-publisher" is forbidden: `+
+					`User "%s" cannot impersonate resource "serviceaccounts" in API group "" in the namespace "kube-system"`,
+				serviceaccount.MakeUsername(namespaceName, saName)))
+
+			// webhook authorizer deny cache TTL is 10 seconds so we need to wait long enough for it to drain
+			time.Sleep(15 * time.Second)
+
+			// allow the test SA to impersonate any SA
+			library.CreateTestClusterRoleBinding(t,
+				rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: saName, Namespace: namespaceName},
+				rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "edit"},
+			)
+			library.WaitForUserToHaveAccess(t, serviceaccount.MakeUsername(namespaceName, saName), []string{}, &authorizationv1.ResourceAttributes{
+				Verb: "impersonate", Group: "", Version: "v1", Resource: "serviceaccounts",
+			})
+
+			whoAmI, err := nestedImpersonationClient.IdentityV1alpha1().WhoAmIRequests().
+				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
+			require.NoError(t, err)
+			require.Equal(t,
+				expectedWhoAmIRequestResponse(
+					"system:serviceaccount:kube-system:root-ca-cert-publisher",
+					[]string{"system:serviceaccounts", "system:serviceaccounts:kube-system", "system:authenticated"},
+					map[string]identityv1alpha1.ExtraValue{
+						"original-user-info.impersonation-proxy.concierge.pinniped.dev": {
+							fmt.Sprintf(`{"username":"%s","uid":"%s","groups":["system:serviceaccounts","system:serviceaccounts:%s","system:authenticated"]}`,
+								serviceaccount.MakeUsername(namespaceName, saName), saUID, namespaceName),
+						},
+					},
+				),
+				whoAmI,
+			)
+		})
+
+		t.Run("WhoAmIRequests and different kinds of authentication through the impersonation proxy", func(t *testing.T) {
+			t.Parallel()
+			// Test using the TokenCredentialRequest for authentication.
+			impersonationProxyPinnipedConciergeClient := newImpersonationProxyClient(t,
+				impersonationProxyURL, impersonationProxyCACertPEM, nil, refreshCredential,
+			).PinnipedConcierge
+			whoAmI, err := impersonationProxyPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
+				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
+			require.NoError(t, err)
+			expectedGroups := make([]string, 0, len(env.TestUser.ExpectedGroups)+1) // make sure we do not mutate env.TestUser.ExpectedGroups
+			expectedGroups = append(expectedGroups, env.TestUser.ExpectedGroups...)
+			expectedGroups = append(expectedGroups, "system:authenticated")
 			require.Equal(t,
 				expectedWhoAmIRequestResponse(
 					env.TestUser.ExpectedUsername,
-					append(env.TestUser.ExpectedGroups, "system:authenticated"),
+					expectedGroups,
+					nil,
 				),
 				whoAmI,
 			)

 			// Test an unauthenticated request which does not include any credentials.
 			impersonationProxyAnonymousPinnipedConciergeClient := newAnonymousImpersonationProxyClient(
-				impersonationProxyURL, impersonationProxyCACertPEM, "",
+				t, impersonationProxyURL, impersonationProxyCACertPEM, nil,
 			).PinnipedConcierge
 			whoAmI, err = impersonationProxyAnonymousPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
 				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
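Throughout the new assertions above, the proxy surfaces the authenticating (pre-impersonation) user under the reserved extra key original-user-info.impersonation-proxy.concierge.pinniped.dev, serialized as JSON. A sketch of decoding such a value back into a typed struct (the sample payload is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	// Example payload in the shape the tests above assert on.
	raw := `{"username":"system:serviceaccount:test-ns:test-sa","uid":"abc-123","groups":["system:serviceaccounts","system:authenticated"]}`

	var original authenticationv1.UserInfo
	if err := json.Unmarshal([]byte(raw), &original); err != nil {
		panic(err)
	}
	fmt.Printf("original user %q (uid %q) in groups %v\n", original.Username, original.UID, original.Groups)
}
```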
@@ -618,6 +775,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 				expectedWhoAmIRequestResponse(
 					"system:anonymous",
 					[]string{"system:unauthenticated"},
+					nil,
 				),
 				whoAmI,
 			)
@@ -625,9 +783,10 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			// Test using a service account token. Authenticating as Service Accounts through the impersonation
 			// proxy is not supported, so it should fail.
 			namespaceName := createTestNamespace(t, adminClient)
-			impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(
-				&loginv1alpha1.ClusterCredential{Token: createServiceAccountToken(ctx, t, adminClient, namespaceName)},
-				impersonationProxyURL, impersonationProxyCACertPEM, "").PinnipedConcierge
+			_, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName)
+			impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t,
+				&loginv1alpha1.ClusterCredential{Token: saToken},
+				impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge
 			_, err = impersonationProxyServiceAccountPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
 				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
 			require.EqualError(t, err, "Internal error occurred: unimplemented functionality - unable to act as current user")
@@ -650,6 +809,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		})

 		t.Run("kubectl as a client", func(t *testing.T) {
+			t.Parallel()
 			kubeconfigPath, envVarsWithProxy, tempDir := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)

 			// Try "kubectl exec" through the impersonation proxy.
@@ -720,11 +880,12 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		})

 		t.Run("websocket client", func(t *testing.T) {
+			t.Parallel()
 			namespaceName := createTestNamespace(t, adminClient)

 			impersonationRestConfig := impersonationProxyRestConfig(
 				refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM),
-				impersonationProxyURL, impersonationProxyCACertPEM, "",
+				impersonationProxyURL, impersonationProxyCACertPEM, nil,
 			)
 			tlsConfig, err := rest.TLSConfigFor(impersonationRestConfig)
 			require.NoError(t, err)
@@ -793,6 +954,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		})

 		t.Run("http2 client", func(t *testing.T) {
+			t.Parallel()
 			namespaceName := createTestNamespace(t, adminClient)

 			wantConfigMapLabelKey, wantConfigMapLabelValue := "some-label-key", "some-label-value"
@@ -811,7 +973,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			// create rest client
 			restConfig := impersonationProxyRestConfig(
 				refreshCredential(t, impersonationProxyURL, impersonationProxyCACertPEM),
-				impersonationProxyURL, impersonationProxyCACertPEM, "",
+				impersonationProxyURL, impersonationProxyCACertPEM, nil,
 			)

 			tlsConfig, err := rest.TLSConfigFor(restConfig)
@@ -916,7 +1078,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	require.Eventually(t, func() bool {
 		// It's okay if this returns RBAC errors because this user has no role bindings.
 		// What we want to see is that the proxy eventually shuts down entirely.
-		_, err := impersonationProxyViaSquidKubeClientWithoutCredential().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+		_, err := impersonationProxyViaSquidKubeClientWithoutCredential(t, proxyServiceEndpoint).CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 		isErr, _ := isServiceUnavailableViaSquidError(err, proxyServiceEndpoint)
 		return isErr
 	}, 20*time.Second, 500*time.Millisecond)
@@ -977,7 +1139,7 @@ func createTestNamespace(t *testing.T, adminClient kubernetes.Interface) string
 	return namespace.Name
 }

-func createServiceAccountToken(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, namespaceName string) string {
+func createServiceAccountToken(ctx context.Context, t *testing.T, adminClient kubernetes.Interface, namespaceName string) (name, token string, uid types.UID) {
 	t.Helper()

 	serviceAccount, err := adminClient.CoreV1().ServiceAccounts(namespaceName).Create(ctx,
@@ -1011,10 +1173,10 @@ func createServiceAccountToken(ctx context.Context, t *testing.T, adminClient ku
 		return len(secret.Data[corev1.ServiceAccountTokenKey]) > 0, nil
 	}, time.Minute, time.Second)

-	return string(secret.Data[corev1.ServiceAccountTokenKey])
+	return serviceAccount.Name, string(secret.Data[corev1.ServiceAccountTokenKey]), serviceAccount.UID
 }

-func expectedWhoAmIRequestResponse(username string, groups []string) *identityv1alpha1.WhoAmIRequest {
+func expectedWhoAmIRequestResponse(username string, groups []string, extra map[string]identityv1alpha1.ExtraValue) *identityv1alpha1.WhoAmIRequest {
 	return &identityv1alpha1.WhoAmIRequest{
 		Status: identityv1alpha1.WhoAmIRequestStatus{
 			KubernetesUserInfo: identityv1alpha1.KubernetesUserInfo{
@@ -1022,7 +1184,7 @@ func expectedWhoAmIRequestResponse(username string, groups []string) *identityv1
 					Username: username,
 					UID:      "", // no way to impersonate UID: https://github.com/kubernetes/kubernetes/issues/93699
 					Groups:   groups,
-					Extra:    nil,
+					Extra:    extra,
 				},
 			},
 		},
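createServiceAccountToken now also returns the ServiceAccount's name and UID so callers can build expected impersonation usernames and original-user-info payloads. The token itself comes from a legacy service-account token Secret. A sketch of how such a Secret can be requested declaratively, under the assumption that the cluster's legacy token controller is active (newer clusters would use the TokenRequest API instead; names here are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// tokenSecretFor builds a Secret that asks the legacy token controller to
// mint a token for the named ServiceAccount; the controller later fills in
// the "token" data key that the helper above waits on.
func tokenSecretFor(namespace, saName string) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: saName + "-token-",
			Namespace:    namespace,
			Annotations: map[string]string{
				corev1.ServiceAccountNameKey: saName, // "kubernetes.io/service-account.name"
			},
		},
		Type: corev1.SecretTypeServiceAccountToken,
	}
}

func main() {
	secret := tokenSecretFor("test-ns", "test-sa")
	fmt.Println(secret.Type, secret.Annotations)
}
```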
@@ -1031,6 +1193,7 @@ func expectedWhoAmIRequestResponse(username string, groups []string) *identityv1

 func performImpersonatorDiscovery(ctx context.Context, t *testing.T, env *library.TestEnv, adminConciergeClient pinnipedconciergeclientset.Interface) (string, []byte) {
 	t.Helper()

 	var impersonationProxyURL string
 	var impersonationProxyCACertPEM []byte

@@ -1105,6 +1268,7 @@ func requireDisabledStrategy(ctx context.Context, t *testing.T, env *library.Tes

 func credentialAlmostExpired(t *testing.T, credential *loginv1alpha1.TokenCredentialRequest) bool {
 	t.Helper()

 	pemBlock, _ := pem.Decode([]byte(credential.Status.Credential.ClientCertificateData))
 	parsedCredential, err := x509.ParseCertificate(pemBlock.Bytes)
 	require.NoError(t, err)
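credentialAlmostExpired, whose opening lines appear in the hunk above, decides whether the cached credential needs refreshing by parsing its PEM client certificate. A standalone sketch of the same check (the helper name and threshold are illustrative):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// almostExpired reports whether a PEM-encoded certificate expires within the
// given window, mirroring the idea of parsing ClientCertificateData and
// comparing NotAfter against the clock.
func almostExpired(certPEM string, window time.Duration) (bool, error) {
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		return false, fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return time.Now().Add(window).After(cert.NotAfter), nil
}

func main() {
	fmt.Println("usage: almostExpired(clientCertPEM, 5*time.Minute)")
}
```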
@@ -1117,7 +1281,7 @@ func credentialAlmostExpired(t *testing.T, credential *loginv1alpha1.TokenCreden
 	return false
 }

-func impersonationProxyRestConfig(credential *loginv1alpha1.ClusterCredential, host string, caData []byte, doubleImpersonateUser string) *rest.Config {
+func impersonationProxyRestConfig(credential *loginv1alpha1.ClusterCredential, host string, caData []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *rest.Config {
 	config := rest.Config{
 		Host: host,
 		TLSClientConfig: rest.TLSClientConfig{
@@ -1135,8 +1299,8 @@ func impersonationProxyRestConfig(credential *loginv1alpha1.ClusterCredential, h
 		// We would like the impersonation proxy to imitate that behavior, so we test it here.
 		BearerToken: credential.Token,
 	}
-	if doubleImpersonateUser != "" {
-		config.Impersonate = rest.ImpersonationConfig{UserName: doubleImpersonateUser}
+	if nestedImpersonationConfig != nil {
+		config.Impersonate = *nestedImpersonationConfig
 	}
 	return &config
 }
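For reference, when impersonationProxyRestConfig sets config.Impersonate as in the hunk above, client-go's transport translates it into impersonation headers on every request. A sketch of the equivalent raw headers, handy when reproducing a request with curl (values taken from the tests above):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The headers client-go adds for a rest.ImpersonationConfig with a
	// username, two groups, and one extra key/value.
	h := http.Header{}
	h.Set("Impersonate-User", "other-user-to-impersonate")
	h.Add("Impersonate-Group", "other-group-1")
	h.Add("Impersonate-Group", "other-group-2")
	h.Set("Impersonate-Extra-this-key", "to this value")
	for key, values := range h {
		fmt.Println(key, values)
	}
}
```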
@@ -1144,6 +1308,7 @@ func impersonationProxyRestConfig(credential *loginv1alpha1.ClusterCredential, h

 func kubeconfigProxyFunc(t *testing.T, squidProxyURL string) func(req *http.Request) (*url.URL, error) {
 	return func(req *http.Request) (*url.URL, error) {
 		t.Helper()

 		parsedSquidProxyURL, err := url.Parse(squidProxyURL)
 		require.NoError(t, err)
 		t.Logf("passing request for %s through proxy %s", library.RedactURLParams(req.URL), parsedSquidProxyURL.String())
@@ -1153,6 +1318,7 @@ func kubeconfigProxyFunc(t *testing.T, squidProxyURL string) func(req *http.Requ

 func impersonationProxyConfigMapForConfig(t *testing.T, env *library.TestEnv, config impersonator.Config) corev1.ConfigMap {
 	t.Helper()

 	configString, err := yaml.Marshal(config)
 	require.NoError(t, err)
 	configMap := corev1.ConfigMap{
@@ -1244,6 +1410,8 @@ func getImpersonationKubeconfig(t *testing.T, env *library.TestEnv, impersonatio

 // func to create kubectl commands with a kubeconfig.
 func kubectlCommand(timeout context.Context, t *testing.T, kubeconfigPath string, envVarsWithProxy []string, args ...string) (*exec.Cmd, *syncBuffer, *syncBuffer) {
+	t.Helper()
+
 	allArgs := append([]string{"--kubeconfig", kubeconfigPath}, args...)
 	//nolint:gosec // we are not performing malicious argument injection against ourselves
 	kubectlCmd := exec.CommandContext(timeout, "kubectl", allArgs...)
@@ -1296,6 +1464,7 @@ func isServiceUnavailableViaSquidError(err error, proxyServiceEndpoint string) (

 func requireClose(t *testing.T, c chan struct{}, timeout time.Duration) {
 	t.Helper()

 	timer := time.NewTimer(timeout)
 	select {
 	case <-c:
@ -1317,3 +1486,117 @@ func createTokenCredentialRequest(
|
|||||||
&loginv1alpha1.TokenCredentialRequest{Spec: spec}, metav1.CreateOptions{},
|
&loginv1alpha1.TokenCredentialRequest{Spec: spec}, metav1.CreateOptions{},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newImpersonationProxyClientWithCredentials(t *testing.T, credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *kubeclient.Client {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
env := library.IntegrationEnv(t)
|
||||||
|
clusterSupportsLoadBalancers := env.HasCapability(library.HasExternalLoadBalancerProvider)
|
||||||
|
|
||||||
|
kubeconfig := impersonationProxyRestConfig(credentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
|
||||||
|
if !clusterSupportsLoadBalancers {
|
||||||
|
// Only if there is no possibility to send traffic through a load balancer, then send the traffic through the Squid proxy.
|
||||||
|
// Prefer to go through a load balancer because that's how the impersonator is intended to be used in the real world.
|
||||||
|
kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
|
||||||
|
}
|
||||||
|
return library.NewKubeclient(t, kubeconfig)
|
||||||
|
}
|
||||||
+
+func newAnonymousImpersonationProxyClient(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *kubeclient.Client {
+	t.Helper()
+
+	emptyCredentials := &loginv1alpha1.ClusterCredential{}
+	return newImpersonationProxyClientWithCredentials(t, emptyCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
+}
+
+func impersonationProxyViaSquidKubeClientWithoutCredential(t *testing.T, proxyServiceEndpoint string) kubernetes.Interface {
+	t.Helper()
+
+	env := library.IntegrationEnv(t)
+	proxyURL := "https://" + proxyServiceEndpoint
+	kubeconfig := impersonationProxyRestConfig(&loginv1alpha1.ClusterCredential{}, proxyURL, nil, nil)
+	kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
+	return library.NewKubeclient(t, kubeconfig).Kubernetes
+}
+
+func newImpersonationProxyClient(
+	t *testing.T,
+	impersonationProxyURL string,
+	impersonationProxyCACertPEM []byte,
+	nestedImpersonationConfig *rest.ImpersonationConfig,
+	refreshCredentialFunc func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte) *loginv1alpha1.ClusterCredential,
+) *kubeclient.Client {
+	t.Helper()
+
+	refreshedCredentials := refreshCredentialFunc(t, impersonationProxyURL, impersonationProxyCACertPEM).DeepCopy()
+	refreshedCredentials.Token = "not a valid token" // demonstrates that client certs take precedence over tokens by setting both on the requests
+	return newImpersonationProxyClientWithCredentials(t, refreshedCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
+}
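The comment about client certs taking precedence over tokens refers to setting both credential types on one config. A sketch of such a config follows; the field values and the precedence explanation are assumptions based on how Kubernetes typically orders its authenticators, not statements from this commit:

// Sketch: a rest.Config carrying both a deliberately bad bearer token and a
// client certificate keypair (certPEM/keyPEM are assumed valid PEM bytes).
// The cert authenticates during the TLS handshake, so it is presumably
// consulted before the Authorization header, which would explain why the test
// above can set a garbage token and still authenticate.
func restConfigWithBothCreds(host string, certPEM, keyPEM []byte) *rest.Config {
	return &rest.Config{
		Host:        host, // e.g. the impersonation proxy URL
		BearerToken: "not a valid token",
		TLSClientConfig: rest.TLSClientConfig{
			CertData: certPEM,
			KeyData:  keyPEM,
		},
	}
}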
+
+// getCredForConfig is mostly just a hacky workaround for impersonationProxyRestConfig needing creds directly.
+func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterCredential {
+	t.Helper()
+
+	out := &loginv1alpha1.ClusterCredential{}
+
+	config = rest.CopyConfig(config)
+
+	config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+		return roundtripper.Func(func(req *http.Request) (*http.Response, error) {
+			resp, err := rt.RoundTrip(req)
+
+			r := req
+			if resp != nil && resp.Request != nil {
+				r = resp.Request
+			}
+
+			_, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) {
+				out.Token = token
+				return nil, false, nil
+			})).AuthenticateRequest(r)
+
+			return resp, err
+		})
+	})
+
+	transportConfig, err := config.TransportConfig()
+	require.NoError(t, err)
+
+	rt, err := transport.New(transportConfig)
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, "GET", "https://localhost", nil)
+	require.NoError(t, err)
+	resp, _ := rt.RoundTrip(req)
+	if resp != nil && resp.Body != nil {
+		_ = resp.Body.Close()
+	}
+
+	tlsConfig, err := transport.TLSConfigFor(transportConfig)
+	require.NoError(t, err)
+
+	if tlsConfig != nil && tlsConfig.GetClientCertificate != nil {
+		cert, err := tlsConfig.GetClientCertificate(nil)
+		require.NoError(t, err)
+		require.Len(t, cert.Certificate, 1)
+
+		publicKey := pem.EncodeToMemory(&pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: cert.Certificate[0],
+		})
+		out.ClientCertificateData = string(publicKey)
+
+		privateKey, err := keyutil.MarshalPrivateKeyToPEM(cert.PrivateKey)
+		require.NoError(t, err)
+		out.ClientKeyData = string(privateKey)
+	}
+
+	if *out == (loginv1alpha1.ClusterCredential{}) {
+		t.Fatal("failed to get creds for config")
+	}
+
+	return out
+}
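`getCredForConfig` relies on Pinniped's internal `roundtripper.Func` adapter to sniff credentials off the wire. The underlying pattern is generic; here is a self-contained sketch of the same idea using only the standard library (names are illustrative):

import "net/http"

// roundTripperFunc adapts an ordinary function into an http.RoundTripper,
// the same adapter shape as Pinniped's internal roundtripper.Func.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

// captureAuthHeader wraps a base transport and records each outgoing
// Authorization header, similar in spirit to the token capture above.
func captureAuthHeader(base http.RoundTripper, out *string) http.RoundTripper {
	return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
		*out = req.Header.Get("Authorization")
		return base.RoundTrip(req)
	})
}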
@ -6,169 +6,143 @@ package integration
import (
	"context"
	"fmt"
-	"sort"
	"testing"
	"time"

-	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/diff"
-	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/utils/pointer"

	conciergev1alpha "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	"go.pinniped.dev/test/library"
)

-const (
-	kubeCertAgentLabelSelector = "kube-cert-agent.pinniped.dev=true"
-)

func TestKubeCertAgent(t *testing.T) {
	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
-	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
+
+	kubeClient := library.NewKubernetesClientset(t)
+	adminConciergeClient := library.NewConciergeClientset(t)
+
+	// Expect there to be at least one healthy kube-cert-agent pod on this cluster.
+	library.RequireEventuallyWithoutError(t, func() (bool, error) {
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		defer cancel()
+		agentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
+			LabelSelector: "kube-cert-agent.pinniped.dev=v2",
+		})
+		if err != nil {
+			return false, fmt.Errorf("failed to list pods: %w", err)
+		}
+		for _, p := range agentPods.Items {
+			t.Logf("found agent pod %s/%s in phase %s", p.Namespace, p.Name, p.Status.Phase)
+		}
+
+		for _, p := range agentPods.Items {
+			if p.Status.Phase == corev1.PodRunning {
+				return true, nil
+			}
+		}
+		return false, nil
+	}, 1*time.Minute, 2*time.Second, "never saw a healthy kube-cert-agent Pod running")
+
+	// Expect that the CredentialIssuer will have a healthy KubeClusterSigningCertificate strategy.
+	library.RequireEventuallyWithoutError(t, func() (bool, error) {
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		defer cancel()
+		credentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
+		if err != nil {
+			t.Logf("could not get the CredentialIssuer: %v", err)
+			return false, nil
+		}
+
+		// If there's no successful strategy yet, wait until there is.
+		strategy := findSuccessfulStrategy(credentialIssuer, conciergev1alpha.KubeClusterSigningCertificateStrategyType)
+		if strategy == nil {
+			t.Log("could not find a successful TokenCredentialRequestAPI strategy in the CredentialIssuer:")
+			for _, s := range credentialIssuer.Status.Strategies {
+				t.Logf(" strategy %s has status %s/%s: %s", s.Type, s.Status, s.Reason, s.Message)
+			}
+			return false, nil
+		}
+
+		// The successful strategy must have a frontend of type TokenCredentialRequestAPI.
+		if strategy.Frontend == nil {
+			return false, fmt.Errorf("strategy did not find a Frontend")
+		}
+		if strategy.Frontend.Type != conciergev1alpha.TokenCredentialRequestAPIFrontendType {
+			return false, fmt.Errorf("strategy had unexpected frontend type %q", strategy.Frontend.Type)
+		}
+		return true, nil
+	}, 3*time.Minute, 2*time.Second)
+}
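`library.RequireEventuallyWithoutError` is a Pinniped test-library helper; the same "eventually one Running agent pod" check can be expressed with apimachinery's `wait` package alone. A sketch under that assumption (the function name is illustrative):

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForRunningAgentPod polls until at least one v2 agent pod is Running.
func waitForRunningAgentPod(ctx context.Context, client kubernetes.Interface, namespace string) error {
	return wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
		pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
			LabelSelector: "kube-cert-agent.pinniped.dev=v2",
		})
		if err != nil {
			return false, nil // keep polling on transient API errors
		}
		for _, p := range pods.Items {
			if p.Status.Phase == corev1.PodRunning {
				return true, nil
			}
		}
		return false, nil
	})
}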
+
+func findSuccessfulStrategy(credentialIssuer *conciergev1alpha.CredentialIssuer, strategyType conciergev1alpha.StrategyType) *conciergev1alpha.CredentialIssuerStrategy {
+	for _, strategy := range credentialIssuer.Status.Strategies {
+		if strategy.Type != strategyType {
+			continue
+		}
+		if strategy.Status != conciergev1alpha.SuccessStrategyStatus {
+			continue
+		}
+		return &strategy
+	}
+	return nil
+}
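A caller checks for a healthy strategy of a given type like so (condensed from the test above into a standalone sketch):

// Sketch, assuming the same test package as findSuccessfulStrategy above.
func logSigningCertStrategyHealth(t *testing.T, credentialIssuer *conciergev1alpha.CredentialIssuer) {
	// nil means no successful strategy of that type has been reported yet.
	if s := findSuccessfulStrategy(credentialIssuer, conciergev1alpha.KubeClusterSigningCertificateStrategyType); s != nil {
		t.Logf("cluster signing cert strategy is healthy: %s", s.Message)
	}
}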
+
+func TestLegacyPodCleaner(t *testing.T) {
+	env := library.IntegrationEnv(t).WithCapability(library.ClusterSigningKeyIsAvailable)
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+	defer cancel()
	kubeClient := library.NewKubernetesClientset(t)

-	// Get the current number of kube-cert-agent pods.
-	//
-	// We can pretty safely assert there should be more than 1, since there should be a
-	// kube-cert-agent pod per kube-controller-manager pod, and there should probably be at least
-	// 1 kube-controller-manager for this to be a working kube API.
-	originalAgentPods, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
-		LabelSelector: kubeCertAgentLabelSelector,
-	})
-	require.NoError(t, err)
-	require.NotEmpty(t, originalAgentPods.Items)
-	sortPods(originalAgentPods)
-
-	for _, agentPod := range originalAgentPods.Items {
-		// All agent pods should contain all custom labels
+	// Pick the same labels that the legacy code would have used to run the kube-cert-agent pod.
+	legacyAgentLabels := map[string]string{}
	for k, v := range env.ConciergeCustomLabels {
-			require.Equalf(t, v, agentPod.Labels[k], "expected agent pod to have label `%s: %s`", k, v)
-		}
-		require.Equal(t, env.ConciergeAppName, agentPod.Labels["app"])
+		legacyAgentLabels[k] = v
	}
+	legacyAgentLabels["app"] = env.ConciergeAppName
+	legacyAgentLabels["kube-cert-agent.pinniped.dev"] = "true"
+	legacyAgentLabels["pinniped.dev/test"] = ""

-	agentPodsReconciled := func() bool {
-		var currentAgentPods *corev1.PodList
-		currentAgentPods, err = kubeClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{
-			LabelSelector: kubeCertAgentLabelSelector,
+	// Deploy a fake legacy agent pod using those labels.
+	pod, err := kubeClient.CoreV1().Pods(env.ConciergeNamespace).Create(ctx, &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "test-legacy-kube-cert-agent-",
+			Labels:       legacyAgentLabels,
+			Annotations:  map[string]string{"pinniped.dev/testName": t.Name()},
+		},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{{
+				Name:    "sleeper",
+				Image:   "debian:10.9-slim",
+				Command: []string{"/bin/sleep", "infinity"},
+			}},
+		},
+	}, metav1.CreateOptions{})
+	require.NoError(t, err, "failed to create fake legacy agent pod")
+	t.Logf("deployed fake legacy agent pod %s/%s with labels %s", pod.Namespace, pod.Name, labels.SelectorFromSet(legacyAgentLabels).String())
+
+	// No matter what happens, clean up the agent pod at the end of the test (normally it will already have been deleted).
+	t.Cleanup(func() {
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+		defer cancel()
+		err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: pointer.Int64Ptr(0)})
+		if !k8serrors.IsNotFound(err) {
+			require.NoError(t, err, "failed to clean up fake legacy agent pod")
+		}
	})

-		if err != nil {
-			return false
-		}
-
-		if len(originalAgentPods.Items) != len(currentAgentPods.Items) {
-			err = fmt.Errorf(
-				"original agent pod len != current agent pod len: %s",
-				diff.ObjectDiff(originalAgentPods.Items, currentAgentPods.Items),
-			)
-			return false
-		}
-
-		sortPods(currentAgentPods)
-		for i := range originalAgentPods.Items {
-			if !equality.Semantic.DeepEqual(
-				originalAgentPods.Items[i].Spec,
-				currentAgentPods.Items[i].Spec,
-			) {
-				err = fmt.Errorf(
-					"original agent pod != current agent pod: %s",
-					diff.ObjectDiff(originalAgentPods.Items[i].Spec, currentAgentPods.Items[i].Spec),
-				)
-				return false
-			}
-		}
-
-		return true
-	}
-
-	t.Run("reconcile on update", func(t *testing.T) {
-		// Ensure that the next test will start from a known state.
-		defer ensureKubeCertAgentSteadyState(t, agentPodsReconciled)
-
-		// Update the image of the first pod. The controller should see it, and flip it back.
-		//
-		// Note that we update the toleration field here because it is the only field, currently, that
-		// 1) we are allowed to update on a running pod AND 2) the kube-cert-agent controllers care
-		// about.
-		updatedAgentPod := originalAgentPods.Items[0].DeepCopy()
-		updatedAgentPod.Spec.Tolerations = append(
-			updatedAgentPod.Spec.Tolerations,
-			corev1.Toleration{Key: "fake-toleration"},
-		)
-		_, err = kubeClient.CoreV1().Pods(env.ConciergeNamespace).Update(ctx, updatedAgentPod, metav1.UpdateOptions{})
-		require.NoError(t, err)
-
-		// Make sure the original pods come back.
-		assert.Eventually(t, agentPodsReconciled, 10*time.Second, 250*time.Millisecond)
-		require.NoError(t, err)
-	})
-
-	t.Run("reconcile on delete", func(t *testing.T) {
-		// Ensure that the next test will start from a known state.
-		defer ensureKubeCertAgentSteadyState(t, agentPodsReconciled)
-
-		// Delete the first pod. The controller should see it, and flip it back.
-		err = kubeClient.
-			CoreV1().
-			Pods(env.ConciergeNamespace).
-			Delete(ctx, originalAgentPods.Items[0].Name, metav1.DeleteOptions{})
-		require.NoError(t, err)
-
-		// Make sure the original pods come back.
-		assert.Eventually(t, agentPodsReconciled, 10*time.Second, 250*time.Millisecond)
-		require.NoError(t, err)
-	})
-
-	// Because the above tests have purposefully put the kube cert issuer strategy into a broken
-	// state, wait for it to become healthy again before moving on to other integration tests,
-	// otherwise those tests would be polluted by this test and would have to wait for the
-	// strategy to become successful again.
+	// Expect the legacy-pod-cleaner controller to delete the pod.
	library.RequireEventuallyWithoutError(t, func() (bool, error) {
-		adminConciergeClient := library.NewConciergeClientset(t)
-		credentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
-		if err != nil || credentialIssuer.Status.Strategies == nil {
-			t.Log("Did not find any CredentialIssuer with any strategies")
-			return false, nil // didn't find it, but keep trying
+		_, err := kubeClient.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+		if k8serrors.IsNotFound(err) {
+			t.Logf("fake legacy agent pod %s/%s was deleted as expected", pod.Namespace, pod.Name)
+			return true, nil
		}
-		for _, strategy := range credentialIssuer.Status.Strategies {
-			// There will be other strategy types in the list, so ignore those.
-			if strategy.Type == conciergev1alpha.KubeClusterSigningCertificateStrategyType && strategy.Status == conciergev1alpha.SuccessStrategyStatus { //nolint:nestif
-				if strategy.Frontend == nil {
-					return false, fmt.Errorf("did not find a Frontend") // unexpected, fail the test
-				}
-				return true, nil // found it, continue the test!
-			}
-		}
-		t.Log("Did not find any successful KubeClusterSigningCertificate strategy on CredentialIssuer")
-		return false, nil // didn't find it, but keep trying
-	}, 3*time.Minute, 3*time.Second)
-}
-
-func ensureKubeCertAgentSteadyState(t *testing.T, agentPodsReconciled func() bool) {
-	t.Helper()
-
-	const wantSteadyStateSnapshots = 3
-	var steadyStateSnapshots int
-	require.NoError(t, wait.Poll(250*time.Millisecond, 30*time.Second, func() (bool, error) {
-		if agentPodsReconciled() {
-			steadyStateSnapshots++
-		} else {
-			steadyStateSnapshots = 0
-		}
-		return steadyStateSnapshots == wantSteadyStateSnapshots, nil
-	}))
-}
-
-func sortPods(pods *corev1.PodList) {
-	sort.Slice(pods.Items, func(i, j int) bool {
-		return pods.Items[i].Name < pods.Items[j].Name
-	})
-}
+		return false, err
+	}, 60*time.Second, 1*time.Second)
}
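The deleted `ensureKubeCertAgentSteadyState` helper is worth noting as a pattern: it requires a condition to hold for several consecutive polls before declaring the system settled, which avoids calling a transiently correct state "steady". A standalone sketch of the same pattern, using the same intervals and snapshot count as the removed code (testify's `require` and apimachinery's `wait` assumed imported):

// requireSteadyState polls until settled() has returned true three times in a row.
func requireSteadyState(t *testing.T, settled func() bool) {
	t.Helper()
	const wantConsecutive = 3
	consecutive := 0
	require.NoError(t, wait.Poll(250*time.Millisecond, 30*time.Second, func() (bool, error) {
		if settled() {
			consecutive++
		} else {
			consecutive = 0
		}
		return consecutive == wantConsecutive, nil
	}))
}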
@ -151,11 +151,6 @@ func TestE2EFullIntegration(t *testing.T) {
	kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml")
	require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))

-	// Wait 10 seconds for the JWTAuthenticator to become initialized.
-	// TODO: remove this sleep once we have fixed the initialization problem.
-	t.Log("sleeping 10s to wait for JWTAuthenticator to become initialized")
-	time.Sleep(10 * time.Second)
-
	// Run "kubectl get namespaces" which should trigger a browser login via the plugin.
	start := time.Now()
	kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
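This hunk simply deletes the workaround sleep. If some wait were still needed here, polling would be preferable to a fixed sleep; a fragment sketched as it might appear inline in the test, under the assumption of a readiness helper (`isJWTAuthenticatorReady` is hypothetical, not part of this commit):

// Sketch only: poll for readiness instead of sleeping a fixed 10s.
require.Eventually(t, func() bool {
	return isJWTAuthenticatorReady(ctx, t) // hypothetical helper
}, 30*time.Second, 1*time.Second, "JWTAuthenticator never became initialized")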