// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package integration

import (
	"bytes"
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/gorilla/websocket"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/http2"
	authenticationv1 "k8s.io/api/authentication/v1"
	authorizationv1 "k8s.io/api/authorization/v1"
	certificatesv1 "k8s.io/api/certificates/v1"
	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/request/bearertoken"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	"k8s.io/apiserver/pkg/authentication/user"
	k8sinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/transport"
	"k8s.io/client-go/util/cert"
	"k8s.io/client-go/util/certificate/csr"
	"k8s.io/client-go/util/keyutil"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/pointer"

	conciergev1alpha "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	identityv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/identity/v1alpha1"
	loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
	pinnipedconciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
	"go.pinniped.dev/internal/httputil/roundtripper"
	"go.pinniped.dev/internal/kubeclient"
	"go.pinniped.dev/internal/testutil"
	"go.pinniped.dev/test/testlib"
)

// syncBuffer wraps bytes.Buffer with a mutex so we don't have races in our test code.
type syncBuffer struct {
	buf bytes.Buffer
	mu  sync.Mutex
}
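
// String, Read, and Write delegate to the wrapped buffer while holding the mutex, so a single
// syncBuffer can safely be shared between the test goroutine and the commands writing output to it.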
func (sb *syncBuffer) String() string {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	return sb.buf.String()
}

func (sb *syncBuffer) Read(b []byte) (int, error) {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	return sb.buf.Read(b)
}

func (sb *syncBuffer) Write(b []byte) (int, error) {
	sb.mu.Lock()
	defer sb.mu.Unlock()
	return sb.buf.Write(b)
}

// Note that this test supports being run on all of our integration test cluster types:
// - TKGS acceptance (long-running) cluster: auto mode will choose disabled, supports LBs, does not have squid.
// - GKE acceptance (long-running) cluster: auto mode will choose enabled, supports LBs, does not have squid.
// - kind: auto mode will choose disabled, does not support LBs, has squid.
// - GKE ephemeral clusters: auto mode will choose enabled, supports LBs, has squid.
// - AKS ephemeral clusters: auto mode will choose enabled, supports LBs, has squid.
// - EKS ephemeral clusters: auto mode will choose enabled, supports LBs, has squid.
func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's complex.
	env := testlib.IntegrationEnv(t)

	impersonatorShouldHaveStartedAutomaticallyByDefault := !env.HasCapability(testlib.ClusterSigningKeyIsAvailable)
	clusterSupportsLoadBalancers := env.HasCapability(testlib.HasExternalLoadBalancerProvider)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()

	// Create a client using the admin kubeconfig.
	adminClient := testlib.NewKubernetesClientset(t)
	adminConciergeClient := testlib.NewConciergeClientset(t)

	// Create a WebhookAuthenticator and prepare a TokenCredentialRequestSpec using the authenticator for use later.
	credentialRequestSpecWithWorkingCredentials := loginv1alpha1.TokenCredentialRequestSpec{
		Token:         env.TestUser.Token,
		Authenticator: testlib.CreateTestWebhookAuthenticator(ctx, t),
	}

	// The address of the ClusterIP service that points at the impersonation proxy's port (used when there is no load balancer).
	proxyServiceEndpoint := fmt.Sprintf("%s-proxy.%s.svc.cluster.local", env.ConciergeAppName, env.ConciergeNamespace)
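
	// Cache the most recent TokenCredentialRequest response (guarded by the mutex, since sub-tests may run in
	// parallel) so that refreshCredentialHelper can reuse a still-valid client certificate instead of requesting
	// a new credential on every call.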
	var (
		mostRecentTokenCredentialRequestResponse     *loginv1alpha1.TokenCredentialRequest
		mostRecentTokenCredentialRequestResponseLock sync.Mutex
	)

	refreshCredentialHelper := func(t *testing.T, client pinnipedconciergeclientset.Interface) *loginv1alpha1.ClusterCredential {
		t.Helper()

		mostRecentTokenCredentialRequestResponseLock.Lock()
		defer mostRecentTokenCredentialRequestResponseLock.Unlock()

		if mostRecentTokenCredentialRequestResponse == nil || credentialAlmostExpired(t, mostRecentTokenCredentialRequestResponse) {
			// Make a TokenCredentialRequest. This can either return a cert signed by the Kube API server's CA (e.g. on kind)
			// or a cert signed by the impersonator's signing CA (e.g. on GKE). Either should be accepted by the impersonation
			// proxy server as a valid authentication.
			//
			// However, we issue short-lived certs, so this cert will only be valid for a few minutes.
			// Cache it until it is almost expired and then refresh it whenever it is close to expired.
			testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
				resp, err := createTokenCredentialRequest(credentialRequestSpecWithWorkingCredentials, client)
				requireEventually.NoError(err)
				requireEventually.NotNil(resp)
				requireEventually.NotNil(resp.Status)
				requireEventually.NotNil(resp.Status.Credential)
				requireEventually.Nilf(resp.Status.Message, "expected no error message but got: %s", testlib.Sdump(resp.Status.Message))
				requireEventually.NotEmpty(resp.Status.Credential.ClientCertificateData)
				requireEventually.NotEmpty(resp.Status.Credential.ClientKeyData)

				// At the moment the credential request should not have returned a token. In the future, if we make it return
				// tokens, we should revisit this test's rest config below.
				requireEventually.Empty(resp.Status.Credential.Token)

				mostRecentTokenCredentialRequestResponse = resp
			}, 5*time.Minute, 5*time.Second)
		}

		return mostRecentTokenCredentialRequestResponse.Status.Credential
	}

	refreshCredential := func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte) *loginv1alpha1.ClusterCredential {
		// Use an anonymous client which goes through the impersonation proxy to make the request because that's
		// what would normally happen when a user is using a kubeconfig where the server is the impersonation proxy,
		// so it more closely simulates the normal use case, and also because we want this to work on AKS clusters
		// which do not allow anonymous requests.
		client := newAnonymousImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge
		return refreshCredentialHelper(t, client)
	}

	oldCredentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
	require.NoError(t, err)

	// At the end of the test, clean up the CredentialIssuer.
	t.Cleanup(func() {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
		defer cancel()

		// Delete any version that was created by this test.
		t.Logf("cleaning up credentialissuer at end of test %s", credentialIssuerName(env))
		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			newCredentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
			if err != nil {
				return err
			}
			oldCredentialIssuer.Spec.DeepCopyInto(&newCredentialIssuer.Spec)
			_, err = adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Update(ctx, newCredentialIssuer, metav1.UpdateOptions{})
			return err
		})
		require.NoError(t, err)

		// If we are running on an environment that has a load balancer, expect that the
		// CredentialIssuer will be updated eventually with a successful impersonation proxy frontend.
		// We do this to ensure that future tests that use the impersonation proxy (e.g.,
		// TestE2EFullIntegration) will start with a known-good state.
		if clusterSupportsLoadBalancers {
			performImpersonatorDiscovery(ctx, t, env, adminClient, adminConciergeClient, refreshCredential)
		}
	})
	// Done with set-up and ready to get started with the test. There are several states that we could be in at
	// this point depending on the capabilities of the cluster under test. We handle each possible case here.
	switch {
	case impersonatorShouldHaveStartedAutomaticallyByDefault && clusterSupportsLoadBalancers:
		// Configure the CredentialIssuer spec to have the impersonation proxy in auto mode.
		updateCredentialIssuer(ctx, t, env, adminConciergeClient, conciergev1alpha.CredentialIssuerSpec{
			ImpersonationProxy: &conciergev1alpha.ImpersonationProxySpec{
				Mode: conciergev1alpha.ImpersonationProxyModeAuto,
				Service: conciergev1alpha.ImpersonationProxyServiceSpec{
					Type: conciergev1alpha.ImpersonationProxyServiceTypeLoadBalancer,
					Annotations: map[string]string{
						"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "4000",
					},
				},
			},
		})

		// Auto mode should have decided that the impersonator will run and should have started a load balancer,
		// and we will be able to use the load balancer to access the impersonator. (e.g. GKE, AKS, EKS)
		// Check that a load balancer has been automatically created by the impersonator's "auto" mode.
		testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
			return hasImpersonationProxyLoadBalancerService(ctx, env, adminClient)
		}, 30*time.Second, 500*time.Millisecond)

	case impersonatorShouldHaveStartedAutomaticallyByDefault && !clusterSupportsLoadBalancers:
		t.Fatal("None of the cluster types that we currently test against should automatically " +
			"enable the impersonation proxy without also supporting load balancers. If we add such a " +
			"cluster type in the future then we should enhance this test.")

	case !impersonatorShouldHaveStartedAutomaticallyByDefault && clusterSupportsLoadBalancers:
		// Auto mode should have decided that the impersonator will be disabled. We need to manually enable it.
		// The cluster supports load balancers so we should enable it and let the impersonator create a load balancer
		// automatically. (e.g. TKGS)
		// The CredentialIssuer's strategies array should have been updated to include an unsuccessful impersonation
		// strategy saying that it was automatically disabled.
		requireDisabledStrategy(ctx, t, env, adminConciergeClient)

		// Create configuration to make the impersonation proxy turn on with no endpoint (i.e. automatically create a load balancer).
		updateCredentialIssuer(ctx, t, env, adminConciergeClient, conciergev1alpha.CredentialIssuerSpec{
			ImpersonationProxy: &conciergev1alpha.ImpersonationProxySpec{
				Mode: conciergev1alpha.ImpersonationProxyModeEnabled,
			},
		})

	default:
		// Auto mode should have decided that the impersonator will be disabled. We need to manually enable it.
		// However, the cluster does not support load balancers so we should enable it without a load balancer
		// and use squid to make requests. (e.g. kind)
		if env.Proxy == "" {
			t.Skip("test cluster does not support load balancers but also doesn't have a squid proxy... " +
				"this is not a supported configuration for test clusters")
		}

		// Check that no load balancer has been created by the impersonator's "auto" mode.
		testlib.RequireNeverWithoutError(t, func() (bool, error) {
			return hasImpersonationProxyLoadBalancerService(ctx, env, adminClient)
		}, 10*time.Second, 500*time.Millisecond)

		// Check that we can't use the impersonation proxy to execute kubectl commands yet.
		_, err = impersonationProxyViaSquidKubeClientWithoutCredential(t, proxyServiceEndpoint).CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
		isErr, message := isServiceUnavailableViaSquidError(err, proxyServiceEndpoint)
		require.Truef(t, isErr, "wanted error %q to be service unavailable via squid error, but: %s", err, message)

		// Create configuration to make the impersonation proxy turn on with a hard coded endpoint (without a load balancer).
		updateCredentialIssuer(ctx, t, env, adminConciergeClient, conciergev1alpha.CredentialIssuerSpec{
			ImpersonationProxy: &conciergev1alpha.ImpersonationProxySpec{
				Mode:             conciergev1alpha.ImpersonationProxyModeEnabled,
				ExternalEndpoint: proxyServiceEndpoint,
			},
		})
	}

	// At this point the impersonator should be starting/running. When it is ready, the CredentialIssuer's
	// strategies array should be updated to include a successful impersonation strategy which can be used
	// to discover the impersonator's URL and CA certificate. Until it has finished starting, it may not be included
	// in the strategies array or it may be included in an error state. It can be in an error state for
	// a while when it is waiting for the load balancer to be assigned an ip/hostname.
	impersonationProxyURL, impersonationProxyCACertPEM := performImpersonatorDiscovery(ctx, t, env, adminClient, adminConciergeClient, refreshCredential)

	if !clusterSupportsLoadBalancers {
		// In this case, we specified the endpoint in the configmap, so check that it was reported correctly in the CredentialIssuer.
		require.Equal(t, "https://"+proxyServiceEndpoint, impersonationProxyURL)
	} else {
		// If the impersonationProxyURL is a hostname, make sure DNS will resolve before we move on.
		ensureDNSResolves(t, impersonationProxyURL)
	}

	// Because our credentials expire so quickly, we'll always use a new client, to give us a chance to refresh our
	// credentials before they expire. Create a closure to capture the arguments to newImpersonationProxyClient
	// so we don't have to keep repeating them.
	// This client performs TLS checks, so it also provides test coverage that the impersonation proxy server is generating TLS certs correctly.
	impersonationProxyKubeClient := func(t *testing.T) kubernetes.Interface {
		return newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM, nil, refreshCredential).Kubernetes
	}

	t.Run("positive tests", func(t *testing.T) {
		// Create an RBAC rule to allow this user to read/write everything.
		testlib.CreateTestClusterRoleBinding(t,
			rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: env.TestUser.ExpectedUsername},
			rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "edit"},
		)
		// Wait for the above RBAC rule to take effect.
		testlib.WaitForUserToHaveAccess(t, env.TestUser.ExpectedUsername, []string{}, &authorizationv1.ResourceAttributes{
			Verb: "get", Group: "", Version: "v1", Resource: "namespaces",
		})

		// Get the pods in the concierge namespace and pick one. This is for tests that require performing actions
		// against a running pod. We use a concierge pod because we already have it handy.
		// We want to make sure it's a concierge pod (not the cert agent), because we need to be able to port-forward to a running port.
		pods, err := adminClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{})
		require.NoError(t, err)
		require.Greater(t, len(pods.Items), 0)
		var conciergePod *corev1.Pod
		for _, pod := range pods.Items {
			pod := pod
			if !strings.Contains(pod.Name, "kube-cert-agent") {
				conciergePod = &pod
			}
		}
		require.NotNil(t, conciergePod, "could not find a concierge pod")

		// Test that the user can perform basic actions through the client with their username and group membership
		// influencing RBAC checks correctly.
		t.Run(
			"access as user",
			testlib.AccessAsUserTest(ctx, env.TestUser.ExpectedUsername, impersonationProxyKubeClient(t)),
		)
		for _, group := range env.TestUser.ExpectedGroups {
			group := group
			t.Run(
				"access as group "+group,
				testlib.AccessAsGroupTest(ctx, group, impersonationProxyKubeClient(t)),
			)
		}

		if env.KubernetesDistribution == testlib.EKSDistro {
			t.Log("eks: sleeping for 10 minutes to allow DNS propagation")
			time.Sleep(10 * time.Minute)
		}

		t.Run("kubectl port-forward and keeping the connection open for over a minute (non-idle)", func(t *testing.T) {
			parallelIfNotEKS(t)
			kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)

			// Run the kubectl port-forward command.
			timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
			defer cancelFunc()
			portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "10443:8443")
			portForwardCmd.Env = envVarsWithProxy

			// Start, but don't wait for the command to finish.
			err := portForwardCmd.Start()
			require.NoError(t, err, `"kubectl port-forward" failed`)
			go func() {
				assert.EqualErrorf(t, portForwardCmd.Wait(), "signal: killed", `wanted "kubectl port-forward" to get signaled because context was cancelled (stderr: %q)`, portForwardStderr.String())
			}()

			// The server should recognize that this is going to be a long-running command and keep the connection
			// open as long as the client stays connected.

			// Curl the endpoint as many times as we can within 70 seconds.
			// This will ensure that we don't run into idle timeouts.
			var curlStdOut, curlStdErr bytes.Buffer
			timeout, cancelFunc = context.WithTimeout(ctx, 75*time.Second)
			defer cancelFunc()
			startTime := time.Now()
			for time.Now().Before(startTime.Add(70 * time.Second)) {
				curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443") // -sS turns off the progress bar but still prints errors
				curlCmd.Stdout = &curlStdOut
				curlCmd.Stderr = &curlStdErr
				curlErr := curlCmd.Run()
				if curlErr != nil {
					t.Log("curl error: " + curlErr.Error())
					t.Log("curlStdErr: " + curlStdErr.String())
					t.Log("stdout: " + curlStdOut.String())
				}
				t.Log("Running curl through the kubectl port-forward port for 70 seconds. Elapsed time:", time.Since(startTime))
				time.Sleep(1 * time.Second)
			}

			// Curl the endpoint once more, after the 70 seconds have elapsed, to make sure the connection is still open.
			timeout, cancelFunc = context.WithTimeout(ctx, 30*time.Second)
			defer cancelFunc()
			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443") // -sS turns off the progress bar but still prints errors
			curlCmd.Stdout = &curlStdOut
			curlCmd.Stderr = &curlStdErr
			curlErr := curlCmd.Run()
			if curlErr != nil {
				t.Log("curl error: " + curlErr.Error())
				t.Log("curlStdErr: " + curlStdErr.String())
				t.Log("stdout: " + curlStdOut.String())
			}
			// We expect this to 403, but all we care about is that the request gets through.
			require.NoError(t, curlErr)
			require.Contains(t, curlStdOut.String(), `"forbidden: User \"system:anonymous\" cannot get path \"/\""`)
		})
		t.Run("kubectl port-forward and keeping the connection open for over a minute (idle)", func(t *testing.T) {
			parallelIfNotEKS(t)
			kubeconfigPath, envVarsWithProxy, _ := getImpersonationKubeconfig(t, env, impersonationProxyURL, impersonationProxyCACertPEM, credentialRequestSpecWithWorkingCredentials.Authenticator)

			// Run the kubectl port-forward command.
			timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
			defer cancelFunc()
			portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "10444:8443")
			portForwardCmd.Env = envVarsWithProxy

			// Start, but don't wait for the command to finish.
			err := portForwardCmd.Start()
			require.NoError(t, err, `"kubectl port-forward" failed`)
			go func() {
				assert.EqualErrorf(t, portForwardCmd.Wait(), "signal: killed", `wanted "kubectl port-forward" to get signaled because context was cancelled (stderr: %q)`, portForwardStderr.String())
			}()

			// Wait to see if we time out. The default timeout is 60 seconds, but the server should recognize that this
			// is going to be a long-running command and keep the connection open as long as the client stays connected.
			time.Sleep(70 * time.Second)

			timeout, cancelFunc = context.WithTimeout(ctx, 2*time.Minute)
			defer cancelFunc()
			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10444") // -sS turns off the progress bar but still prints errors
			var curlStdOut, curlStdErr bytes.Buffer
			curlCmd.Stdout = &curlStdOut
			curlCmd.Stderr = &curlStdErr
			err = curlCmd.Run()
			if err != nil {
				t.Log("curl error: " + err.Error())
				t.Log("curlStdErr: " + curlStdErr.String())
				t.Log("stdout: " + curlStdOut.String())
			}
			// We expect this to 403, but all we care about is that the request gets through.
			require.NoError(t, err)
			require.Contains(t, curlStdOut.String(), `"forbidden: User \"system:anonymous\" cannot get path \"/\""`)
		})

		t.Run("using and watching all the basic verbs", func(t *testing.T) {
			parallelIfNotEKS(t)

			// Create a namespace, because it will be easier to exercise "deletecollection" if we have a namespace.
			namespaceName := createTestNamespace(t, adminClient)

			// Create and start informer to exercise the "watch" verb for us.
			informerFactory := k8sinformers.NewSharedInformerFactoryWithOptions(
				impersonationProxyKubeClient(t),
				0,
				k8sinformers.WithNamespace(namespaceName))
			informer := informerFactory.Core().V1().ConfigMaps()
			informer.Informer() // makes sure that the informer will cache
			stopChannel := make(chan struct{})
			informerFactory.Start(stopChannel)
			t.Cleanup(func() {
				// Shut down the informer.
				close(stopChannel)
			})
			informerFactory.WaitForCacheSync(ctx.Done())

			// Use labels on our created ConfigMaps to avoid accidentally listing other ConfigMaps that might
			// exist in the namespace. In Kube 1.20+ there is a default ConfigMap in every namespace.
			configMapLabels := labels.Set{
				"pinniped.dev/testConfigMap": testlib.RandHex(t, 8),
			}

			// Test "create" verb through the impersonation proxy.
			_, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Create(ctx,
				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-1", Labels: configMapLabels}},
				metav1.CreateOptions{},
			)
			require.NoError(t, err)
			_, err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Create(ctx,
				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-2", Labels: configMapLabels}},
				metav1.CreateOptions{},
			)
			require.NoError(t, err)
			_, err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Create(ctx,
				&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "configmap-3", Labels: configMapLabels}},
				metav1.CreateOptions{},
			)
			require.NoError(t, err)

			// Make sure that all of the created ConfigMaps show up in the informer's cache to
			// demonstrate that the informer's "watch" verb is working through the impersonation proxy.
			testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
				_, err := informer.Lister().ConfigMaps(namespaceName).Get("configmap-1")
				requireEventually.NoError(err)
				_, err = informer.Lister().ConfigMaps(namespaceName).Get("configmap-2")
				requireEventually.NoError(err)
				_, err = informer.Lister().ConfigMaps(namespaceName).Get("configmap-3")
				requireEventually.NoError(err)
			}, 10*time.Second, 50*time.Millisecond)

			// Test "get" verb through the impersonation proxy.
			configMap3, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Get(ctx, "configmap-3", metav1.GetOptions{})
			require.NoError(t, err)

			// Test "list" verb through the impersonation proxy.
			listResult, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).List(ctx, metav1.ListOptions{
				LabelSelector: configMapLabels.String(),
			})
			require.NoError(t, err)
			require.Len(t, listResult.Items, 3)

			// Test "update" verb through the impersonation proxy.
			configMap3.Data = map[string]string{"foo": "bar"}
			updateResult, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Update(ctx, configMap3, metav1.UpdateOptions{})
			require.NoError(t, err)
			require.Equal(t, "bar", updateResult.Data["foo"])

			// Make sure that the updated ConfigMap shows up in the informer's cache.
			testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
				configMap, err := informer.Lister().ConfigMaps(namespaceName).Get("configmap-3")
				requireEventually.NoError(err)
				requireEventually.Equal("bar", configMap.Data["foo"])
			}, 10*time.Second, 50*time.Millisecond)

			// Test "patch" verb through the impersonation proxy.
			patchResult, err := impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Patch(ctx,
				"configmap-3",
				types.MergePatchType,
				[]byte(`{"data":{"baz":"42"}}`),
				metav1.PatchOptions{},
			)
			require.NoError(t, err)
			require.Equal(t, "bar", patchResult.Data["foo"])
			require.Equal(t, "42", patchResult.Data["baz"])

			// Make sure that the patched ConfigMap shows up in the informer's cache.
			testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
				configMap, err := informer.Lister().ConfigMaps(namespaceName).Get("configmap-3")
				requireEventually.NoError(err)
				requireEventually.Equal("bar", configMap.Data["foo"])
				requireEventually.Equal("42", configMap.Data["baz"])
			}, 10*time.Second, 50*time.Millisecond)

			// Test "delete" verb through the impersonation proxy.
			err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).Delete(ctx, "configmap-3", metav1.DeleteOptions{})
			require.NoError(t, err)

			// Make sure that the deletion of the ConfigMap shows up in the informer's cache.
			testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
				_, err := informer.Lister().ConfigMaps(namespaceName).Get("configmap-3")
				requireEventually.Truef(k8serrors.IsNotFound(err), "expected a NotFound error from get, got %v", err)
				list, err := informer.Lister().ConfigMaps(namespaceName).List(configMapLabels.AsSelector())
				requireEventually.NoError(err)
				requireEventually.Len(list, 2)
			}, 10*time.Second, 50*time.Millisecond)

			// Test "deletecollection" verb through the impersonation proxy.
			err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
			require.NoError(t, err)

			// Make sure that the deletion of all the ConfigMaps shows up in the informer's cache.
			testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
				list, err := informer.Lister().ConfigMaps(namespaceName).List(configMapLabels.AsSelector())
				requireEventually.NoError(err)
				requireEventually.Empty(list)
			}, 10*time.Second, 50*time.Millisecond)

			// There should be no ConfigMaps left.
			listResult, err = impersonationProxyKubeClient(t).CoreV1().ConfigMaps(namespaceName).List(ctx, metav1.ListOptions{
				LabelSelector: configMapLabels.String(),
			})
			require.NoError(t, err)
			require.Len(t, listResult.Items, 0)
		})

		t.Run("nested impersonation as a regular user is allowed if they have enough RBAC permissions", func(t *testing.T) {
			parallelIfNotEKS(t)

			// Make a client which will send requests through the impersonation proxy and will also add
			// impersonate headers to the request.
			nestedImpersonationClient := newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{UserName: "other-user-to-impersonate"}, refreshCredential)

			// Check that we can get some resource through the impersonation proxy without any impersonation headers on the request.
			// We could use any resource for this, but we happen to know that this one should exist.
			_, err := impersonationProxyKubeClient(t).CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
			require.NoError(t, err)

			// Now we'll see what happens when we add an impersonation header to the request. This should generate a
			// request similar to the one above, except that it will also have an impersonation header.
			_, err = nestedImpersonationClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
			// this user is not allowed to impersonate other users
			require.True(t, k8serrors.IsForbidden(err), err)
			require.EqualError(t, err, fmt.Sprintf(
				`users "other-user-to-impersonate" is forbidden: `+
					`User "%s" cannot impersonate resource "users" in API group "" at the cluster scope: `+
					`decision made by impersonation-proxy.concierge.pinniped.dev`,
				env.TestUser.ExpectedUsername))

			// impersonate the GC service account instead which can read anything (the binding to edit allows this)
			nestedImpersonationClientAsSA := newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{UserName: "system:serviceaccount:kube-system:generic-garbage-collector"}, refreshCredential)
			_, err = nestedImpersonationClientAsSA.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
			require.NoError(t, err)

			expectedGroups := make([]string, 0, len(env.TestUser.ExpectedGroups)+1) // make sure we do not mutate env.TestUser.ExpectedGroups
			expectedGroups = append(expectedGroups, env.TestUser.ExpectedGroups...)
			expectedGroups = append(expectedGroups, "system:authenticated")
			expectedOriginalUserInfo := authenticationv1.UserInfo{
				Username: env.TestUser.ExpectedUsername,
				Groups:   expectedGroups,
			}
			expectedOriginalUserInfoJSON, err := json.Marshal(expectedOriginalUserInfo)
			require.NoError(t, err)

			// check that we impersonated the correct user and that the original user is retained in the extra
			whoAmI, err := nestedImpersonationClientAsSA.PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			require.NoError(t, err)
			require.Equal(t,
				expectedWhoAmIRequestResponse(
					"system:serviceaccount:kube-system:generic-garbage-collector",
					[]string{"system:serviceaccounts", "system:serviceaccounts:kube-system", "system:authenticated"},
					map[string]identityv1alpha1.ExtraValue{
						"original-user-info.impersonation-proxy.concierge.pinniped.dev": {string(expectedOriginalUserInfoJSON)},
					},
				),
				whoAmI,
			)

			_, err = newImpersonationProxyClient(t, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{
					UserName: "system:serviceaccount:kube-system:generic-garbage-collector",
					Extra: map[string][]string{
						"some-fancy-key": {"with a dangerous value"},
					},
				},
				refreshCredential).PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			// this user should not be able to impersonate extra
			require.True(t, k8serrors.IsForbidden(err), err)
			require.EqualError(t, err, fmt.Sprintf(
				`userextras.authentication.k8s.io "with a dangerous value" is forbidden: `+
					`User "%s" cannot impersonate resource "userextras/some-fancy-key" in API group "authentication.k8s.io" at the cluster scope: `+
					`decision made by impersonation-proxy.concierge.pinniped.dev`,
				env.TestUser.ExpectedUsername))
		})

		t.Run("nested impersonation as a cluster admin user is allowed", func(t *testing.T) {
			parallelIfNotEKS(t)

			// Copy the admin credentials from the admin kubeconfig.
			adminClientRestConfig := testlib.NewClientConfig(t)
			clusterAdminCredentials := getCredForConfig(t, adminClientRestConfig)

			// figure out who the admin user is
			whoAmIAdmin, err := newImpersonationProxyClientWithCredentials(t,
				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil).
				PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			require.NoError(t, err)

			// The WhoAmI API is lossy:
			// - It drops UID
			// - It lowercases all extra keys
			// The admin user on EKS has both a UID set and an extra key with uppercase characters,
			// so we fall back to the CSR API to grab the UID and Extra to handle this scenario.
			uid, extra := getUIDAndExtraViaCSR(ctx, t, whoAmIAdmin.Status.KubernetesUserInfo.User.UID,
				newImpersonationProxyClientWithCredentials(t,
					clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil).
					Kubernetes,
			)

			expectedExtra := make(map[string]authenticationv1.ExtraValue, len(extra))
			for k, v := range extra {
				expectedExtra[k] = authenticationv1.ExtraValue(v)
			}
			expectedOriginalUserInfo := authenticationv1.UserInfo{
				Username: whoAmIAdmin.Status.KubernetesUserInfo.User.Username,
				UID:      uid,
				Groups:   whoAmIAdmin.Status.KubernetesUserInfo.User.Groups,
				Extra:    expectedExtra,
			}
			expectedOriginalUserInfoJSON, err := json.Marshal(expectedOriginalUserInfo)
			require.NoError(t, err)

			// Make a client using the admin credentials which will send requests through the impersonation proxy
			// and will also add impersonate headers to the request.
			nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{
					UserName: "other-user-to-impersonate",
					Groups:   []string{"other-group-1", "other-group-2"},
					Extra: map[string][]string{
						"this-key": {"to this value"},
					},
				},
			)
			_, err = nestedImpersonationClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
			// the impersonated user lacks the RBAC to perform this call
			require.True(t, k8serrors.IsForbidden(err), err)
			require.EqualError(t, err, fmt.Sprintf(
				`secrets "%s" is forbidden: User "other-user-to-impersonate" cannot get resource "secrets" in API group "" in the namespace "%s": `+
					`decision made by impersonation-proxy.concierge.pinniped.dev`,
				impersonationProxyTLSSecretName(env), env.ConciergeNamespace,
			))

			// check that we impersonated the correct user and that the original user is retained in the extra
			whoAmI, err := nestedImpersonationClient.PinnipedConcierge.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			require.NoError(t, err)
			require.Equal(t,
				expectedWhoAmIRequestResponse(
					"other-user-to-impersonate",
					[]string{"other-group-1", "other-group-2", "system:authenticated"},
					map[string]identityv1alpha1.ExtraValue{
						"this-key": {"to this value"},
						"original-user-info.impersonation-proxy.concierge.pinniped.dev": {string(expectedOriginalUserInfoJSON)},
					},
				),
				whoAmI,
			)
		})
		t.Run("nested impersonation as a cluster admin fails on reserved key", func(t *testing.T) {
			parallelIfNotEKS(t)

			adminClientRestConfig := testlib.NewClientConfig(t)
			clusterAdminCredentials := getCredForConfig(t, adminClientRestConfig)

			nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{
					UserName: "other-user-to-impersonate",
					Groups:   []string{"other-group-1", "other-group-2", "system:masters"}, // impersonate system:masters so we get past authorization checks
					Extra: map[string][]string{
						"this-good-key": {"to this good value"},
						"something.impersonation-proxy.concierge.pinniped.dev": {"super sneaky value"},
					},
				},
			)
			_, err := nestedImpersonationClient.Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
			require.EqualError(t, err, "Internal error occurred: unimplemented functionality - unable to act as current user")
			require.True(t, k8serrors.IsInternalError(err), err)
			require.Equal(t, &k8serrors.StatusError{
				ErrStatus: metav1.Status{
					Status: metav1.StatusFailure,
					Code:   http.StatusInternalServerError,
					Reason: metav1.StatusReasonInternalError,
					Details: &metav1.StatusDetails{
						Causes: []metav1.StatusCause{
							{
								Message: "unimplemented functionality - unable to act as current user",
							},
						},
					},
					Message: "Internal error occurred: unimplemented functionality - unable to act as current user",
				},
			}, err)
		})

		t.Run("nested impersonation as a cluster admin fails if UID impersonation is attempted", func(t *testing.T) {
			parallelIfNotEKS(t)

			adminClientRestConfig := testlib.NewClientConfig(t)
			clusterAdminCredentials := getCredForConfig(t, adminClientRestConfig)

			nestedImpersonationUIDOnly := newImpersonationProxyConfigWithCredentials(t,
				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil,
			)
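			// Set the Impersonate-Uid header directly on each request (the unusual capitalization also exercises
			// case-insensitive header handling) so that we can attempt UID impersonation, which the impersonation
			// proxy is expected to reject.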
			nestedImpersonationUIDOnly.Wrap(func(rt http.RoundTripper) http.RoundTripper {
				return roundtripper.Func(func(r *http.Request) (*http.Response, error) {
					r.Header.Set("iMperSONATE-uid", "some-awesome-uid")
					return rt.RoundTrip(r)
				})
			})
			_, errUID := testlib.NewKubeclient(t, nestedImpersonationUIDOnly).Kubernetes.CoreV1().Secrets("foo").Get(ctx, "bar", metav1.GetOptions{})
			msg := `Internal Server Error: "/api/v1/namespaces/foo/secrets/bar": requested [{UID some-awesome-uid authentication.k8s.io/v1 }] without impersonating a user`
			full := fmt.Sprintf(`an error on the server (%q) has prevented the request from succeeding (get secrets bar)`, msg)
			require.EqualError(t, errUID, full)
			require.True(t, k8serrors.IsInternalError(errUID), errUID)
			require.Equal(t, &k8serrors.StatusError{
				ErrStatus: metav1.Status{
					Status: metav1.StatusFailure,
					Code:   http.StatusInternalServerError,
					Reason: metav1.StatusReasonInternalError,
					Details: &metav1.StatusDetails{
						Name: "bar",
						Kind: "secrets",
						Causes: []metav1.StatusCause{
							{
								Type:    metav1.CauseTypeUnexpectedServerResponse,
								Message: msg,
							},
						},
					},
					Message: full,
				},
			}, errUID)

			nestedImpersonationUID := newImpersonationProxyConfigWithCredentials(t,
				clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{
					UserName: "other-user-to-impersonate",
					Groups:   []string{"system:masters"}, // impersonate system:masters so we get past authorization checks
				},
			)
			nestedImpersonationUID.Wrap(func(rt http.RoundTripper) http.RoundTripper {
				return roundtripper.Func(func(r *http.Request) (*http.Response, error) {
					r.Header.Set("imperSONate-uiD", "some-fancy-uid")
					return rt.RoundTrip(r)
				})
			})
			_, err := testlib.NewKubeclient(t, nestedImpersonationUID).Kubernetes.CoreV1().Secrets(env.ConciergeNamespace).Get(ctx, impersonationProxyTLSSecretName(env), metav1.GetOptions{})
			require.EqualError(t, err, "Internal error occurred: unimplemented functionality - unable to act as current user")
			require.True(t, k8serrors.IsInternalError(err), err)
			require.Equal(t, &k8serrors.StatusError{
				ErrStatus: metav1.Status{
					Status: metav1.StatusFailure,
					Code:   http.StatusInternalServerError,
					Reason: metav1.StatusReasonInternalError,
					Details: &metav1.StatusDetails{
						Causes: []metav1.StatusCause{
							{
								Message: "unimplemented functionality - unable to act as current user",
							},
						},
					},
					Message: "Internal error occurred: unimplemented functionality - unable to act as current user",
				},
			}, err)
		})

		// this works because impersonation cannot set UID and thus the final user info the proxy sees has no UID
		t.Run("nested impersonation as a service account is allowed if it has enough RBAC permissions", func(t *testing.T) {
			parallelIfNotEKS(t)

			namespaceName := createTestNamespace(t, adminClient)
			saName, saToken, saUID := createServiceAccountToken(ctx, t, adminClient, namespaceName)
			nestedImpersonationClient := newImpersonationProxyClientWithCredentials(t,
				&loginv1alpha1.ClusterCredential{Token: saToken}, impersonationProxyURL, impersonationProxyCACertPEM,
				&rest.ImpersonationConfig{UserName: "system:serviceaccount:kube-system:root-ca-cert-publisher"}).PinnipedConcierge
			_, err := nestedImpersonationClient.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			// this SA is not yet allowed to impersonate SAs
			require.True(t, k8serrors.IsForbidden(err), err)
			require.EqualError(t, err, fmt.Sprintf(
				`serviceaccounts "root-ca-cert-publisher" is forbidden: `+
					`User "%s" cannot impersonate resource "serviceaccounts" in API group "" in the namespace "kube-system": `+
					`decision made by impersonation-proxy.concierge.pinniped.dev`,
				serviceaccount.MakeUsername(namespaceName, saName)))

			// webhook authorizer deny cache TTL is 10 seconds so we need to wait long enough for it to drain
			time.Sleep(15 * time.Second)

			// allow the test SA to impersonate any SA
			testlib.CreateTestClusterRoleBinding(t,
				rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: saName, Namespace: namespaceName},
				rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "edit"},
			)
			testlib.WaitForUserToHaveAccess(t, serviceaccount.MakeUsername(namespaceName, saName), []string{}, &authorizationv1.ResourceAttributes{
				Verb: "impersonate", Group: "", Version: "v1", Resource: "serviceaccounts",
			})

			whoAmI, err := nestedImpersonationClient.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			require.NoError(t, err)
			require.Equal(t,
				expectedWhoAmIRequestResponse(
					"system:serviceaccount:kube-system:root-ca-cert-publisher",
					[]string{"system:serviceaccounts", "system:serviceaccounts:kube-system", "system:authenticated"},
					map[string]identityv1alpha1.ExtraValue{
						"original-user-info.impersonation-proxy.concierge.pinniped.dev": {
							fmt.Sprintf(`{"username":"%s","uid":"%s","groups":["system:serviceaccounts","system:serviceaccounts:%s","system:authenticated"]}`,
								serviceaccount.MakeUsername(namespaceName, saName), saUID, namespaceName),
						},
					},
				),
				whoAmI,
			)
		})

		t.Run("WhoAmIRequests and different kinds of authentication through the impersonation proxy", func(t *testing.T) {
			parallelIfNotEKS(t)

			// Test using the TokenCredentialRequest for authentication.
			impersonationProxyPinnipedConciergeClient := newImpersonationProxyClient(t,
				impersonationProxyURL, impersonationProxyCACertPEM, nil, refreshCredential,
			).PinnipedConcierge
			whoAmI, err := impersonationProxyPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			require.NoError(t, err)
			expectedGroups := make([]string, 0, len(env.TestUser.ExpectedGroups)+1) // make sure we do not mutate env.TestUser.ExpectedGroups
			expectedGroups = append(expectedGroups, env.TestUser.ExpectedGroups...)
			expectedGroups = append(expectedGroups, "system:authenticated")
			require.Equal(t,
				expectedWhoAmIRequestResponse(
					env.TestUser.ExpectedUsername,
					expectedGroups,
					nil,
				),
				whoAmI,
			)

			// Test an unauthenticated request which does not include any credentials.
			impersonationProxyAnonymousPinnipedConciergeClient := newAnonymousImpersonationProxyClient(
				t, impersonationProxyURL, impersonationProxyCACertPEM, nil,
			).PinnipedConcierge
			whoAmI, err = impersonationProxyAnonymousPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})

			// we expect the impersonation proxy to match the behavior of KAS in regards to anonymous requests
			if env.HasCapability(testlib.AnonymousAuthenticationSupported) {
				require.NoError(t, err)
				require.Equal(t,
					expectedWhoAmIRequestResponse(
						"system:anonymous",
						[]string{"system:unauthenticated"},
						nil,
					),
					whoAmI,
				)
			} else {
				require.True(t, k8serrors.IsUnauthorized(err), testlib.Sdump(err))
			}

			// Test using a service account token.
			namespaceName := createTestNamespace(t, adminClient)
			saName, saToken, _ := createServiceAccountToken(ctx, t, adminClient, namespaceName)
			impersonationProxyServiceAccountPinnipedConciergeClient := newImpersonationProxyClientWithCredentials(t,
				&loginv1alpha1.ClusterCredential{Token: saToken},
				impersonationProxyURL, impersonationProxyCACertPEM, nil).PinnipedConcierge
			whoAmI, err = impersonationProxyServiceAccountPinnipedConciergeClient.IdentityV1alpha1().WhoAmIRequests().
				Create(ctx, &identityv1alpha1.WhoAmIRequest{}, metav1.CreateOptions{})
			require.NoError(t, err)
			require.Equal(t,
				expectedWhoAmIRequestResponse(
					serviceaccount.MakeUsername(namespaceName, saName),
					[]string{"system:serviceaccounts", "system:serviceaccounts:" + namespaceName, "system:authenticated"},
					nil,
				),
				whoAmI,
			)
		})
t . Run ( "WhoAmIRequests and SA token request" , func ( t * testing . T ) {
namespaceName := createTestNamespace ( t , adminClient )
kubeClient := adminClient . CoreV1 ( )
saName , _ , saUID := createServiceAccountToken ( ctx , t , adminClient , namespaceName )
_ , tokenRequestProbeErr := kubeClient . ServiceAccounts ( namespaceName ) . CreateToken ( ctx , saName , & authenticationv1 . TokenRequest { } , metav1 . CreateOptions { } )
if k8serrors . IsNotFound ( tokenRequestProbeErr ) && tokenRequestProbeErr . Error ( ) == "the server could not find the requested resource" {
return // stop test early since the token request API is not enabled on this cluster - other errors are caught below
}
pod := testlib . CreatePod ( ctx , t , "impersonation-proxy" , namespaceName ,
corev1 . PodSpec {
Containers : [ ] corev1 . Container {
{
Name : "ignored-but-required" ,
Image : "busybox" ,
Command : [ ] string { "sh" , "-c" , "sleep 3600" } ,
} ,
} ,
ServiceAccountName : saName ,
} )
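// Create a SA token bound to the test pod but with a deliberately wrong audience, so the
// impersonation proxy (and the API server behind it) should reject it as unauthorized.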
tokenRequestBadAudience , err := kubeClient . ServiceAccounts ( namespaceName ) . CreateToken ( ctx , saName , & authenticationv1 . TokenRequest {
Spec : authenticationv1 . TokenRequestSpec {
Audiences : [ ] string { "should-fail-because-wrong-audience" } , // anything that is not an API server audience
BoundObjectRef : & authenticationv1 . BoundObjectReference {
Kind : "Pod" ,
APIVersion : "" ,
Name : pod . Name ,
UID : pod . UID ,
} ,
} ,
} , metav1 . CreateOptions { } )
require . NoError ( t , err )
impersonationProxySABadAudPinnipedConciergeClient := newImpersonationProxyClientWithCredentials ( t ,
& loginv1alpha1 . ClusterCredential { Token : tokenRequestBadAudience . Status . Token } ,
impersonationProxyURL , impersonationProxyCACertPEM , nil ) . PinnipedConcierge
_ , badAudErr := impersonationProxySABadAudPinnipedConciergeClient . IdentityV1alpha1 ( ) . WhoAmIRequests ( ) .
Create ( ctx , & identityv1alpha1 . WhoAmIRequest { } , metav1 . CreateOptions { } )
require . True ( t , k8serrors . IsUnauthorized ( badAudErr ) , testlib . Sdump ( badAudErr ) )
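// Now create a SA token with an empty audience list, which should default to the API server's
// audiences, so this token should authenticate successfully through the impersonation proxy.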
tokenRequest , err := kubeClient . ServiceAccounts ( namespaceName ) . CreateToken ( ctx , saName , & authenticationv1 . TokenRequest {
Spec : authenticationv1 . TokenRequestSpec {
Audiences : [ ] string { } ,
BoundObjectRef : & authenticationv1 . BoundObjectReference {
Kind : "Pod" ,
APIVersion : "" ,
Name : pod . Name ,
UID : pod . UID ,
} ,
} ,
} , metav1 . CreateOptions { } )
require . NoError ( t , err )
impersonationProxySAClient := newImpersonationProxyClientWithCredentials ( t ,
& loginv1alpha1 . ClusterCredential { Token : tokenRequest . Status . Token } ,
impersonationProxyURL , impersonationProxyCACertPEM , nil )
whoAmITokenReq , err := impersonationProxySAClient . PinnipedConcierge . IdentityV1alpha1 ( ) . WhoAmIRequests ( ) .
Create ( ctx , & identityv1alpha1 . WhoAmIRequest { } , metav1 . CreateOptions { } )
require . NoError ( t , err )
// new service account tokens include the pod info in the extra fields
require . Equal ( t ,
expectedWhoAmIRequestResponse (
serviceaccount . MakeUsername ( namespaceName , saName ) ,
[ ] string { "system:serviceaccounts" , "system:serviceaccounts:" + namespaceName , "system:authenticated" } ,
map [ string ] identityv1alpha1 . ExtraValue {
"authentication.kubernetes.io/pod-name" : { pod . Name } ,
"authentication.kubernetes.io/pod-uid" : { string ( pod . UID ) } ,
} ,
) ,
whoAmITokenReq ,
)
// allow the test SA to create CSRs
testlib . CreateTestClusterRoleBinding ( t ,
rbacv1 . Subject { Kind : rbacv1 . ServiceAccountKind , Name : saName , Namespace : namespaceName } ,
rbacv1 . RoleRef { Kind : "ClusterRole" , APIGroup : rbacv1 . GroupName , Name : "system:node-bootstrapper" } ,
)
testlib . WaitForUserToHaveAccess ( t , serviceaccount . MakeUsername ( namespaceName , saName ) , [ ] string { } , & authorizationv1 . ResourceAttributes {
Verb : "create" , Group : certificatesv1 . GroupName , Version : "*" , Resource : "certificatesigningrequests" ,
} )
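// Create a CSR through the impersonation proxy while authenticated as the SA, to verify that the
// proxy passes through the SA's full user info (username, UID, groups, and extra fields).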
privateKey , err := ecdsa . GenerateKey ( elliptic . P256 ( ) , rand . Reader )
require . NoError ( t , err )
csrPEM , err := cert . MakeCSR ( privateKey , & pkix . Name {
CommonName : "panda-man" ,
Organization : [ ] string { "living-the-dream" , "need-more-sleep" } ,
} , nil , nil )
require . NoError ( t , err )
csrName , _ , err := csr . RequestCertificate (
impersonationProxySAClient . Kubernetes ,
csrPEM ,
"" ,
certificatesv1 . KubeAPIServerClientSignerName ,
nil ,
[ ] certificatesv1 . KeyUsage { certificatesv1 . UsageClientAuth } ,
privateKey ,
)
require . NoError ( t , err )
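// Read the CSR back (via the certificates v1beta1 API) to inspect the user info it captured, then clean it up.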
saCSR , err := impersonationProxySAClient . Kubernetes . CertificatesV1beta1 ( ) . CertificateSigningRequests ( ) . Get ( ctx , csrName , metav1 . GetOptions { } )
require . NoError ( t , err )
err = adminClient . CertificatesV1beta1 ( ) . CertificateSigningRequests ( ) . Delete ( ctx , csrName , metav1 . DeleteOptions { } )
require . NoError ( t , err )
// make sure the user info that the CSR captured matches the SA, including the UID
require . Equal ( t , serviceaccount . MakeUsername ( namespaceName , saName ) , saCSR . Spec . Username )
require . Equal ( t , string ( saUID ) , saCSR . Spec . UID )
require . Equal ( t , [ ] string { "system:serviceaccounts" , "system:serviceaccounts:" + namespaceName , "system:authenticated" } , saCSR . Spec . Groups )
require . Equal ( t , map [ string ] certificatesv1beta1 . ExtraValue {
"authentication.kubernetes.io/pod-name" : { pod . Name } ,
"authentication.kubernetes.io/pod-uid" : { string ( pod . UID ) } ,
} , saCSR . Spec . Extra )
} )
t . Run ( "kubectl as a client" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
kubeconfigPath , envVarsWithProxy , tempDir := getImpersonationKubeconfig ( t , env , impersonationProxyURL , impersonationProxyCACertPEM , credentialRequestSpecWithWorkingCredentials . Authenticator )
// Run a new test pod so we can interact with it using kubectl. We use a fresh pod here rather than the
// existing Concierge pod because we need more tools than we can get from a scratch/distroless base image.
runningTestPod := testlib . CreatePod ( ctx , t , "impersonation-proxy" , env . ConciergeNamespace , corev1 . PodSpec { Containers : [ ] corev1 . Container { {
Name : "impersonation-proxy-test" ,
Image : "debian:10.10-slim" ,
ImagePullPolicy : corev1 . PullIfNotPresent ,
Command : [ ] string { "bash" , "-c" , ` while true; do read VAR; echo "VAR: $VAR"; done ` } ,
Stdin : true ,
Resources : corev1 . ResourceRequirements {
Limits : corev1 . ResourceList {
corev1 . ResourceMemory : resource . MustParse ( "16Mi" ) ,
corev1 . ResourceCPU : resource . MustParse ( "10m" ) ,
} ,
Requests : corev1 . ResourceList {
corev1 . ResourceMemory : resource . MustParse ( "16Mi" ) ,
corev1 . ResourceCPU : resource . MustParse ( "10m" ) ,
} ,
} ,
} } } )
// Try "kubectl exec" through the impersonation proxy.
echoString := "hello world"
remoteEchoFile := fmt . Sprintf ( "/tmp/test-impersonation-proxy-echo-file-%d.txt" , time . Now ( ) . Unix ( ) )
stdout , err := runKubectl ( t , kubeconfigPath , envVarsWithProxy , "exec" , "--namespace" , runningTestPod . Namespace , runningTestPod . Name , "--" , "bash" , "-c" , fmt . Sprintf ( ` echo "%s" | tee %s ` , echoString , remoteEchoFile ) )
require . NoError ( t , err , ` "kubectl exec" failed ` )
require . Equal ( t , echoString + "\n" , stdout )
// run the kubectl cp command
localEchoFile := filepath . Join ( tempDir , filepath . Base ( remoteEchoFile ) )
_ , err = runKubectl ( t , kubeconfigPath , envVarsWithProxy , "cp" , fmt . Sprintf ( "%s/%s:%s" , runningTestPod . Namespace , runningTestPod . Name , remoteEchoFile ) , localEchoFile )
require . NoError ( t , err , ` "kubectl cp" failed ` )
localEchoFileData , err := ioutil . ReadFile ( localEchoFile )
require . NoError ( t , err )
require . Equal ( t , echoString + "\n" , string ( localEchoFileData ) )
// run the kubectl logs command
logLinesCount := 10
stdout , err = runKubectl ( t , kubeconfigPath , envVarsWithProxy , "logs" , "--namespace" , conciergePod . Namespace , conciergePod . Name , fmt . Sprintf ( "--tail=%d" , logLinesCount ) )
require . NoError ( t , err , ` "kubectl logs" failed ` )
// Expect _approximately_ logLinesCount lines in the output
// (we can't match 100% exactly due to https://github.com/kubernetes/kubernetes/issues/72628).
require . InDeltaf ( t , logLinesCount , strings . Count ( stdout , "\n" ) , 1 , "wanted %d newlines in kubectl logs output:\n%s" , logLinesCount , stdout )
// run the kubectl attach command
timeout , cancelFunc := context . WithTimeout ( ctx , 2 * time . Minute )
defer cancelFunc ( )
attachCmd , attachStdout , attachStderr := kubectlCommand ( timeout , t , kubeconfigPath , envVarsWithProxy , "attach" , "--stdin=true" , "--namespace" , runningTestPod . Namespace , runningTestPod . Name , "-v=10" )
attachCmd . Env = envVarsWithProxy
attachStdin , err := attachCmd . StdinPipe ( )
require . NoError ( t , err )
// start but don't wait for the attach command
err = attachCmd . Start ( )
require . NoError ( t , err )
attachExitCh := make ( chan struct { } )
go func ( ) {
assert . NoError ( t , attachCmd . Wait ( ) )
close ( attachExitCh )
} ( )
// write to stdin on the attach process
_ , err = attachStdin . Write ( [ ] byte ( echoString + "\n" ) )
require . NoError ( t , err )
// see that we can read stdout and it spits out stdin output back to us
wantAttachStdout := fmt . Sprintf ( "VAR: %s\n" , echoString )
testlib . RequireEventually ( t , func ( requireEventually * require . Assertions ) {
requireEventually . Equal (
wantAttachStdout ,
attachStdout . String ( ) ,
` got "kubectl attach" stdout: %q, wanted: %q (stderr: %q) ` ,
attachStdout . String ( ) ,
wantAttachStdout ,
attachStderr . String ( ) ,
)
} , time . Second * 60 , time . Millisecond * 250 )
// close stdin and attach process should exit
err = attachStdin . Close ( )
require . NoError ( t , err )
requireClose ( t , attachExitCh , time . Second * 20 )
} )
t . Run ( "websocket client" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
namespaceName := createTestNamespace ( t , adminClient )
impersonationRestConfig := impersonationProxyRestConfig (
refreshCredential ( t , impersonationProxyURL , impersonationProxyCACertPEM ) ,
impersonationProxyURL , impersonationProxyCACertPEM , nil ,
)
tlsConfig , err := rest . TLSConfigFor ( impersonationRestConfig )
require . NoError ( t , err )
wantConfigMapLabelKey , wantConfigMapLabelValue := "some-label-key" , "some-label-value"
dest , _ := url . Parse ( impersonationProxyURL )
dest . Scheme = "wss"
dest . Path = "/api/v1/namespaces/" + namespaceName + "/configmaps"
dest . RawQuery = url . Values {
"watch" : { "1" } ,
"labelSelector" : { fmt . Sprintf ( "%s=%s" , wantConfigMapLabelKey , wantConfigMapLabelValue ) } ,
"resourceVersion" : { "0" } ,
} . Encode ( )
dialer := websocket . Dialer {
TLSClientConfig : tlsConfig ,
}
if ! clusterSupportsLoadBalancers {
dialer . Proxy = func ( req * http . Request ) ( * url . URL , error ) {
proxyURL , err := url . Parse ( env . Proxy )
require . NoError ( t , err )
t . Logf ( "passing request for %s through proxy %s" , testlib . RedactURLParams ( req . URL ) , proxyURL . String ( ) )
return proxyURL , nil
}
}
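// Dial the websocket watch endpoint through the impersonation proxy, retrying until the connection succeeds.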
var (
resp * http . Response
conn * websocket . Conn
)
testlib . RequireEventually ( t , func ( requireEventually * require . Assertions ) {
var err error
conn , resp , err = dialer . Dial ( dest . String ( ) , http . Header { "Origin" : { dest . String ( ) } } )
if resp != nil {
defer func ( ) { requireEventually . NoError ( resp . Body . Close ( ) ) } ( )
}
if err != nil && resp != nil {
body , _ := ioutil . ReadAll ( resp . Body )
t . Logf ( "websocket dial failed: %d:%s" , resp . StatusCode , body )
}
requireEventually . NoError ( err )
} , time . Minute , time . Second )
// perform a create through the admin client
wantConfigMap := & corev1 . ConfigMap {
ObjectMeta : metav1 . ObjectMeta { Name : "configmap-1" , Labels : map [ string ] string { wantConfigMapLabelKey : wantConfigMapLabelValue } } ,
}
wantConfigMap , err = adminClient . CoreV1 ( ) . ConfigMaps ( namespaceName ) . Create ( ctx ,
wantConfigMap ,
metav1 . CreateOptions { } ,
)
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , adminClient . CoreV1 ( ) . ConfigMaps ( namespaceName ) .
DeleteCollection ( context . Background ( ) , metav1 . DeleteOptions { } , metav1 . ListOptions { } ) )
} )
// see if the websocket client received an event for the create
_ , message , err := conn . ReadMessage ( )
if err != nil {
t . Fatalf ( "Unexpected error: %v" , err )
}
var got watchJSON
err = json . Unmarshal ( message , & got )
require . NoError ( t , err )
if got . Type != watch . Added {
t . Errorf ( "Unexpected type: %v" , got . Type )
}
var actualConfigMap corev1 . ConfigMap
require . NoError ( t , json . Unmarshal ( got . Object , & actualConfigMap ) )
actualConfigMap . TypeMeta = metav1 . TypeMeta { } // This isn't filled out in the wantConfigMap we got back from create.
require . Equal ( t , * wantConfigMap , actualConfigMap )
} )
t . Run ( "http2 client" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
namespaceName := createTestNamespace ( t , adminClient )
wantConfigMapLabelKey , wantConfigMapLabelValue := "some-label-key" , "some-label-value"
wantConfigMap := & corev1 . ConfigMap {
ObjectMeta : metav1 . ObjectMeta { Name : "configmap-1" , Labels : map [ string ] string { wantConfigMapLabelKey : wantConfigMapLabelValue } } ,
}
wantConfigMap , err = adminClient . CoreV1 ( ) . ConfigMaps ( namespaceName ) . Create ( ctx ,
wantConfigMap ,
metav1 . CreateOptions { } ,
)
require . NoError ( t , err )
t . Cleanup ( func ( ) {
_ = adminClient . CoreV1 ( ) . ConfigMaps ( namespaceName ) . DeleteCollection ( ctx , metav1 . DeleteOptions { } , metav1 . ListOptions { } )
} )
// create rest client
restConfig := impersonationProxyRestConfig (
refreshCredential ( t , impersonationProxyURL , impersonationProxyCACertPEM ) ,
impersonationProxyURL , impersonationProxyCACertPEM , nil ,
)
tlsConfig , err := rest . TLSConfigFor ( restConfig )
require . NoError ( t , err )
httpTransport := http . Transport {
TLSClientConfig : tlsConfig ,
}
if ! clusterSupportsLoadBalancers {
httpTransport . Proxy = func ( req * http . Request ) ( * url . URL , error ) {
proxyURL , err := url . Parse ( env . Proxy )
require . NoError ( t , err )
t . Logf ( "passing request for %s through proxy %s" , testlib . RedactURLParams ( req . URL ) , proxyURL . String ( ) )
return proxyURL , nil
}
}
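// Upgrade the transport so that it will negotiate HTTP/2 with the impersonation proxy.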
err = http2 . ConfigureTransport ( & httpTransport )
require . NoError ( t , err )
httpClient := http . Client {
Transport : & httpTransport ,
}
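// Do a plain GET of the ConfigMap through the impersonation proxy and confirm the response was served over HTTP/2.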
dest , _ := url . Parse ( impersonationProxyURL )
dest . Path = "/api/v1/namespaces/" + namespaceName + "/configmaps/configmap-1"
getConfigmapRequest , err := http . NewRequestWithContext ( ctx , http . MethodGet , dest . String ( ) , nil )
require . NoError ( t , err )
response , err := httpClient . Do ( getConfigmapRequest )
require . NoError ( t , err )
body , _ := ioutil . ReadAll ( response . Body )
t . Logf ( "http2 status code: %d, proto: %s, message: %s" , response . StatusCode , response . Proto , body )
require . Equal ( t , "HTTP/2.0" , response . Proto )
require . Equal ( t , http . StatusOK , response . StatusCode )
defer func ( ) {
require . NoError ( t , response . Body . Close ( ) )
} ( )
var actualConfigMap corev1 . ConfigMap
require . NoError ( t , json . Unmarshal ( body , & actualConfigMap ) )
actualConfigMap . TypeMeta = metav1 . TypeMeta { } // This isn't filled out in the wantConfigMap we got back from create.
require . Equal ( t , * wantConfigMap , actualConfigMap )
// watch configmaps
dest . Path = "/api/v1/namespaces/" + namespaceName + "/configmaps"
dest . RawQuery = url . Values {
"watch" : { "1" } ,
"labelSelector" : { fmt . Sprintf ( "%s=%s" , wantConfigMapLabelKey , wantConfigMapLabelValue ) } ,
"resourceVersion" : { "0" } ,
} . Encode ( )
watchConfigmapsRequest , err := http . NewRequestWithContext ( ctx , http . MethodGet , dest . String ( ) , nil )
require . NoError ( t , err )
response , err = httpClient . Do ( watchConfigmapsRequest )
require . NoError ( t , err )
require . Equal ( t , "HTTP/2.0" , response . Proto )
require . Equal ( t , http . StatusOK , response . StatusCode )
defer func ( ) {
require . NoError ( t , response . Body . Close ( ) )
} ( )
// decode
decoder := json . NewDecoder ( response . Body )
var got watchJSON
err = decoder . Decode ( & got )
require . NoError ( t , err )
if got . Type != watch . Added {
t . Errorf ( "Unexpected type: %v" , got . Type )
}
err = json . Unmarshal ( got . Object , & actualConfigMap )
require . NoError ( t , err )
require . Equal ( t , "configmap-1" , actualConfigMap . Name )
actualConfigMap . TypeMeta = metav1 . TypeMeta { } // This isn't filled out in the wantConfigMap we got back from create.
require . Equal ( t , * wantConfigMap , actualConfigMap )
} )
t . Run ( "honors anonymous authentication of KAS" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
impersonationProxyAnonymousClient := newAnonymousImpersonationProxyClient (
t , impersonationProxyURL , impersonationProxyCACertPEM , nil ,
)
copyConfig := rest . CopyConfig ( impersonationProxyAnonymousClient . JSONConfig )
copyConfig . GroupVersion = & schema . GroupVersion { }
copyConfig . NegotiatedSerializer = unstructuredscheme . NewUnstructuredNegotiatedSerializer ( )
impersonationProxyAnonymousRestClient , err := rest . RESTClientFor ( copyConfig )
require . NoError ( t , err )
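// Also build a client which authenticates as the cluster admin but impersonates system:anonymous
// through the impersonation proxy (nested impersonation).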
adminClientRestConfig := testlib . NewClientConfig ( t )
clusterAdminCredentials := getCredForConfig ( t , adminClientRestConfig )
impersonationProxyAdminClientAsAnonymousConfig := newImpersonationProxyClientWithCredentials ( t ,
clusterAdminCredentials ,
impersonationProxyURL , impersonationProxyCACertPEM ,
& rest . ImpersonationConfig { UserName : user . Anonymous } ) .
JSONConfig
impersonationProxyAdminClientAsAnonymousConfigCopy := rest . CopyConfig ( impersonationProxyAdminClientAsAnonymousConfig )
impersonationProxyAdminClientAsAnonymousConfigCopy . GroupVersion = & schema . GroupVersion { }
impersonationProxyAdminClientAsAnonymousConfigCopy . NegotiatedSerializer = unstructuredscheme . NewUnstructuredNegotiatedSerializer ( )
impersonationProxyAdminRestClientAsAnonymous , err := rest . RESTClientFor ( impersonationProxyAdminClientAsAnonymousConfigCopy )
require . NoError ( t , err )
t . Run ( "anonymous authentication irrelevant" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
// - hit the token credential request endpoint with an empty body
// - through the impersonation proxy
// - should succeed as an invalid request whether anonymous authentication is enabled or disabled
// - should not reject as unauthorized
t . Run ( "token credential request" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
tkr , err := impersonationProxyAnonymousClient . PinnipedConcierge . LoginV1alpha1 ( ) . TokenCredentialRequests ( ) .
Create ( ctx , & loginv1alpha1 . TokenCredentialRequest {
Spec : loginv1alpha1 . TokenCredentialRequestSpec {
Authenticator : corev1 . TypedLocalObjectReference { APIGroup : pointer . String ( "anything.pinniped.dev" ) } ,
} ,
} , metav1 . CreateOptions { } )
require . True ( t , k8serrors . IsInvalid ( err ) , testlib . Sdump ( err ) )
require . Equal ( t , ` .login.concierge.pinniped.dev "" is invalid: spec.token.value: Required value: token must be supplied ` , err . Error ( ) )
require . Equal ( t , & loginv1alpha1 . TokenCredentialRequest { } , tkr )
} )
// - hit the healthz endpoint (non-resource endpoint)
// - through the impersonation proxy
// - as cluster admin, impersonating anonymous user
// - should succeed, authentication happens as cluster-admin
// - whoami should confirm we are using impersonation
// - healthz should succeed, anonymous users can request this endpoint
// - healthz/log should fail, forbidden anonymous
t . Run ( "non-resource request while impersonating anonymous - nested impersonation" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
whoami , errWho := impersonationProxyAdminRestClientAsAnonymous . Post ( ) . Body ( [ ] byte ( ` { } ` ) ) . AbsPath ( "/apis/identity.concierge." + env . APIGroupSuffix + "/v1alpha1/whoamirequests" ) . DoRaw ( ctx )
require . NoError ( t , errWho , testlib . Sdump ( errWho ) )
require . True ( t , strings . HasPrefix ( string ( whoami ) , ` { "kind":"WhoAmIRequest","apiVersion":"identity.concierge. ` + env . APIGroupSuffix + ` /v1alpha1","metadata": { "creationTimestamp":null},"spec": { },"status": { "kubernetesUserInfo": { "user": { "username":"system:anonymous","groups":["system:unauthenticated"],"extra": { "original-user-info.impersonation-proxy.concierge.pinniped.dev":[" { \"username\": ` ) , string ( whoami ) )
healthz , errHealth := impersonationProxyAdminRestClientAsAnonymous . Get ( ) . AbsPath ( "/healthz" ) . DoRaw ( ctx )
require . NoError ( t , errHealth , testlib . Sdump ( errHealth ) )
require . Equal ( t , "ok" , string ( healthz ) )
healthzLog , errHealthzLog := impersonationProxyAdminRestClientAsAnonymous . Get ( ) . AbsPath ( "/healthz/log" ) . DoRaw ( ctx )
require . True ( t , k8serrors . IsForbidden ( errHealthzLog ) , "%s\n%s" , testlib . Sdump ( errHealthzLog ) , string ( healthzLog ) )
require . Equal ( t , ` { "kind":"Status","apiVersion":"v1","metadata": { },"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz/log\": decision made by impersonation-proxy.concierge.pinniped.dev","reason":"Forbidden","details": { },"code":403} ` + "\n" , string ( healthzLog ) )
} )
} )
t . Run ( "anonymous authentication enabled" , func ( t * testing . T ) {
testlib . IntegrationEnv ( t ) . WithCapability ( testlib . AnonymousAuthenticationSupported )
parallelIfNotEKS ( t )
// anonymous auth enabled
// - hit the healthz endpoint (non-resource endpoint)
// - through the impersonation proxy
// - should succeed 200
// - should respond "ok"
t . Run ( "non-resource request" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
healthz , errHealth := impersonationProxyAnonymousRestClient . Get ( ) . AbsPath ( "/healthz" ) . DoRaw ( ctx )
require . NoError ( t , errHealth , testlib . Sdump ( errHealth ) )
require . Equal ( t , "ok" , string ( healthz ) )
} )
// - hit the pods endpoint (a resource endpoint)
// - through the impersonation proxy
// - should fail forbidden
// - system:anonymous cannot get pods
t . Run ( "resource" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
pod , err := impersonationProxyAnonymousClient . Kubernetes . CoreV1 ( ) . Pods ( metav1 . NamespaceSystem ) .
Get ( ctx , "does-not-matter" , metav1 . GetOptions { } )
require . True ( t , k8serrors . IsForbidden ( err ) , testlib . Sdump ( err ) )
require . EqualError ( t , err , ` pods "does-not-matter" is forbidden: User "system:anonymous" cannot get resource "pods" in API group "" in the namespace "kube-system": ` +
` decision made by impersonation-proxy.concierge.pinniped.dev ` , testlib . Sdump ( err ) )
require . Equal ( t , & corev1 . Pod { } , pod )
} )
// - request to whoami (pinniped resource endpoint)
// - through the impersonation proxy
// - should succeed 200
// - should respond "you are system:anonymous"
t . Run ( "pinniped resource request" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
whoAmI , err := impersonationProxyAnonymousClient . PinnipedConcierge . IdentityV1alpha1 ( ) . WhoAmIRequests ( ) .
Create ( ctx , & identityv1alpha1 . WhoAmIRequest { } , metav1 . CreateOptions { } )
require . NoError ( t , err )
require . Equal ( t ,
expectedWhoAmIRequestResponse (
"system:anonymous" ,
[ ] string { "system:unauthenticated" } ,
nil ,
) ,
whoAmI ,
)
} )
} )
t . Run ( "anonymous authentication disabled" , func ( t * testing . T ) {
testlib . IntegrationEnv ( t ) . WithoutCapability ( testlib . AnonymousAuthenticationSupported )
parallelIfNotEKS ( t )
// - hit the healthz endpoint (non-resource endpoint)
// - through the impersonation proxy
// - should fail unauthorized
// - kube api server should reject it
t . Run ( "non-resource request" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
healthz , err := impersonationProxyAnonymousRestClient . Get ( ) . AbsPath ( "/healthz" ) . DoRaw ( ctx )
require . True ( t , k8serrors . IsUnauthorized ( err ) , testlib . Sdump ( err ) )
require . Equal ( t , ` { "kind":"Status","apiVersion":"v1","metadata": { },"status":"Failure","message":"Unauthorized","reason":"Unauthorized","code":401} ` + "\n" , string ( healthz ) )
} )
// - hit the pods endpoint (a resource endpoint)
// - through the impersonation proxy
// - should fail unauthorized
// - kube api server should reject it
t . Run ( "resource" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
pod , err := impersonationProxyAnonymousClient . Kubernetes . CoreV1 ( ) . Pods ( metav1 . NamespaceSystem ) .
Get ( ctx , "does-not-matter" , metav1 . GetOptions { } )
require . True ( t , k8serrors . IsUnauthorized ( err ) , testlib . Sdump ( err ) )
require . Equal ( t , & corev1 . Pod { } , pod )
} )
// - request to whoami (pinniped resource endpoint)
// - through the impersonation proxy
// - should fail unauthorized
// - kube api server should reject it
t . Run ( "pinniped resource request" , func ( t * testing . T ) {
parallelIfNotEKS ( t )
whoAmI , err := impersonationProxyAnonymousClient . PinnipedConcierge . IdentityV1alpha1 ( ) . WhoAmIRequests ( ) .
Create ( ctx , & identityv1alpha1 . WhoAmIRequest { } , metav1 . CreateOptions { } )
require . True ( t , k8serrors . IsUnauthorized ( err ) , testlib . Sdump ( err ) )
require . Equal ( t , & identityv1alpha1 . WhoAmIRequest { } , whoAmI )
} )
} )
} )
} )
t . Run ( "assert correct impersonator service account is being used" , func ( t * testing . T ) {
// pick an API that everyone can access but always make invalid requests to it
// we can tell that the request is reaching KAS because only it has the validation logic
impersonationProxySSRRClient := impersonationProxyKubeClient ( t ) . AuthorizationV1 ( ) . SelfSubjectRulesReviews ( )
crbClient := adminClient . RbacV1 ( ) . ClusterRoleBindings ( )
impersonationProxyName := env . ConciergeAppName + "-impersonation-proxy"
saFullName := serviceaccount . MakeUsername ( env . ConciergeNamespace , impersonationProxyName )
invalidSSRR := & authorizationv1 . SelfSubjectRulesReview { }
// sanity check default expected error message
_ , err := impersonationProxySSRRClient . Create ( ctx , invalidSSRR , metav1 . CreateOptions { } )
require . True ( t , k8serrors . IsBadRequest ( err ) , testlib . Sdump ( err ) )
require . EqualError ( t , err , "no namespace on request" )
// remove the impersonation proxy SA's permissions
crb , err := crbClient . Get ( ctx , impersonationProxyName , metav1 . GetOptions { } )
require . NoError ( t , err )
// sanity check the subject
require . Len ( t , crb . Subjects , 1 )
sub := crb . Subjects [ 0 ] . DeepCopy ( )
require . Equal ( t , & rbacv1 . Subject {
Kind : "ServiceAccount" ,
APIGroup : "" ,
Name : impersonationProxyName ,
Namespace : env . ConciergeNamespace ,
} , sub )
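// Now actually remove the subject so the impersonation proxy SA loses its impersonate permission.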
crb . Subjects = nil
_ , err = crbClient . Update ( ctx , crb , metav1 . UpdateOptions { } )
require . NoError ( t , err )
// make sure to put the permissions back at the end
t . Cleanup ( func ( ) {
crbEnd , errEnd := crbClient . Get ( ctx , impersonationProxyName , metav1 . GetOptions { } )
require . NoError ( t , errEnd )
crbEnd . Subjects = [ ] rbacv1 . Subject { * sub }
_ , errUpdate := crbClient . Update ( ctx , crbEnd , metav1 . UpdateOptions { } )
require . NoError ( t , errUpdate )
testlib . WaitForUserToHaveAccess ( t , saFullName , nil , & authorizationv1 . ResourceAttributes {
Verb : "impersonate" ,
Resource : "users" ,
} )
} )
// assert that the impersonation proxy stops working when we remove its permissions
testlib . RequireEventuallyWithoutError ( t , func ( ) ( bool , error ) {
_ , errCreate := impersonationProxySSRRClient . Create ( ctx , invalidSSRR , metav1 . CreateOptions { } )
switch {
case errCreate == nil :
return false , fmt . Errorf ( "unexpected nil error for test user create invalid SSRR" )
case k8serrors . IsBadRequest ( errCreate ) && errCreate . Error ( ) == "no namespace on request" :
t . Log ( "waiting for impersonation proxy service account to lose impersonate permissions" )
return false , nil // RBAC change has not rolled out yet
case k8serrors . IsForbidden ( errCreate ) && errCreate . Error ( ) ==
` users " ` + env . TestUser . ExpectedUsername + ` " is forbidden: User " ` + saFullName +
` " cannot impersonate resource "users" in API group "" at the cluster scope ` :
return true , nil // expected RBAC error
default :
return false , fmt . Errorf ( "unexpected error for test user create invalid SSRR: %w" , errCreate )
}
} , time . Minute , time . Second )
} )
t . Run ( "adding an annotation reconciles the LoadBalancer service" , func ( t * testing . T ) {
if ! ( impersonatorShouldHaveStartedAutomaticallyByDefault && clusterSupportsLoadBalancers ) {
t . Skip ( "only running when the cluster is meant to be using LoadBalancer services" )
}
// Use this string in all annotation keys added by this test, so the assertions can ignore annotation keys
// which might exist on the Service which are not related to this test.
recognizableAnnotationKeyString := "pinniped.dev"
// Grab the state of the CredentialIssuer prior to this test, so we can restore things back afterwards.
previous , err := adminConciergeClient . ConfigV1alpha1 ( ) . CredentialIssuers ( ) . Get ( ctx , credentialIssuerName ( env ) , metav1 . GetOptions { } )
require . NoError ( t , err )
updateServiceAnnotations := func ( annotations map [ string ] string ) {
require . NoError ( t , retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
service , err := adminClient . CoreV1 ( ) . Services ( env . ConciergeNamespace ) . Get ( ctx , impersonationProxyLoadBalancerName ( env ) , metav1 . GetOptions { } )
if err != nil {
return err
}
updated := service . DeepCopy ( )
if updated . Annotations == nil {
updated . Annotations = map [ string ] string { }
}
// Add/update each requested annotation, without overwriting others that are already there.
for k , v := range annotations {
updated . Annotations [ k ] = v
}
if equality . Semantic . DeepEqual ( service , updated ) {
return nil
}
t . Logf ( "updating Service with annotations: %v" , annotations )
_ , err = adminClient . CoreV1 ( ) . Services ( env . ConciergeNamespace ) . Update ( ctx , updated , metav1 . UpdateOptions { } )
return err
} ) )
}
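// deleteServiceAnnotations removes the given annotation keys from the impersonation proxy's
// LoadBalancer Service, retrying on update conflicts.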
deleteServiceAnnotations := func ( annotations map [ string ] string ) {
require . NoError ( t , retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
service , err := adminClient . CoreV1 ( ) . Services ( env . ConciergeNamespace ) . Get ( ctx , impersonationProxyLoadBalancerName ( env ) , metav1 . GetOptions { } )
if err != nil {
return err
}
updated := service . DeepCopy ( )
if updated . Annotations != nil {
for k := range annotations {
delete ( updated . Annotations , k )
}
}
if equality . Semantic . DeepEqual ( service , updated ) {
return nil
}
t . Logf ( "updating Service to remove annotations: %v" , annotations )
_ , err = adminClient . CoreV1 ( ) . Services ( env . ConciergeNamespace ) . Update ( ctx , updated , metav1 . UpdateOptions { } )
return err
} ) )
}
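// applyCredentialIssuerAnnotations sets spec.impersonationProxy.service.annotations on the
// CredentialIssuer, retrying on update conflicts.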
applyCredentialIssuerAnnotations := func ( annotations map [ string ] string ) {
require . NoError ( t , retry . RetryOnConflict ( retry . DefaultRetry , func ( ) error {
issuer , err := adminConciergeClient . ConfigV1alpha1 ( ) . CredentialIssuers ( ) . Get ( ctx , credentialIssuerName ( env ) , metav1 . GetOptions { } )
if err != nil {
return err
}
updated := issuer . DeepCopy ( )
updated . Spec . ImpersonationProxy . Service . Annotations = annotations
if equality . Semantic . DeepEqual ( issuer , updated ) {
return nil
}
t . Logf ( "updating CredentialIssuer with spec.impersonationProxy.service.annotations: %v" , annotations )
_ , err = adminConciergeClient . ConfigV1alpha1 ( ) . CredentialIssuers ( ) . Update ( ctx , updated , metav1 . UpdateOptions { } )
return err
} ) )
}
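// waitForServiceAnnotations waits until the Service's annotations, filtered to the keys this
// test cares about, match the expected set.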
waitForServiceAnnotations := func ( wantAnnotations map [ string ] string , annotationKeyFilter string ) {
testlib . RequireEventuallyWithoutError ( t , func ( ) ( bool , error ) {
service , err := adminClient . CoreV1 ( ) . Services ( env . ConciergeNamespace ) . Get ( ctx , impersonationProxyLoadBalancerName ( env ) , metav1 . GetOptions { } )
if err != nil {
return false , err
}
filteredActualAnnotations := map [ string ] string { }
for k , v := range service . Annotations {
// We do want to pay attention to any annotation for which we intend to make an explicit assertion,
// e.g. "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout" which is from our
// default CredentialIssuer spec.
_ , wantToMakeAssertionOnThisAnnotation := wantAnnotations [ k ]
// We do not want to pay attention to Service annotations added by other controllers,
// e.g. the "cloud.google.com/neg" annotation that is sometimes added by GKE on Services.
// These can come and go in time intervals outside of our control.
annotationContainsFilterString := strings . Contains ( k , annotationKeyFilter )
if wantToMakeAssertionOnThisAnnotation || annotationContainsFilterString {
filteredActualAnnotations [ k ] = v
}
}
t . Logf ( "found Service %s of type %s with actual annotations %q; filtered by interesting keys results in %q; expected annotations %q" ,
service . Name , service . Spec . Type , service . Annotations , filteredActualAnnotations , wantAnnotations )
return equality . Semantic . DeepEqual ( filteredActualAnnotations , wantAnnotations ) , nil
} , 1 * time . Minute , 1 * time . Second )
}
expectedAnnotations := func ( credentialIssuerSpecAnnotations map [ string ] string , otherAnnotations map [ string ] string ) map [ string ] string {
credentialIssuerSpecAnnotationKeys := [ ] string { }
expectedAnnotations := map [ string ] string { }
// Expect the annotations specified on the CredentialIssuer spec to be present.
for k , v := range credentialIssuerSpecAnnotations {
credentialIssuerSpecAnnotationKeys = append ( credentialIssuerSpecAnnotationKeys , k )
expectedAnnotations [ k ] = v
}
// Aside from the annotations requested on the CredentialIssuer spec, also expect the other annotation to still be there too.
for k , v := range otherAnnotations {
expectedAnnotations [ k ] = v
}
// Also expect the internal bookkeeping annotation to be present. It tracks the requested keys from the spec.
// Our controller sorts these keys to make the order in the annotation's value predictable.
sort . Strings ( credentialIssuerSpecAnnotationKeys )
credentialIssuerSpecAnnotationKeysJSON , err := json . Marshal ( credentialIssuerSpecAnnotationKeys )
require . NoError ( t , err )
// The name of this annotation key is decided by our controller.
expectedAnnotations [ "credentialissuer." + recognizableAnnotationKeyString + "/annotation-keys" ] = string ( credentialIssuerSpecAnnotationKeysJSON )
return expectedAnnotations
}
otherActorAnnotations := map [ string ] string {
recognizableAnnotationKeyString + "/test-other-actor-" + testlib . RandHex ( t , 8 ) : "test-other-actor-" + testlib . RandHex ( t , 8 ) ,
}
// Whatever happens, set the annotations back to the original value and expect the Service to be updated.
t . Cleanup ( func ( ) {
t . Log ( "reverting CredentialIssuer back to previous configuration" )
deleteServiceAnnotations ( otherActorAnnotations )
applyCredentialIssuerAnnotations ( previous . Spec . ImpersonationProxy . Service . DeepCopy ( ) . Annotations )
waitForServiceAnnotations (
expectedAnnotations ( previous . Spec . ImpersonationProxy . Service . DeepCopy ( ) . Annotations , map [ string ] string { } ) ,
recognizableAnnotationKeyString ,
)
} )
// Having another actor, like a human or a non-Pinniped controller, add unrelated annotations to the Service
// should not cause the Pinniped controllers to overwrite those annotations.
updateServiceAnnotations ( otherActorAnnotations )
// Set a new annotation in the CredentialIssuer spec.impersonationProxy.service.annotations field.
newAnnotationKey := recognizableAnnotationKeyString + "/test-" + testlib . RandHex ( t , 8 )
newAnnotationValue := "test-" + testlib . RandHex ( t , 8 )
updatedAnnotations := previous . Spec . ImpersonationProxy . Service . DeepCopy ( ) . Annotations
updatedAnnotations [ newAnnotationKey ] = newAnnotationValue
applyCredentialIssuerAnnotations ( updatedAnnotations )
// Expect them to be applied to the Service.
waitForServiceAnnotations (
expectedAnnotations ( updatedAnnotations , otherActorAnnotations ) ,
recognizableAnnotationKeyString ,
)
} )
t . Run ( "running impersonation proxy with ClusterIP service" , func ( t * testing . T ) {
if env . Proxy == "" {
t . Skip ( "Skipping ClusterIP test because squid proxy is not present" )
}
clusterIPServiceURL := fmt . Sprintf ( "%s.%s.svc.cluster.local" , impersonationProxyClusterIPName ( env ) , env . ConciergeNamespace )
updateCredentialIssuer ( ctx , t , env , adminConciergeClient , conciergev1alpha . CredentialIssuerSpec {
ImpersonationProxy : & conciergev1alpha . ImpersonationProxySpec {
Mode : conciergev1alpha . ImpersonationProxyModeEnabled ,
ExternalEndpoint : clusterIPServiceURL ,
Service : conciergev1alpha . ImpersonationProxyServiceSpec {
Type : conciergev1alpha . ImpersonationProxyServiceTypeClusterIP ,
} ,
} ,
} )
// wait until the CredentialIssuer is updated with the new URL
testlib . RequireEventuallyWithoutError ( t , func ( ) ( bool , error ) {
newImpersonationProxyURL , _ := performImpersonatorDiscoveryURL ( ctx , t , env , adminConciergeClient )
return newImpersonationProxyURL == "https://" + clusterIPServiceURL , nil
} , 30 * time . Second , 500 * time . Millisecond )
newImpersonationProxyURL , newImpersonationProxyCACertPEM := performImpersonatorDiscovery ( ctx , t , env , adminClient , adminConciergeClient , refreshCredential )
anonymousClient := newAnonymousImpersonationProxyClientWithProxy ( t , newImpersonationProxyURL , newImpersonationProxyCACertPEM , nil ) . PinnipedConcierge
refreshedCredentials := refreshCredentialHelper ( t , anonymousClient )
client := newImpersonationProxyClientWithCredentialsAndProxy ( t , refreshedCredentials , newImpersonationProxyURL , newImpersonationProxyCACertPEM , nil ) . Kubernetes
// everything should work properly through the cluster ip service
t . Run (
"access as user" ,
testlib . AccessAsUserTest ( ctx , env . TestUser . ExpectedUsername , client ) ,
)
} )
t . Run ( "manually disabling the impersonation proxy feature" , func ( t * testing . T ) {
// Update configuration to force the proxy to disabled mode
updateCredentialIssuer ( ctx , t , env , adminConciergeClient , conciergev1alpha . CredentialIssuerSpec {
ImpersonationProxy : & conciergev1alpha . ImpersonationProxySpec {
Mode : conciergev1alpha . ImpersonationProxyModeDisabled ,
} ,
} )
if clusterSupportsLoadBalancers {
// The load balancer should have been deleted when we disabled the impersonation proxy.
// Note that this can take kind of a long time on real cloud providers (e.g. ~22 seconds on EKS).
testlib . RequireEventuallyWithoutError ( t , func ( ) ( bool , error ) {
hasService , err := hasImpersonationProxyLoadBalancerService ( ctx , env , adminClient )
return ! hasService , err
} , 2 * time . Minute , 500 * time . Millisecond )
}
// Check that the impersonation proxy port has shut down.
// Ideally we could always check that the impersonation proxy's port has shut down, but on clusters where we
// do not run the squid proxy we have no easy way to see past the load balancer into the cluster,
// so we'll skip this check on clusters which have load balancers but don't run the squid proxy.
// The other cluster types that do run the squid proxy will give us sufficient coverage here.
if env . Proxy != "" {
testlib . RequireEventually ( t , func ( requireEventually * require . Assertions ) {
// It's okay if this returns RBAC errors because this user has no role bindings.
// What we want to see is that the proxy eventually shuts down entirely.
_ , err := impersonationProxyViaSquidKubeClientWithoutCredential ( t , proxyServiceEndpoint ) . CoreV1 ( ) . Namespaces ( ) . List ( ctx , metav1 . ListOptions { } )
isErr , _ := isServiceUnavailableViaSquidError ( err , proxyServiceEndpoint )
requireEventually . Truef ( isErr , "wanted service unavailable via squid error, got %v" , err )
} , 20 * time . Second , 500 * time . Millisecond )
}
// Check that the generated TLS cert Secret was deleted by the controller because it's supposed to clean this up
// when we disable the impersonator.
testlib . RequireEventually ( t , func ( requireEventually * require . Assertions ) {
_ , err := adminClient . CoreV1 ( ) . Secrets ( env . ConciergeNamespace ) . Get ( ctx , impersonationProxyTLSSecretName ( env ) , metav1 . GetOptions { } )
requireEventually . Truef ( k8serrors . IsNotFound ( err ) , "expected NotFound error, got %v" , err )
} , 10 * time . Second , 250 * time . Millisecond )
// Check that the generated CA cert Secret was not deleted by the controller because it's supposed to keep this
// around in case we decide to later re-enable the impersonator. We want to avoid generating new CA certs when
// possible because they make their way into kubeconfigs on client machines.
_ , err := adminClient . CoreV1 ( ) . Secrets ( env . ConciergeNamespace ) . Get ( ctx , impersonationProxyCASecretName ( env ) , metav1 . GetOptions { } )
require . NoError ( t , err )
// At this point the impersonator should be stopped. The CredentialIssuer's strategies array should be updated to
// include an unsuccessful impersonation strategy saying that it was manually configured to be disabled.
requireDisabledStrategy ( ctx , t , env , adminConciergeClient )
if ! env . HasCapability ( testlib . ClusterSigningKeyIsAvailable ) && env . HasCapability ( testlib . AnonymousAuthenticationSupported ) {
// This cluster does not support the cluster signing key strategy, so now that we've manually disabled the
// impersonation strategy, we should be left with no working strategies.
// Given that there are no working strategies, a TokenCredentialRequest which would otherwise work should now
// fail, because there is no point handing out credentials that are not going to work for any strategy.
// Note that testlib.CreateTokenCredentialRequest makes an unauthenticated request, so we can't meaningfully
// perform this part of the test on a cluster which does not allow anonymous authentication.
tokenCredentialRequestResponse , err := testlib . CreateTokenCredentialRequest ( ctx , t , credentialRequestSpecWithWorkingCredentials )
require . NoError ( t , err )
require . NotNil ( t , tokenCredentialRequestResponse . Status . Message , "expected an error message but got nil" )
require . Equal ( t , "authentication failed" , * tokenCredentialRequestResponse . Status . Message )
require . Nil ( t , tokenCredentialRequestResponse . Status . Credential )
}
} )
}
func ensureDNSResolves ( t * testing . T , urlString string ) {
t . Helper ( )
parsedURL , err := url . Parse ( urlString )
require . NoError ( t , err )
host := parsedURL . Hostname ( )
if net . ParseIP ( host ) != nil {
return // ignore IPs
}
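// Wrap the resolvers' dialer so that every DNS lookup is logged, which makes resolution failures easier to debug.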
var d net . Dialer
loggingDialer := func ( ctx context . Context , network , address string ) ( net . Conn , error ) {
t . Logf ( "dns lookup, network=%s address=%s" , network , address )
conn , connErr := d . DialContext ( ctx , network , address )
if connErr != nil {
t . Logf ( "dns lookup, err=%v" , connErr )
} else {
local := conn . LocalAddr ( )
remote := conn . RemoteAddr ( )
t . Logf ( "dns lookup, local conn network=%s addr=%s" , local . Network ( ) , local . String ( ) )
t . Logf ( "dns lookup, remote conn network=%s addr=%s" , remote . Network ( ) , remote . String ( ) )
}
return conn , connErr
}
goResolver := & net . Resolver {
PreferGo : true ,
StrictErrors : true ,
Dial : loggingDialer ,
}
notGoResolver := & net . Resolver {
PreferGo : false ,
StrictErrors : true ,
Dial : loggingDialer ,
}
testlib . RequireEventually ( t , func ( requireEventually * require . Assertions ) {
ctx , cancel := context . WithTimeout ( context . Background ( ) , 10 * time . Second )
defer cancel ( )
for _ , resolver := range [ ] * net . Resolver { goResolver , notGoResolver } {
resolver := resolver
ips , ipErr := resolver . LookupIPAddr ( ctx , host )
requireEventually . NoError ( ipErr )
requireEventually . NotEmpty ( ips )
}
} , 5 * time . Minute , 1 * time . Second )
}
func createTestNamespace ( t * testing . T , adminClient kubernetes . Interface ) string {
t . Helper ( )
ctx , cancel := context . WithTimeout ( context . Background ( ) , time . Minute )
defer cancel ( )
namespace , err := adminClient . CoreV1 ( ) . Namespaces ( ) . Create ( ctx , & corev1 . Namespace {
ObjectMeta : metav1 . ObjectMeta { GenerateName : "impersonation-integration-test-" } ,
} , metav1 . CreateOptions { } )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
ctx , cancel := context . WithTimeout ( context . Background ( ) , 2 * time . Minute )
defer cancel ( )
t . Logf ( "cleaning up test namespace %s" , namespace . Name )
require . NoError ( t , adminClient . CoreV1 ( ) . Namespaces ( ) . Delete ( ctx , namespace . Name , metav1 . DeleteOptions { } ) )
} )
return namespace . Name
}
func createServiceAccountToken ( ctx context . Context , t * testing . T , adminClient kubernetes . Interface , namespaceName string ) ( name , token string , uid types . UID ) {
t . Helper ( )
serviceAccount , err := adminClient . CoreV1 ( ) . ServiceAccounts ( namespaceName ) . Create ( ctx ,
& corev1 . ServiceAccount { ObjectMeta : metav1 . ObjectMeta { GenerateName : "int-test-service-account-" } } , metav1 . CreateOptions { } )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , adminClient . CoreV1 ( ) . ServiceAccounts ( namespaceName ) .
Delete ( context . Background ( ) , serviceAccount . Name , metav1 . DeleteOptions { } ) )
} )
secret , err := adminClient . CoreV1 ( ) . Secrets ( namespaceName ) . Create ( ctx , & corev1 . Secret {
ObjectMeta : metav1 . ObjectMeta {
GenerateName : "int-test-service-account-token-" ,
Annotations : map [ string ] string {
corev1 . ServiceAccountNameKey : serviceAccount . Name ,
} ,
} ,
Type : corev1 . SecretTypeServiceAccountToken ,
} , metav1 . CreateOptions { } )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , adminClient . CoreV1 ( ) . Secrets ( namespaceName ) .
Delete ( context . Background ( ) , secret . Name , metav1 . DeleteOptions { } ) )
} )
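// The token controller fills in the Secret's token data asynchronously, so wait for it to appear.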
testlib . RequireEventuallyWithoutError ( t , func ( ) ( bool , error ) {
secret , err = adminClient . CoreV1 ( ) . Secrets ( namespaceName ) . Get ( ctx , secret . Name , metav1 . GetOptions { } )
if err != nil {
return false , err
}
return len ( secret . Data [ corev1 . ServiceAccountTokenKey ] ) > 0 , nil
} , time . Minute , time . Second )
return serviceAccount . Name , string ( secret . Data [ corev1 . ServiceAccountTokenKey ] ) , serviceAccount . UID
}
func expectedWhoAmIRequestResponse ( username string , groups [ ] string , extra map [ string ] identityv1alpha1 . ExtraValue ) * identityv1alpha1 . WhoAmIRequest {
return & identityv1alpha1 . WhoAmIRequest {
Status : identityv1alpha1 . WhoAmIRequestStatus {
KubernetesUserInfo : identityv1alpha1 . KubernetesUserInfo {
User : identityv1alpha1 . UserInfo {
Username : username ,
UID : "" , // no way to impersonate UID: https://github.com/kubernetes/kubernetes/issues/93699
Groups : groups ,
Extra : extra ,
} ,
} ,
} ,
}
}
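// performImpersonatorDiscovery waits for impersonation proxy discovery info to be published on the
// CredentialIssuer and then, when a test proxy is available, probes each Concierge pod directly to
// confirm its impersonation proxy port is serving requests.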
func performImpersonatorDiscovery ( ctx context . Context , t * testing . T , env * testlib . TestEnv ,
adminClient kubernetes . Interface , adminConciergeClient pinnipedconciergeclientset . Interface ,
refreshCredential func ( t * testing . T , impersonationProxyURL string , impersonationProxyCACertPEM [ ] byte ) * loginv1alpha1 . ClusterCredential ) ( string , [ ] byte ) {
t . Helper ( )
impersonationProxyURL , impersonationProxyCACertPEM := performImpersonatorDiscoveryURL ( ctx , t , env , adminConciergeClient )
if len ( env . Proxy ) == 0 {
t . Log ( "no test proxy is available, skipping readiness checks for concierge impersonation proxy pods" )
return impersonationProxyURL , impersonationProxyCACertPEM
}
impersonationProxyParsedURL , err := url . Parse ( impersonationProxyURL )
require . NoError ( t , err )
expectedGroups := make ( [ ] string , 0 , len ( env . TestUser . ExpectedGroups ) + 1 ) // make sure we do not mutate env.TestUser.ExpectedGroups
expectedGroups = append ( expectedGroups , env . TestUser . ExpectedGroups ... )
expectedGroups = append ( expectedGroups , "system:authenticated" )
// Probe each pod directly for readiness, since the Concierge status alone is not sufficient - it only indicates that a single pod is ready.
testlib . RequireEventually ( t , func ( requireEventually * require . Assertions ) {
pods , err := adminClient . CoreV1 ( ) . Pods ( env . ConciergeNamespace ) . List ( ctx ,
metav1 . ListOptions { LabelSelector : "app=" + env . ConciergeAppName + ",!kube-cert-agent.pinniped.dev" } ) // TODO replace with deployment.pinniped.dev=concierge
requireEventually . NoError ( err )
requireEventually . Len ( pods . Items , 2 ) // has to stay in sync with the defaults in our YAML
for _ , pod := range pods . Items {
t . Logf ( "checking if concierge impersonation proxy pod %q is ready" , pod . Name )
requireEventually . NotEmptyf ( pod . Status . PodIP , "pod %q does not have an IP" , pod . Name )
credentials := refreshCredential ( t , impersonationProxyURL , impersonationProxyCACertPEM ) . DeepCopy ( )
credentials . Token = "not a valid token" // demonstrates that client certs take precedence over tokens by setting both on the requests
config := newImpersonationProxyConfigWithCredentials ( t , credentials , impersonationProxyURL , impersonationProxyCACertPEM , nil )
config = rest . CopyConfig ( config )
config . Proxy = kubeconfigProxyFunc ( t , env . Proxy ) // always use the proxy since we are talking directly to a pod IP
config . Host = "https://" + pod . Status . PodIP + ":8444" // hardcode the internal port - it should not change
config . TLSClientConfig . ServerName = impersonationProxyParsedURL . Hostname ( ) // make SNI hostname TLS verification work even when using IP
whoAmI , err := testlib . NewKubeclient ( t , config ) . PinnipedConcierge . IdentityV1alpha1 ( ) . WhoAmIRequests ( ) .
Create ( ctx , & identityv1alpha1 . WhoAmIRequest { } , metav1 . CreateOptions { } )
requireEventually . NoError ( err )
requireEventually . Equal (
expectedWhoAmIRequestResponse (
env . TestUser . ExpectedUsername ,
expectedGroups ,
nil ,
) ,
whoAmI ,
)
}
} , 10 * time . Minute , 10 * time . Second )
return impersonationProxyURL , impersonationProxyCACertPEM
}
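
// performImpersonatorDiscoveryURL waits for the CredentialIssuer to report a successful
// impersonation proxy strategy and returns the advertised endpoint URL and decoded CA bundle PEM.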
func performImpersonatorDiscoveryURL(ctx context.Context, t *testing.T, env *testlib.TestEnv, adminConciergeClient pinnipedconciergeclientset.Interface) (string, []byte) {
	t.Helper()

	var impersonationProxyURL string
	var impersonationProxyCACertPEM []byte

	t.Log("Waiting for CredentialIssuer strategy to be successful")

	testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
		credentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
		if err != nil || credentialIssuer.Status.Strategies == nil {
			t.Log("Did not find any CredentialIssuer with any strategies")
			return false, nil // didn't find it, but keep trying
		}
		for _, strategy := range credentialIssuer.Status.Strategies {
			// There will be other strategy types in the list, so ignore those.
			if strategy.Type == conciergev1alpha.ImpersonationProxyStrategyType && strategy.Status == conciergev1alpha.SuccessStrategyStatus { //nolint:nestif
				if strategy.Frontend == nil {
					return false, fmt.Errorf("did not find a Frontend") // unexpected, fail the test
				}
				if strategy.Frontend.ImpersonationProxyInfo == nil {
					return false, fmt.Errorf("did not find an ImpersonationProxyInfo") // unexpected, fail the test
				}
				impersonationProxyURL = strategy.Frontend.ImpersonationProxyInfo.Endpoint

				impersonationProxyCACertPEM, err = base64.StdEncoding.DecodeString(strategy.Frontend.ImpersonationProxyInfo.CertificateAuthorityData)
				if err != nil {
					return false, err // unexpected, fail the test
				}
				return true, nil // found it, continue the test!
			} else if strategy.Type == conciergev1alpha.ImpersonationProxyStrategyType {
				t.Logf("Waiting for successful impersonation proxy strategy on %s: found status %s with reason %s and message: %s",
					credentialIssuerName(env), strategy.Status, strategy.Reason, strategy.Message)
				if strategy.Reason == conciergev1alpha.ErrorDuringSetupStrategyReason {
					// The server encountered an unexpected error while starting the impersonator, so fail the test fast.
					return false, fmt.Errorf("found impersonation strategy in %s state with message: %s", strategy.Reason, strategy.Message)
				}
			}
		}
		t.Log("Did not find any impersonation proxy strategy on CredentialIssuer")
		return false, nil // didn't find it, but keep trying
	}, 10*time.Minute, 10*time.Second)

	t.Log("Found successful CredentialIssuer strategy")
	return impersonationProxyURL, impersonationProxyCACertPEM
}
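
// requireDisabledStrategy waits until the CredentialIssuer reports that the impersonation proxy
// strategy is in the Disabled error state, failing the test fast if the impersonator instead
// reports an unexpected setup error.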
func requireDisabledStrategy(ctx context.Context, t *testing.T, env *testlib.TestEnv, adminConciergeClient pinnipedconciergeclientset.Interface) {
	t.Helper()

	testlib.RequireEventuallyWithoutError(t, func() (bool, error) {
		credentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
		if err != nil || credentialIssuer.Status.Strategies == nil {
			t.Log("Did not find any CredentialIssuer with any strategies")
			return false, nil // didn't find it, but keep trying
		}
		for _, strategy := range credentialIssuer.Status.Strategies {
			// There will be other strategy types in the list, so ignore those.
			if strategy.Type == conciergev1alpha.ImpersonationProxyStrategyType &&
				strategy.Status == conciergev1alpha.ErrorStrategyStatus &&
				strategy.Reason == conciergev1alpha.DisabledStrategyReason { //nolint:nestif
				return true, nil // found it, continue the test!
			} else if strategy.Type == conciergev1alpha.ImpersonationProxyStrategyType {
				t.Logf("Waiting for disabled impersonation proxy strategy on %s: found status %s with reason %s and message: %s",
					credentialIssuerName(env), strategy.Status, strategy.Reason, strategy.Message)
				if strategy.Reason == conciergev1alpha.ErrorDuringSetupStrategyReason {
					// The server encountered an unexpected error while stopping the impersonator, so fail the test fast.
					return false, fmt.Errorf("found impersonation strategy in %s state with message: %s", strategy.Reason, strategy.Message)
				}
			}
		}
		t.Log("Did not find any impersonation proxy strategy on CredentialIssuer")
		return false, nil // didn't find it, but keep trying
	}, 1*time.Minute, 500*time.Millisecond)
}
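
// credentialAlmostExpired parses the client certificate from a TokenCredentialRequest response
// and reports whether it will expire within the next two minutes.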
func credentialAlmostExpired(t *testing.T, credential *loginv1alpha1.TokenCredentialRequest) bool {
	t.Helper()

	pemBlock, _ := pem.Decode([]byte(credential.Status.Credential.ClientCertificateData))
	parsedCredential, err := x509.ParseCertificate(pemBlock.Bytes)
	require.NoError(t, err)
	timeRemaining := time.Until(parsedCredential.NotAfter)
	if timeRemaining < 2*time.Minute {
		t.Logf("The TokenCredentialRequest cred is almost expired and needs to be refreshed. Expires in %s.", timeRemaining)
		return true
	}
	t.Logf("The TokenCredentialRequest cred is good for some more time (%s) so using it.", timeRemaining)
	return false
}
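
// impersonationProxyRestConfig builds a rest.Config that points at the impersonation proxy,
// trusting the given CA bundle when one is provided (and skipping TLS verification otherwise),
// and optionally carrying a nested impersonation config.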
func impersonationProxyRestConfig(credential *loginv1alpha1.ClusterCredential, host string, caData []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *rest.Config {
	config := rest.Config{
		Host: host,
		TLSClientConfig: rest.TLSClientConfig{
			Insecure: caData == nil,
			CAData:   caData,
			CertData: []byte(credential.ClientCertificateData),
			KeyData:  []byte(credential.ClientKeyData),
		},
		// kubectl would set both the client cert and the token, so we'll do that too.
		// The Kube API server will ignore the token if the client cert successfully authenticates.
		// Only if the client cert is not present or fails to authenticate will it use the token.
		// Historically, it works that way because some web browsers will always send your
		// corporate-assigned client cert even if it is not valid, and it doesn't want to treat
		// that as a failure if you also sent a perfectly good token.
		// We would like the impersonation proxy to imitate that behavior, so we test it here.
		BearerToken: credential.Token,
	}
	if nestedImpersonationConfig != nil {
		config.Impersonate = *nestedImpersonationConfig
	}
	return &config
}
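
// kubeconfigProxyFunc returns a rest.Config proxy function that routes every request through the
// given Squid proxy URL, logging each proxied request with its query parameters redacted.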
func kubeconfigProxyFunc(t *testing.T, squidProxyURL string) func(req *http.Request) (*url.URL, error) {
	return func(req *http.Request) (*url.URL, error) {
		t.Helper()

		parsedSquidProxyURL, err := url.Parse(squidProxyURL)
		require.NoError(t, err)

		t.Logf("passing request for %s through proxy %s", testlib.RedactURLParams(req.URL), parsedSquidProxyURL.String())
		return parsedSquidProxyURL, nil
	}
}
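
// updateCredentialIssuer overwrites the CredentialIssuer's spec with the given spec, retrying on
// update conflicts.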
func updateCredentialIssuer(ctx context.Context, t *testing.T, env *testlib.TestEnv, adminConciergeClient pinnipedconciergeclientset.Interface, spec conciergev1alpha.CredentialIssuerSpec) {
	t.Helper()

	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		newCredentialIssuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
		if err != nil {
			return err
		}
		spec.DeepCopyInto(&newCredentialIssuer.Spec)
		_, err = adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Update(ctx, newCredentialIssuer, metav1.UpdateOptions{})
		return err
	})
	require.NoError(t, err)
}
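
// hasImpersonationProxyLoadBalancerService reports whether the impersonation proxy's load balancer
// Service exists and has type LoadBalancer. A missing Service is not treated as an error.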
func hasImpersonationProxyLoadBalancerService(ctx context.Context, env *testlib.TestEnv, client kubernetes.Interface) (bool, error) {
	service, err := client.CoreV1().Services(env.ConciergeNamespace).Get(ctx, impersonationProxyLoadBalancerName(env), metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return service.Spec.Type == corev1.ServiceTypeLoadBalancer, nil
}

func impersonationProxyTLSSecretName(env *testlib.TestEnv) string {
	return env.ConciergeAppName + "-impersonation-proxy-tls-serving-certificate"
}

func impersonationProxyCASecretName(env *testlib.TestEnv) string {
	return env.ConciergeAppName + "-impersonation-proxy-ca-certificate"
}

func impersonationProxyLoadBalancerName(env *testlib.TestEnv) string {
	return env.ConciergeAppName + "-impersonation-proxy-load-balancer"
}

func impersonationProxyClusterIPName(env *testlib.TestEnv) string {
	return env.ConciergeAppName + "-impersonation-proxy-cluster-ip"
}

func credentialIssuerName(env *testlib.TestEnv) string {
	return env.ConciergeAppName + "-config"
}
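
// getImpersonationKubeconfig runs "pinniped get kubeconfig" in ImpersonationProxy mode, asserts
// that the generated kubeconfig points at the expected impersonation proxy endpoint and CA bundle,
// writes it to a temp file, and returns the kubeconfig path, the env vars to use, and the temp dir.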
func getImpersonationKubeconfig(t *testing.T, env *testlib.TestEnv, impersonationProxyURL string, impersonationProxyCACertPEM []byte, authenticator corev1.TypedLocalObjectReference) (string, []string, string) {
	t.Helper()

	pinnipedExe := testlib.PinnipedCLIPath(t)
	tempDir := testutil.TempDir(t)

	var envVarsWithProxy []string
	if !env.HasCapability(testlib.HasExternalLoadBalancerProvider) {
		// Only if you don't have a load balancer, use the squid proxy when it's available.
		envVarsWithProxy = append(os.Environ(), env.ProxyEnv()...)
	}

	// Get the kubeconfig.
	getKubeConfigCmd := []string{"get", "kubeconfig",
		"--concierge-api-group-suffix", env.APIGroupSuffix,
		"--oidc-skip-browser",
		"--static-token", env.TestUser.Token,
		"--concierge-authenticator-name", authenticator.Name,
		"--concierge-authenticator-type", "webhook",
		// Force the use of impersonation proxy strategy, but let it auto-discover the endpoint and CA.
		"--concierge-mode", "ImpersonationProxy"}
	t.Log("Running:", pinnipedExe, getKubeConfigCmd)
	kubeconfigYAML, getKubeConfigStderr := runPinnipedCLI(t, envVarsWithProxy, pinnipedExe, getKubeConfigCmd...)
	// "pinniped get kubeconfig" prints some status messages to stderr
	t.Log(getKubeConfigStderr)

	// Make sure that "pinniped get kubeconfig" auto-discovered the impersonation proxy, so that we will
	// make our kubectl requests through the impersonation proxy. Avoid using require.Contains because the error
	// message would contain credentials.
	require.True(t,
		strings.Contains(kubeconfigYAML, "server: "+impersonationProxyURL+"\n"),
		"the generated kubeconfig did not include the expected impersonation server address: %s",
		impersonationProxyURL,
	)
	require.True(t,
		strings.Contains(kubeconfigYAML, "- --concierge-ca-bundle-data="+base64.StdEncoding.EncodeToString(impersonationProxyCACertPEM)+"\n"),
		"the generated kubeconfig did not include the base64 encoded version of this expected impersonation CA cert: %s",
		impersonationProxyCACertPEM,
	)

	// Write the kubeconfig to a temp file.
	kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml")
	require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))

	return kubeconfigPath, envVarsWithProxy, tempDir
}
// kubectlCommand builds an exec.Cmd that runs kubectl against the given kubeconfig, returning the
// command along with buffers that will capture its stdout and stderr.
func kubectlCommand(timeout context.Context, t *testing.T, kubeconfigPath string, envVarsWithProxy []string, args ...string) (*exec.Cmd, *syncBuffer, *syncBuffer) {
	t.Helper()

	allArgs := append([]string{"--kubeconfig", kubeconfigPath}, args...)
	//nolint:gosec // we are not performing malicious argument injection against ourselves
	kubectlCmd := exec.CommandContext(timeout, "kubectl", allArgs...)
	var stdout, stderr syncBuffer
	kubectlCmd.Stdout = &stdout
	kubectlCmd.Stderr = &stderr
	kubectlCmd.Env = envVarsWithProxy
	t.Log("starting kubectl subprocess: kubectl", strings.Join(allArgs, " "))
	return kubectlCmd, &stdout, &stderr
}

// runKubectl runs a kubectl command with the given kubeconfig, logging both stdout and stderr and
// returning the stdout output.
func runKubectl(t *testing.T, kubeconfigPath string, envVarsWithProxy []string, args ...string) (string, error) {
	timeout, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelFunc()

	kubectlCmd, stdout, stderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, args...)
	err := kubectlCmd.Run()
	t.Logf("kubectl stdout output: %s", stdout.String())
	t.Logf("kubectl stderr output: %s", stderr.String())
	return stdout.String(), err
}

// watchJSON defines the expected JSON wire equivalent of watch.Event.
type watchJSON struct {
	Type   watch.EventType `json:"type,omitempty"`
	Object json.RawMessage `json:"object,omitempty"`
}

// isServiceUnavailableViaSquidError returns whether the provided err is the error that is
// returned by squid when the impersonation proxy port inside the cluster is not listening.
func isServiceUnavailableViaSquidError(err error, proxyServiceEndpoint string) (bool, string) {
	if err == nil {
		return false, "error is nil"
	}

	for _, wantContains := range []string{
		fmt.Sprintf(`Get "https://%s/api/v1/namespaces"`, proxyServiceEndpoint),
		": Service Unavailable",
	} {
		if !strings.Contains(err.Error(), wantContains) {
			return false, fmt.Sprintf("error does not contain %q", wantContains)
		}
	}

	return true, ""
}
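
// requireClose waits up to timeout for a receive on the channel (i.e., for it to be closed) and
// fails the test otherwise.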
func requireClose(t *testing.T, c chan struct{}, timeout time.Duration) {
	t.Helper()

	timer := time.NewTimer(timeout)
	select {
	case <-c:
		if !timer.Stop() {
			<-timer.C
		}
	case <-timer.C:
		require.FailNow(t, "failed to receive from channel within "+timeout.String())
	}
}
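
// createTokenCredentialRequest submits a TokenCredentialRequest with the given spec using a
// one-minute timeout and returns the API server's response.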
func createTokenCredentialRequest(
	spec loginv1alpha1.TokenCredentialRequestSpec,
	client pinnipedconciergeclientset.Interface,
) (*loginv1alpha1.TokenCredentialRequest, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	return client.LoginV1alpha1().TokenCredentialRequests().Create(ctx,
		&loginv1alpha1.TokenCredentialRequest{Spec: spec}, metav1.CreateOptions{},
	)
}
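
// newImpersonationProxyClientWithCredentials builds a rest.Config for the impersonation proxy with
// the given credentials and returns a kubeclient.Client that uses it.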
func newImpersonationProxyClientWithCredentials(t *testing.T, credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *kubeclient.Client {
	t.Helper()

	kubeconfig := newImpersonationProxyConfigWithCredentials(t, credentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
	return testlib.NewKubeclient(t, kubeconfig)
}
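
// newImpersonationProxyConfigWithCredentials builds a rest.Config for the impersonation proxy with
// the given credentials, sending traffic through the Squid proxy only when the cluster does not
// support external load balancers.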
func newImpersonationProxyConfigWithCredentials(t *testing.T, credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *rest.Config {
	t.Helper()

	env := testlib.IntegrationEnv(t)
	clusterSupportsLoadBalancers := env.HasCapability(testlib.HasExternalLoadBalancerProvider)

	kubeconfig := impersonationProxyRestConfig(credentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
	if !clusterSupportsLoadBalancers {
		// Only if there is no possibility to send traffic through a load balancer, then send the traffic through the Squid proxy.
		// Prefer to go through a load balancer because that's how the impersonator is intended to be used in the real world.
		kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
	}
	return kubeconfig
}
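
// newAnonymousImpersonationProxyClient returns a client for the impersonation proxy that presents
// no credentials at all (an empty ClusterCredential).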
func newAnonymousImpersonationProxyClient(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *kubeclient.Client {
	t.Helper()

	emptyCredentials := &loginv1alpha1.ClusterCredential{}
	return newImpersonationProxyClientWithCredentials(t, emptyCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
}
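
// newImpersonationProxyClientWithCredentialsAndProxy is like newImpersonationProxyClientWithCredentials,
// except that it always sends traffic through the Squid proxy regardless of load balancer support.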
func newImpersonationProxyClientWithCredentialsAndProxy(t *testing.T, credentials *loginv1alpha1.ClusterCredential, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *kubeclient.Client {
	t.Helper()

	env := testlib.IntegrationEnv(t)

	kubeconfig := impersonationProxyRestConfig(credentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
	kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
	return testlib.NewKubeclient(t, kubeconfig)
}

// newAnonymousImpersonationProxyClientWithProxy always sends traffic through the Squid proxy, unlike
// newAnonymousImpersonationProxyClient, which only uses the proxy when the cluster lacks load
// balancer capabilities.
func newAnonymousImpersonationProxyClientWithProxy(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte, nestedImpersonationConfig *rest.ImpersonationConfig) *kubeclient.Client {
	t.Helper()

	env := testlib.IntegrationEnv(t)

	emptyCredentials := &loginv1alpha1.ClusterCredential{}
	kubeconfig := impersonationProxyRestConfig(emptyCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
	kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
	return testlib.NewKubeclient(t, kubeconfig)
}
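
// impersonationProxyViaSquidKubeClientWithoutCredential builds an anonymous Kubernetes client that
// reaches the impersonation proxy's in-cluster service endpoint through the Squid proxy, skipping
// TLS verification because no CA bundle is provided.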
func impersonationProxyViaSquidKubeClientWithoutCredential(t *testing.T, proxyServiceEndpoint string) kubernetes.Interface {
	t.Helper()

	env := testlib.IntegrationEnv(t)

	proxyURL := "https://" + proxyServiceEndpoint
	kubeconfig := impersonationProxyRestConfig(&loginv1alpha1.ClusterCredential{}, proxyURL, nil, nil)
	kubeconfig.Proxy = kubeconfigProxyFunc(t, env.Proxy)
	return testlib.NewKubeclient(t, kubeconfig).Kubernetes
}
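
// newImpersonationProxyClient refreshes the user's credential, deliberately sets an invalid bearer
// token alongside the client cert to show that certs take precedence, and returns a client for the
// impersonation proxy.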
func newImpersonationProxyClient(
	t *testing.T,
	impersonationProxyURL string,
	impersonationProxyCACertPEM []byte,
	nestedImpersonationConfig *rest.ImpersonationConfig,
	refreshCredentialFunc func(t *testing.T, impersonationProxyURL string, impersonationProxyCACertPEM []byte) *loginv1alpha1.ClusterCredential,
) *kubeclient.Client {
	t.Helper()

	refreshedCredentials := refreshCredentialFunc(t, impersonationProxyURL, impersonationProxyCACertPEM).DeepCopy()
	refreshedCredentials.Token = "not a valid token" // demonstrates that client certs take precedence over tokens by setting both on the requests
	return newImpersonationProxyClientWithCredentials(t, refreshedCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nestedImpersonationConfig)
}

// getCredForConfig is mostly just a hacky workaround for impersonationProxyRestConfig needing creds directly.
func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterCredential {
	t.Helper()

	out := &loginv1alpha1.ClusterCredential{}

	config = rest.CopyConfig(config)

	config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return roundtripper.Func(func(req *http.Request) (*http.Response, error) {
			resp, err := rt.RoundTrip(req)

			r := req
			if resp != nil && resp.Request != nil {
				r = resp.Request
			}

			_, _, _ = bearertoken.New(authenticator.TokenFunc(func(_ context.Context, token string) (*authenticator.Response, bool, error) {
				out.Token = token
				return nil, false, nil
			})).AuthenticateRequest(r)

			return resp, err
		})
	})

	transportConfig, err := config.TransportConfig()
	require.NoError(t, err)

	rt, err := transport.New(transportConfig)
	require.NoError(t, err)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "GET", "https://localhost", nil)
	require.NoError(t, err)

	resp, _ := rt.RoundTrip(req)
	if resp != nil && resp.Body != nil {
		_ = resp.Body.Close()
	}

	tlsConfig, err := transport.TLSConfigFor(transportConfig)
	require.NoError(t, err)

	if tlsConfig != nil && tlsConfig.GetClientCertificate != nil {
		cert, err := tlsConfig.GetClientCertificate(nil)
		require.NoError(t, err)
		if len(cert.Certificate) > 0 {
			require.Len(t, cert.Certificate, 1)
			publicKey := pem.EncodeToMemory(&pem.Block{
				Type:  "CERTIFICATE",
				Bytes: cert.Certificate[0],
			})
			out.ClientCertificateData = string(publicKey)

			privateKey, err := keyutil.MarshalPrivateKeyToPEM(cert.PrivateKey)
			require.NoError(t, err)
			out.ClientKeyData = string(privateKey)
		}
	}

	if *out == (loginv1alpha1.ClusterCredential{}) {
		t.Fatal("failed to get creds for config")
	}

	return out
}
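
// getUIDAndExtraViaCSR creates a throwaway CertificateSigningRequest as the current user, reads the
// UID and extra fields that the API server recorded on it, deletes the CSR, and returns those
// values. When the passed uid is non-empty it is returned as-is instead of the UID from the CSR.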
func getUIDAndExtraViaCSR(ctx context.Context, t *testing.T, uid string, client kubernetes.Interface) (string, map[string]certificatesv1beta1.ExtraValue) {
	t.Helper()

	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	require.NoError(t, err)

	csrPEM, err := cert.MakeCSR(privateKey, &pkix.Name{
		CommonName:   "panda-man",
		Organization: []string{"living-the-dream", "need-more-sleep"},
	}, nil, nil)
	require.NoError(t, err)

	csrName, _, err := csr.RequestCertificate(
		client,
		csrPEM,
		"",
		certificatesv1.KubeAPIServerClientSignerName,
		nil,
		[]certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
		privateKey,
	)
	require.NoError(t, err)

	csReq, err := client.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{})
	require.NoError(t, err)

	err = client.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{})
	require.NoError(t, err)

	outUID := uid // in the future this may not be empty on some clusters
	if len(outUID) == 0 {
		outUID = csReq.Spec.UID
	}
	return outUID, csReq.Spec.Extra
}
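
// parallelIfNotEKS calls t.Parallel() unless the cluster is an EKS cluster, in which case the test
// runs serially.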
func parallelIfNotEKS(t *testing.T) {
	if testlib.IntegrationEnv(t).KubernetesDistribution == testlib.EKSDistro {
		return
	}
	t.Parallel()
}