Merge branch 'main' into upstream_refresh_revocation_during_gc
commit 91eed1ab24
@@ -114,11 +114,12 @@ docker build .
 - [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start)
 - [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
 - [`ytt`](https://carvel.dev/#getting-started)
+- [`nmap`](https://nmap.org/download.html)
 
 On macOS, these tools can be installed with [Homebrew](https://brew.sh/) (assuming you have Chrome installed already):
 
 ```bash
-brew install kind k14s/tap/ytt k14s/tap/kapp kubectl chromedriver && brew cask install docker
+brew install kind k14s/tap/ytt k14s/tap/kapp kubectl chromedriver nmap && brew cask install docker
 ```
 
 1. Create a kind cluster, compile, create container images, and install Pinniped and supporting dependencies using:
@@ -5,7 +5,6 @@ package cmd
 
 import (
     "context"
-    "crypto/tls"
     "crypto/x509"
     "encoding/base64"
     "encoding/json"
@@ -28,13 +27,13 @@ import (
     _ "k8s.io/client-go/plugin/pkg/client/auth" // Adds handlers for various dynamic auth plugins in client-go
     "k8s.io/client-go/tools/clientcmd"
     clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-    "k8s.io/client-go/transport"
 
     conciergev1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
     configv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
     idpdiscoveryv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/idpdiscovery/v1alpha1"
     conciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
     "go.pinniped.dev/internal/groupsuffix"
+    "go.pinniped.dev/internal/net/phttp"
 )
 
 type kubeconfigDeps struct {
@@ -664,17 +663,8 @@ func validateKubeconfig(ctx context.Context, flags getKubeconfigParams, kubeconf
         return fmt.Errorf("invalid kubeconfig (no certificateAuthorityData)")
     }
 
-    httpClient := &http.Client{
-        Transport: &http.Transport{
-            TLSClientConfig: &tls.Config{
-                MinVersion: tls.VersionTLS12,
-                RootCAs:    kubeconfigCA,
-            },
-            Proxy:               http.ProxyFromEnvironment,
-            TLSHandshakeTimeout: 10 * time.Second,
-        },
-        Timeout: 10 * time.Second,
-    }
+    httpClient := phttp.Default(kubeconfigCA)
+    httpClient.Timeout = 10 * time.Second
 
     ticker := time.NewTicker(2 * time.Second)
     defer ticker.Stop()
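Several hunks in this merge replace hand-rolled `http.Client` literals with a shared `phttp.Default(rootCAs)` helper. The helper's own implementation is not part of this diff; the sketch below only illustrates the shape such a constructor could take, reusing the settings the removed code configured inline (TLS 1.2 minimum, caller-supplied root CAs, proxy from the environment, a 10-second handshake timeout):

```go
// Minimal sketch, not the real go.pinniped.dev/internal/net/phttp package.
package phttpsketch

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"time"
)

// Default returns an *http.Client with a hardened TLS setup; rootCAs may be nil
// to fall back to the system trust store.
func Default(rootCAs *x509.CertPool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				RootCAs:    rootCAs,
			},
			Proxy:               http.ProxyFromEnvironment,
			TLSHandshakeTimeout: 10 * time.Second,
		},
	}
}
```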
@@ -781,21 +771,14 @@ func discoverSupervisorUpstreamIDP(ctx context.Context, flags *getKubeconfigPara
 }
 
 func newDiscoveryHTTPClient(caBundleFlag caBundleFlag) (*http.Client, error) {
-    t := &http.Transport{
-        TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
-        Proxy:           http.ProxyFromEnvironment,
-    }
-    httpClient := &http.Client{Transport: t}
+    var rootCAs *x509.CertPool
     if caBundleFlag != nil {
-        rootCAs := x509.NewCertPool()
-        ok := rootCAs.AppendCertsFromPEM(caBundleFlag)
-        if !ok {
+        rootCAs = x509.NewCertPool()
+        if ok := rootCAs.AppendCertsFromPEM(caBundleFlag); !ok {
             return nil, fmt.Errorf("unable to fetch OIDC discovery data from issuer: could not parse CA bundle")
         }
-        t.TLSClientConfig.RootCAs = rootCAs
     }
-    httpClient.Transport = transport.DebugWrappers(httpClient.Transport)
-    return httpClient, nil
+    return phttp.Default(rootCAs), nil
 }
 
 func discoverIDPsDiscoveryEndpointURL(ctx context.Context, issuer string, httpClient *http.Client) (string, error) {
@@ -5,7 +5,6 @@ package cmd
 
 import (
     "context"
-    "crypto/tls"
     "crypto/x509"
     "encoding/base64"
     "encoding/json"
@@ -21,12 +20,12 @@ import (
     "github.com/spf13/cobra"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
-    "k8s.io/client-go/transport"
     "k8s.io/klog/v2/klogr"
 
     idpdiscoveryv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/idpdiscovery/v1alpha1"
     "go.pinniped.dev/internal/execcredcache"
     "go.pinniped.dev/internal/groupsuffix"
+    "go.pinniped.dev/internal/net/phttp"
     "go.pinniped.dev/internal/plog"
     "go.pinniped.dev/pkg/conciergeclient"
     "go.pinniped.dev/pkg/oidcclient"
@@ -308,18 +307,7 @@ func makeClient(caBundlePaths []string, caBundleData []string) (*http.Client, er
         }
         pool.AppendCertsFromPEM(pem)
     }
-    client := &http.Client{
-        Transport: &http.Transport{
-            Proxy: http.ProxyFromEnvironment,
-            TLSClientConfig: &tls.Config{
-                RootCAs:    pool,
-                MinVersion: tls.VersionTLS12,
-            },
-        },
-    }
-
-    client.Transport = transport.DebugWrappers(client.Transport)
-    return client, nil
+    return phttp.Default(pool), nil
 }
 
 func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
@@ -338,7 +326,7 @@ func tokenCredential(token *oidctypes.Token) *clientauthv1beta1.ExecCredential {
     return &cred
 }
 
-func SetLogLevel(lookupEnv func(string) (string, bool)) (*plog.PLogger, error) {
+func SetLogLevel(lookupEnv func(string) (string, bool)) (plog.Logger, error) {
     debug, _ := lookupEnv("PINNIPED_DEBUG")
     if debug == "true" {
         err := plog.ValidateAndSetLogLevelGlobally(plog.LevelDebug)
@@ -347,7 +335,7 @@ func SetLogLevel(lookupEnv func(string) (string, bool)) (*plog.PLogger, error) {
         }
     }
     logger := plog.New("Pinniped login: ")
-    return &logger, nil
+    return logger, nil
 }
 
 // mustGetConfigDir returns a directory that follows the XDG base directory convention:
@@ -132,6 +132,7 @@ check_dependency kubectl "Please install kubectl. e.g. 'brew install kubectl' fo
 check_dependency htpasswd "Please install htpasswd. Should be pre-installed on MacOS. Usually found in 'apache2-utils' package for linux."
 check_dependency openssl "Please install openssl. Should be pre-installed on MacOS."
 check_dependency chromedriver "Please install chromedriver. e.g. 'brew install chromedriver' for MacOS"
+check_dependency nmap "Please install nmap. e.g. 'brew install nmap' for MacOS"
 
 # Check that Chrome and chromedriver versions match. If chromedriver falls a couple versions behind
 # then usually tests start to fail with strange error messages.
@@ -158,6 +159,12 @@ if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
   exit 1
 fi
 
+# Require nmap >= 7.92.x
+if [ "$(nmap -V | grep 'Nmap version' | cut -d ' ' -f 3 | cut -d '.' -f 2)" -lt 92 ]; then
+  log_error "nmap >= 7.92.x is required, you have $(nmap -V | grep 'Nmap version' | cut -d ' ' -f 3)"
+  exit 1
+fi
+
 if [[ "$clean_kind" == "yes" ]]; then
   log_note "Deleting running kind cluster to prepare from a clean slate..."
   ./hack/kind-down.sh
@@ -7,6 +7,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "io"
     "net"
     "net/http"
     "net/http/httputil"
@@ -14,6 +15,7 @@ import (
     "os"
     "regexp"
     "strings"
+    "sync"
     "time"
 
     authenticationv1 "k8s.io/api/authentication/v1"
@@ -47,6 +49,7 @@ import (
     "k8s.io/client-go/transport"
 
     "go.pinniped.dev/internal/constable"
+    "go.pinniped.dev/internal/crypto/ptls"
     "go.pinniped.dev/internal/dynamiccert"
     "go.pinniped.dev/internal/httputil/securityheader"
     "go.pinniped.dev/internal/kubeclient"
@@ -70,13 +73,14 @@ func New(
     dynamicCertProvider dynamiccert.Private,
     impersonationProxySignerCA dynamiccert.Public,
 ) (func(stopCh <-chan struct{}) error, error) {
-    return newInternal(port, dynamicCertProvider, impersonationProxySignerCA, nil, nil, nil)
+    return newInternal(port, dynamicCertProvider, impersonationProxySignerCA, kubeclient.Secure, nil, nil, nil)
 }
 
 func newInternal( //nolint:funlen // yeah, it's kind of long.
     port int,
     dynamicCertProvider dynamiccert.Private,
     impersonationProxySignerCA dynamiccert.Public,
+    restConfigFunc ptls.RestConfigFunc, // for unit testing, should always be kubeclient.Secure in production
     clientOpts []kubeclient.Option, // for unit testing, should always be nil in production
     recOpts func(*genericoptions.RecommendedOptions), // for unit testing, should always be nil in production
     recConfig func(*genericapiserver.RecommendedConfig), // for unit testing, should always be nil in production
@@ -100,6 +104,13 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
     recommendedOptions.SecureServing.ServerCert.GeneratedCert = dynamicCertProvider // serving certs (end user facing)
     recommendedOptions.SecureServing.BindPort = port
 
+    // secure TLS for connections coming from external clients and going to the Kube API server
+    // this is best effort because not all options provide the right hooks to override TLS config
+    // since any client could connect to the impersonation proxy, this uses the default TLS config
+    if err := ptls.DefaultRecommendedOptions(recommendedOptions, restConfigFunc); err != nil {
+        return nil, fmt.Errorf("failed to secure recommended options: %w", err)
+    }
+
     // Wire up the impersonation proxy signer CA as another valid authenticator for client cert auth,
     // along with the Kube API server's CA.
     // Note: any changes to the Authentication stack need to be kept in sync with any assumptions made
@@ -258,7 +269,7 @@ func newInternal( //nolint:funlen // yeah, it's kind of long.
 
     delegatingAuthorizer := serverConfig.Authorization.Authorizer
     customReasonAuthorizer := &comparableAuthorizer{
-        authorizerFunc: func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+        AuthorizerFunc: func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
             const baseReason = "decision made by impersonation-proxy.concierge.pinniped.dev"
             switch a.GetVerb() {
             case "":
@@ -347,14 +358,14 @@ func getReverseProxyClient(clientOpts []kubeclient.Option) (*kubeclient.Client,
     if err != nil {
         return nil, err
     }
-    impersonationProxyRestConfig = rest.AnonymousClientConfig(impersonationProxyRestConfig)
+    impersonationProxyRestConfig = kubeclient.SecureAnonymousClientConfig(impersonationProxyRestConfig)
     impersonationProxyRestConfig.BearerTokenFile = tokenFile
 
     return kubeclient.New(kubeclient.WithConfig(impersonationProxyRestConfig))
 }
 
 func isAnonymousAuthEnabled(config *rest.Config) (bool, error) {
-    anonymousConfig := rest.AnonymousClientConfig(config)
+    anonymousConfig := kubeclient.SecureAnonymousClientConfig(config)
 
     // we do not need either of these but RESTClientFor complains if they are not set
     anonymousConfig.GroupVersion = &schema.GroupVersion{}
@@ -413,14 +424,7 @@ type comparableAuthenticator struct {
 
 // No-op wrapping around AuthorizerFunc to allow for comparisons.
 type comparableAuthorizer struct {
-    authorizerFunc
-}
-
-// TODO: delete when we pick up https://github.com/kubernetes/kubernetes/pull/100963
-type authorizerFunc func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error)
-
-func (f authorizerFunc) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
-    return f(ctx, a)
+    authorizer.AuthorizerFunc
 }
 
 func withBearerTokenPreservation(delegate http.Handler) http.Handler {
@@ -462,16 +466,16 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
     if err != nil {
         return nil, fmt.Errorf("could not get http/1.1 round tripper: %w", err)
     }
-    http1RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "http/1.1")
+    http1RoundTripperAnonymous, err := getTransportForProtocol(kubeclient.SecureAnonymousClientConfig(restConfig), "http/1.1")
     if err != nil {
         return nil, fmt.Errorf("could not get http/1.1 anonymous round tripper: %w", err)
     }
 
-    http2RoundTripper, err := getTransportForProtocol(restConfig, "h2")
+    http2RoundTripper, err := getTransportForProtocol(restConfig, "h2") // TODO figure out why this leads to still supporting http1
     if err != nil {
         return nil, fmt.Errorf("could not get http/2.0 round tripper: %w", err)
     }
-    http2RoundTripperAnonymous, err := getTransportForProtocol(rest.AnonymousClientConfig(restConfig), "h2")
+    http2RoundTripperAnonymous, err := getTransportForProtocol(kubeclient.SecureAnonymousClientConfig(restConfig), "h2")
     if err != nil {
         return nil, fmt.Errorf("could not get http/2.0 anonymous round tripper: %w", err)
     }
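`getTransportForProtocol` pins each transport to a single ALPN protocol via `NextProtos`, which is why separate `http/1.1` and `h2` round trippers are built. A standalone standard-library illustration of the same idea (not Pinniped code):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Restricting NextProtos to "http/1.1" means this transport will never
	// negotiate HTTP/2 during the TLS handshake.
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
			NextProtos: []string{"http/1.1"},
		},
	}
	client := &http.Client{Transport: tr}
	fmt.Printf("ALPN restricted to %v for %T\n", tr.TLSClientConfig.NextProtos, client.Transport)
}
```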
@@ -565,6 +569,12 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
                 r.Header.Del("X-Forwarded-For")
             }
 
+            // the http2 code seems to call Close concurrently which can lead to data races
+            if r.Body != nil {
+                r = utilnet.CloneRequest(r)
+                r.Body = &safeReadWriteCloser{rc: r.Body}
+            }
+
             reverseProxy := httputil.NewSingleHostReverseProxy(serverURL)
             reverseProxy.Transport = rt
             reverseProxy.FlushInterval = 200 * time.Millisecond // the "watch" verb will not work without this line
@@ -573,6 +583,43 @@ func newImpersonationReverseProxyFunc(restConfig *rest.Config) (func(*genericapi
     }, nil
 }
 
+var _ io.ReadWriteCloser = &safeReadWriteCloser{}
+
+type safeReadWriteCloser struct {
+    m sync.Mutex // all methods allowed concurrently, this only guards concurrent calls to Close
+
+    rc io.ReadCloser
+
+    once     sync.Once // set up rwc and writeErr
+    rwc      io.ReadWriteCloser
+    writeErr error
+}
+
+func (r *safeReadWriteCloser) Read(p []byte) (int, error) {
+    return r.rc.Read(p)
+}
+
+func (r *safeReadWriteCloser) Write(p []byte) (int, error) {
+    r.once.Do(func() {
+        var ok bool
+        r.rwc, ok = r.rc.(io.ReadWriteCloser)
+        if !ok { // this method should only be caused during flows that switch protocols
+            r.writeErr = fmt.Errorf("switching protocols failed: io.ReadCloser %T is not io.ReadWriteCloser", r.rc)
+        }
+    })
+    if r.writeErr != nil {
+        return 0, r.writeErr
+    }
+    return r.rwc.Write(p)
+}
+
+func (r *safeReadWriteCloser) Close() error {
+    r.m.Lock()
+    defer r.m.Unlock()
+
+    return r.rc.Close()
+}
+
 func ensureNoImpersonationHeaders(r *http.Request) error {
     for key := range r.Header {
         // even though we have unit tests that try to cover this case, it is hard to tell if Go does
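The new `safeReadWriteCloser` exists because the HTTP/2 path may call `Close` on the request body from more than one goroutine. The toy program below is a simplified stand-in rather than the production type; it shows the same pattern of serializing `Close` behind a mutex and runs cleanly under the race detector:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// safeCloser is a stripped-down analogue of safeReadWriteCloser: Read passes
// through, while Close is serialized so concurrent callers cannot race.
type safeCloser struct {
	m  sync.Mutex
	rc io.ReadCloser
}

func (s *safeCloser) Read(p []byte) (int, error) { return s.rc.Read(p) }

func (s *safeCloser) Close() error {
	s.m.Lock()
	defer s.m.Unlock()
	return s.rc.Close()
}

func main() {
	body := &safeCloser{rc: io.NopCloser(strings.NewReader("hello"))}

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ { // two goroutines racing to close the same body
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = body.Close()
		}()
	}
	wg.Wait()
	fmt.Println("closed concurrently; Close calls were serialized by the mutex")
}
```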
@@ -759,5 +806,14 @@ func getTransportForProtocol(restConfig *rest.Config, protocol string) (http.Rou
     }
     transportConfig.TLS.NextProtos = []string{protocol}
 
-    return transport.New(transportConfig)
+    rt, err := transport.New(transportConfig)
+    if err != nil {
+        return nil, fmt.Errorf("could not build transport: %w", err)
+    }
+
+    if err := kubeclient.AssertSecureTransport(rt); err != nil {
+        return nil, err // make sure we only use a secure TLS config
+    }
+
+    return rt, nil
 }
@@ -36,6 +36,7 @@ import (
     genericapiserver "k8s.io/apiserver/pkg/server"
     genericoptions "k8s.io/apiserver/pkg/server/options"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/clientcmd/api"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -44,12 +45,13 @@ import (
     loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
     "go.pinniped.dev/internal/certauthority"
     "go.pinniped.dev/internal/constable"
+    "go.pinniped.dev/internal/crypto/ptls"
     "go.pinniped.dev/internal/dynamiccert"
     "go.pinniped.dev/internal/groupsuffix"
     "go.pinniped.dev/internal/here"
     "go.pinniped.dev/internal/httputil/roundtripper"
     "go.pinniped.dev/internal/kubeclient"
-    "go.pinniped.dev/internal/testutil"
+    "go.pinniped.dev/internal/testutil/tlsserver"
 )
 
 func TestImpersonator(t *testing.T) {
@@ -697,7 +699,9 @@ func TestImpersonator(t *testing.T) {
             // will proxy incoming calls to this fake server.
             testKubeAPIServerWasCalled := false
             var testKubeAPIServerSawHeaders http.Header
-            testKubeAPIServerCA, testKubeAPIServerURL := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+            testKubeAPIServer := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+                tlsserver.AssertTLS(t, r, ptls.Secure)
+
                 switch r.URL.Path {
                 case "/api/v1/namespaces/kube-system/configmaps":
                     require.Equal(t, http.MethodGet, r.Method)
@@ -708,6 +712,13 @@ func TestImpersonator(t *testing.T) {
                     http.NotFound(w, r)
                     return
 
+                case "/apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations",
+                    "/apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas":
+                    // ignore requests related to priority and fairness logic
+                    require.Equal(t, http.MethodGet, r.Method)
+                    http.NotFound(w, r)
+                    return
+
                 case "/api/v1/namespaces":
                     require.Equal(t, http.MethodGet, r.Method)
 
@@ -785,13 +796,13 @@ func TestImpersonator(t *testing.T) {
                     require.Fail(t, "fake Kube API server got an unexpected request", "path: %s", r.URL.Path)
                     return
                 }
-            })
+            }), tlsserver.RecordTLSHello)
 
             // Create the client config that the impersonation server should use to talk to the Kube API server.
             testKubeAPIServerKubeconfig := rest.Config{
-                Host:            testKubeAPIServerURL,
+                Host:            testKubeAPIServer.URL,
                 BearerToken:     "some-service-account-token",
-                TLSClientConfig: rest.TLSClientConfig{CAData: []byte(testKubeAPIServerCA)},
+                TLSClientConfig: rest.TLSClientConfig{CAData: tlsserver.TLSTestServerCA(testKubeAPIServer)},
                 BearerTokenFile: tt.kubeAPIServerClientBearerTokenFile,
             }
             clientOpts := []kubeclient.Option{kubeclient.WithConfig(&testKubeAPIServerKubeconfig)}
@@ -800,7 +811,6 @@ func TestImpersonator(t *testing.T) {
             recOpts := func(options *genericoptions.RecommendedOptions) {
                 options.Authentication.RemoteKubeConfigFileOptional = true
                 options.Authorization.RemoteKubeConfigFileOptional = true
-                options.CoreAPI = nil
                 options.Admission = nil
                 options.SecureServing.Listener = listener // use our listener with the dynamic port
             }
@@ -814,8 +824,8 @@ func TestImpersonator(t *testing.T) {
             // Allow standard REST verbs to be authorized so that tests pass without invasive changes
             recConfig := func(config *genericapiserver.RecommendedConfig) {
                 authz := config.Authorization.Authorizer.(*comparableAuthorizer)
-                delegate := authz.authorizerFunc
-                authz.authorizerFunc = func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+                delegate := authz.AuthorizerFunc
+                authz.AuthorizerFunc = func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
                     recorder.record(a)
                     switch a.GetVerb() {
                     case "create", "get", "list":
@@ -826,8 +836,15 @@ func TestImpersonator(t *testing.T) {
                 }
             }
 
+            restConfigFunc := func(config *rest.Config) (kubernetes.Interface, *rest.Config, error) {
+                if config == nil {
+                    config = &testKubeAPIServerKubeconfig
+                }
+                return kubeclient.Secure(config)
+            }
+
             // Create an impersonator. Use an invalid port number to make sure our listener override works.
-            runner, constructionErr := newInternal(-1000, certKeyContent, caContent, clientOpts, recOpts, recConfig)
+            runner, constructionErr := newInternal(-1000, certKeyContent, caContent, restConfigFunc, clientOpts, recOpts, recConfig)
             if len(tt.wantConstructionError) > 0 {
                 require.EqualError(t, constructionErr, tt.wantConstructionError)
                 require.Nil(t, runner)
@@ -864,7 +881,7 @@ func TestImpersonator(t *testing.T) {
                     return rt
                 }
 
-                return roundtripper.Func(func(req *http.Request) (*http.Response, error) {
+                return roundtripper.WrapFunc(rt, func(req *http.Request) (*http.Response, error) {
                     req = req.Clone(req.Context())
                     tt.clientMutateHeaders(req.Header)
                     return rt.RoundTrip(req)
@@ -928,10 +945,10 @@ func TestImpersonator(t *testing.T) {
 
             // anonymous TCR should always work
 
-            tcrRegGroup, err := kubeclient.New(kubeclient.WithConfig(rest.AnonymousClientConfig(clientKubeconfig)))
+            tcrRegGroup, err := kubeclient.New(kubeclient.WithConfig(kubeclient.SecureAnonymousClientConfig(clientKubeconfig)))
             require.NoError(t, err)
 
-            tcrOtherGroup, err := kubeclient.New(kubeclient.WithConfig(rest.AnonymousClientConfig(clientKubeconfig)),
+            tcrOtherGroup, err := kubeclient.New(kubeclient.WithConfig(kubeclient.SecureAnonymousClientConfig(clientKubeconfig)),
                 kubeclient.WithMiddleware(groupsuffix.New("walrus.tld")))
             require.NoError(t, err)
 
@@ -950,7 +967,7 @@ func TestImpersonator(t *testing.T) {
 
             // these calls should only work when anonymous auth is enabled
 
-            anonymousConfig := rest.AnonymousClientConfig(clientKubeconfig)
+            anonymousConfig := kubeclient.SecureAnonymousClientConfig(clientKubeconfig)
             anonymousConfig.GroupVersion = &schema.GroupVersion{
                 Group:   "not-concierge.walrus.tld",
                 Version: "v1",
@@ -989,7 +1006,7 @@ func TestImpersonator(t *testing.T) {
 
             // this should always fail as unauthorized (even for TCR) because the cert is not valid
 
-            badCertConfig := rest.AnonymousClientConfig(clientKubeconfig)
+            badCertConfig := kubeclient.SecureAnonymousClientConfig(clientKubeconfig)
             badCert := newClientCert(t, unrelatedCA, "bad-user", []string{"bad-group"})
             badCertConfig.TLSClientConfig.CertData = badCert.certPEM
             badCertConfig.TLSClientConfig.KeyData = badCert.keyPEM
@@ -1041,7 +1058,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
                 ExecProvider: &api.ExecConfig{},
                 AuthProvider: &api.AuthProviderConfig{},
             },
-            wantCreationErr: "could not get http/1.1 round tripper: could not get in-cluster transport config: execProvider and authProvider cannot be used in combination",
+            wantCreationErr: "could not create secure client config: execProvider and authProvider cannot be used in combination",
         },
         {
             name: "fail to get transport from config",
@@ -1051,7 +1068,7 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
                 Transport:       http.DefaultTransport,
                 TLSClientConfig: rest.TLSClientConfig{Insecure: true},
             },
-            wantCreationErr: "could not get http/1.1 round tripper: using a custom transport with TLS certificate options or the insecure flag is not allowed",
+            wantCreationErr: "could not create secure client config: failed to build transport: using a custom transport with TLS certificate options or the insecure flag is not allowed",
         },
         {
             name: "Impersonate-User header already in request",
@@ -1761,7 +1778,9 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
 
             testKubeAPIServerWasCalled := false
             testKubeAPIServerSawHeaders := http.Header{}
-            testKubeAPIServerCA, testKubeAPIServerURL := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {
+            testKubeAPIServer := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+                tlsserver.AssertTLS(t, r, ptls.Secure)
+
                 testKubeAPIServerWasCalled = true
                 testKubeAPIServerSawHeaders = r.Header
                 if tt.kubeAPIServerStatusCode != http.StatusOK {
@@ -1769,17 +1788,26 @@ func TestImpersonatorHTTPHandler(t *testing.T) {
                 } else {
                     _, _ = w.Write([]byte("successful proxied response"))
                 }
-            })
+            }), tlsserver.RecordTLSHello)
 
             testKubeAPIServerKubeconfig := rest.Config{
-                Host:            testKubeAPIServerURL,
+                Host:            testKubeAPIServer.URL,
                 BearerToken:     "some-service-account-token",
-                TLSClientConfig: rest.TLSClientConfig{CAData: []byte(testKubeAPIServerCA)},
+                TLSClientConfig: rest.TLSClientConfig{CAData: tlsserver.TLSTestServerCA(testKubeAPIServer)},
             }
             if tt.restConfig == nil {
                 tt.restConfig = &testKubeAPIServerKubeconfig
             }
-            impersonatorHTTPHandlerFunc, err := newImpersonationReverseProxyFunc(tt.restConfig)
+
+            // mimic how newInternal would call newImpersonationReverseProxyFunc
+            impersonatorHTTPHandlerFunc, err := func() (func(*genericapiserver.Config) http.Handler, error) {
+                kubeClientForProxy, err := kubeclient.New(kubeclient.WithConfig(tt.restConfig))
+                if err != nil {
+                    return nil, err
+                }
+                return newImpersonationReverseProxyFunc(rest.CopyConfig(kubeClientForProxy.ProtoConfig))
+            }()
+
             if tt.wantCreationErr != "" {
                 require.EqualError(t, err, tt.wantCreationErr)
                 require.Nil(t, impersonatorHTTPHandlerFunc)
@@ -29,10 +29,12 @@ import (
     "go.pinniped.dev/internal/controller/authenticator/authncache"
     "go.pinniped.dev/internal/controllerinit"
     "go.pinniped.dev/internal/controllermanager"
+    "go.pinniped.dev/internal/crypto/ptls"
     "go.pinniped.dev/internal/downward"
     "go.pinniped.dev/internal/dynamiccert"
     "go.pinniped.dev/internal/here"
     "go.pinniped.dev/internal/issuer"
+    "go.pinniped.dev/internal/kubeclient"
     "go.pinniped.dev/internal/plog"
     "go.pinniped.dev/internal/registry/credentialrequest"
 )
@@ -209,6 +211,13 @@ func getAggregatedAPIServerConfig(
     recommendedOptions.SecureServing.ServerCert.GeneratedCert = dynamicCertProvider
     recommendedOptions.SecureServing.BindPort = 8443 // Don't run on default 443 because that requires root
 
+    // secure TLS for connections coming from and going to the Kube API server
+    // this is best effort because not all options provide the right hooks to override TLS config
+    // since our only client is the Kube API server, this uses the most secure TLS config
+    if err := ptls.SecureRecommendedOptions(recommendedOptions, kubeclient.Secure); err != nil {
+        return nil, fmt.Errorf("failed to secure recommended options: %w", err)
+    }
+
     serverConfig := genericapiserver.NewRecommendedConfig(codecs)
     // Note that among other things, this ApplyTo() function copies
     // `recommendedOptions.SecureServing.ServerCert.GeneratedCert` into
@@ -220,7 +229,7 @@ func getAggregatedAPIServerConfig(
     // If the provider later starts returning certs, then the API server
     // will use them to handle the incoming requests successfully.
     if err := recommendedOptions.ApplyTo(serverConfig); err != nil {
-        return nil, err
+        return nil, fmt.Errorf("failed to apply recommended options: %w", err)
     }
 
     apiServerConfig := &apiserver.Config{
@@ -9,6 +9,8 @@ import (
     "encoding/base64"
     "fmt"
 
+    "k8s.io/client-go/util/cert"
+
     auth1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
 )
 
@@ -23,19 +25,20 @@ type Closer interface {
 // CABundle returns a PEM-encoded CA bundle from the provided spec. If the provided spec is nil, a
 // nil CA bundle will be returned. If the provided spec contains a CA bundle that is not properly
 // encoded, an error will be returned.
-func CABundle(spec *auth1alpha1.TLSSpec) ([]byte, error) {
+func CABundle(spec *auth1alpha1.TLSSpec) (*x509.CertPool, []byte, error) {
     if spec == nil || len(spec.CertificateAuthorityData) == 0 {
-        return nil, nil
+        return nil, nil, nil
     }
 
     pem, err := base64.StdEncoding.DecodeString(spec.CertificateAuthorityData)
     if err != nil {
-        return nil, err
+        return nil, nil, err
     }
 
-    if ok := x509.NewCertPool().AppendCertsFromPEM(pem); !ok {
-        return nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
+    rootCAs, err := cert.NewPoolFromBytes(pem)
+    if err != nil {
+        return nil, nil, fmt.Errorf("certificateAuthorityData is not valid PEM: %w", err)
     }
 
-    return pem, nil
+    return rootCAs, pem, nil
 }
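`CABundle` now returns both a parsed `*x509.CertPool` (used later in this diff by the JWT authenticator to build its HTTP client) and the decoded PEM bytes (still used by the webhook authenticator, which only writes them into a generated kubeconfig). A self-contained sketch of the same decode-and-parse contract, using only the standard library rather than `k8s.io/client-go/util/cert`:

```go
package main

import (
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

// caBundleSketch mirrors the new CABundle shape; the production function also
// accepts a nil spec and returns (nil, nil, nil) in that case.
func caBundleSketch(certificateAuthorityData string) (*x509.CertPool, []byte, error) {
	pemBytes, err := base64.StdEncoding.DecodeString(certificateAuthorityData)
	if err != nil {
		return nil, nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, nil, fmt.Errorf("certificateAuthorityData is not valid PEM")
	}
	return pool, pemBytes, nil
}

func main() {
	_, _, err := caBundleSketch(base64.StdEncoding.EncodeToString([]byte("not a certificate")))
	fmt.Println(err) // certificateAuthorityData is not valid PEM
}
```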
@@ -6,9 +6,13 @@
 package jwtcachefiller
 
 import (
+    "context"
     "fmt"
+    "net/url"
     "reflect"
+    "time"
 
+    coreosoidc "github.com/coreos/go-oidc/v3/oidc"
     "github.com/go-logr/logr"
     "gopkg.in/square/go-jose.v2"
     "k8s.io/apimachinery/pkg/api/errors"
@@ -23,6 +27,7 @@ import (
     pinnipedauthenticator "go.pinniped.dev/internal/controller/authenticator"
     "go.pinniped.dev/internal/controller/authenticator/authncache"
     "go.pinniped.dev/internal/controllerlib"
+    "go.pinniped.dev/internal/net/phttp"
 )
 
 // These default values come from the way that the Supervisor issues and signs tokens. We make these
@@ -145,7 +150,7 @@ func (c *controller) extractValueAsJWTAuthenticator(value authncache.Value) *jwt
 
 // newJWTAuthenticator creates a jwt authenticator from the provided spec.
 func newJWTAuthenticator(spec *auth1alpha1.JWTAuthenticatorSpec) (*jwtAuthenticator, error) {
-    caBundle, err := pinnipedauthenticator.CABundle(spec.TLS)
+    rootCAs, caBundle, err := pinnipedauthenticator.CABundle(spec.TLS)
     if err != nil {
         return nil, fmt.Errorf("invalid TLS configuration: %w", err)
     }
@@ -167,20 +172,51 @@ func newJWTAuthenticator(spec *auth1alpha1.JWTAuthenticatorSpec) (*jwtAuthentica
         groupsClaim = defaultGroupsClaim
     }
 
-    authenticator, err := oidc.New(oidc.Options{
+    // copied from Kube OIDC code
+    issuerURL, err := url.Parse(spec.Issuer)
+    if err != nil {
+        return nil, err
+    }
+    if issuerURL.Scheme != "https" {
+        return nil, fmt.Errorf("issuer (%q) has invalid scheme (%q), require 'https'", spec.Issuer, issuerURL.Scheme)
+    }
+
+    client := phttp.Default(rootCAs)
+    client.Timeout = 30 * time.Second // copied from Kube OIDC code
+
+    ctx := coreosoidc.ClientContext(context.Background(), client)
+
+    provider, err := coreosoidc.NewProvider(ctx, spec.Issuer)
+    if err != nil {
+        return nil, fmt.Errorf("could not initialize provider: %w", err)
+    }
+    providerJSON := &struct {
+        JWKSURL string `json:"jwks_uri"`
+    }{}
+    if err := provider.Claims(providerJSON); err != nil {
+        return nil, fmt.Errorf("could not get provider jwks_uri: %w", err) // should be impossible because coreosoidc.NewProvider validates this
+    }
+    if len(providerJSON.JWKSURL) == 0 {
+        return nil, fmt.Errorf("issuer %q does not have jwks_uri set", spec.Issuer)
+    }
+
+    oidcAuthenticator, err := oidc.New(oidc.Options{
         IssuerURL:            spec.Issuer,
+        KeySet:               coreosoidc.NewRemoteKeySet(ctx, providerJSON.JWKSURL),
         ClientID:             spec.Audience,
         UsernameClaim:        usernameClaim,
         GroupsClaim:          groupsClaim,
         SupportedSigningAlgs: defaultSupportedSigningAlgos(),
-        CAContentProvider:    caContentProvider,
+        // this is still needed for distributed claim resolution, meaning this uses a http client that does not honor our TLS config
+        // TODO fix when we pick up https://github.com/kubernetes/kubernetes/pull/106141
+        CAContentProvider: caContentProvider,
     })
     if err != nil {
         return nil, fmt.Errorf("could not initialize authenticator: %w", err)
     }
 
     return &jwtAuthenticator{
-        tokenAuthenticatorCloser: authenticator,
+        tokenAuthenticatorCloser: oidcAuthenticator,
         spec:                     spec,
     }, nil
 }
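The rewritten `newJWTAuthenticator` performs OIDC discovery itself so that the CA-pinned HTTP client is honored for the discovery and JWKS requests. The standalone program below distills that flow using the public go-oidc v3 API; the issuer URL and the empty cert pool are placeholders, not values taken from the Pinniped code:

```go
package main

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"time"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	rootCAs := x509.NewCertPool() // in real code this comes from the authenticator's CA bundle
	client := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12, RootCAs: rootCAs},
		},
	}

	// go-oidc uses the client stored on the context for discovery and JWKS fetches.
	ctx := oidc.ClientContext(context.Background(), client)

	provider, err := oidc.NewProvider(ctx, "https://issuer.example.com") // placeholder issuer
	if err != nil {
		fmt.Println("discovery failed:", err)
		return
	}

	var metadata struct {
		JWKSURL string `json:"jwks_uri"`
	}
	if err := provider.Claims(&metadata); err != nil {
		fmt.Println("could not read provider metadata:", err)
		return
	}

	keySet := oidc.NewRemoteKeySet(ctx, metadata.JWKSURL) // keys are fetched lazily with the same client
	fmt.Printf("jwks_uri=%s keySet=%T\n", metadata.JWKSURL, keySet)
}
```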
@@ -15,7 +15,6 @@ import (
     "encoding/pem"
     "fmt"
     "net/http"
-    "net/http/httptest"
     "strings"
     "testing"
     "time"
@@ -35,8 +34,10 @@ import (
     pinnipedinformers "go.pinniped.dev/generated/latest/client/concierge/informers/externalversions"
     "go.pinniped.dev/internal/controller/authenticator/authncache"
     "go.pinniped.dev/internal/controllerlib"
+    "go.pinniped.dev/internal/crypto/ptls"
     "go.pinniped.dev/internal/mocks/mocktokenauthenticatorcloser"
     "go.pinniped.dev/internal/testutil/testlogger"
+    "go.pinniped.dev/internal/testutil/tlsserver"
 )
 
 func TestController(t *testing.T) {
@@ -57,8 +58,10 @@ func TestController(t *testing.T) {
     goodRSASigningAlgo := jose.RS256
 
     mux := http.NewServeMux()
-    server := httptest.NewTLSServer(mux)
-    t.Cleanup(server.Close)
+    server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        tlsserver.AssertTLS(t, r, ptls.Default)
+        mux.ServeHTTP(w, r)
+    }), tlsserver.RecordTLSHello)
 
     mux.Handle("/.well-known/openid-configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         w.Header().Set("Content-Type", "application/json")
@@ -290,11 +293,7 @@ func TestController(t *testing.T) {
                 Spec: *missingTLSJWTAuthenticatorSpec,
             },
         },
-        wantLogs: []string{
-            `jwtcachefiller-controller "level"=0 "msg"="added new jwt authenticator" "issuer"="` + goodIssuer + `" "jwtAuthenticator"={"name":"test-name"}`,
-        },
-        wantCacheEntries: 1,
-        runTestsOnResultingAuthenticator: false, // skip the tests because the authenticator left in the cache doesn't have the CA for our test discovery server
+        wantErr: `failed to build jwt authenticator: could not initialize provider: Get "` + goodIssuer + `/.well-known/openid-configuration": x509: certificate signed by unknown authority`,
     },
     {
         name: "invalid jwt authenticator CA",
@@ -91,7 +91,7 @@ func newWebhookAuthenticator(
     defer func() { _ = os.Remove(temp.Name()) }()
 
     cluster := &clientcmdapi.Cluster{Server: spec.Endpoint}
-    cluster.CertificateAuthorityData, err = pinnipedauthenticator.CABundle(spec.TLS)
+    _, cluster.CertificateAuthorityData, err = pinnipedauthenticator.CABundle(spec.TLS)
     if err != nil {
         return nil, fmt.Errorf("invalid TLS configuration: %w", err)
     }
@@ -118,5 +118,7 @@ func newWebhookAuthenticator(
     // custom proxy stuff used by the API server.
     var customDial net.DialFunc
 
+    // this uses a http client that does not honor our TLS config
+    // TODO fix when we pick up https://github.com/kubernetes/kubernetes/pull/106155
     return webhook.New(temp.Name(), version, implicitAuds, *webhook.DefaultRetryBackoff(), customDial)
 }
@@ -141,7 +141,7 @@ func TestNewWebhookAuthenticator(t *testing.T) {
         TLS: &auth1alpha1.TLSSpec{CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte("bad data"))},
     }, ioutil.TempFile, clientcmd.WriteToFile)
     require.Nil(t, res)
-    require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM")
+    require.EqualError(t, err, "invalid TLS configuration: certificateAuthorityData is not valid PEM: data does not contain any valid RSA or ECDSA certificates")
 })
 
 t.Run("valid config with no TLS spec", func(t *testing.T) {
@@ -26,6 +26,8 @@ type kubeClientPodCommandExecutor struct {
 // NewPodCommandExecutor returns a PodCommandExecutor that will interact with a pod via the provided
 // kubeConfig and corresponding kubeClient.
 func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.Interface) PodCommandExecutor {
+    kubeConfig = restclient.CopyConfig(kubeConfig)
+    kubeConfig.NextProtos = []string{"http/1.1"} // we explicitly need to upgrade from http1 to spdy, exec cannot use http2
     return &kubeClientPodCommandExecutor{kubeConfig: kubeConfig, kubeClient: kubeClient}
 }
 
@@ -0,0 +1,44 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package kubecertagent
+
+import (
+    "net/http"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+    "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/client-go/rest"
+
+    "go.pinniped.dev/internal/crypto/ptls"
+    "go.pinniped.dev/internal/kubeclient"
+    "go.pinniped.dev/internal/testutil/tlsserver"
+)
+
+func TestSecureTLS(t *testing.T) {
+    var sawRequest bool
+    server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
+        tlsserver.AssertTLS(t, r, ptls.Secure)
+        sawRequest = true
+    }), tlsserver.RecordTLSHello)
+
+    config := &rest.Config{
+        Host: server.URL,
+        TLSClientConfig: rest.TLSClientConfig{
+            CAData: tlsserver.TLSTestServerCA(server),
+        },
+    }
+
+    client, err := kubeclient.New(kubeclient.WithConfig(config))
+    require.NoError(t, err)
+
+    // build this exactly like our production could does
+    podCommandExecutor := NewPodCommandExecutor(client.JSONConfig, client.Kubernetes)
+
+    got, err := podCommandExecutor.Exec("podNamespace", "podName", "command", "arg1", "arg2")
+    require.Equal(t, &errors.StatusError{}, err)
+    require.Empty(t, got)
+
+    require.True(t, sawRequest)
+}
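The new test leans on the internal `tlsserver` helpers to assert which TLS profile a client actually negotiated. Those helpers are not shown in this diff, but the standard library exposes the underlying hook: `tls.Config.GetConfigForClient` observes every ClientHello, which is presumably the kind of mechanism an option like `RecordTLSHello` builds on. A standalone sketch:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
)

func main() {
	var (
		mu     sync.Mutex
		hellos int
		suites int
	)

	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	server.TLS = &tls.Config{
		// GetConfigForClient sees each ClientHello before the handshake finishes.
		GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) {
			mu.Lock()
			defer mu.Unlock()
			hellos++
			suites = len(info.CipherSuites)
			return nil, nil // keep using the server's existing TLS config
		},
	}
	server.StartTLS()
	defer server.Close()

	resp, err := server.Client().Get(server.URL)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	mu.Lock()
	defer mu.Unlock()
	fmt.Printf("captured %d ClientHello(s); the client offered %d cipher suites\n", hellos, suites)
}
```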
@@ -6,7 +6,6 @@ package oidcupstreamwatcher
 
 import (
     "context"
-    "crypto/tls"
     "crypto/x509"
     "encoding/base64"
     "fmt"
@@ -35,6 +34,7 @@ import (
     "go.pinniped.dev/internal/controller/conditionsutil"
     "go.pinniped.dev/internal/controller/supervisorconfig/upstreamwatchers"
     "go.pinniped.dev/internal/controllerlib"
+    "go.pinniped.dev/internal/net/phttp"
     "go.pinniped.dev/internal/oidc/provider"
     "go.pinniped.dev/internal/upstreamoidc"
 )
@@ -313,7 +313,8 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1
 
     // If the provider does not exist in the cache, do a fresh discovery lookup and save to the cache.
     if discoveredProvider == nil {
-        tlsConfig, err := getTLSConfig(upstream)
+        var err error
+        httpClient, err = getClient(upstream)
         if err != nil {
             return &v1alpha1.Condition{
                 Type: typeOIDCDiscoverySucceeded,
@ -323,14 +324,6 @@ func (c *oidcWatcherController) validateIssuer(ctx context.Context, upstream *v1
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
httpClient = &http.Client{
|
|
||||||
Timeout: time.Minute,
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Proxy: http.ProxyFromEnvironment,
|
|
||||||
TLSClientConfig: tlsConfig,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer)
|
discoveredProvider, err = oidc.NewProvider(oidc.ClientContext(ctx, httpClient), upstream.Spec.Issuer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
const klogLevelTrace = 6
|
const klogLevelTrace = 6
|
||||||
@ -444,13 +437,9 @@ func (c *oidcWatcherController) updateStatus(ctx context.Context, upstream *v1al
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getTLSConfig(upstream *v1alpha1.OIDCIdentityProvider) (*tls.Config, error) {
|
func getClient(upstream *v1alpha1.OIDCIdentityProvider) (*http.Client, error) {
|
||||||
result := tls.Config{
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
}
|
|
||||||
|
|
||||||
if upstream.Spec.TLS == nil || upstream.Spec.TLS.CertificateAuthorityData == "" {
|
if upstream.Spec.TLS == nil || upstream.Spec.TLS.CertificateAuthorityData == "" {
|
||||||
return &result, nil
|
return defaultClientShortTimeout(nil), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
bundle, err := base64.StdEncoding.DecodeString(upstream.Spec.TLS.CertificateAuthorityData)
|
bundle, err := base64.StdEncoding.DecodeString(upstream.Spec.TLS.CertificateAuthorityData)
|
||||||
@ -458,12 +447,18 @@ func getTLSConfig(upstream *v1alpha1.OIDCIdentityProvider) (*tls.Config, error)
|
|||||||
return nil, fmt.Errorf("spec.certificateAuthorityData is invalid: %w", err)
|
return nil, fmt.Errorf("spec.certificateAuthorityData is invalid: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
result.RootCAs = x509.NewCertPool()
|
rootCAs := x509.NewCertPool()
|
||||||
if !result.RootCAs.AppendCertsFromPEM(bundle) {
|
if !rootCAs.AppendCertsFromPEM(bundle) {
|
||||||
return nil, fmt.Errorf("spec.certificateAuthorityData is invalid: %w", upstreamwatchers.ErrNoCertificates)
|
return nil, fmt.Errorf("spec.certificateAuthorityData is invalid: %w", upstreamwatchers.ErrNoCertificates)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &result, nil
|
return defaultClientShortTimeout(rootCAs), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultClientShortTimeout(rootCAs *x509.CertPool) *http.Client {
|
||||||
|
c := phttp.Default(rootCAs)
|
||||||
|
c.Timeout = time.Minute
|
||||||
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
func computeScopes(additionalScopes []string) []string {
|
func computeScopes(additionalScopes []string) []string {
|
||||||
|
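For context, a minimal sketch of how a caller could combine `phttp.Default` with a decoded CA bundle for OIDC discovery, mirroring the `getClient` / `defaultClientShortTimeout` helpers in the diff above. This is not part of the commit; the `discover` function name, the issuer argument, and the go-oidc import path are illustrative assumptions.

```go
package example

import (
	"context"
	"crypto/x509"
	"fmt"
	"time"

	"github.com/coreos/go-oidc/v3/oidc" // assumed import path for the OIDC discovery client

	"go.pinniped.dev/internal/net/phttp"
)

// discover builds a hardened, proxy-aware HTTP client that trusts caPEM and
// uses it to run OIDC discovery against the given issuer.
func discover(ctx context.Context, issuer string, caPEM []byte) (*oidc.Provider, error) {
	rootCAs := x509.NewCertPool()
	if !rootCAs.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no certificates found in CA bundle")
	}

	httpClient := phttp.Default(rootCAs) // same helper the controller now uses
	httpClient.Timeout = time.Minute     // same short timeout applied in defaultClientShortTimeout

	return oidc.NewProvider(oidc.ClientContext(ctx, httpClient), issuer)
}
```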
@@ -19,6 +19,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/net"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
@@ -1170,8 +1171,7 @@ oidc: issuer did not match the issuer returned by provider, expected "` + testIs
	// We always want to use the proxy from env on these clients, so although the following assertions
	// are a little hacky, this is a cheap way to test that we are using it.
-	actualTransport, ok := actualIDP.Client.Transport.(*http.Transport)
-	require.True(t, ok, "expected cached provider to have client with Transport of type *http.Transport")
+	actualTransport := unwrapTransport(t, actualIDP.Client.Transport)
	httpProxyFromEnvFunction := reflect.ValueOf(http.ProxyFromEnvironment).Pointer()
	actualTransportProxyFunction := reflect.ValueOf(actualTransport.Proxy).Pointer()
	require.Equal(t, httpProxyFromEnvFunction, actualTransportProxyFunction,
@@ -1197,6 +1197,22 @@ oidc: issuer did not match the issuer returned by provider, expected "` + testIs
	}
}

+func unwrapTransport(t *testing.T, rt http.RoundTripper) *http.Transport {
+	t.Helper()
+
+	switch baseRT := rt.(type) {
+	case *http.Transport:
+		return baseRT
+	case net.RoundTripperWrapper:
+		return unwrapTransport(t, baseRT.WrappedRoundTripper())
+	default:
+		t.Fatalf("expected cached provider to have client with Transport of type *http.Transport, got: %T", baseRT)
+		return nil // unreachable
+	}
+}

func normalizeOIDCUpstreams(upstreams []v1alpha1.OIDCIdentityProvider, now metav1.Time) []v1alpha1.OIDCIdentityProvider {
	result := make([]v1alpha1.OIDCIdentityProvider, 0, len(upstreams))
	for _, u := range upstreams {
internal/crypto/ptls/old.go (new file, +15)
@@ -0,0 +1,15 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//go:build !go1.17
// +build !go1.17

package ptls

func init() {
	// cause compile time failure if an older version of Go is used
	`Pinniped's TLS configuration makes assumptions about how the Go standard library implementation of TLS works.
In particular, we rely on the server controlling cipher suite selection. For these assumptions to hold, Pinniped
must be compiled with Go 1.17+. If you are seeing this error message, your attempt to compile Pinniped with an
older Go compiler was explicitly failed to prevent an unsafe configuration.`
}
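The file above relies on a build-constraint trick: it is only included when compiling with a Go version older than 1.17, and its body is a bare string literal, which is not a valid statement, so any such build fails and surfaces the quoted message. A hedged illustration of the same pattern in a generic package (not code from this commit; the package name and version are placeholders):

```go
//go:build !go1.20
// +build !go1.20

// Package example refuses to compile on Go versions older than 1.20.
package example

func init() {
	// A bare string literal as a statement is a compile error ("evaluated but not used"),
	// and this file is only built when the constraint above matches.
	`this package requires Go 1.20 or newer`
}
```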
internal/crypto/ptls/ptls.go (new file, +215)
@@ -0,0 +1,215 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package ptls

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"sync"

	"k8s.io/apiserver/pkg/admission"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/options"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/transport"
)

// TODO decide if we need to expose the four TLS levels (secure, default, default-ldap, legacy) as config.

type ConfigFunc func(*x509.CertPool) *tls.Config

func Default(rootCAs *x509.CertPool) *tls.Config {
	return &tls.Config{
		// Can't use SSLv3 because of POODLE and BEAST
		// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
		// Can't use TLSv1.1 because of RC4 cipher usage
		//
		// The Kubernetes API Server must use TLS 1.2, at a minimum,
		// to protect the confidentiality of sensitive data during electronic dissemination.
		// https://stigviewer.com/stig/kubernetes/2021-06-17/finding/V-242378
		MinVersion: tls.VersionTLS12,

		// the order does not matter in go 1.17+ https://go.dev/blog/tls-cipher-suites
		// we match crypto/tls.cipherSuitesPreferenceOrder because it makes unit tests easier to write
		// this list is ignored when TLS 1.3 is used
		//
		// as of 2021-10-19, Mozilla Guideline v5.6, Go 1.17.2, intermediate configuration, supports:
		// - Firefox 27
		// - Android 4.4.2
		// - Chrome 31
		// - Edge
		// - IE 11 on Windows 7
		// - Java 8u31
		// - OpenSSL 1.0.1
		// - Opera 20
		// - Safari 9
		// https://ssl-config.mozilla.org/#server=go&version=1.17.2&config=intermediate&guideline=5.6
		//
		// The Kubernetes API server must use approved cipher suites.
		// https://stigviewer.com/stig/kubernetes/2021-06-17/finding/V-242418
		CipherSuites: []uint16{
			// these are all AEADs with ECDHE, some use ChaCha20Poly1305 while others use AES-GCM
			// this provides forward secrecy, confidentiality and authenticity of data
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
		},

		// enable HTTP2 for go's 1.7 HTTP Server
		// setting this explicitly is only required in very specific circumstances
		// it is simpler to just set it here than to try and determine if we need to
		NextProtos: []string{"h2", "http/1.1"},

		// optional root CAs, nil means use the host's root CA set
		RootCAs: rootCAs,
	}
}

func Secure(rootCAs *x509.CertPool) *tls.Config {
	// as of 2021-10-19, Mozilla Guideline v5.6, Go 1.17.2, modern configuration, supports:
	// - Firefox 63
	// - Android 10.0
	// - Chrome 70
	// - Edge 75
	// - Java 11
	// - OpenSSL 1.1.1
	// - Opera 57
	// - Safari 12.1
	// https://ssl-config.mozilla.org/#server=go&version=1.17.2&config=modern&guideline=5.6
	c := Default(rootCAs)
	c.MinVersion = tls.VersionTLS13 // max out the security
	c.CipherSuites = []uint16{
		// TLS 1.3 ciphers are not configurable, but we need to explicitly set them here to make our client hello behave correctly
		// See https://github.com/golang/go/pull/49293
		tls.TLS_AES_128_GCM_SHA256,
		tls.TLS_AES_256_GCM_SHA384,
		tls.TLS_CHACHA20_POLY1305_SHA256,
	}
	return c
}

func DefaultLDAP(rootCAs *x509.CertPool) *tls.Config {
	c := Default(rootCAs)
	// add less secure ciphers to support the default AWS Active Directory config
	c.CipherSuites = append(c.CipherSuites,
		// CBC with ECDHE
		// this provides forward secrecy and confidentiality of data but not authenticity
		// MAC-then-Encrypt CBC ciphers are susceptible to padding oracle attacks
		// See https://crypto.stackexchange.com/a/205 and https://crypto.stackexchange.com/a/224
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
	)
	return c
}

func Legacy(rootCAs *x509.CertPool) *tls.Config {
	c := Default(rootCAs)
	// add all the ciphers (even the crappy ones) except the ones that Go considers to be outright broken like 3DES
	c.CipherSuites = suitesToIDs(tls.CipherSuites())
	return c
}

func suitesToIDs(suites []*tls.CipherSuite) []uint16 {
	out := make([]uint16, 0, len(suites))
	for _, suite := range suites {
		suite := suite
		out = append(out, suite.ID)
	}
	return out
}

func Merge(tlsConfigFunc ConfigFunc, tlsConfig *tls.Config) {
	secureTLSConfig := tlsConfigFunc(nil)

	// override the core security knobs of the TLS config
	// note that these have to be kept in sync with Default / Secure above
	tlsConfig.MinVersion = secureTLSConfig.MinVersion
	tlsConfig.CipherSuites = secureTLSConfig.CipherSuites

	// if the TLS config already states what protocols it wants to use, honor that instead of overriding
	if len(tlsConfig.NextProtos) == 0 {
		tlsConfig.NextProtos = secureTLSConfig.NextProtos
	}
}

// RestConfigFunc allows this package to not depend on the kubeclient package.
type RestConfigFunc func(*rest.Config) (kubernetes.Interface, *rest.Config, error)

func DefaultRecommendedOptions(opts *options.RecommendedOptions, f RestConfigFunc) error {
	defaultServing(opts.SecureServing)
	return secureClient(opts, f)
}

func SecureRecommendedOptions(opts *options.RecommendedOptions, f RestConfigFunc) error {
	secureServing(opts.SecureServing)
	return secureClient(opts, f)
}

func defaultServing(opts *options.SecureServingOptionsWithLoopback) {
	c := Default(nil)
	cipherSuites := make([]string, 0, len(c.CipherSuites))
	for _, id := range c.CipherSuites {
		cipherSuites = append(cipherSuites, tls.CipherSuiteName(id))
	}
	opts.CipherSuites = cipherSuites

	opts.MinTLSVersion = "VersionTLS12"
}

func secureServing(opts *options.SecureServingOptionsWithLoopback) {
	opts.MinTLSVersion = "VersionTLS13"
	opts.CipherSuites = nil
}

func secureClient(opts *options.RecommendedOptions, f RestConfigFunc) error {
	inClusterClient, inClusterConfig, err := f(nil)
	if err != nil {
		return fmt.Errorf("failed to build in cluster client: %w", err)
	}

	if n, z := opts.Authentication.RemoteKubeConfigFile, opts.Authorization.RemoteKubeConfigFile; len(n) > 0 || len(z) > 0 {
		return fmt.Errorf("delegating auth is not using in-cluster config:\nauthentication=%s\nauthorization=%s", n, z)
	}

	// delegated authn and authz provide easy hooks for us to set the TLS config.
	// however, the underlying clients use client-go's global TLS cache with an
	// in-cluster config. to make this safe, we simply do the mutation once.
	wrapperFunc := wrapTransportOnce(inClusterConfig.WrapTransport)
	opts.Authentication.CustomRoundTripperFn = wrapperFunc
	opts.Authorization.CustomRoundTripperFn = wrapperFunc

	opts.CoreAPI = nil // set this to nil to make sure our ExtraAdmissionInitializers is used
	baseExtraAdmissionInitializers := opts.ExtraAdmissionInitializers
	opts.ExtraAdmissionInitializers = func(c *genericapiserver.RecommendedConfig) ([]admission.PluginInitializer, error) {
		// abuse this closure to rewrite how we load admission plugins
		c.ClientConfig = inClusterConfig
		c.SharedInformerFactory = kubeinformers.NewSharedInformerFactory(inClusterClient, 0)

		// abuse this closure to rewrite our loopback config
		// this is mostly future proofing for post start hooks
		_, loopbackConfig, err := f(c.LoopbackClientConfig)
		if err != nil {
			return nil, fmt.Errorf("failed to build loopback config: %w", err)
		}
		c.LoopbackClientConfig = loopbackConfig

		return baseExtraAdmissionInitializers(c)
	}

	return nil
}

func wrapTransportOnce(f transport.WrapperFunc) transport.WrapperFunc {
	var once sync.Once
	return func(rt http.RoundTripper) http.RoundTripper {
		once.Do(func() {
			_ = f(rt) // assume in-place mutation
		})
		return rt
	}
}
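A minimal sketch of how a consumer might use the `ptls.Default` and `ptls.Secure` helpers from the new file above for an outbound client and an inbound listener. This is illustrative only and not part of the commit; the address and certificate paths are placeholders.

```go
package main

import (
	"net/http"
	"time"

	"go.pinniped.dev/internal/crypto/ptls"
)

func main() {
	// Outbound: a client that trusts the host's root CAs (nil pool) and
	// negotiates at least TLS 1.2 with the curated cipher list.
	client := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			Proxy:           http.ProxyFromEnvironment,
			TLSClientConfig: ptls.Default(nil),
		},
	}
	_ = client

	// Inbound: a server that only accepts TLS 1.3 clients.
	server := &http.Server{
		Addr:      ":8443", // placeholder address
		TLSConfig: ptls.Secure(nil),
	}
	// Certificate and key paths are placeholders.
	_ = server.ListenAndServeTLS("tls.crt", "tls.key")
}
```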
internal/crypto/ptls/ptls_test.go (new file, +253)
@@ -0,0 +1,253 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package ptls

import (
	"crypto/tls"
	"testing"

	"github.com/stretchr/testify/require"
	"k8s.io/apiserver/pkg/server/options"
)

func TestDefaultServing(t *testing.T) {
	t.Parallel()

	opts := &options.SecureServingOptionsWithLoopback{SecureServingOptions: &options.SecureServingOptions{}}
	defaultServing(opts)
	require.Equal(t, options.SecureServingOptionsWithLoopback{
		SecureServingOptions: &options.SecureServingOptions{
			CipherSuites: []string{
				"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
				"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
				"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
				"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
				"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
				"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
			},
			MinTLSVersion: "VersionTLS12",
		},
	}, *opts)
}

func TestSecureServing(t *testing.T) {
	t.Parallel()

	opts := &options.SecureServingOptionsWithLoopback{SecureServingOptions: &options.SecureServingOptions{}}
	secureServing(opts)
	require.Equal(t, options.SecureServingOptionsWithLoopback{
		SecureServingOptions: &options.SecureServingOptions{
			MinTLSVersion: "VersionTLS13",
		},
	}, *opts)
}

func TestMerge(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name          string
		tlsConfigFunc ConfigFunc
		tlsConfig     *tls.Config
		want          *tls.Config
	}{
		{
			name:          "default no protos",
			tlsConfigFunc: Default,
			tlsConfig: &tls.Config{
				ServerName: "something-to-check-passthrough",
			},
			want: &tls.Config{
				ServerName: "something-to-check-passthrough",
				MinVersion: tls.VersionTLS12,
				CipherSuites: []uint16{
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
				},
				NextProtos: []string{"h2", "http/1.1"},
			},
		},
		{
			name:          "default with protos",
			tlsConfigFunc: Default,
			tlsConfig: &tls.Config{
				ServerName: "a different thing for passthrough",
				NextProtos: []string{"panda"},
			},
			want: &tls.Config{
				ServerName: "a different thing for passthrough",
				MinVersion: tls.VersionTLS12,
				CipherSuites: []uint16{
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
				},
				NextProtos: []string{"panda"},
			},
		},
		{
			name:          "secure no protos",
			tlsConfigFunc: Secure,
			tlsConfig: &tls.Config{
				ServerName: "something-to-check-passthrough",
			},
			want: &tls.Config{
				ServerName: "something-to-check-passthrough",
				MinVersion: tls.VersionTLS13,
				CipherSuites: []uint16{
					tls.TLS_AES_128_GCM_SHA256,
					tls.TLS_AES_256_GCM_SHA384,
					tls.TLS_CHACHA20_POLY1305_SHA256,
				},
				NextProtos: []string{"h2", "http/1.1"},
			},
		},
		{
			name:          "secure with protos",
			tlsConfigFunc: Secure,
			tlsConfig: &tls.Config{
				ServerName: "a different thing for passthrough",
				NextProtos: []string{"panda"},
			},
			want: &tls.Config{
				ServerName: "a different thing for passthrough",
				MinVersion: tls.VersionTLS13,
				CipherSuites: []uint16{
					tls.TLS_AES_128_GCM_SHA256,
					tls.TLS_AES_256_GCM_SHA384,
					tls.TLS_CHACHA20_POLY1305_SHA256,
				},
				NextProtos: []string{"panda"},
			},
		},
		{
			name:          "default ldap no protos",
			tlsConfigFunc: DefaultLDAP,
			tlsConfig: &tls.Config{
				ServerName: "something-to-check-passthrough",
			},
			want: &tls.Config{
				ServerName: "something-to-check-passthrough",
				MinVersion: tls.VersionTLS12,
				CipherSuites: []uint16{
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, //nolint: gosec // yeah, I know it is a bad cipher, but AD sucks
					tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
					tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				},
				NextProtos: []string{"h2", "http/1.1"},
			},
		},
		{
			name:          "default ldap with protos",
			tlsConfigFunc: DefaultLDAP,
			tlsConfig: &tls.Config{
				ServerName: "a different thing for passthrough",
				NextProtos: []string{"panda"},
			},
			want: &tls.Config{
				ServerName: "a different thing for passthrough",
				MinVersion: tls.VersionTLS12,
				CipherSuites: []uint16{
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, //nolint: gosec // yeah, I know it is a bad cipher, but AD sucks
					tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
					tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				},
				NextProtos: []string{"panda"},
			},
		},
		{
			name:          "legacy no protos",
			tlsConfigFunc: Legacy,
			tlsConfig: &tls.Config{
				ServerName: "something-to-check-passthrough",
			},
			want: &tls.Config{
				ServerName: "something-to-check-passthrough",
				MinVersion: tls.VersionTLS12,
				CipherSuites: []uint16{
					tls.TLS_RSA_WITH_AES_128_CBC_SHA, //nolint: gosec // yeah, I know it is a bad cipher, this is the legacy config
					tls.TLS_RSA_WITH_AES_256_CBC_SHA,
					tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_AES_128_GCM_SHA256,
					tls.TLS_AES_256_GCM_SHA384,
					tls.TLS_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
					tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
					tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
				},
				NextProtos: []string{"h2", "http/1.1"},
			},
		},
		{
			name:          "legacy with protos",
			tlsConfigFunc: Legacy,
			tlsConfig: &tls.Config{
				ServerName: "a different thing for passthrough",
				NextProtos: []string{"panda"},
			},
			want: &tls.Config{
				ServerName: "a different thing for passthrough",
				MinVersion: tls.VersionTLS12,
				CipherSuites: []uint16{
					tls.TLS_RSA_WITH_AES_128_CBC_SHA, //nolint: gosec // yeah, I know it is a bad cipher, this is the legacy config
					tls.TLS_RSA_WITH_AES_256_CBC_SHA,
					tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_AES_128_GCM_SHA256,
					tls.TLS_AES_256_GCM_SHA384,
					tls.TLS_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
					tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
					tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
				},
				NextProtos: []string{"panda"},
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			Merge(tt.tlsConfigFunc, tt.tlsConfig)
			require.Equal(t, tt.want, tt.tlsConfig)
		})
	}
}
@@ -19,6 +19,7 @@ import (
	"k8s.io/apiserver/pkg/storage/names"

	"go.pinniped.dev/internal/certauthority"
+	"go.pinniped.dev/internal/crypto/ptls"
	"go.pinniped.dev/test/testlib"
)

@@ -160,11 +161,8 @@ func TestProviderWithDynamicServingCertificateController(t *testing.T) {
	err = certKeyContent.SetCertKeyContent(cert, key)
	require.NoError(t, err)

-	tlsConfig := &tls.Config{
-		MinVersion: tls.VersionTLS12,
-		NextProtos: []string{"h2", "http/1.1"},
-		ClientAuth: tls.RequestClientCert,
-	}
+	tlsConfig := ptls.Default(nil)
+	tlsConfig.ClientAuth = tls.RequestClientCert

	dynamicCertificateController := dynamiccertificates.NewDynamicServingCertificateController(
		tlsConfig,
@@ -3,7 +3,11 @@
package roundtripper

-import "net/http"
+import (
+	"net/http"
+
+	"k8s.io/apimachinery/pkg/util/net"
+)

var _ http.RoundTripper = Func(nil)

@@ -12,3 +16,22 @@ type Func func(*http.Request) (*http.Response, error)
func (f Func) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req)
}

+var _ net.RoundTripperWrapper = &wrapper{}
+
+type wrapper struct {
+	delegate http.RoundTripper
+	f        Func
+}
+
+func (w *wrapper) RoundTrip(req *http.Request) (*http.Response, error) {
+	return w.f.RoundTrip(req)
+}
+
+func (w *wrapper) WrappedRoundTripper() http.RoundTripper {
+	return w.delegate
+}
+
+func WrapFunc(delegate http.RoundTripper, f Func) net.RoundTripperWrapper {
+	return &wrapper{delegate: delegate, f: f}
+}
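The point of `WrapFunc` is that a wrapped transport still satisfies `net.RoundTripperWrapper`, so helpers such as `net.TLSClientConfig` can peel it back to the underlying `*http.Transport`. A hedged usage sketch (the header name is illustrative and not from this commit):

```go
package main

import (
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"

	"go.pinniped.dev/internal/httputil/roundtripper"
)

func main() {
	base := http.DefaultTransport

	// Wrap the base transport while keeping it reachable via WrappedRoundTripper.
	var rt http.RoundTripper = roundtripper.WrapFunc(base, func(req *http.Request) (*http.Response, error) {
		req = req.Clone(req.Context())
		req.Header.Set("X-Example", "demo") // illustrative header
		return base.RoundTrip(req)
	})

	// Unwrapping still works, which is what lets TLS config inspection succeed.
	if w, ok := rt.(utilnet.RoundTripperWrapper); ok {
		_ = w.WrappedRoundTripper() // returns base
	}

	client := &http.Client{Transport: rt}
	_ = client
}
```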
@@ -4,9 +4,17 @@
package kubeclient

import (
+	"crypto/tls"
+	"crypto/x509"
	"fmt"
+	"net/http"
+	"reflect"
+	"unsafe"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/net"
	"k8s.io/client-go/kubernetes"
	kubescheme "k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
@@ -17,6 +25,7 @@ import (
	pinnipedconciergeclientsetscheme "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/scheme"
	pinnipedsupervisorclientset "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned"
	pinnipedsupervisorclientsetscheme "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned/scheme"
+	"go.pinniped.dev/internal/crypto/ptls"
)

type Client struct {
@@ -44,11 +53,16 @@ func New(opts ...Option) (*Client, error) {
		WithConfig(inClusterConfig)(c) // make sure all writes to clientConfig flow through one code path
	}

+	secureKubeConfig, err := createSecureKubeConfig(c.config)
+	if err != nil {
+		return nil, fmt.Errorf("could not create secure client config: %w", err)
+	}
+
	// explicitly use json when talking to CRD APIs
-	jsonKubeConfig := createJSONKubeConfig(c.config)
+	jsonKubeConfig := createJSONKubeConfig(secureKubeConfig)

	// explicitly use protobuf when talking to built-in kube APIs
-	protoKubeConfig := createProtoKubeConfig(c.config)
+	protoKubeConfig := createProtoKubeConfig(secureKubeConfig)

	// Connect to the core Kubernetes API.
	k8sClient, err := kubernetes.NewForConfig(configWithWrapper(protoKubeConfig, kubescheme.Scheme, kubescheme.Codecs, c.middlewares, c.transportWrapper))
@@ -107,3 +121,142 @@ func createProtoKubeConfig(kubeConfig *restclient.Config) *restclient.Config {
	protoKubeConfig.ContentType = runtime.ContentTypeProtobuf
	return protoKubeConfig
}

+// createSecureKubeConfig returns a copy of the input config with the WrapTransport
+// enhanced to use the secure TLS configuration of the ptls / phttp packages.
+func createSecureKubeConfig(kubeConfig *restclient.Config) (*restclient.Config, error) {
+	secureKubeConfig := restclient.CopyConfig(kubeConfig)
+
+	// by setting proxy to always be non-nil, we bust the client-go global TLS config cache.
+	// this is required to make our wrapper function work without data races. the unit tests
+	// associated with this code run in parallel to assert that we are not using the cache.
+	// see k8s.io/client-go/transport.tlsConfigKey
+	if secureKubeConfig.Proxy == nil {
+		secureKubeConfig.Proxy = net.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+	}
+
+	// make sure restclient.TLSConfigFor always returns a non-nil TLS config
+	if len(secureKubeConfig.NextProtos) == 0 {
+		secureKubeConfig.NextProtos = ptls.Secure(nil).NextProtos
+	}
+
+	tlsConfigTest, err := restclient.TLSConfigFor(secureKubeConfig)
+	if err != nil {
+		return nil, err // should never happen because our input config should always be valid
+	}
+	if tlsConfigTest == nil {
+		return nil, fmt.Errorf("unexpected empty TLS config") // should never happen because we set NextProtos above
+	}
+
+	secureKubeConfig.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+		defer func() {
+			if err := AssertSecureTransport(rt); err != nil {
+				panic(err) // not sure what the point of this function would be if it failed to make the config secure
+			}
+		}()
+
+		tlsConfig, err := netTLSClientConfig(rt)
+		if err != nil {
+			// this assumes none of our production code calls Wrap or messes with WrapTransport.
+			// this is a reasonable assumption because all such code should live in this package
+			// and all such code should run after this function is called, not before. the kube
+			// codebase uses transport wrappers that can be unwrapped to access the underlying
+			// TLS config.
+			panic(err)
+		}
+		if tlsConfig == nil {
+			panic("unexpected empty TLS config") // we validate this case above via tlsConfigTest
+		}
+
+		// mutate the TLS config into our desired state before it is used
+		ptls.Merge(ptls.Secure, tlsConfig)
+
+		return rt // return the input transport since we mutated it in-place
+	})
+
+	if err := AssertSecureConfig(secureKubeConfig); err != nil {
+		return nil, err // not sure what the point of this function would be if it failed to make the config secure
+	}
+
+	return secureKubeConfig, nil
+}
+
+// SecureAnonymousClientConfig has the same properties as restclient.AnonymousClientConfig
+// while still enforcing the secure TLS configuration of the ptls / phttp packages.
+func SecureAnonymousClientConfig(kubeConfig *restclient.Config) *restclient.Config {
+	kubeConfig = restclient.AnonymousClientConfig(kubeConfig)
+	secureKubeConfig, err := createSecureKubeConfig(kubeConfig)
+	if err != nil {
+		panic(err) // should never happen as this would only fail on invalid CA data, which would never work anyway
+	}
+	if err := AssertSecureConfig(secureKubeConfig); err != nil {
+		panic(err) // not sure what the point of this function would be if it failed to make the config secure
+	}
+	return secureKubeConfig
+}
+
+func AssertSecureConfig(kubeConfig *restclient.Config) error {
+	rt, err := restclient.TransportFor(kubeConfig)
+	if err != nil {
+		return fmt.Errorf("failed to build transport: %w", err)
+	}
+
+	return AssertSecureTransport(rt)
+}
+
+func AssertSecureTransport(rt http.RoundTripper) error {
+	tlsConfig, err := netTLSClientConfig(rt)
+	if err != nil {
+		return fmt.Errorf("failed to get TLS config: %w", err)
+	}
+
+	tlsConfigCopy := tlsConfig.Clone()
+	ptls.Merge(ptls.Secure, tlsConfigCopy) // only mutate the copy
+
+	//nolint: gosec // the empty TLS config here is not used
+	if diff := cmp.Diff(tlsConfigCopy, tlsConfig,
+		cmpopts.IgnoreUnexported(tls.Config{}, x509.CertPool{}),
+		cmpopts.IgnoreFields(tls.Config{}, "GetClientCertificate"),
+	); len(diff) != 0 {
+		return fmt.Errorf("tls config is not secure:\n%s", diff)
+	}
+
+	return nil
+}
+
+func netTLSClientConfig(rt http.RoundTripper) (*tls.Config, error) {
+	tlsConfig, err := net.TLSClientConfig(rt)
+	if err == nil {
+		return tlsConfig, nil
+	}
+
+	// TODO fix when we pick up https://github.com/kubernetes/kubernetes/pull/106014
+	if err.Error() == "unknown transport type: *exec.roundTripper" {
+		return net.TLSClientConfig(extractRTUnsafe(rt))
+	}
+
+	return nil, err
+}
+
+func extractRTUnsafe(rt http.RoundTripper) (out http.RoundTripper) {
+	for wrapper, ok := rt.(net.RoundTripperWrapper); ok; wrapper, ok = rt.(net.RoundTripperWrapper) {
+		// keep peeling the wrappers until we get to the exec.roundTripper
+		rt = wrapper.WrappedRoundTripper()
+	}
+
+	// this is some dark magic to read a private field
+	baseField := reflect.ValueOf(rt).Elem().FieldByName("base")
+	basePointer := (*http.RoundTripper)(unsafe.Pointer(baseField.UnsafeAddr()))
+
+	return *basePointer
+}
+
+func Secure(config *restclient.Config) (kubernetes.Interface, *restclient.Config, error) {
+	// our middleware does not apply to the returned restclient.Config, therefore, this
+	// client not having a leader election lock is irrelevant since it would not be enforced
+	secureClient, err := New(WithConfig(config)) // handles nil config correctly
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to build secure client: %w", err)
+	}
+	return secureClient.Kubernetes, secureClient.ProtoConfig, nil
+}
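A hedged sketch of how the hardened client and the assertion helpers above might be used together. This is not part of the commit; error handling is trimmed and it assumes an in-cluster config is available to `kubeclient.New()`.

```go
package main

import (
	"log"

	"go.pinniped.dev/internal/kubeclient"
)

func main() {
	// New() applies createSecureKubeConfig internally, so every derived
	// config and clientset already carries the ptls.Secure TLS settings.
	client, err := kubeclient.New() // assumes an in-cluster config is available
	if err != nil {
		log.Fatal(err)
	}

	// Double-check the JSON and proto configs, the same way the tests do.
	if err := kubeclient.AssertSecureConfig(client.JSONConfig); err != nil {
		log.Fatal(err)
	}
	if err := kubeclient.AssertSecureConfig(client.ProtoConfig); err != nil {
		log.Fatal(err)
	}

	// An anonymous variant keeps the same hardened TLS settings.
	anonymous := kubeclient.SecureAnonymousClientConfig(client.JSONConfig)
	_ = anonymous
}
```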
@@ -7,22 +7,31 @@ import (
	"context"
	"fmt"
	"io"
-	"net"
	"net/http"
+	"net/url"
	"runtime"
	"strings"
+	"sync"
	"testing"
+	"time"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
+	clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
	"k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/transport"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+
+	// register all client-go auth plugins.
+	_ "k8s.io/client-go/plugin/pkg/client/auth"

	conciergeconfigv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/config/v1alpha1"
	supervisorconfigv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1"
+	"go.pinniped.dev/internal/crypto/ptls"
+	"go.pinniped.dev/internal/httputil/roundtripper"
	"go.pinniped.dev/internal/testutil/fakekubeapi"
)

@@ -514,9 +523,12 @@ func TestKubeclient(t *testing.T) {
				return []*spyMiddleware{newSimpleMiddleware(t, true, false, false)}
			},
			editRestConfig: func(t *testing.T, restConfig *rest.Config) {
-				restConfig.Dial = func(_ context.Context, _, _ string) (net.Conn, error) {
-					return nil, fmt.Errorf("some fake connection error")
-				}
+				// avoid messing with restConfig.Dial since it breaks client-go TLS cache logic
+				restConfig.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+					return roundtripper.WrapFunc(rt, func(_ *http.Request) (*http.Response, error) {
+						return nil, fmt.Errorf("some fake connection error")
+					})
+				})
			},
			reallyRunTest: func(t *testing.T, c *Client) {
				_, err := c.PinnipedSupervisor.
@@ -591,8 +603,7 @@ func TestKubeclient(t *testing.T) {
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
-			server, restConfig := fakekubeapi.Start(t, nil)
-			defer server.Close()
+			_, restConfig := fakekubeapi.Start(t, nil)

			if test.editRestConfig != nil {
				test.editRestConfig(t, restConfig)
@@ -754,21 +765,45 @@ func newFailingMiddleware(t *testing.T, name string, mutateReqFails, mutateRespF
}

type wantCloser struct {
-	io.ReadCloser
-	closeCount                      int
-	closeCalls                      []string
-	couldReadBytesJustBeforeClosing bool
+	m sync.Mutex
+
+	_rc                              io.ReadCloser
+	_closeCalls                      []string
+	_couldReadBytesJustBeforeClosing bool
}

-func (wc *wantCloser) Close() error {
-	wc.closeCount++
-	wc.closeCalls = append(wc.closeCalls, getCaller())
-	n, _ := wc.ReadCloser.Read([]byte{0})
+func (w *wantCloser) Close() error {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	w._closeCalls = append(w._closeCalls, getCaller())
+	n, _ := w._rc.Read([]byte{0})
	if n > 0 {
		// there were still bytes left to be read
-		wc.couldReadBytesJustBeforeClosing = true
+		w._couldReadBytesJustBeforeClosing = true
	}
-	return wc.ReadCloser.Close()
+	return w._rc.Close()
+}
+
+func (w *wantCloser) Read(p []byte) (int, error) {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	return w._rc.Read(p)
+}
+
+func (w *wantCloser) couldRead() bool {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	return w._couldReadBytesJustBeforeClosing
+}
+
+func (w *wantCloser) calls() []string {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	return w._closeCalls
}

func getCaller() string {
@@ -785,11 +820,14 @@ func getCaller() string {
func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {
	caller := getCaller()
	return func(rt http.RoundTripper) http.RoundTripper {
-		return roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
+		return roundtripper.WrapFunc(rt, roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
			if req.Body != nil {
-				wc := &wantCloser{ReadCloser: req.Body}
+				wc := &wantCloser{_rc: req.Body}
				t.Cleanup(func() {
-					require.Equalf(t, wc.closeCount, 1, "did not close req body expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.closeCalls)
+					require.Eventuallyf(t, func() bool {
+						return 1 == len(wc.calls())
+					}, 5*time.Second, 100*time.Millisecond,
+						"did not close req body expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.calls())
				})
				req.Body = wc
			}
@@ -800,9 +838,12 @@ func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {
			if originalErr != nil {
				return nil, originalErr
			}
-			wc := &wantCloser{ReadCloser: originalBodyCopy}
+			wc := &wantCloser{_rc: originalBodyCopy}
			t.Cleanup(func() {
-				require.Equalf(t, wc.closeCount, 1, "did not close req body copy expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.closeCalls)
+				require.Eventuallyf(t, func() bool {
+					return 1 == len(wc.calls())
+				}, 5*time.Second, 100*time.Millisecond,
+					"did not close req body copy expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.calls())
			})
			return wc, nil
		}
@@ -810,7 +851,7 @@ func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {

			resp, err := rt.RoundTrip(req)
			return false, resp, err
-		})
+		}).RoundTrip)
	}
}

@@ -819,20 +860,24 @@ func wantCloseReqWrapper(t *testing.T) transport.WrapperFunc {
func wantCloseRespWrapper(t *testing.T) transport.WrapperFunc {
	caller := getCaller()
	return func(rt http.RoundTripper) http.RoundTripper {
-		return roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
+		return roundtripper.WrapFunc(rt, roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
			resp, err := rt.RoundTrip(req)
			if err != nil {
				// request failed, so there is no response body to watch for Close() calls on
				return false, resp, err
			}
-			wc := &wantCloser{ReadCloser: resp.Body}
+			wc := &wantCloser{_rc: resp.Body}
			t.Cleanup(func() {
-				require.False(t, wc.couldReadBytesJustBeforeClosing, "did not consume all response body bytes before closing %s", caller)
-				require.Equalf(t, wc.closeCount, 1, "did not close resp body expected number of times at %s for req %#v; actual calls = %s", caller, req, wc.closeCalls)
+				require.Eventuallyf(t, func() bool {
+					return wc.couldRead() == false &&
+						1 == len(wc.calls())
+				}, 5*time.Second, 10*time.Millisecond,
+					`did not close resp body expected number of times at %s for req %#v; actual calls = %s
did not consume all response body bytes before closing %s, couldRead=%v`, caller, req, wc.calls(), caller, wc.couldRead())
			})
			resp.Body = wc
			return false, resp, err
-		})
+		}).RoundTrip)
	}
}

@@ -895,3 +940,221 @@ func createGetFederationDomainTest(t *testing.T, client *Client) {
	require.NoError(t, err)
	require.Equal(t, goodFederationDomain, federationDomain)
}

+// TestUnwrap ensures that the Client struct returned by this package only contains
+// transports that can be fully unwrapped to get access to the underlying TLS config.
+func TestUnwrap(t *testing.T) {
+	t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+	server, restConfig := fakekubeapi.Start(t, nil)
+
+	serverSubjects := server.Client().Transport.(*http.Transport).TLSClientConfig.RootCAs.Subjects()
+
+	t.Run("regular client", func(t *testing.T) {
+		t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+		regularClient := makeClient(t, restConfig, func(_ *rest.Config) {})
+
+		testUnwrap(t, regularClient, serverSubjects)
+	})
+
+	t.Run("exec client", func(t *testing.T) {
+		t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+		execClient := makeClient(t, restConfig, func(config *rest.Config) {
+			config.ExecProvider = &clientcmdapi.ExecConfig{
+				Command:         "echo",
+				Args:            []string{"pandas are awesome"},
+				APIVersion:      clientauthenticationv1.SchemeGroupVersion.String(),
+				InteractiveMode: clientcmdapi.NeverExecInteractiveMode,
+			}
+		})
+
+		testUnwrap(t, execClient, serverSubjects)
+	})
+
+	t.Run("gcp client", func(t *testing.T) {
+		t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+		gcpClient := makeClient(t, restConfig, func(config *rest.Config) {
+			config.AuthProvider = &clientcmdapi.AuthProviderConfig{
+				Name: "gcp",
+			}
+		})
+
+		testUnwrap(t, gcpClient, serverSubjects)
+	})
+
+	t.Run("oidc client", func(t *testing.T) {
+		t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+		oidcClient := makeClient(t, restConfig, func(config *rest.Config) {
+			config.AuthProvider = &clientcmdapi.AuthProviderConfig{
+				Name: "oidc",
+				Config: map[string]string{
+					"idp-issuer-url": "https://pandas.local",
+					"client-id":      "walrus",
+				},
+			}
+		})
+
+		testUnwrap(t, oidcClient, serverSubjects)
+	})
+
+	t.Run("azure client", func(t *testing.T) {
+		t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+		azureClient := makeClient(t, restConfig, func(config *rest.Config) {
+			config.AuthProvider = &clientcmdapi.AuthProviderConfig{
+				Name: "azure",
+				Config: map[string]string{
+					"client-id":    "pinny",
+					"tenant-id":    "danger",
+					"apiserver-id": "1234",
+				},
+			}
+		})
+
+		testUnwrap(t, azureClient, serverSubjects)
+	})
+}
+
+func testUnwrap(t *testing.T, client *Client, serverSubjects [][]byte) {
+	tests := []struct {
+		name string
+		rt   http.RoundTripper
+	}{
+		{
+			name: "core v1",
+			rt:   extractTransport(client.Kubernetes.CoreV1()),
+		},
+		{
+			name: "coordination v1",
+			rt:   extractTransport(client.Kubernetes.CoordinationV1()),
+		},
+		{
+			name: "api registration v1",
+			rt:   extractTransport(client.Aggregation.ApiregistrationV1()),
+		},
+		{
+			name: "concierge login",
+			rt:   extractTransport(client.PinnipedConcierge.LoginV1alpha1()),
+		},
+		{
+			name: "concierge config",
+			rt:   extractTransport(client.PinnipedConcierge.ConfigV1alpha1()),
+		},
+		{
+			name: "supervisor idp",
+			rt:   extractTransport(client.PinnipedSupervisor.IDPV1alpha1()),
+		},
+		{
+			name: "supervisor config",
+			rt:   extractTransport(client.PinnipedSupervisor.ConfigV1alpha1()),
+		},
+		{
+			name: "json config",
+			rt:   configToTransport(t, client.JSONConfig),
+		},
+		{
+			name: "proto config",
+			rt:   configToTransport(t, client.ProtoConfig),
+		},
+		{
+			name: "anonymous json config",
+			rt:   configToTransport(t, SecureAnonymousClientConfig(client.JSONConfig)),
+		},
+		{
+			name: "anonymous proto config",
+			rt:   configToTransport(t, SecureAnonymousClientConfig(client.ProtoConfig)),
+		},
+		{
+			name: "json config - no cache",
+			rt:   configToTransport(t, bustTLSCache(client.JSONConfig)),
+		},
+		{
+			name: "proto config - no cache",
+			rt:   configToTransport(t, bustTLSCache(client.ProtoConfig)),
+		},
+		{
+			name: "anonymous json config - no cache, inner bust",
+			rt:   configToTransport(t, SecureAnonymousClientConfig(bustTLSCache(client.JSONConfig))),
+		},
+		{
+			name: "anonymous proto config - no cache, inner bust",
+			rt:   configToTransport(t, SecureAnonymousClientConfig(bustTLSCache(client.ProtoConfig))),
+		},
+		{
+			name: "anonymous json config - no cache, double bust",
+			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(bustTLSCache(client.JSONConfig)))),
+		},
+		{
+			name: "anonymous proto config - no cache, double bust",
+			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(bustTLSCache(client.ProtoConfig)))),
+		},
+		{
+			name: "anonymous json config - no cache, outer bust",
+			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(client.JSONConfig))),
+		},
+		{
+			name: "anonymous proto config - no cache, outer bust",
+			rt:   configToTransport(t, bustTLSCache(SecureAnonymousClientConfig(client.ProtoConfig))),
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel() // make sure to run in parallel to confirm that our client-go TLS cache busting works (i.e. assert no data races)
+
+			tlsConfig, err := netTLSClientConfig(tt.rt)
+			require.NoError(t, err)
+			require.NotNil(t, tlsConfig)
+
+			secureTLSConfig := ptls.Secure(nil)
+
+			require.Equal(t, secureTLSConfig.MinVersion, tlsConfig.MinVersion)
+			require.Equal(t, secureTLSConfig.CipherSuites, tlsConfig.CipherSuites)
+			require.Equal(t, secureTLSConfig.NextProtos, tlsConfig.NextProtos)
+
+			// x509.CertPool has some embedded functions that make it hard to compare so just look at the subjects
+			require.Equal(t, serverSubjects, tlsConfig.RootCAs.Subjects())
+		})
+	}
+}
+
+type restClientGetter interface {
+	RESTClient() rest.Interface
+}
+
+func extractTransport(getter restClientGetter) http.RoundTripper {
+	return getter.RESTClient().(*rest.RESTClient).Client.Transport
+}
+
+func configToTransport(t *testing.T, config *rest.Config) http.RoundTripper {
+	t.Helper()
+
+	rt, err := rest.TransportFor(config)
+	require.NoError(t, err)
+	return rt
+}
+
+func bustTLSCache(config *rest.Config) *rest.Config {
+	c := rest.CopyConfig(config)
+	c.Proxy = func(h *http.Request) (*url.URL, error) {
+		return nil, nil // having a non-nil proxy func makes client-go not cache the TLS config
+	}
+	return c
+}
+
+func makeClient(t *testing.T, restConfig *rest.Config, f func(*rest.Config)) *Client {
+	t.Helper()
+
+	restConfig = rest.CopyConfig(restConfig)
+
+	f(restConfig)
+
+	client, err := New(WithConfig(restConfig))
+	require.NoError(t, err)
+
+	return client
+}
@ -20,6 +20,7 @@ import (
|
|||||||
restclient "k8s.io/client-go/rest"
|
restclient "k8s.io/client-go/rest"
|
||||||
"k8s.io/client-go/transport"
|
"k8s.io/client-go/transport"
|
||||||
|
|
||||||
|
"go.pinniped.dev/internal/httputil/roundtripper"
|
||||||
"go.pinniped.dev/internal/plog"
|
"go.pinniped.dev/internal/plog"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -78,7 +79,7 @@ func newWrapper(
|
|||||||
middlewares []Middleware,
|
middlewares []Middleware,
|
||||||
) transport.WrapperFunc {
|
) transport.WrapperFunc {
|
||||||
return func(rt http.RoundTripper) http.RoundTripper {
|
return func(rt http.RoundTripper) http.RoundTripper {
|
||||||
return roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
|
return roundtripper.WrapFunc(rt, roundTripperFunc(func(req *http.Request) (bool, *http.Response, error) {
|
||||||
reqInfo, err := resolver.NewRequestInfo(reqWithoutPrefix(req, hostURL, apiPathPrefix))
|
reqInfo, err := resolver.NewRequestInfo(reqWithoutPrefix(req, hostURL, apiPathPrefix))
|
||||||
if err != nil || !reqInfo.IsResourceRequest {
|
if err != nil || !reqInfo.IsResourceRequest {
|
||||||
resp, err := rt.RoundTrip(req) // we only handle kube resource requests
|
resp, err := rt.RoundTrip(req) // we only handle kube resource requests
|
||||||
@ -120,7 +121,7 @@ func newWrapper(
|
|||||||
resp, err := rt.RoundTrip(req) // we only handle certain verbs
|
resp, err := rt.RoundTrip(req) // we only handle certain verbs
|
||||||
return false, resp, err
|
return false, resp, err
|
||||||
}
|
}
|
||||||
})
|
}).RoundTrip)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -36,6 +36,7 @@ import (
|
|||||||
"go.pinniped.dev/internal/constable"
|
"go.pinniped.dev/internal/constable"
|
||||||
"go.pinniped.dev/internal/controller/apicerts"
|
"go.pinniped.dev/internal/controller/apicerts"
|
||||||
"go.pinniped.dev/internal/controllerlib"
|
"go.pinniped.dev/internal/controllerlib"
|
||||||
|
"go.pinniped.dev/internal/crypto/ptls"
|
||||||
"go.pinniped.dev/internal/dynamiccert"
|
"go.pinniped.dev/internal/dynamiccert"
|
||||||
"go.pinniped.dev/internal/kubeclient"
|
"go.pinniped.dev/internal/kubeclient"
|
||||||
"go.pinniped.dev/internal/plog"
|
"go.pinniped.dev/internal/plog"
|
||||||
@ -71,16 +72,15 @@ func newWebhook(
|
|||||||
// start runs the webhook in a separate goroutine and returns whether or not the
|
// start runs the webhook in a separate goroutine and returns whether or not the
|
||||||
// webhook was started successfully.
|
// webhook was started successfully.
|
||||||
func (w *webhook) start(ctx context.Context, l net.Listener) error {
|
func (w *webhook) start(ctx context.Context, l net.Listener) error {
|
||||||
|
c := ptls.Secure(nil)
|
||||||
|
c.GetCertificate = func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||||
|
certPEM, keyPEM := w.certProvider.CurrentCertKeyContent()
|
||||||
|
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
||||||
|
return &cert, err
|
||||||
|
}
|
||||||
server := http.Server{
|
server := http.Server{
|
||||||
Handler: w,
|
Handler: w,
|
||||||
TLSConfig: &tls.Config{
|
TLSConfig: c,
|
||||||
MinVersion: tls.VersionTLS13,
|
|
||||||
GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
|
||||||
certPEM, keyPEM := w.certProvider.CurrentCertKeyContent()
|
|
||||||
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
|
||||||
return &cert, err
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
errCh := make(chan error)
|
errCh := make(chan error)
|
||||||
|
@ -6,7 +6,6 @@ package localuserauthenticator
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -25,6 +24,7 @@ import (
|
|||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||||
kubeinformers "k8s.io/client-go/informers"
|
kubeinformers "k8s.io/client-go/informers"
|
||||||
corev1informers "k8s.io/client-go/informers/core/v1"
|
corev1informers "k8s.io/client-go/informers/core/v1"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
@ -32,6 +32,7 @@ import (
|
|||||||
|
|
||||||
"go.pinniped.dev/internal/certauthority"
|
"go.pinniped.dev/internal/certauthority"
|
||||||
"go.pinniped.dev/internal/dynamiccert"
|
"go.pinniped.dev/internal/dynamiccert"
|
||||||
|
"go.pinniped.dev/internal/net/phttp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestWebhook(t *testing.T) {
|
func TestWebhook(t *testing.T) {
|
||||||
@ -102,7 +103,7 @@ func TestWebhook(t *testing.T) {
|
|||||||
defer func() { _ = l.Close() }()
|
defer func() { _ = l.Close() }()
|
||||||
require.NoError(t, w.start(ctx, l))
|
require.NoError(t, w.start(ctx, l))
|
||||||
|
|
||||||
client := newClient(caBundle, serverName)
|
client := newClient(t, caBundle, serverName)
|
||||||
|
|
||||||
goodURL := fmt.Sprintf("https://%s/authenticate", l.Addr().String())
|
goodURL := fmt.Sprintf("https://%s/authenticate", l.Addr().String())
|
||||||
goodRequestHeaders := map[string][]string{
|
goodRequestHeaders := map[string][]string{
|
||||||
@ -475,18 +476,20 @@ func newCertProvider(t *testing.T) (dynamiccert.Private, []byte, string) {
|
|||||||
|
|
||||||
// newClient creates an http.Client that can be used to make an HTTPS call to a
|
// newClient creates an http.Client that can be used to make an HTTPS call to a
|
||||||
// service whose serving certs can be verified by the provided CA bundle.
|
// service whose serving certs can be verified by the provided CA bundle.
|
||||||
func newClient(caBundle []byte, serverName string) *http.Client {
|
func newClient(t *testing.T, caBundle []byte, serverName string) *http.Client {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
rootCAs := x509.NewCertPool()
|
rootCAs := x509.NewCertPool()
|
||||||
rootCAs.AppendCertsFromPEM(caBundle)
|
ok := rootCAs.AppendCertsFromPEM(caBundle)
|
||||||
return &http.Client{
|
require.True(t, ok)
|
||||||
Transport: &http.Transport{
|
|
||||||
TLSClientConfig: &tls.Config{
|
c := phttp.Secure(rootCAs)
|
||||||
MinVersion: tls.VersionTLS13,
|
|
||||||
RootCAs: rootCAs,
|
tlsConfig, err := utilnet.TLSClientConfig(c.Transport)
|
||||||
ServerName: serverName,
|
require.NoError(t, err)
|
||||||
},
|
tlsConfig.ServerName = serverName
|
||||||
},
|
|
||||||
}
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
// newTokenReviewBody creates an io.ReadCloser that contains a JSON-encodeed
|
// newTokenReviewBody creates an io.ReadCloser that contains a JSON-encodeed
|
||||||
|
118
internal/net/phttp/debug.go
Normal file
118
internal/net/phttp/debug.go
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package phttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
|
||||||
|
"k8s.io/client-go/transport"
|
||||||
|
|
||||||
|
"go.pinniped.dev/internal/httputil/roundtripper"
|
||||||
|
)
|
||||||
|
|
||||||
|
func safeDebugWrappers(rt http.RoundTripper, f transport.WrapperFunc, shouldLog func() bool) http.RoundTripper {
|
||||||
|
return roundtripper.WrapFunc(rt, func(req *http.Request) (*http.Response, error) {
|
||||||
|
// minor optimization to avoid the cleaning logic when the debug wrappers are unused
|
||||||
|
// note: do not make this entire wrapper conditional on shouldLog() - the output is allowed to change at runtime
|
||||||
|
if !shouldLog() {
|
||||||
|
return rt.RoundTrip(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
resp *http.Response
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
debugRT := f(roundtripper.Func(func(_ *http.Request) (*http.Response, error) {
|
||||||
|
// this call needs to be inside this closure so that the debug wrappers can time it
|
||||||
|
// note also that it takes the original (real) request
|
||||||
|
resp, err = rt.RoundTrip(req)
|
||||||
|
|
||||||
|
cleanedResp := cleanResp(resp) // do not leak the user's password during the password grant
|
||||||
|
|
||||||
|
return cleanedResp, err
|
||||||
|
}))
|
||||||
|
|
||||||
|
// run the debug wrappers for their side effects (i.e. logging)
|
||||||
|
// the output is ignored because the input is not the real request
|
||||||
|
cleanedReq := cleanReq(req) // do not leak the user's password during the password grant
|
||||||
|
_, _ = debugRT.RoundTrip(cleanedReq)
|
||||||
|
|
||||||
|
return resp, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanReq(req *http.Request) *http.Request {
|
||||||
|
// only pass back things we know to be safe to log
|
||||||
|
return &http.Request{
|
||||||
|
Method: req.Method,
|
||||||
|
URL: cleanURL(req.URL),
|
||||||
|
Header: cleanHeader(req.Header),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanResp(resp *http.Response) *http.Response {
|
||||||
|
if resp == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// only pass back things we know to be safe to log
|
||||||
|
return &http.Response{
|
||||||
|
Status: resp.Status,
|
||||||
|
Header: cleanHeader(resp.Header),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanURL(u *url.URL) *url.URL {
|
||||||
|
var user *url.Userinfo
|
||||||
|
if len(u.User.Username()) > 0 {
|
||||||
|
user = url.User("masked_username")
|
||||||
|
}
|
||||||
|
|
||||||
|
var opaque string
|
||||||
|
if len(u.Opaque) > 0 {
|
||||||
|
opaque = "masked_opaque_data"
|
||||||
|
}
|
||||||
|
|
||||||
|
var fragment string
|
||||||
|
if len(u.Fragment) > 0 || len(u.RawFragment) > 0 {
|
||||||
|
fragment = "masked_fragment"
|
||||||
|
}
|
||||||
|
|
||||||
|
// only pass back things we know to be safe to log
|
||||||
|
return &url.URL{
|
||||||
|
Scheme: u.Scheme,
|
||||||
|
Opaque: opaque,
|
||||||
|
User: user,
|
||||||
|
Host: u.Host,
|
||||||
|
Path: u.Path,
|
||||||
|
RawPath: u.RawPath,
|
||||||
|
ForceQuery: u.ForceQuery,
|
||||||
|
RawQuery: cleanQuery(u.Query()),
|
||||||
|
Fragment: fragment,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanQuery(query url.Values) string {
|
||||||
|
if len(query) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
out := url.Values(cleanHeader(http.Header(query))) // cast so we can re-use logic
|
||||||
|
return out.Encode()
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanHeader(header http.Header) http.Header {
|
||||||
|
if len(header) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
mask := []string{"masked_value"}
|
||||||
|
out := make(http.Header, len(header))
|
||||||
|
for key := range header {
|
||||||
|
out[key] = mask // only copy the keys
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
340
internal/net/phttp/debug_test.go
Normal file
340
internal/net/phttp/debug_test.go
Normal file
@ -0,0 +1,340 @@
|
|||||||
|
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package phttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.pinniped.dev/internal/constable"
|
||||||
|
"go.pinniped.dev/internal/httputil/roundtripper"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_safeDebugWrappers_shouldLog(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var rtFuncCalled, wrapFuncCalled, innerRTCalled int
|
||||||
|
var shouldLog, skipInnerRT bool
|
||||||
|
|
||||||
|
shouldLogFunc := func() bool { return shouldLog }
|
||||||
|
|
||||||
|
rtFunc := roundtripper.Func(func(_ *http.Request) (*http.Response, error) {
|
||||||
|
rtFuncCalled++
|
||||||
|
return nil, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
wrapFunc := func(rt http.RoundTripper) http.RoundTripper {
|
||||||
|
wrapFuncCalled++
|
||||||
|
return roundtripper.Func(func(r *http.Request) (*http.Response, error) {
|
||||||
|
innerRTCalled++
|
||||||
|
if skipInnerRT {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return rt.RoundTrip(r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
r := testReq(t, nil, nil)
|
||||||
|
|
||||||
|
out := safeDebugWrappers(rtFunc, wrapFunc, shouldLogFunc)
|
||||||
|
|
||||||
|
// assert that shouldLogFunc is dynamically honored
|
||||||
|
|
||||||
|
_, _ = out.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, 1, rtFuncCalled)
|
||||||
|
require.Equal(t, 0, wrapFuncCalled)
|
||||||
|
require.Equal(t, 0, innerRTCalled)
|
||||||
|
|
||||||
|
shouldLog = true
|
||||||
|
|
||||||
|
_, _ = out.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, 2, rtFuncCalled)
|
||||||
|
require.Equal(t, 1, wrapFuncCalled)
|
||||||
|
require.Equal(t, 1, innerRTCalled)
|
||||||
|
|
||||||
|
shouldLog = false
|
||||||
|
|
||||||
|
_, _ = out.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, 3, rtFuncCalled)
|
||||||
|
require.Equal(t, 1, wrapFuncCalled)
|
||||||
|
require.Equal(t, 1, innerRTCalled)
|
||||||
|
|
||||||
|
shouldLog = true
|
||||||
|
|
||||||
|
_, _ = out.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, 4, rtFuncCalled)
|
||||||
|
require.Equal(t, 2, wrapFuncCalled)
|
||||||
|
require.Equal(t, 2, innerRTCalled)
|
||||||
|
|
||||||
|
// assert that wrapFunc controls rtFunc being called
|
||||||
|
|
||||||
|
skipInnerRT = true
|
||||||
|
|
||||||
|
_, _ = out.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, 4, rtFuncCalled)
|
||||||
|
require.Equal(t, 3, wrapFuncCalled)
|
||||||
|
require.Equal(t, 3, innerRTCalled)
|
||||||
|
|
||||||
|
skipInnerRT = false
|
||||||
|
|
||||||
|
_, _ = out.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, 5, rtFuncCalled)
|
||||||
|
require.Equal(t, 4, wrapFuncCalled)
|
||||||
|
require.Equal(t, 4, innerRTCalled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_safeDebugWrappers_clean(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
shouldLog bool
|
||||||
|
inReq, wantReq *http.Request
|
||||||
|
inResp, wantResp *http.Response
|
||||||
|
inErr error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "header cleaned",
|
||||||
|
shouldLog: true,
|
||||||
|
inReq: testReq(t, http.Header{"hello": {"from", "earth"}}, nil),
|
||||||
|
wantReq: testCleanReq(http.Header{"hello": {"masked_value"}}, nil),
|
||||||
|
inResp: testResp(t, http.Header{"bye": {"for", "now"}}), //nolint:bodyclose
|
||||||
|
wantResp: testCleanResp(http.Header{"bye": {"masked_value"}}), //nolint:bodyclose
|
||||||
|
inErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "header cleaned error",
|
||||||
|
shouldLog: true,
|
||||||
|
inReq: testReq(t, http.Header{"see": {"from", "mars"}}, nil),
|
||||||
|
wantReq: testCleanReq(http.Header{"see": {"masked_value"}}, nil),
|
||||||
|
inResp: testResp(t, http.Header{"bear": {"is", "a"}}), //nolint:bodyclose
|
||||||
|
wantResp: testCleanResp(http.Header{"bear": {"masked_value"}}), //nolint:bodyclose
|
||||||
|
inErr: constable.Error("some error"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "header cleaned error nil resp",
|
||||||
|
shouldLog: true,
|
||||||
|
inReq: testReq(t, http.Header{"see": {"from", "mars"}}, nil),
|
||||||
|
wantReq: testCleanReq(http.Header{"see": {"masked_value"}}, nil),
|
||||||
|
inResp: nil,
|
||||||
|
wantResp: nil,
|
||||||
|
inErr: constable.Error("some other error"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "header cleaned no log",
|
||||||
|
shouldLog: false,
|
||||||
|
inReq: testReq(t, http.Header{"sky": {"is", "blue"}}, nil),
|
||||||
|
wantReq: nil,
|
||||||
|
inResp: testResp(t, http.Header{"night": {"is", "dark"}}), //nolint:bodyclose
|
||||||
|
wantResp: nil,
|
||||||
|
inErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url cleaned, all fields",
|
||||||
|
shouldLog: true,
|
||||||
|
inReq: testReq(t, nil, &url.URL{
|
||||||
|
Scheme: "sc",
|
||||||
|
Opaque: "op",
|
||||||
|
User: url.UserPassword("us", "pa"),
|
||||||
|
Host: "ho",
|
||||||
|
Path: "pa",
|
||||||
|
RawPath: "rap",
|
||||||
|
ForceQuery: true,
|
||||||
|
RawQuery: "key1=val1&key2=val2",
|
||||||
|
Fragment: "fra",
|
||||||
|
RawFragment: "rawf",
|
||||||
|
}),
|
||||||
|
wantReq: testCleanReq(nil, &url.URL{
|
||||||
|
Scheme: "sc",
|
||||||
|
Opaque: "masked_opaque_data",
|
||||||
|
User: url.User("masked_username"),
|
||||||
|
Host: "ho",
|
||||||
|
Path: "pa",
|
||||||
|
RawPath: "rap",
|
||||||
|
ForceQuery: true,
|
||||||
|
RawQuery: "key1=masked_value&key2=masked_value",
|
||||||
|
Fragment: "masked_fragment",
|
||||||
|
RawFragment: "",
|
||||||
|
}),
|
||||||
|
inResp: testResp(t, http.Header{"sun": {"yellow"}}), //nolint:bodyclose
|
||||||
|
wantResp: testCleanResp(http.Header{"sun": {"masked_value"}}), //nolint:bodyclose
|
||||||
|
inErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url cleaned, some fields",
|
||||||
|
shouldLog: true,
|
||||||
|
inReq: testReq(t, nil, &url.URL{
|
||||||
|
Scheme: "sc",
|
||||||
|
Opaque: "",
|
||||||
|
User: nil,
|
||||||
|
Host: "ho",
|
||||||
|
Path: "pa",
|
||||||
|
RawPath: "rap",
|
||||||
|
ForceQuery: false,
|
||||||
|
RawQuery: "key3=val3&key4=val4",
|
||||||
|
Fragment: "",
|
||||||
|
RawFragment: "",
|
||||||
|
}),
|
||||||
|
wantReq: testCleanReq(nil, &url.URL{
|
||||||
|
Scheme: "sc",
|
||||||
|
Opaque: "",
|
||||||
|
User: nil,
|
||||||
|
Host: "ho",
|
||||||
|
Path: "pa",
|
||||||
|
RawPath: "rap",
|
||||||
|
ForceQuery: false,
|
||||||
|
RawQuery: "key3=masked_value&key4=masked_value",
|
||||||
|
Fragment: "",
|
||||||
|
RawFragment: "",
|
||||||
|
}),
|
||||||
|
inResp: testResp(t, http.Header{"sun": {"yellow"}}), //nolint:bodyclose
|
||||||
|
wantResp: testCleanResp(http.Header{"sun": {"masked_value"}}), //nolint:bodyclose
|
||||||
|
inErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "header and url cleaned, all fields with error",
|
||||||
|
shouldLog: true,
|
||||||
|
inReq: testReq(t, http.Header{"zone": {"of", "the", "enders"}, "welcome": {"home"}}, &url.URL{
|
||||||
|
Scheme: "sc2",
|
||||||
|
Opaque: "op2",
|
||||||
|
User: url.UserPassword("us2", "pa2"),
|
||||||
|
Host: "ho2",
|
||||||
|
Path: "pa2",
|
||||||
|
RawPath: "rap2",
|
||||||
|
ForceQuery: true,
|
||||||
|
RawQuery: "a=b&c=d&e=f&a=1&a=2",
|
||||||
|
Fragment: "fra2",
|
||||||
|
RawFragment: "rawf2",
|
||||||
|
}),
|
||||||
|
wantReq: testCleanReq(http.Header{"zone": {"masked_value"}, "welcome": {"masked_value"}}, &url.URL{
|
||||||
|
Scheme: "sc2",
|
||||||
|
Opaque: "masked_opaque_data",
|
||||||
|
User: url.User("masked_username"),
|
||||||
|
Host: "ho2",
|
||||||
|
Path: "pa2",
|
||||||
|
RawPath: "rap2",
|
||||||
|
ForceQuery: true,
|
||||||
|
RawQuery: "a=masked_value&c=masked_value&e=masked_value",
|
||||||
|
Fragment: "masked_fragment",
|
||||||
|
RawFragment: "",
|
||||||
|
}),
|
||||||
|
inResp: testResp(t, http.Header{"moon": {"white"}}), //nolint:bodyclose
|
||||||
|
wantResp: testCleanResp(http.Header{"moon": {"masked_value"}}), //nolint:bodyclose
|
||||||
|
inErr: constable.Error("yay pandas"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var rtCalled, wrapCalled, innerCalled bool
|
||||||
|
|
||||||
|
rtFunc := roundtripper.Func(func(r *http.Request) (*http.Response, error) {
|
||||||
|
rtCalled = true
|
||||||
|
require.Equal(t, tt.inReq, r)
|
||||||
|
return tt.inResp, tt.inErr
|
||||||
|
})
|
||||||
|
|
||||||
|
var gotReq *http.Request
|
||||||
|
var gotResp *http.Response
|
||||||
|
var gotErr error
|
||||||
|
|
||||||
|
wrapFunc := func(rt http.RoundTripper) http.RoundTripper {
|
||||||
|
wrapCalled = true
|
||||||
|
return roundtripper.Func(func(r *http.Request) (*http.Response, error) {
|
||||||
|
innerCalled = true
|
||||||
|
|
||||||
|
gotReq = r
|
||||||
|
|
||||||
|
resp, err := rt.RoundTrip(r) //nolint:bodyclose
|
||||||
|
|
||||||
|
gotResp = resp
|
||||||
|
gotErr = err
|
||||||
|
|
||||||
|
return resp, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
out := safeDebugWrappers(rtFunc, wrapFunc, func() bool { return tt.shouldLog })
|
||||||
|
|
||||||
|
resp, err := out.RoundTrip(tt.inReq) //nolint:bodyclose
|
||||||
|
|
||||||
|
require.Equal(t, tt.inResp, resp)
|
||||||
|
require.Equal(t, tt.inErr, err)
|
||||||
|
require.True(t, rtCalled)
|
||||||
|
require.Equal(t, tt.shouldLog, wrapCalled)
|
||||||
|
require.Equal(t, tt.shouldLog, innerCalled)
|
||||||
|
|
||||||
|
require.Equal(t, tt.wantReq, gotReq)
|
||||||
|
require.Equal(t, tt.wantResp, gotResp)
|
||||||
|
require.Equal(t, tt.inErr, gotErr)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testReq(t *testing.T, header http.Header, u *url.URL) *http.Request {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
r, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://overwritten.com", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
if u == nil {
|
||||||
|
u = &url.URL{}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.URL = u
|
||||||
|
r.Header = header
|
||||||
|
|
||||||
|
// something non-nil for testing
|
||||||
|
r.Body = io.NopCloser(&bytes.Buffer{})
|
||||||
|
r.Form = url.Values{"a": {"b"}}
|
||||||
|
r.PostForm = url.Values{"c": {"d"}}
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func testResp(t *testing.T, header http.Header) *http.Response {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
return &http.Response{
|
||||||
|
Status: "pandas are the best",
|
||||||
|
Header: header,
|
||||||
|
|
||||||
|
// something non-nil for testing
|
||||||
|
Body: io.NopCloser(&bytes.Buffer{}),
|
||||||
|
Request: testReq(t, header, nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testCleanReq(header http.Header, u *url.URL) *http.Request {
|
||||||
|
if u == nil {
|
||||||
|
u = &url.URL{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &http.Request{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: u,
|
||||||
|
Header: header,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testCleanResp(header http.Header) *http.Response {
|
||||||
|
return &http.Response{
|
||||||
|
Status: "pandas are the best",
|
||||||
|
Header: header,
|
||||||
|
}
|
||||||
|
}
|
48
internal/net/phttp/phttp.go
Normal file
48
internal/net/phttp/phttp.go
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package phttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/x509"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/util/net"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
|
"k8s.io/client-go/transport"
|
||||||
|
|
||||||
|
"go.pinniped.dev/internal/crypto/ptls"
|
||||||
|
"go.pinniped.dev/internal/plog"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Default(rootCAs *x509.CertPool) *http.Client {
|
||||||
|
return buildClient(ptls.Default, rootCAs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Secure(rootCAs *x509.CertPool) *http.Client {
|
||||||
|
return buildClient(ptls.Secure, rootCAs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildClient(tlsConfigFunc ptls.ConfigFunc, rootCAs *x509.CertPool) *http.Client {
|
||||||
|
baseRT := defaultTransport()
|
||||||
|
baseRT.TLSClientConfig = tlsConfigFunc(rootCAs)
|
||||||
|
|
||||||
|
return &http.Client{
|
||||||
|
Transport: defaultWrap(baseRT),
|
||||||
|
Timeout: 3 * time.Hour, // make it impossible for requests to hang indefinitely
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultTransport() *http.Transport {
|
||||||
|
baseRT := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
|
net.SetTransportDefaults(baseRT)
|
||||||
|
baseRT.MaxIdleConnsPerHost = 25 // copied from client-go
|
||||||
|
return baseRT
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultWrap(rt http.RoundTripper) http.RoundTripper {
|
||||||
|
rt = safeDebugWrappers(rt, transport.DebugWrappers, func() bool { return plog.Enabled(plog.LevelTrace) })
|
||||||
|
rt = transport.NewUserAgentRoundTripper(rest.DefaultKubernetesUserAgent(), rt)
|
||||||
|
return rt
|
||||||
|
}
|
116
internal/net/phttp/phttp_test.go
Normal file
116
internal/net/phttp/phttp_test.go
Normal file
@ -0,0 +1,116 @@
|
|||||||
|
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package phttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"k8s.io/apimachinery/pkg/util/net"
|
||||||
|
"k8s.io/client-go/util/cert"
|
||||||
|
|
||||||
|
"go.pinniped.dev/internal/crypto/ptls"
|
||||||
|
"go.pinniped.dev/internal/testutil/tlsserver"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestUnwrap ensures that the http.Client structs returned by this package contain
|
||||||
|
// a transport that can be fully unwrapped to get access to the underlying TLS config.
|
||||||
|
func TestUnwrap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
f func(*x509.CertPool) *http.Client
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "default",
|
||||||
|
f: Default,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "secure",
|
||||||
|
f: Secure,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
p, err := x509.SystemCertPool()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
c := tt.f(p)
|
||||||
|
|
||||||
|
tlsConfig, err := net.TLSClientConfig(c.Transport)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, tlsConfig)
|
||||||
|
|
||||||
|
require.NotEmpty(t, tlsConfig.NextProtos)
|
||||||
|
require.GreaterOrEqual(t, tlsConfig.MinVersion, uint16(tls.VersionTLS12))
|
||||||
|
require.Equal(t, p, tlsConfig.RootCAs)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClient(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
clientFunc func(*x509.CertPool) *http.Client
|
||||||
|
configFunc ptls.ConfigFunc
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "default",
|
||||||
|
clientFunc: Default,
|
||||||
|
configFunc: ptls.Default,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "secure",
|
||||||
|
clientFunc: Secure,
|
||||||
|
configFunc: ptls.Secure,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var sawRequest bool
|
||||||
|
server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
|
||||||
|
tlsserver.AssertTLS(t, r, tt.configFunc)
|
||||||
|
assertUserAgent(t, r)
|
||||||
|
sawRequest = true
|
||||||
|
}), tlsserver.RecordTLSHello)
|
||||||
|
|
||||||
|
rootCAs, err := cert.NewPoolFromBytes(tlsserver.TLSTestServerCA(server))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
c := tt.clientFunc(rootCAs)
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
resp, err := c.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, resp.Body.Close())
|
||||||
|
|
||||||
|
require.True(t, sawRequest)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertUserAgent(t *testing.T, r *http.Request) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
ua := r.Header.Get("user-agent")
|
||||||
|
|
||||||
|
// use assert instead of require to not break the http.Handler with a panic
|
||||||
|
assert.Contains(t, ua, ") kubernetes/")
|
||||||
|
}
|
@ -9,7 +9,7 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
oidc2 "github.com/coreos/go-oidc/v3/oidc"
|
coreosoidc "github.com/coreos/go-oidc/v3/oidc"
|
||||||
"github.com/ory/fosite"
|
"github.com/ory/fosite"
|
||||||
"github.com/ory/fosite/handler/openid"
|
"github.com/ory/fosite/handler/openid"
|
||||||
"github.com/ory/fosite/token/jwt"
|
"github.com/ory/fosite/token/jwt"
|
||||||
@ -60,8 +60,8 @@ func MakeDownstreamSession(subject string, username string, groups []string, cus
|
|||||||
|
|
||||||
// GrantScopesIfRequested auto-grants the scopes for which we do not require end-user approval, if they were requested.
|
// GrantScopesIfRequested auto-grants the scopes for which we do not require end-user approval, if they were requested.
|
||||||
func GrantScopesIfRequested(authorizeRequester fosite.AuthorizeRequester) {
|
func GrantScopesIfRequested(authorizeRequester fosite.AuthorizeRequester) {
|
||||||
oidc.GrantScopeIfRequested(authorizeRequester, oidc2.ScopeOpenID)
|
oidc.GrantScopeIfRequested(authorizeRequester, coreosoidc.ScopeOpenID)
|
||||||
oidc.GrantScopeIfRequested(authorizeRequester, oidc2.ScopeOfflineAccess)
|
oidc.GrantScopeIfRequested(authorizeRequester, coreosoidc.ScopeOfflineAccess)
|
||||||
oidc.GrantScopeIfRequested(authorizeRequester, "pinniped:request-audience")
|
oidc.GrantScopeIfRequested(authorizeRequester, "pinniped:request-audience")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -33,7 +33,7 @@ func RemoveKlogGlobalFlags() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// KRef is (mostly) copied from klog - it is a standard way to represent a a metav1.Object in logs
|
// KRef is (mostly) copied from klog - it is a standard way to represent a metav1.Object in logs
|
||||||
// when you only have access to the namespace and name of the object.
|
// when you only have access to the namespace and name of the object.
|
||||||
func KRef(namespace, name string) string {
|
func KRef(namespace, name string) string {
|
||||||
return fmt.Sprintf("%s/%s", namespace, name)
|
return fmt.Sprintf("%s/%s", namespace, name)
|
||||||
@ -44,33 +44,19 @@ func KObj(obj klog.KMetadata) string {
|
|||||||
return fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())
|
return fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())
|
||||||
}
|
}
|
||||||
|
|
||||||
func klogLevelForPlogLevel(plogLevel LogLevel) (klogLevel klog.Level) {
|
func klogLevelForPlogLevel(plogLevel LogLevel) klog.Level {
|
||||||
switch plogLevel {
|
switch plogLevel {
|
||||||
case LevelWarning:
|
case LevelWarning:
|
||||||
klogLevel = klogLevelWarning // unset means minimal logs (Error and Warning)
|
return klogLevelWarning // unset means minimal logs (Error and Warning)
|
||||||
case LevelInfo:
|
case LevelInfo:
|
||||||
klogLevel = klogLevelInfo
|
return klogLevelInfo
|
||||||
case LevelDebug:
|
case LevelDebug:
|
||||||
klogLevel = klogLevelDebug
|
return klogLevelDebug
|
||||||
case LevelTrace:
|
case LevelTrace:
|
||||||
klogLevel = klogLevelTrace
|
return klogLevelTrace
|
||||||
case LevelAll:
|
case LevelAll:
|
||||||
klogLevel = klogLevelAll + 100 // make all really mean all
|
return klogLevelAll + 100 // make all really mean all
|
||||||
default:
|
default:
|
||||||
klogLevel = -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func getKlogLevel() klog.Level {
|
|
||||||
// hack around klog not exposing a Get method
|
|
||||||
for i := klog.Level(0); i < 256; i++ {
|
|
||||||
if klog.V(i).Enabled() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return i - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1
|
|
||||||
}
|
}
|
||||||
|
@ -7,6 +7,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"k8s.io/component-base/logs"
|
"k8s.io/component-base/logs"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"go.pinniped.dev/internal/constable"
|
"go.pinniped.dev/internal/constable"
|
||||||
)
|
)
|
||||||
@ -54,5 +55,5 @@ func ValidateAndSetLogLevelGlobally(level LogLevel) error {
|
|||||||
// Enabled returns whether the provided plog level is enabled, i.e., whether print statements at the
|
// Enabled returns whether the provided plog level is enabled, i.e., whether print statements at the
|
||||||
// provided level will show up.
|
// provided level will show up.
|
||||||
func Enabled(level LogLevel) bool {
|
func Enabled(level LogLevel) bool {
|
||||||
return getKlogLevel() >= klogLevelForPlogLevel(level)
|
return klog.V(klogLevelForPlogLevel(level)).Enabled()
|
||||||
}
|
}
|
||||||
|
@ -114,3 +114,15 @@ func undoGlobalLogLevelChanges(t *testing.T, originalLogLevel klog.Level) {
|
|||||||
_, err := logs.GlogSetter(strconv.Itoa(int(originalLogLevel)))
|
_, err := logs.GlogSetter(strconv.Itoa(int(originalLogLevel)))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getKlogLevel() klog.Level {
|
||||||
|
// hack around klog not exposing a Get method
|
||||||
|
for i := klog.Level(0); i < 256; i++ {
|
||||||
|
if klog.V(i).Enabled() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return i - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
@ -26,13 +26,11 @@
|
|||||||
// act of desperation to determine why the system is broken.
|
// act of desperation to determine why the system is broken.
|
||||||
package plog
|
package plog
|
||||||
|
|
||||||
import (
|
import "k8s.io/klog/v2"
|
||||||
"k8s.io/klog/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const errorKey = "error"
|
const errorKey = "error"
|
||||||
|
|
||||||
type _ interface {
|
type Logger interface {
|
||||||
Error(msg string, err error, keysAndValues ...interface{})
|
Error(msg string, err error, keysAndValues ...interface{})
|
||||||
Warning(msg string, keysAndValues ...interface{})
|
Warning(msg string, keysAndValues ...interface{})
|
||||||
WarningErr(msg string, err error, keysAndValues ...interface{})
|
WarningErr(msg string, err error, keysAndValues ...interface{})
|
||||||
@ -45,23 +43,23 @@ type _ interface {
|
|||||||
All(msg string, keysAndValues ...interface{})
|
All(msg string, keysAndValues ...interface{})
|
||||||
}
|
}
|
||||||
|
|
||||||
type PLogger struct {
|
type pLogger struct {
|
||||||
prefix string
|
prefix string
|
||||||
depth int
|
depth int
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(prefix string) PLogger {
|
func New(prefix string) Logger {
|
||||||
return PLogger{
|
return &pLogger{
|
||||||
depth: 0,
|
depth: 0,
|
||||||
prefix: prefix,
|
prefix: prefix,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) Error(msg string, err error, keysAndValues ...interface{}) {
|
func (p *pLogger) Error(msg string, err error, keysAndValues ...interface{}) {
|
||||||
klog.ErrorSDepth(p.depth+1, err, p.prefix+msg, keysAndValues...)
|
klog.ErrorSDepth(p.depth+1, err, p.prefix+msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) {
|
func (p *pLogger) warningDepth(msg string, depth int, keysAndValues ...interface{}) {
|
||||||
// klog's structured logging has no concept of a warning (i.e. no WarningS function)
|
// klog's structured logging has no concept of a warning (i.e. no WarningS function)
|
||||||
// Thus we use info at log level zero as a proxy
|
// Thus we use info at log level zero as a proxy
|
||||||
// klog's info logs have an I prefix and its warning logs have a W prefix
|
// klog's info logs have an I prefix and its warning logs have a W prefix
|
||||||
@ -72,111 +70,111 @@ func (p *PLogger) warningDepth(msg string, depth int, keysAndValues ...interface
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) Warning(msg string, keysAndValues ...interface{}) {
|
func (p *pLogger) Warning(msg string, keysAndValues ...interface{}) {
|
||||||
p.warningDepth(msg, p.depth+1, keysAndValues...)
|
p.warningDepth(msg, p.depth+1, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use WarningErr to issue a Warning message with an error object as part of the message.
|
// Use WarningErr to issue a Warning message with an error object as part of the message.
|
||||||
func (p *PLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) {
|
func (p *pLogger) WarningErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
p.warningDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
p.warningDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) {
|
func (p *pLogger) infoDepth(msg string, depth int, keysAndValues ...interface{}) {
|
||||||
if klog.V(klogLevelInfo).Enabled() {
|
if klog.V(klogLevelInfo).Enabled() {
|
||||||
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
|
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) Info(msg string, keysAndValues ...interface{}) {
|
func (p *pLogger) Info(msg string, keysAndValues ...interface{}) {
|
||||||
p.infoDepth(msg, p.depth+1, keysAndValues...)
|
p.infoDepth(msg, p.depth+1, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
|
// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
|
||||||
func (p *PLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) {
|
func (p *pLogger) InfoErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
p.infoDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
p.infoDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) {
|
func (p *pLogger) debugDepth(msg string, depth int, keysAndValues ...interface{}) {
|
||||||
if klog.V(klogLevelDebug).Enabled() {
|
if klog.V(klogLevelDebug).Enabled() {
|
||||||
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
|
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) Debug(msg string, keysAndValues ...interface{}) {
|
func (p *pLogger) Debug(msg string, keysAndValues ...interface{}) {
|
||||||
p.debugDepth(msg, p.depth+1, keysAndValues...)
|
p.debugDepth(msg, p.depth+1, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use DebugErr to issue a Debug message with an error object as part of the message.
|
// Use DebugErr to issue a Debug message with an error object as part of the message.
|
||||||
func (p *PLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) {
|
func (p *pLogger) DebugErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
p.debugDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
p.debugDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) {
|
func (p *pLogger) traceDepth(msg string, depth int, keysAndValues ...interface{}) {
|
||||||
if klog.V(klogLevelTrace).Enabled() {
|
if klog.V(klogLevelTrace).Enabled() {
|
||||||
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
|
klog.InfoSDepth(depth+1, p.prefix+msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) Trace(msg string, keysAndValues ...interface{}) {
|
func (p *pLogger) Trace(msg string, keysAndValues ...interface{}) {
|
||||||
p.traceDepth(msg, p.depth+1, keysAndValues...)
|
p.traceDepth(msg, p.depth+1, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use TraceErr to issue a Trace message with an error object as part of the message.
|
// Use TraceErr to issue a Trace message with an error object as part of the message.
|
||||||
func (p *PLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) {
|
func (p *pLogger) TraceErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
p.traceDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
p.traceDepth(msg, p.depth+1, append([]interface{}{errorKey, err}, keysAndValues...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PLogger) All(msg string, keysAndValues ...interface{}) {
|
func (p *pLogger) All(msg string, keysAndValues ...interface{}) {
|
||||||
if klog.V(klogLevelAll).Enabled() {
|
if klog.V(klogLevelAll).Enabled() {
|
||||||
klog.InfoSDepth(p.depth+1, p.prefix+msg, keysAndValues...)
|
klog.InfoSDepth(p.depth+1, p.prefix+msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var pLogger = PLogger{ //nolint:gochecknoglobals
|
var logger Logger = &pLogger{ //nolint:gochecknoglobals
|
||||||
depth: 1,
|
depth: 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use Error to log an unexpected system error.
|
// Use Error to log an unexpected system error.
|
||||||
func Error(msg string, err error, keysAndValues ...interface{}) {
|
func Error(msg string, err error, keysAndValues ...interface{}) {
|
||||||
pLogger.Error(msg, err, keysAndValues...)
|
logger.Error(msg, err, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Warning(msg string, keysAndValues ...interface{}) {
|
func Warning(msg string, keysAndValues ...interface{}) {
|
||||||
pLogger.Warning(msg, keysAndValues...)
|
logger.Warning(msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use WarningErr to issue a Warning message with an error object as part of the message.
|
// Use WarningErr to issue a Warning message with an error object as part of the message.
|
||||||
func WarningErr(msg string, err error, keysAndValues ...interface{}) {
|
func WarningErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
pLogger.WarningErr(msg, err, keysAndValues...)
|
logger.WarningErr(msg, err, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Info(msg string, keysAndValues ...interface{}) {
|
func Info(msg string, keysAndValues ...interface{}) {
|
||||||
pLogger.Info(msg, keysAndValues...)
|
logger.Info(msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
|
// Use InfoErr to log an expected error, e.g. validation failure of an http parameter.
|
||||||
func InfoErr(msg string, err error, keysAndValues ...interface{}) {
|
func InfoErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
pLogger.InfoErr(msg, err, keysAndValues...)
|
logger.InfoErr(msg, err, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Debug(msg string, keysAndValues ...interface{}) {
|
func Debug(msg string, keysAndValues ...interface{}) {
|
||||||
pLogger.Debug(msg, keysAndValues...)
|
logger.Debug(msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use DebugErr to issue a Debug message with an error object as part of the message.
|
// Use DebugErr to issue a Debug message with an error object as part of the message.
|
||||||
func DebugErr(msg string, err error, keysAndValues ...interface{}) {
|
func DebugErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
pLogger.DebugErr(msg, err, keysAndValues...)
|
logger.DebugErr(msg, err, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Trace(msg string, keysAndValues ...interface{}) {
|
func Trace(msg string, keysAndValues ...interface{}) {
|
||||||
pLogger.Trace(msg, keysAndValues...)
|
logger.Trace(msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use TraceErr to issue a Trace message with an error object as part of the message.
|
// Use TraceErr to issue a Trace message with an error object as part of the message.
|
||||||
func TraceErr(msg string, err error, keysAndValues ...interface{}) {
|
func TraceErr(msg string, err error, keysAndValues ...interface{}) {
|
||||||
pLogger.TraceErr(msg, err, keysAndValues...)
|
logger.TraceErr(msg, err, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func All(msg string, keysAndValues ...interface{}) {
|
func All(msg string, keysAndValues ...interface{}) {
|
||||||
pLogger.All(msg, keysAndValues...)
|
logger.All(msg, keysAndValues...)
|
||||||
}
|
}
|
||||||
|
@ -41,6 +41,7 @@ import (
|
|||||||
"go.pinniped.dev/internal/controller/supervisorstorage"
|
"go.pinniped.dev/internal/controller/supervisorstorage"
|
||||||
"go.pinniped.dev/internal/controllerinit"
|
"go.pinniped.dev/internal/controllerinit"
|
||||||
"go.pinniped.dev/internal/controllerlib"
|
"go.pinniped.dev/internal/controllerlib"
|
||||||
|
"go.pinniped.dev/internal/crypto/ptls"
|
||||||
"go.pinniped.dev/internal/deploymentref"
|
"go.pinniped.dev/internal/deploymentref"
|
||||||
"go.pinniped.dev/internal/downward"
|
"go.pinniped.dev/internal/downward"
|
||||||
"go.pinniped.dev/internal/groupsuffix"
|
"go.pinniped.dev/internal/groupsuffix"
|
||||||
@ -393,23 +394,22 @@ func runSupervisor(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
|
|||||||
defer func() { _ = httpListener.Close() }()
|
defer func() { _ = httpListener.Close() }()
|
||||||
startServer(ctx, shutdown, httpListener, oidProvidersManager)
|
startServer(ctx, shutdown, httpListener, oidProvidersManager)
|
||||||
|
|
||||||
|
c := ptls.Default(nil)
|
||||||
|
c.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||||
|
cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
|
||||||
|
defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
|
||||||
|
plog.Debug("GetCertificate called for port 8443",
|
||||||
|
"info.ServerName", info.ServerName,
|
||||||
|
"foundSNICert", cert != nil,
|
||||||
|
"foundDefaultCert", defaultCert != nil,
|
||||||
|
)
|
||||||
|
if cert == nil {
|
||||||
|
cert = defaultCert
|
||||||
|
}
|
||||||
|
return cert, nil
|
||||||
|
}
|
||||||
//nolint: gosec // Intentionally binding to all network interfaces.
|
//nolint: gosec // Intentionally binding to all network interfaces.
|
||||||
httpsListener, err := tls.Listen("tcp", ":8443", &tls.Config{
|
httpsListener, err := tls.Listen("tcp", ":8443", c)
|
||||||
MinVersion: tls.VersionTLS12, // Allow v1.2 because clients like the default `curl` on MacOS don't support 1.3 yet.
|
|
||||||
GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
|
||||||
cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
|
|
||||||
defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
|
|
||||||
plog.Debug("GetCertificate called for port 8443",
|
|
||||||
"info.ServerName", info.ServerName,
|
|
||||||
"foundSNICert", cert != nil,
|
|
||||||
"foundDefaultCert", defaultCert != nil,
|
|
||||||
)
|
|
||||||
if cert == nil {
|
|
||||||
cert = defaultCert
|
|
||||||
}
|
|
||||||
return cert, nil
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create listener: %w", err)
|
return fmt.Errorf("cannot create listener: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -18,7 +18,6 @@
|
|||||||
package fakekubeapi
|
package fakekubeapi
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/pem"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"mime"
|
"mime"
|
||||||
@ -37,7 +36,9 @@ import (
|
|||||||
|
|
||||||
pinnipedconciergeclientsetscheme "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/scheme"
|
pinnipedconciergeclientsetscheme "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned/scheme"
|
||||||
pinnipedsupervisorclientsetscheme "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned/scheme"
|
pinnipedsupervisorclientsetscheme "go.pinniped.dev/generated/latest/client/supervisor/clientset/versioned/scheme"
|
||||||
|
"go.pinniped.dev/internal/crypto/ptls"
|
||||||
"go.pinniped.dev/internal/httputil/httperr"
|
"go.pinniped.dev/internal/httputil/httperr"
|
||||||
|
"go.pinniped.dev/internal/testutil/tlsserver"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Start starts an httptest.Server (with TLS) that pretends to be a Kube API server.
|
// Start starts an httptest.Server (with TLS) that pretends to be a Kube API server.
|
||||||
@ -54,7 +55,9 @@ func Start(t *testing.T, resources map[string]runtime.Object) (*httptest.Server,
|
|||||||
resources = make(map[string]runtime.Object)
|
resources = make(map[string]runtime.Object)
|
||||||
}
|
}
|
||||||
|
|
||||||
server := httptest.NewTLSServer(httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (err error) {
|
server := tlsserver.TLSTestServer(t, httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||||
|
tlsserver.AssertTLS(t, r, ptls.Secure)
|
||||||
|
|
||||||
obj, err := decodeObj(r)
|
obj, err := decodeObj(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -74,11 +77,11 @@ func Start(t *testing.T, resources map[string]runtime.Object) (*httptest.Server,
|
|||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}))
|
}), tlsserver.RecordTLSHello)
|
||||||
restConfig := &restclient.Config{
|
restConfig := &restclient.Config{
|
||||||
Host: server.URL,
|
Host: server.URL,
|
||||||
TLSClientConfig: restclient.TLSClientConfig{
|
TLSClientConfig: restclient.TLSClientConfig{
|
||||||
CAData: pem.EncodeToMemory(&pem.Block{Bytes: server.Certificate().Raw, Type: "CERTIFICATE"}),
|
CAData: tlsserver.TLSTestServerCA(server),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
return server, restConfig
|
return server, restConfig
|
||||||
|
@ -5,39 +5,36 @@ package testutil
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/pem"
|
|
||||||
"errors"
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.pinniped.dev/internal/crypto/ptls"
|
||||||
|
"go.pinniped.dev/internal/testutil/tlsserver"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TLSTestServer starts a test server listening on a local port using a test CA. It returns the PEM CA bundle and the
|
// TLSTestServer starts a test server listening on a local port using a test CA. It returns the PEM CA bundle and the
|
||||||
// URL of the listening server. The lifetime of the server is bound to the provided *testing.T.
|
// URL of the listening server. The lifetime of the server is bound to the provided *testing.T.
|
||||||
func TLSTestServer(t *testing.T, handler http.HandlerFunc) (caBundlePEM string, url string) {
|
func TLSTestServer(t *testing.T, handler http.HandlerFunc) (caBundlePEM, url string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
server := httptest.NewTLSServer(handler)
|
|
||||||
t.Cleanup(server.Close)
|
|
||||||
|
|
||||||
caBundle := string(pem.EncodeToMemory(&pem.Block{
|
server := tlsserver.TLSTestServer(t, handler, nil)
|
||||||
Type: "CERTIFICATE",
|
|
||||||
Bytes: server.TLS.Certificates[0].Certificate[0],
|
return string(tlsserver.TLSTestServerCA(server)), server.URL
|
||||||
}))
|
|
||||||
return caBundle, server.URL
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TLSTestServerWithCert(t *testing.T, handler http.HandlerFunc, certificate *tls.Certificate) (url string) {
|
func TLSTestServerWithCert(t *testing.T, handler http.HandlerFunc, certificate *tls.Certificate) (url string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
|
c := ptls.Default(nil) // mimic API server config
|
||||||
|
c.Certificates = []tls.Certificate{*certificate}
|
||||||
|
|
||||||
server := http.Server{
|
server := http.Server{
|
||||||
TLSConfig: &tls.Config{
|
TLSConfig: c,
|
||||||
Certificates: []tls.Certificate{*certificate},
|
Handler: handler,
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
},
|
|
||||||
Handler: handler,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||||
|
110
internal/testutil/tlsserver/tlsserver.go
Normal file
110
internal/testutil/tlsserver/tlsserver.go
Normal file
@ -0,0 +1,110 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package tlsserver

import (
	"context"
	"crypto/tls"
	"encoding/pem"
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"reflect"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/util/httpstream"

	"go.pinniped.dev/internal/crypto/ptls"
)

type ctxKey int

const (
	mapKey ctxKey = iota + 1
	helloKey
)

func TLSTestServer(t *testing.T, handler http.Handler, f func(*httptest.Server)) *httptest.Server {
	t.Helper()

	server := httptest.NewUnstartedServer(handler)
	server.TLS = ptls.Default(nil) // mimic API server config
	if f != nil {
		f(server)
	}
	server.StartTLS()
	t.Cleanup(server.Close)
	return server
}

func TLSTestServerCA(server *httptest.Server) []byte {
	return pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE",
		Bytes: server.Certificate().Raw,
	})
}

func RecordTLSHello(server *httptest.Server) {
	server.Config.ConnContext = func(ctx context.Context, _ net.Conn) context.Context {
		return context.WithValue(ctx, mapKey, &sync.Map{})
	}

	server.TLS.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
		m, ok := getCtxMap(info.Context())
		if !ok {
			return nil, fmt.Errorf("could not find ctx map")
		}
		if actual, loaded := m.LoadOrStore(helloKey, info); loaded && !reflect.DeepEqual(info, actual) {
			return nil, fmt.Errorf("different client hello seen")
		}
		return nil, nil
	}
}

func AssertTLS(t *testing.T, r *http.Request, tlsConfigFunc ptls.ConfigFunc) {
	t.Helper()

	m, ok := getCtxMap(r.Context())
	require.True(t, ok)

	h, ok := m.Load(helloKey)
	require.True(t, ok)

	info, ok := h.(*tls.ClientHelloInfo)
	require.True(t, ok)

	tlsConfig := tlsConfigFunc(nil)

	supportedVersions := []uint16{tlsConfig.MinVersion}
	ciphers := tlsConfig.CipherSuites

	if secureTLSConfig := ptls.Secure(nil); tlsConfig.MinVersion != secureTLSConfig.MinVersion {
		supportedVersions = append([]uint16{secureTLSConfig.MinVersion}, supportedVersions...)
		ciphers = append(ciphers, secureTLSConfig.CipherSuites...)
	}

	protos := tlsConfig.NextProtos
	if httpstream.IsUpgradeRequest(r) {
		protos = tlsConfig.NextProtos[1:]
	}

	// use assert instead of require to not break the http.Handler with a panic
	ok1 := assert.Equal(t, supportedVersions, info.SupportedVersions)
	ok2 := assert.Equal(t, ciphers, info.CipherSuites)
	ok3 := assert.Equal(t, protos, info.SupportedProtos)

	if all := ok1 && ok2 && ok3; !all {
		t.Errorf("insecure TLS detected for %q %q %q upgrade=%v supportedVersions=%v ciphers=%v protos=%v",
			r.Proto, r.Method, r.URL.String(), httpstream.IsUpgradeRequest(r), ok1, ok2, ok3)
	}
}

func getCtxMap(ctx context.Context) (*sync.Map, bool) {
	m, ok := ctx.Value(mapKey).(*sync.Map)
	return m, ok
}
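For orientation, here is a minimal sketch (not part of this commit) of how a test could combine these helpers: RecordTLSHello is passed as the optional setup callback so the server captures each ClientHello before StartTLS, and the handler then calls AssertTLS against the profile the client is expected to speak. The client wiring below is an assumption for illustration only; the real tests in this diff exercise the pinniped CLI or the production HTTP clients instead.

```go
package example

import (
	"crypto/x509"
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/internal/crypto/ptls"
	"go.pinniped.dev/internal/testutil/tlsserver"
)

// Sketch only: assumes the client under test negotiates the ptls.Default profile.
func TestExampleClientSpeaksDefaultProfile(t *testing.T) {
	server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tlsserver.AssertTLS(t, r, ptls.Default) // assert against the recorded ClientHello
		w.WriteHeader(http.StatusOK)
	}), tlsserver.RecordTLSHello)

	pool := x509.NewCertPool()
	require.True(t, pool.AppendCertsFromPEM(tlsserver.TLSTestServerCA(server)))

	// hypothetical client wiring; real callers exercise their production HTTP client here
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: ptls.Default(pool)}}
	resp, err := client.Get(server.URL)
	require.NoError(t, err)
	require.NoError(t, resp.Body.Close())
}
```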
@ -25,6 +25,7 @@ import (
 	"k8s.io/utils/trace"
 
 	"go.pinniped.dev/internal/authenticators"
+	"go.pinniped.dev/internal/crypto/ptls"
 	"go.pinniped.dev/internal/endpointaddr"
 	"go.pinniped.dev/internal/oidc/downstreamsession"
 	"go.pinniped.dev/internal/oidc/provider"
@ -328,7 +329,7 @@ func (p *Provider) tlsConfig() (*tls.Config, error) {
 			return nil, fmt.Errorf("could not parse CA bundle")
 		}
 	}
-	return &tls.Config{MinVersion: tls.VersionTLS12, RootCAs: rootCAs}, nil
+	return ptls.DefaultLDAP(rootCAs), nil
 }
 
 // A name for this upstream provider.
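The hand-rolled *tls.Config above is replaced by the shared LDAP profile. As a hedged sketch of what that swap looks like from a caller's point of view: only the two return expressions below are taken from this hunk, the dialing function itself is illustrative, and ptls.DefaultLDAP is treated purely as a ptls.ConfigFunc (the signature implied by the AssertTLS call sites elsewhere in this change), so its internal cipher and version choices remain an assumption.

```go
package example

import (
	"crypto/tls"
	"crypto/x509"

	"go.pinniped.dev/internal/crypto/ptls"
)

// Compile-time check: DefaultLDAP fits the same ConfigFunc shape that
// tlsserver.AssertTLS accepts elsewhere in this change.
var _ ptls.ConfigFunc = ptls.DefaultLDAP

// dialLDAPS is illustrative only; it shows the before/after of the hunk above
// in one place. The production dialer may consume the config differently.
func dialLDAPS(addr string, rootCAs *x509.CertPool, useSharedProfile bool) (*tls.Conn, error) {
	if useSharedProfile {
		return tls.Dial("tcp", addr, ptls.DefaultLDAP(rootCAs)) // new behavior
	}
	// old behavior, kept here only for comparison
	return tls.Dial("tcp", addr, &tls.Config{MinVersion: tls.VersionTLS12, RootCAs: rootCAs})
}
```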
@ -11,6 +11,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"testing"
 	"time"
@ -22,9 +23,11 @@ import (
 
 	"go.pinniped.dev/internal/authenticators"
 	"go.pinniped.dev/internal/certauthority"
+	"go.pinniped.dev/internal/crypto/ptls"
 	"go.pinniped.dev/internal/endpointaddr"
 	"go.pinniped.dev/internal/mocks/mockldapconn"
 	"go.pinniped.dev/internal/testutil"
+	"go.pinniped.dev/internal/testutil/tlsserver"
 )
 
 const (
@ -1689,10 +1692,22 @@ func TestGetURL(t *testing.T) {
 
 // Testing of host parsing, TLS negotiation, and CA bundle, etc. for the production code's dialer.
 func TestRealTLSDialing(t *testing.T) {
-	testServerCABundle, testServerURL := testutil.TLSTestServer(t, func(w http.ResponseWriter, r *http.Request) {})
-	parsedURL, err := url.Parse(testServerURL)
+	testServer := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
+		func(server *httptest.Server) {
+			tlsserver.RecordTLSHello(server)
+			recordFunc := server.TLS.GetConfigForClient
+			server.TLS.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+				_, _ = recordFunc(info)
+				r, err := http.NewRequestWithContext(info.Context(), http.MethodGet, "/this-is-ldap", nil)
+				require.NoError(t, err)
+				tlsserver.AssertTLS(t, r, ptls.DefaultLDAP)
+				return nil, nil
+			}
+		})
+	parsedURL, err := url.Parse(testServer.URL)
 	require.NoError(t, err)
 	testServerHostAndPort := parsedURL.Host
+	testServerCABundle := tlsserver.TLSTestServerCA(testServer)
 
 	caForTestServerWithBadCertName, err := certauthority.New("Test CA", time.Hour)
 	require.NoError(t, err)
@ -1720,7 +1735,7 @@ func TestRealTLSDialing(t *testing.T) {
 		{
 			name: "happy path",
 			host: testServerHostAndPort,
-			caBundle: []byte(testServerCABundle),
+			caBundle: testServerCABundle,
 			connProto: TLS,
 			context: context.Background(),
 		},
@ -1751,7 +1766,7 @@ func TestRealTLSDialing(t *testing.T) {
 		{
 			name: "invalid host with TLS",
 			host: "this:is:not:a:valid:hostname",
-			caBundle: []byte(testServerCABundle),
+			caBundle: testServerCABundle,
 			connProto: TLS,
 			context: context.Background(),
 			wantError: `LDAP Result Code 200 "Network Error": host "this:is:not:a:valid:hostname" is not a valid hostname or IP address`,
@ -1759,7 +1774,7 @@ func TestRealTLSDialing(t *testing.T) {
 		{
 			name: "invalid host with StartTLS",
 			host: "this:is:not:a:valid:hostname",
-			caBundle: []byte(testServerCABundle),
+			caBundle: testServerCABundle,
 			connProto: StartTLS,
 			context: context.Background(),
 			wantError: `LDAP Result Code 200 "Network Error": host "this:is:not:a:valid:hostname" is not a valid hostname or IP address`,
@ -1776,7 +1791,7 @@ func TestRealTLSDialing(t *testing.T) {
 			name: "cannot connect to host",
 			// This is assuming that this port was not reclaimed by another app since the test setup ran. Seems safe enough.
 			host: recentlyClaimedHostAndPort,
-			caBundle: []byte(testServerCABundle),
+			caBundle: testServerCABundle,
 			connProto: TLS,
 			context: context.Background(),
 			wantError: fmt.Sprintf(`LDAP Result Code 200 "Network Error": dial tcp %s: connect: connection refused`, recentlyClaimedHostAndPort),
@ -1784,7 +1799,7 @@ func TestRealTLSDialing(t *testing.T) {
 		{
 			name: "pays attention to the passed context",
 			host: testServerHostAndPort,
-			caBundle: []byte(testServerCABundle),
+			caBundle: testServerCABundle,
 			connProto: TLS,
 			context: alreadyCancelledContext,
 			wantError: fmt.Sprintf(`LDAP Result Code 200 "Network Error": dial tcp %s: operation was canceled`, testServerHostAndPort),
@ -1792,7 +1807,7 @@ func TestRealTLSDialing(t *testing.T) {
 		{
 			name: "unsupported connection protocol",
 			host: testServerHostAndPort,
-			caBundle: []byte(testServerCABundle),
+			caBundle: testServerCABundle,
 			connProto: "bad usage of this type",
 			context: alreadyCancelledContext,
 			wantError: `LDAP Result Code 200 "Network Error": did not specify valid ConnectionProtocol`,
@ -1824,7 +1839,7 @@ func TestRealTLSDialing(t *testing.T) {
 
 			// Indirectly checking that the Dialer method constructed the ldap.Conn with isTLS set to true,
 			// since this is always the correct behavior unless/until we want to support StartTLS.
-			err := conn.(*ldap.Conn).StartTLS(&tls.Config{})
+			err := conn.(*ldap.Conn).StartTLS(ptls.DefaultLDAP(nil))
 			require.EqualError(t, err, `LDAP Result Code 200 "Network Error": ldap: already encrypted`)
 		}
 	})
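Because the LDAP dialer is not an http.Handler, TestRealTLSDialing cannot assert inside a request handler; instead it chains onto the GetConfigForClient hook installed by RecordTLSHello and asserts against a synthetic request that exists only to carry the connection context. A hedged sketch of that wiring pulled out into a reusable helper (the helper name and synthetic path are illustrative, not part of this commit):

```go
package example

import (
	"crypto/tls"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/internal/crypto/ptls"
	"go.pinniped.dev/internal/testutil/tlsserver"
)

// assertEveryTLSHello is a hypothetical helper mirroring the wiring inside
// TestRealTLSDialing above: record each ClientHello, then assert it against
// the expected ptls profile via a synthetic request.
func assertEveryTLSHello(t *testing.T, server *httptest.Server, expected ptls.ConfigFunc) {
	t.Helper()

	tlsserver.RecordTLSHello(server)

	recordFunc := server.TLS.GetConfigForClient // installed by RecordTLSHello
	server.TLS.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
		_, _ = recordFunc(info) // stash the hello in the connection's context map

		r, err := http.NewRequestWithContext(info.Context(), http.MethodGet, "/synthetic-path", nil)
		require.NoError(t, err)
		tlsserver.AssertTLS(t, r, expected)

		return nil, nil
	}
}
```

As in the test above, such a helper would have to run inside the optional callback passed to tlsserver.TLSTestServer, before the server starts serving TLS.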
@ -31,6 +31,7 @@ import (
 	supervisoroidc "go.pinniped.dev/generated/latest/apis/supervisor/oidc"
 	"go.pinniped.dev/internal/httputil/httperr"
 	"go.pinniped.dev/internal/httputil/securityheader"
+	"go.pinniped.dev/internal/net/phttp"
 	"go.pinniped.dev/internal/oidc/provider"
 	"go.pinniped.dev/internal/upstreamoidc"
 	"go.pinniped.dev/pkg/oidcclient/nonce"
@ -274,7 +275,7 @@ func Login(issuer string, clientID string, opts ...Option) (*oidctypes.Token, er
 		ctx: context.Background(),
 		logger: logr.Discard(), // discard logs unless a logger is specified
 		callbacks: make(chan callbackResult, 2),
-		httpClient: http.DefaultClient,
+		httpClient: phttp.Default(nil),
 
 		// Default implementations of external dependencies (to be mocked in tests).
 		generateState: state.Generate,
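The login machinery now defaults to the hardened client from internal/net/phttp rather than http.DefaultClient. That package is not part of this diff; as a rough, hedged sketch of the idea only, the helper presumably hands back an *http.Client whose transport pins the shared ptls.Default profile. The function name and every field below are assumptions, not the real implementation.

```go
package example

import (
	"crypto/x509"
	"net/http"
	"time"

	"go.pinniped.dev/internal/crypto/ptls"
)

// defaultHTTPClient is an illustrative stand-in for phttp.Default; the real
// helper may differ (proxy handling, timeouts, redirects, header hygiene, etc.).
func defaultHTTPClient(rootCAs *x509.CertPool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Proxy:                 http.ProxyFromEnvironment,
			TLSClientConfig:       ptls.Default(rootCAs), // enforce the shared TLS profile
			ForceAttemptHTTP2:     true,
			TLSHandshakeTimeout:   10 * time.Second,
			IdleConnTimeout:       90 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
		},
	}
}
```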
@ -108,7 +108,14 @@ func TestCLIGetKubeconfigStaticToken_Parallel(t *testing.T) {
 	})
 }
 
-func runPinnipedCLI(t *testing.T, envVars []string, pinnipedExe string, args ...string) (string, string) {
+type testingT interface {
+	Helper()
+	Errorf(format string, args ...interface{})
+	FailNow()
+	Logf(format string, args ...interface{})
+}
+
+func runPinnipedCLI(t testingT, envVars []string, pinnipedExe string, args ...string) (string, string) {
 	t.Helper()
 	start := time.Now()
 	var stdout, stderr bytes.Buffer
@ -45,8 +45,10 @@ func TestUnsuccessfulCredentialRequest_Parallel(t *testing.T) {
 	require.Equal(t, "authentication failed", *response.Status.Message)
 }
 
-// TCRs are non-mutating and safe to run in parallel with serial tests, see main_test.go.
-func TestSuccessfulCredentialRequest_Parallel(t *testing.T) {
+// TestSuccessfulCredentialRequest cannot run in parallel because runPinnipedLoginOIDC uses a fixed port
+// for its localhost listener via --listen-port=env.CLIUpstreamOIDC.CallbackURL.Port() per oidcLoginCommand.
+// Since ports are global to the process, tests using oidcLoginCommand must be run serially.
+func TestSuccessfulCredentialRequest(t *testing.T) {
 	env := testlib.IntegrationEnv(t).WithCapability(testlib.ClusterSigningKeyIsAvailable)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
@ -65,6 +65,7 @@ import (
 	identityv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/identity/v1alpha1"
 	loginv1alpha1 "go.pinniped.dev/generated/latest/apis/concierge/login/v1alpha1"
 	pinnipedconciergeclientset "go.pinniped.dev/generated/latest/client/concierge/clientset/versioned"
+	"go.pinniped.dev/internal/crypto/ptls"
 	"go.pinniped.dev/internal/httputil/roundtripper"
 	"go.pinniped.dev/internal/kubeclient"
 	"go.pinniped.dev/internal/testutil"
@ -305,20 +306,18 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		Verb: "get", Group: "", Version: "v1", Resource: "namespaces",
 	})
 
-	// Get pods in concierge namespace and pick one.
-	// this is for tests that require performing actions against a running pod. We use the concierge pod because we already have it handy.
-	// We want to make sure it's a concierge pod (not cert agent), because we need to be able to port-forward a running port.
-	pods, err := adminClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx, metav1.ListOptions{})
+	// Get pods in supervisor namespace and pick one.
+	// this is for tests that require performing actions against a running pod.
+	// We use the supervisor pod because we already have it handy and need to port-forward a running port.
+	// We avoid using the concierge for this because it requires TLS 1.3 which is not supported by older versions of curl.
+	supervisorPods, err := adminClient.CoreV1().Pods(env.SupervisorNamespace).List(ctx,
+		metav1.ListOptions{LabelSelector: "deployment.pinniped.dev=supervisor"})
 	require.NoError(t, err)
-	require.Greater(t, len(pods.Items), 0)
-	var conciergePod *corev1.Pod
-	for _, pod := range pods.Items {
-		pod := pod
-		if !strings.Contains(pod.Name, "kube-cert-agent") {
-			conciergePod = &pod
-		}
-	}
-	require.NotNil(t, conciergePod, "could not find a concierge pod")
+	require.NotEmpty(t, supervisorPods.Items, "could not find supervisor pods")
+	supervisorPod := supervisorPods.Items[0]
+
+	// make sure the supervisor has a default TLS cert during this test so that it can handle a TLS connection
+	createSupervisorDefaultTLSCertificateSecretIfNeeded(ctx, t)
 
 	// Test that the user can perform basic actions through the client with their username and group membership
 	// influencing RBAC checks correctly.
@ -346,7 +345,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		// Run the kubectl port-forward command.
 		timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
 		defer cancelFunc()
-		portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "10443:8443")
+		portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", supervisorPod.Namespace, supervisorPod.Name, "10443:8443")
 		portForwardCmd.Env = envVarsWithProxy
 
 		// Start, but don't wait for the command to finish.
@ -366,7 +365,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		defer cancelFunc()
 		startTime := time.Now()
 		for time.Now().Before(startTime.Add(70 * time.Second)) {
-			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443") // -sS turns off the progressbar but still prints errors
+			curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443/healthz") // -sS turns off the progressbar but still prints errors
 			curlCmd.Stdout = &curlStdOut
 			curlCmd.Stderr = &curlStdErr
 			curlErr := curlCmd.Run()
@ -382,7 +381,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		// curl the endpoint once more, once 70 seconds has elapsed, to make sure the connection is still open.
 		timeout, cancelFunc = context.WithTimeout(ctx, 30*time.Second)
 		defer cancelFunc()
-		curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443") // -sS turns off the progressbar but still prints errors
+		curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10443/healthz") // -sS turns off the progressbar but still prints errors
 		curlCmd.Stdout = &curlStdOut
 		curlCmd.Stderr = &curlStdErr
 		curlErr := curlCmd.Run()
@ -392,9 +391,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			t.Log("curlStdErr: " + curlStdErr.String())
 			t.Log("stdout: " + curlStdOut.String())
 		}
-		// We expect this to 403, but all we care is that it gets through.
 		require.NoError(t, curlErr)
-		require.Contains(t, curlStdOut.String(), `"forbidden: User \"system:anonymous\" cannot get path \"/\""`)
+		require.Contains(t, curlStdOut.String(), "okokokokok") // a few successful healthz responses
 	})
 
 	t.Run("kubectl port-forward and keeping the connection open for over a minute (idle)", func(t *testing.T) {
@ -404,7 +402,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		// Run the kubectl port-forward command.
 		timeout, cancelFunc := context.WithTimeout(ctx, 2*time.Minute)
 		defer cancelFunc()
-		portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", env.ConciergeNamespace, conciergePod.Name, "10444:8443")
+		portForwardCmd, _, portForwardStderr := kubectlCommand(timeout, t, kubeconfigPath, envVarsWithProxy, "port-forward", "--namespace", supervisorPod.Namespace, supervisorPod.Name, "10444:8443")
 		portForwardCmd.Env = envVarsWithProxy
 
 		// Start, but don't wait for the command to finish.
@ -414,13 +412,13 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		assert.EqualErrorf(t, portForwardCmd.Wait(), "signal: killed", `wanted "kubectl port-forward" to get signaled because context was cancelled (stderr: %q)`, portForwardStderr.String())
 		}()
 
-		// Wait to see if we time out. The default timeout is 60 seconds, but the server should recognize this this
+		// Wait to see if we time out. The default timeout is 60 seconds, but the server should recognize that this
 		// is going to be a long-running command and keep the connection open as long as the client stays connected.
 		time.Sleep(70 * time.Second)
 
 		timeout, cancelFunc = context.WithTimeout(ctx, 2*time.Minute)
 		defer cancelFunc()
-		curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10444") // -sS turns off the progressbar but still prints errors
+		curlCmd := exec.CommandContext(timeout, "curl", "-k", "-sS", "https://127.0.0.1:10444/healthz") // -sS turns off the progressbar but still prints errors
 		var curlStdOut, curlStdErr bytes.Buffer
 		curlCmd.Stdout = &curlStdOut
 		curlCmd.Stderr = &curlStdErr
@ -430,9 +428,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			t.Log("curlStdErr: " + curlStdErr.String())
 			t.Log("stdout: " + curlStdOut.String())
 		}
-		// We expect this to 403, but all we care is that it gets through.
 		require.NoError(t, err)
-		require.Contains(t, curlStdOut.String(), `"forbidden: User \"system:anonymous\" cannot get path \"/\""`)
+		require.Equal(t, curlStdOut.String(), "ok")
 	})
 
 	t.Run("using and watching all the basic verbs", func(t *testing.T) {
@ -760,7 +757,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		clusterAdminCredentials, impersonationProxyURL, impersonationProxyCACertPEM, nil,
 	)
 	nestedImpersonationUIDOnly.Wrap(func(rt http.RoundTripper) http.RoundTripper {
-		return roundtripper.Func(func(r *http.Request) (*http.Response, error) {
+		return roundtripper.WrapFunc(rt, func(r *http.Request) (*http.Response, error) {
 			r.Header.Set("iMperSONATE-uid", "some-awesome-uid")
 			return rt.RoundTrip(r)
 		})
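These call sites switch from roundtripper.Func to roundtripper.WrapFunc and now pass the wrapped delegate along. The helper itself is not shown in this diff; a hedged guess at its shape, assuming its purpose is to expose the underlying transport to code that unwraps round tripper chains, might look like this (every name below is illustrative, not the real API):

```go
package example

import "net/http"

// funcRoundTripper is an illustrative stand-in for whatever roundtripper.WrapFunc
// returns; the actual implementation and interfaces are assumptions.
type funcRoundTripper struct {
	delegate http.RoundTripper
	fn       func(*http.Request) (*http.Response, error)
}

func (f *funcRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { return f.fn(r) }

// WrappedRoundTripper would let callers (for example, Kubernetes transport
// helpers that unwrap chains) discover the underlying transport.
func (f *funcRoundTripper) WrappedRoundTripper() http.RoundTripper { return f.delegate }

func wrapFunc(delegate http.RoundTripper, fn func(*http.Request) (*http.Response, error)) http.RoundTripper {
	return &funcRoundTripper{delegate: delegate, fn: fn}
}
```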
@ -798,7 +795,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 		},
 	)
 	nestedImpersonationUID.Wrap(func(rt http.RoundTripper) http.RoundTripper {
-		return roundtripper.Func(func(r *http.Request) (*http.Response, error) {
+		return roundtripper.WrapFunc(rt, func(r *http.Request) (*http.Response, error) {
 			r.Header.Set("imperSONate-uiD", "some-fancy-uid")
 			return rt.RoundTrip(r)
 		})
@ -1115,7 +1112,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 
 		// run the kubectl logs command
 		logLinesCount := 10
-		stdout, err = runKubectl(t, kubeconfigPath, envVarsWithProxy, "logs", "--namespace", conciergePod.Namespace, conciergePod.Name, fmt.Sprintf("--tail=%d", logLinesCount))
+		stdout, err = runKubectl(t, kubeconfigPath, envVarsWithProxy, "logs", "--namespace", supervisorPod.Namespace, supervisorPod.Name, fmt.Sprintf("--tail=%d", logLinesCount))
 		require.NoError(t, err, `"kubectl logs" failed`)
 		// Expect _approximately_ logLinesCount lines in the output
 		// (we can't match 100% exactly due to https://github.com/kubernetes/kubernetes/issues/72628).
@ -1500,6 +1497,20 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 			})
 		})
 	})
+
+	t.Run("assert impersonator runs with secure TLS config", func(t *testing.T) {
+		parallelIfNotEKS(t)
+
+		cancelCtx, cancel := context.WithCancel(ctx)
+		t.Cleanup(cancel)
+
+		startKubectlPortForward(cancelCtx, t, "10445", "443", env.ConciergeAppName+"-proxy", env.ConciergeNamespace)
+
+		stdout, stderr := runNmapSSLEnum(t, "127.0.0.1", 10445)
+
+		require.Empty(t, stderr)
+		require.Contains(t, stdout, getExpectedCiphers(ptls.Default), "stdout:\n%s", stdout)
+	})
 	})
 
 	t.Run("assert correct impersonator service account is being used", func(t *testing.T) {
@ -1956,7 +1967,7 @@ func performImpersonatorDiscovery(ctx context.Context, t *testing.T, env *testli
 	// probe each pod directly for readiness since the concierge status is a lie - it just means a single pod is ready
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		pods, err := adminClient.CoreV1().Pods(env.ConciergeNamespace).List(ctx,
-			metav1.ListOptions{LabelSelector: "app=" + env.ConciergeAppName + ",!kube-cert-agent.pinniped.dev"}) // TODO replace with deployment.pinniped.dev=concierge
+			metav1.ListOptions{LabelSelector: "deployment.pinniped.dev=concierge"})
 		requireEventually.NoError(err)
 		requireEventually.Len(pods.Items, 2) // has to stay in sync with the defaults in our YAML
 
@ -2373,7 +2384,7 @@ func getCredForConfig(t *testing.T, config *rest.Config) *loginv1alpha1.ClusterC
 	config = rest.CopyConfig(config)
 
 	config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
-		return roundtripper.Func(func(req *http.Request) (*http.Response, error) {
+		return roundtripper.WrapFunc(rt, func(req *http.Request) (*http.Response, error) {
 			resp, err := rt.RoundTrip(req)
 
 			r := req
@ -759,8 +759,6 @@ func startLongRunningCommandAndWaitForInitialOutput(
 	cmd := exec.CommandContext(ctx, command, args...)
 
 	var stdoutBuf, stderrBuf syncBuffer
-	cmd.Stdout = &stdoutBuf
-	cmd.Stderr = &stderrBuf
 	cmd.Stdout = io.MultiWriter(os.Stdout, &stdoutBuf)
 	cmd.Stderr = io.MultiWriter(os.Stderr, &stderrBuf)
 
256 test/integration/securetls_test.go Normal file
@ -0,0 +1,256 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package integration

import (
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"fmt"
	"net/http"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"go.pinniped.dev/internal/crypto/ptls"
	"go.pinniped.dev/internal/testutil/tlsserver"
	"go.pinniped.dev/test/testlib"
)

// TLS checks safe to run in parallel with serial tests, see main_test.go.
func TestSecureTLSPinnipedCLIToKAS_Parallel(t *testing.T) {
	_ = testlib.IntegrationEnv(t)

	server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tlsserver.AssertTLS(t, r, ptls.Secure) // pinniped CLI uses ptls.Secure when talking to KAS
		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{"kind":"TokenCredentialRequest","apiVersion":"login.concierge.pinniped.dev/v1alpha1",`+
			`"status":{"credential":{"token":"some-fancy-token"}}}`)
	}), tlsserver.RecordTLSHello)

	ca := tlsserver.TLSTestServerCA(server)

	pinnipedExe := testlib.PinnipedCLIPath(t)

	stdout, stderr := runPinnipedCLI(t, nil, pinnipedExe, "login", "static",
		"--token", "does-not-matter",
		"--concierge-authenticator-type", "webhook",
		"--concierge-authenticator-name", "does-not-matter",
		"--concierge-ca-bundle-data", base64.StdEncoding.EncodeToString(ca),
		"--concierge-endpoint", server.URL,
		"--enable-concierge",
		"--credential-cache", "",
	)

	require.Empty(t, stderr)
	require.Equal(t, `{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1",`+
		`"spec":{"interactive":false},"status":{"expirationTimestamp":null,"token":"some-fancy-token"}}
`, stdout)
}

// TLS checks safe to run in parallel with serial tests, see main_test.go.
func TestSecureTLSPinnipedCLIToSupervisor_Parallel(t *testing.T) {
	_ = testlib.IntegrationEnv(t)

	server := tlsserver.TLSTestServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tlsserver.AssertTLS(t, r, ptls.Default) // pinniped CLI uses ptls.Default when talking to supervisor
		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{"issuer":"https://not-a-good-issuer"}`)
	}), tlsserver.RecordTLSHello)

	ca := tlsserver.TLSTestServerCA(server)

	pinnipedExe := testlib.PinnipedCLIPath(t)

	stdout, stderr := runPinnipedCLI(&fakeT{T: t}, nil, pinnipedExe, "login", "oidc",
		"--ca-bundle-data", base64.StdEncoding.EncodeToString(ca),
		"--issuer", server.URL,
		"--credential-cache", "",
		"--upstream-identity-provider-flow", "cli_password",
		"--upstream-identity-provider-name", "does-not-matter",
		"--upstream-identity-provider-type", "oidc",
	)

	require.Equal(t, `Error: could not complete Pinniped login: could not perform OIDC discovery for "`+
		server.URL+`": oidc: issuer did not match the issuer returned by provider, expected "`+
		server.URL+`" got "https://not-a-good-issuer"
`, stderr)
	require.Empty(t, stdout)
}

// TLS checks safe to run in parallel with serial tests, see main_test.go.
func TestSecureTLSConciergeAggregatedAPI_Parallel(t *testing.T) {
	env := testlib.IntegrationEnv(t)

	cancelCtx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	startKubectlPortForward(cancelCtx, t, "10446", "443", env.ConciergeAppName+"-api", env.ConciergeNamespace)

	stdout, stderr := runNmapSSLEnum(t, "127.0.0.1", 10446)

	require.Empty(t, stderr)
	require.Contains(t, stdout, getExpectedCiphers(ptls.Secure), "stdout:\n%s", stdout)
}

func TestSecureTLSSupervisor(t *testing.T) { // does not run in parallel because of the createSupervisorDefaultTLSCertificateSecretIfNeeded call
	env := testlib.IntegrationEnv(t)

	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	// make sure the supervisor has a default TLS cert during this test so that it can handle a TLS connection
	createSupervisorDefaultTLSCertificateSecretIfNeeded(ctx, t)

	startKubectlPortForward(ctx, t, "10447", "443", env.SupervisorAppName+"-nodeport", env.SupervisorNamespace)

	stdout, stderr := runNmapSSLEnum(t, "127.0.0.1", 10447)

	// supervisor's cert is ECDSA
	defaultECDSAOnly := func(rootCAs *x509.CertPool) *tls.Config {
		c := ptls.Default(rootCAs)
		ciphers := make([]uint16, 0, len(c.CipherSuites)/2)
		for _, id := range c.CipherSuites {
			id := id
			if !strings.Contains(tls.CipherSuiteName(id), "_ECDSA_") {
				continue
			}
			ciphers = append(ciphers, id)
		}
		c.CipherSuites = ciphers
		return c
	}

	require.Empty(t, stderr)
	require.Contains(t, stdout, getExpectedCiphers(defaultECDSAOnly), "stdout:\n%s", stdout)
}

type fakeT struct {
	*testing.T
}

func (t *fakeT) FailNow() {
	t.Errorf("fakeT ignored FailNow")
}

func (t *fakeT) Errorf(format string, args ...interface{}) {
	t.Cleanup(func() {
		if !t.Failed() {
			return
		}

		t.Logf("reporting previously ignored errors since main test failed:\n"+format, args...)
	})
}

func runNmapSSLEnum(t *testing.T, host string, port uint16) (string, string) {
	t.Helper()

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	version, err := exec.CommandContext(ctx, "nmap", "-V").CombinedOutput()
	require.NoError(t, err)

	versionMatches := regexp.MustCompile(`Nmap version 7\.(?P<minor>\d+)`).FindStringSubmatch(string(version))
	require.Len(t, versionMatches, 2)
	minorVersion, err := strconv.Atoi(versionMatches[1])
	require.NoError(t, err)
	require.GreaterOrEqual(t, minorVersion, 92, "nmap >= 7.92.x is required")

	var stdout, stderr bytes.Buffer
	//nolint:gosec // we are not performing malicious argument injection against ourselves
	cmd := exec.CommandContext(ctx, "nmap", "--script", "ssl-enum-ciphers",
		"-p", strconv.FormatUint(uint64(port), 10),
		host,
	)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	require.NoErrorf(t, cmd.Run(), "stderr:\n%s\n\nstdout:\n%s\n\n", stderr.String(), stdout.String())

	return stdout.String(), stderr.String()
}

func getExpectedCiphers(configFunc ptls.ConfigFunc) string {
	config := configFunc(nil)
	secureConfig := ptls.Secure(nil)

	skip12 := config.MinVersion == secureConfig.MinVersion

	var tls12Bit, tls13Bit string

	if !skip12 {
		sort.SliceStable(config.CipherSuites, func(i, j int) bool {
			a := tls.CipherSuiteName(config.CipherSuites[i])
			b := tls.CipherSuiteName(config.CipherSuites[j])

			ok1 := strings.Contains(a, "_ECDSA_")
			ok2 := strings.Contains(b, "_ECDSA_")

			if ok1 && ok2 {
				return false
			}

			return ok1
		})

		var s strings.Builder
		for i, id := range config.CipherSuites {
			s.WriteString(fmt.Sprintf(tls12Item, tls.CipherSuiteName(id)))
			if i == len(config.CipherSuites)-1 {
				break
			}
			s.WriteString("\n")
		}
		tls12Bit = fmt.Sprintf(tls12Base, s.String())
	}

	var s strings.Builder
	for i, id := range secureConfig.CipherSuites {
		s.WriteString(fmt.Sprintf(tls13Item, strings.Replace(tls.CipherSuiteName(id), "TLS_", "TLS_AKE_WITH_", 1)))
		if i == len(secureConfig.CipherSuites)-1 {
			break
		}
		s.WriteString("\n")
	}
	tls13Bit = fmt.Sprintf(tls13Base, s.String())

	return fmt.Sprintf(baseItem, tls12Bit, tls13Bit)
}

const (
	// this surrounds the tls 1.2 and 1.3 text in a way that guarantees that other TLS versions are not supported.
	baseItem = `/tcp open unknown
| ssl-enum-ciphers: %s%s
|_ least strength: A

Nmap done: 1 IP address (1 host up) scanned in`

	// the "cipher preference: client" bit is due to a bug in nmap.
	// https://github.com/nmap/nmap/issues/1691#issuecomment-536919978
	tls12Base = `
| TLSv1.2:
| ciphers:
%s
| compressors:
| NULL
| cipher preference: client`

	tls13Base = `
| TLSv1.3:
| ciphers:
%s
| cipher preference: server`

	tls12Item = `| %s (secp256r1) - A`
	tls13Item = `| %s (ecdh_x25519) - A`
)
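The three nmap-based checks in this commit (concierge aggregated API, impersonation proxy, and supervisor) all follow the same port-forward-then-scan pattern. A hypothetical consolidation of that pattern, shown only as a sketch, assuming it lives in the same test package so it can reuse startKubectlPortForward, runNmapSSLEnum, and getExpectedCiphers defined above (imports as already present in that package):

```go
// requireTLSProfileViaNmap is an illustrative helper, not part of this commit.
// It mirrors the pattern above: port-forward a service locally, run the nmap
// ssl-enum-ciphers script against it, and require the expected ptls profile.
func requireTLSProfileViaNmap(ctx context.Context, t *testing.T, localPort, remotePort, serviceName, namespace string, expected ptls.ConfigFunc) {
	t.Helper()

	startKubectlPortForward(ctx, t, localPort, remotePort, serviceName, namespace)

	port, err := strconv.ParseUint(localPort, 10, 16)
	require.NoError(t, err)

	stdout, stderr := runNmapSSLEnum(t, "127.0.0.1", uint16(port))

	require.Empty(t, stderr)
	require.Contains(t, stdout, getExpectedCiphers(expected), "stdout:\n%s", stdout)
}
```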
@ -320,6 +320,22 @@ func createTLSCertificateSecret(ctx context.Context, t *testing.T, ns string, ho
 	return ca
 }
 
+func createSupervisorDefaultTLSCertificateSecretIfNeeded(ctx context.Context, t *testing.T) {
+	env := testlib.IntegrationEnv(t)
+	adminClient := testlib.NewKubernetesClientset(t)
+
+	ns := env.SupervisorNamespace
+	name := defaultTLSCertSecretName(env)
+
+	_, err := adminClient.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
+
+	if k8serrors.IsNotFound(err) {
+		_ = createTLSCertificateSecret(ctx, t, ns, "cert-hostname-doesnt-matter", nil, name, adminClient)
+	} else {
+		require.NoError(t, err)
+	}
+}
+
 func temporarilyRemoveAllFederationDomainsAndDefaultTLSCertSecret(
 	ctx context.Context,
 	t *testing.T,
@ -132,7 +132,7 @@ func newClientsetWithConfig(t *testing.T, config *rest.Config) kubernetes.Interf
 func NewAnonymousClientRestConfig(t *testing.T) *rest.Config {
 	t.Helper()
 
-	return rest.AnonymousClientConfig(NewClientConfig(t))
+	return kubeclient.SecureAnonymousClientConfig(NewClientConfig(t))
 }
 
 // Starting with an anonymous client config, add a cert and key to use for authentication in the API server.