Merge pull request #589 from vmware-tanzu/ldap-get-kubeconfig
WIP: Support for Supervisor upstream LDAP IDPs in `pinniped get kubeconfig`
commit 20b86ac0a9
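For context, an illustrative invocation of the changed command (the issuer URL and IDP name below are hypothetical placeholder values, not part of the diff): when the Supervisor advertises more than one upstream IDP, the new flags disambiguate which one the generated kubeconfig should use, e.g. `pinniped get kubeconfig --oidc-issuer https://supervisor.example.com/issuer --upstream-identity-provider-name my-ldap-idp --upstream-identity-provider-type ldap`. When the Supervisor advertises exactly one upstream IDP, both flags can be omitted and are filled in automatically by the discovery logic added below.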
@@ -8,8 +8,10 @@ import (
    "crypto/tls"
    "crypto/x509"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "os"
@@ -62,6 +64,8 @@ type getKubeconfigOIDCParams struct {
    debugSessionCache bool
    caBundle          caBundleFlag
    requestAudience   string
+   upstreamIDPName   string
+   upstreamIDPType   string
}

type getKubeconfigConciergeParams struct {
@@ -91,6 +95,19 @@ type getKubeconfigParams struct {
    credentialCachePathSet bool
}

type supervisorOIDCDiscoveryResponse struct {
    PinnipedIDPsEndpoint string `json:"pinniped_identity_providers_endpoint"`
}

type supervisorIDPsDiscoveryResponse struct {
    PinnipedIDPs []pinnipedIDPResponse `json:"pinniped_identity_providers"`
}

type pinnipedIDPResponse struct {
    Name string `json:"name"`
    Type string `json:"type"`
}

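For illustration (the JSON keys come from the struct tags above; the issuer URL and IDP values are hypothetical): these types model two documents. The standard OIDC discovery document gains a custom key pointing at the Supervisor's IDP discovery endpoint, and that endpoint returns the list of upstream IDPs, roughly:

    {"pinniped_identity_providers_endpoint": "https://supervisor.example.com/issuer/pinniped_identity_providers"}

    {"pinniped_identity_providers": [{"name": "my-ldap-idp", "type": "ldap"}]}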
func kubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
    var (
        cmd = &cobra.Command{
@@ -128,6 +145,8 @@ func kubeconfigCommand(deps kubeconfigDeps) *cobra.Command {
    f.Var(&flags.oidc.caBundle, "oidc-ca-bundle", "Path to TLS certificate authority bundle (PEM format, optional, can be repeated)")
    f.BoolVar(&flags.oidc.debugSessionCache, "oidc-debug-session-cache", false, "Print debug logs related to the OpenID Connect session cache")
    f.StringVar(&flags.oidc.requestAudience, "oidc-request-audience", "", "Request a token with an alternate audience using RFC8693 token exchange")
+   f.StringVar(&flags.oidc.upstreamIDPName, "upstream-identity-provider-name", "", "The name of the upstream identity provider used during login with a Supervisor")
+   f.StringVar(&flags.oidc.upstreamIDPType, "upstream-identity-provider-type", "", "The type of the upstream identity provider used during login with a Supervisor (e.g. 'oidc', 'ldap')")
    f.StringVar(&flags.kubeconfigPath, "kubeconfig", os.Getenv("KUBECONFIG"), "Path to kubeconfig file")
    f.StringVar(&flags.kubeconfigContextOverride, "kubeconfig-context", "", "Kubeconfig context name (default: current active context)")
    f.BoolVar(&flags.skipValidate, "skip-validation", false, "Skip final validation of the kubeconfig (default: false)")
@@ -165,19 +184,6 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
        return fmt.Errorf("invalid API group suffix: %w", err)
    }

-   execConfig := clientcmdapi.ExecConfig{
-       APIVersion: clientauthenticationv1beta1.SchemeGroupVersion.String(),
-       Args:       []string{},
-       Env:        []clientcmdapi.ExecEnvVar{},
-   }
-
-   var err error
-   execConfig.Command, err = deps.getPathToSelf()
-   if err != nil {
-       return fmt.Errorf("could not determine the Pinniped executable path: %w", err)
-   }
-   execConfig.ProvideClusterInfo = true
-
    clientConfig := newClientConfig(flags.kubeconfigPath, flags.kubeconfigContextOverride)
    currentKubeConfig, err := clientConfig.RawConfig()
    if err != nil {
@@ -221,6 +227,47 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
        if err := discoverAuthenticatorParams(authenticator, &flags, deps.log); err != nil {
            return err
        }

        // Point kubectl at the concierge endpoint.
        cluster.Server = flags.concierge.endpoint
        cluster.CertificateAuthorityData = flags.concierge.caBundle
    }

    // If there is an issuer, and if both upstream flags are not already set, then try to discover Supervisor upstream IDP.
    if len(flags.oidc.issuer) > 0 && (flags.oidc.upstreamIDPType == "" || flags.oidc.upstreamIDPName == "") {
        if err := discoverSupervisorUpstreamIDP(ctx, &flags); err != nil {
            return err
        }
    }

    execConfig, err := newExecConfig(deps, flags)
    if err != nil {
        return err
    }

    kubeconfig := newExecKubeconfig(cluster, execConfig, newKubeconfigNames)
    if err := validateKubeconfig(ctx, flags, kubeconfig, deps.log); err != nil {
        return err
    }

    return writeConfigAsYAML(out, kubeconfig)
}

func newExecConfig(deps kubeconfigDeps, flags getKubeconfigParams) (*clientcmdapi.ExecConfig, error) {
    execConfig := &clientcmdapi.ExecConfig{
        APIVersion:         clientauthenticationv1beta1.SchemeGroupVersion.String(),
        Args:               []string{},
        Env:                []clientcmdapi.ExecEnvVar{},
        ProvideClusterInfo: true,
    }

    var err error
    execConfig.Command, err = deps.getPathToSelf()
    if err != nil {
        return nil, fmt.Errorf("could not determine the Pinniped executable path: %w", err)
    }

    if !flags.concierge.disabled {
        // Append the flags to configure the Concierge credential exchange at runtime.
        execConfig.Args = append(execConfig.Args,
            "--enable-concierge",
@@ -230,10 +277,6 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
            "--concierge-endpoint="+flags.concierge.endpoint,
            "--concierge-ca-bundle-data="+base64.StdEncoding.EncodeToString(flags.concierge.caBundle),
        )
-
-       // Point kubectl at the concierge endpoint.
-       cluster.Server = flags.concierge.endpoint
-       cluster.CertificateAuthorityData = flags.concierge.caBundle
    }

    // If --credential-cache is set, pass it through.
@@ -244,7 +287,7 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
    // If one of the --static-* flags was passed, output a config that runs `pinniped login static`.
    if flags.staticToken != "" || flags.staticTokenEnvName != "" {
        if flags.staticToken != "" && flags.staticTokenEnvName != "" {
-           return fmt.Errorf("only one of --static-token and --static-token-env can be specified")
+           return nil, fmt.Errorf("only one of --static-token and --static-token-env can be specified")
        }
        execConfig.Args = append([]string{"login", "static"}, execConfig.Args...)
        if flags.staticToken != "" {
@@ -253,18 +296,13 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
        if flags.staticTokenEnvName != "" {
            execConfig.Args = append(execConfig.Args, "--token-env="+flags.staticTokenEnvName)
        }
-
-       kubeconfig := newExecKubeconfig(cluster, &execConfig, newKubeconfigNames)
-       if err := validateKubeconfig(ctx, flags, kubeconfig, deps.log); err != nil {
-           return err
-       }
-       return writeConfigAsYAML(out, kubeconfig)
+       return execConfig, nil
    }

    // Otherwise continue to parse the OIDC-related flags and output a config that runs `pinniped login oidc`.
    execConfig.Args = append([]string{"login", "oidc"}, execConfig.Args...)
    if flags.oidc.issuer == "" {
-       return fmt.Errorf("could not autodiscover --oidc-issuer and none was provided")
+       return nil, fmt.Errorf("could not autodiscover --oidc-issuer and none was provided")
    }
    execConfig.Args = append(execConfig.Args,
        "--issuer="+flags.oidc.issuer,
@@ -289,11 +327,14 @@ func runGetKubeconfig(ctx context.Context, out io.Writer, deps kubeconfigDeps, f
    if flags.oidc.requestAudience != "" {
        execConfig.Args = append(execConfig.Args, "--request-audience="+flags.oidc.requestAudience)
    }
-   kubeconfig := newExecKubeconfig(cluster, &execConfig, newKubeconfigNames)
-   if err := validateKubeconfig(ctx, flags, kubeconfig, deps.log); err != nil {
-       return err
+   if flags.oidc.upstreamIDPName != "" {
+       execConfig.Args = append(execConfig.Args, "--upstream-identity-provider-name="+flags.oidc.upstreamIDPName)
    }
-   return writeConfigAsYAML(out, kubeconfig)
+   if flags.oidc.upstreamIDPType != "" {
+       execConfig.Args = append(execConfig.Args, "--upstream-identity-provider-type="+flags.oidc.upstreamIDPType)
    }

    return execConfig, nil
}

type kubeconfigNames struct{ ContextName, UserName, ClusterName string }
@@ -688,3 +729,164 @@ func hasPendingStrategy(credentialIssuer *configv1alpha1.CredentialIssuer) bool
    }
    return false
}

func discoverSupervisorUpstreamIDP(ctx context.Context, flags *getKubeconfigParams) error {
    transport := &http.Transport{
        TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
        Proxy:           http.ProxyFromEnvironment,
    }
    httpClient := &http.Client{Transport: transport}
    if flags.oidc.caBundle != nil {
        rootCAs := x509.NewCertPool()
        ok := rootCAs.AppendCertsFromPEM(flags.oidc.caBundle)
        if !ok {
            return fmt.Errorf("unable to fetch OIDC discovery data from issuer: could not parse CA bundle")
        }
        transport.TLSClientConfig.RootCAs = rootCAs
    }

    pinnipedIDPsEndpoint, err := discoverIDPsDiscoveryEndpointURL(ctx, flags.oidc.issuer, httpClient)
    if err != nil {
        return err
    }
    if pinnipedIDPsEndpoint == "" {
        // The issuer is not advertising itself as a Pinniped Supervisor which supports upstream IDP discovery.
        return nil
    }

    upstreamIDPs, err := discoverAllAvailableSupervisorUpstreamIDPs(ctx, pinnipedIDPsEndpoint, httpClient)
    if err != nil {
        return err
    }
    if len(upstreamIDPs) == 1 {
        flags.oidc.upstreamIDPName = upstreamIDPs[0].Name
        flags.oidc.upstreamIDPType = upstreamIDPs[0].Type
    } else if len(upstreamIDPs) > 1 {
        idpName, idpType, err := selectUpstreamIDP(upstreamIDPs, flags.oidc.upstreamIDPName, flags.oidc.upstreamIDPType)
        if err != nil {
            return err
        }
        flags.oidc.upstreamIDPName = idpName
        flags.oidc.upstreamIDPType = idpType
    }
    return nil
}

func discoverIDPsDiscoveryEndpointURL(ctx context.Context, issuer string, httpClient *http.Client) (string, error) {
    issuerDiscoveryURL := issuer + "/.well-known/openid-configuration"
    request, err := http.NewRequestWithContext(ctx, http.MethodGet, issuerDiscoveryURL, nil)
    if err != nil {
        return "", fmt.Errorf("while forming request to issuer URL: %w", err)
    }

    response, err := httpClient.Do(request)
    if err != nil {
        return "", fmt.Errorf("unable to fetch OIDC discovery data from issuer: %w", err)
    }
    defer func() {
        _ = response.Body.Close()
    }()
    if response.StatusCode == http.StatusNotFound {
        // 404 Not Found is not an error because OIDC discovery is an optional part of the OIDC spec.
        return "", nil
    }
    if response.StatusCode != http.StatusOK {
        // Other types of error responses aside from 404 are not expected.
        return "", fmt.Errorf("unable to fetch OIDC discovery data from issuer: unexpected http response status: %s", response.Status)
    }

    rawBody, err := ioutil.ReadAll(response.Body)
    if err != nil {
        return "", fmt.Errorf("unable to fetch OIDC discovery data from issuer: could not read response body: %w", err)
    }

    var body supervisorOIDCDiscoveryResponse
    err = json.Unmarshal(rawBody, &body)
    if err != nil {
        return "", fmt.Errorf("unable to fetch OIDC discovery data from issuer: could not parse response JSON: %w", err)
    }

    return body.PinnipedIDPsEndpoint, nil
}

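Note: the URL queried above is the standard OIDC discovery location, formed by appending `/.well-known/openid-configuration` to the issuer. For a hypothetical issuer `https://supervisor.example.com/issuer`, the request would go to `https://supervisor.example.com/issuer/.well-known/openid-configuration`, and a 404 there simply means the issuer is not a Pinniped Supervisor that supports upstream IDP discovery.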
func discoverAllAvailableSupervisorUpstreamIDPs(ctx context.Context, pinnipedIDPsEndpoint string, httpClient *http.Client) ([]pinnipedIDPResponse, error) {
    request, err := http.NewRequestWithContext(ctx, http.MethodGet, pinnipedIDPsEndpoint, nil)
    if err != nil {
        return nil, fmt.Errorf("while forming request to IDP discovery URL: %w", err)
    }

    response, err := httpClient.Do(request)
    if err != nil {
        return nil, fmt.Errorf("unable to fetch IDP discovery data from issuer: %w", err)
    }
    defer func() {
        _ = response.Body.Close()
    }()
    if response.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unable to fetch IDP discovery data from issuer: unexpected http response status: %s", response.Status)
    }

    rawBody, err := ioutil.ReadAll(response.Body)
    if err != nil {
        return nil, fmt.Errorf("unable to fetch IDP discovery data from issuer: could not read response body: %w", err)
    }

    var body supervisorIDPsDiscoveryResponse
    err = json.Unmarshal(rawBody, &body)
    if err != nil {
        return nil, fmt.Errorf("unable to fetch IDP discovery data from issuer: could not parse response JSON: %w", err)
    }

    return body.PinnipedIDPs, nil
}

func selectUpstreamIDP(pinnipedIDPs []pinnipedIDPResponse, idpName, idpType string) (string, string, error) {
    pinnipedIDPsString, _ := json.Marshal(pinnipedIDPs)
    switch {
    case idpType != "":
        discoveredName := ""
        for _, idp := range pinnipedIDPs {
            if idp.Type == idpType {
                if discoveredName != "" {
                    return "", "", fmt.Errorf(
                        "multiple Supervisor upstream identity providers of type \"%s\" were found,"+
                            " so the --upstream-identity-provider-name flag must be specified. "+
                            "Found these upstreams: %s",
                        idpType, pinnipedIDPsString)
                }
                discoveredName = idp.Name
            }
        }
        if discoveredName == "" {
            return "", "", fmt.Errorf(
                "no Supervisor upstream identity providers of type \"%s\" were found."+
                    " Found these upstreams: %s", idpType, pinnipedIDPsString)
        }
        return discoveredName, idpType, nil
    case idpName != "":
        discoveredType := ""
        for _, idp := range pinnipedIDPs {
            if idp.Name == idpName {
                if discoveredType != "" {
                    return "", "", fmt.Errorf(
                        "multiple Supervisor upstream identity providers with name \"%s\" were found,"+
                            " so the --upstream-identity-provider-type flag must be specified. Found these upstreams: %s",
                        idpName, pinnipedIDPsString)
                }
                discoveredType = idp.Type
            }
        }
        if discoveredType == "" {
            return "", "", fmt.Errorf(
                "no Supervisor upstream identity providers with name \"%s\" were found."+
                    " Found these upstreams: %s", idpName, pinnipedIDPsString)
        }
        return idpName, discoveredType, nil
    default:
        return "", "", fmt.Errorf(
            "multiple Supervisor upstream identity providers were found,"+
                " so the --upstream-identity-provider-name/--upstream-identity-provider-type flags must be specified."+
                " Found these upstreams: %s",
            pinnipedIDPsString)
    }
}
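A minimal sketch (not part of the diff) of how the selection logic above behaves, using hypothetical IDP names for illustration:

    idps := []pinnipedIDPResponse{
        {Name: "my-ldap-idp", Type: "ldap"},
        {Name: "my-oidc-idp", Type: "oidc"},
    }

    // Only the type flag was given, and exactly one "ldap" upstream exists,
    // so its name is discovered and returned.
    name, idpType, err := selectUpstreamIDP(idps, "", "ldap")
    fmt.Println(name, idpType, err) // my-ldap-idp ldap <nil>

    // Neither flag was given and more than one upstream exists, so an error
    // is returned asking the user to pass the new flags.
    _, _, err = selectUpstreamIDP(idps, "", "")
    fmt.Println(err != nil) // true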
|
File diff suppressed because it is too large
Load Diff
go.mod
@@ -6,6 +6,7 @@ require (
    cloud.google.com/go v0.60.0 // indirect
    github.com/MakeNowJust/heredoc/v2 v2.0.1
    github.com/coreos/go-oidc/v3 v3.0.0
    github.com/creack/pty v1.1.11
    github.com/davecgh/go-spew v1.1.1
    github.com/go-ldap/ldap/v3 v3.3.0
    github.com/go-logr/logr v0.4.0
go.sum
@@ -148,6 +148,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -1,4 +1,4 @@
-// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package discovery provides a handler for the OIDC discovery endpoint.
@@ -37,6 +37,17 @@ type Metadata struct {
    ClaimsSupported []string `json:"claims_supported"`

    // ^^^ Optional ^^^

    // vvv Custom vvv

    PinnipedIDPsEndpoint string `json:"pinniped_identity_providers_endpoint"`

    // ^^^ Custom ^^^
}

type IdentityProviderMetadata struct {
    Name string `json:"name"`
    Type string `json:"type"`
}

// NewHandler returns an http.Handler that serves an OIDC discovery endpoint.
@@ -46,6 +57,7 @@ func NewHandler(issuerURL string) http.Handler {
        AuthorizationEndpoint:            issuerURL + oidc.AuthorizationEndpointPath,
        TokenEndpoint:                    issuerURL + oidc.TokenEndpointPath,
        JWKSURI:                          issuerURL + oidc.JWKSEndpointPath,
+       PinnipedIDPsEndpoint:             issuerURL + oidc.PinnipedIDPsPath,
        ResponseTypesSupported:           []string{"code"},
        SubjectTypesSupported:            []string{"public"},
        IDTokenSigningAlgValuesSupported: []string{"ES256"},
@@ -1,4 +1,4 @@
-// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package discovery
@@ -39,6 +39,7 @@ func TestDiscovery(t *testing.T) {
    AuthorizationEndpoint:            "https://some-issuer.com/some/path/oauth2/authorize",
    TokenEndpoint:                    "https://some-issuer.com/some/path/oauth2/token",
    JWKSURI:                          "https://some-issuer.com/some/path/jwks.json",
+   PinnipedIDPsEndpoint:             "https://some-issuer.com/some/path/pinniped_identity_providers",
    ResponseTypesSupported:           []string{"code"},
    SubjectTypesSupported:            []string{"public"},
    IDTokenSigningAlgValuesSupported: []string{"ES256"},
internal/oidc/idpdiscovery/idp_discovery_handler.go (new file, 75 lines)
@@ -0,0 +1,75 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package idpdiscovery provides a handler for the upstream IDP discovery endpoint.
package idpdiscovery

import (
    "bytes"
    "encoding/json"
    "net/http"
    "sort"

    "go.pinniped.dev/internal/oidc"
)

const (
    idpDiscoveryTypeLDAP = "ldap"
    idpDiscoveryTypeOIDC = "oidc"
)

type response struct {
    IDPs []identityProviderResponse `json:"pinniped_identity_providers"`
}

type identityProviderResponse struct {
    Name string `json:"name"`
    Type string `json:"type"`
}

// NewHandler returns an http.Handler that serves the upstream IDP discovery endpoint.
func NewHandler(upstreamIDPs oidc.UpstreamIdentityProvidersLister) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.Method != http.MethodGet {
            http.Error(w, `Method not allowed (try GET)`, http.StatusMethodNotAllowed)
            return
        }

        encodedMetadata, encodeErr := responseAsJSON(upstreamIDPs)
        if encodeErr != nil {
            http.Error(w, encodeErr.Error(), http.StatusInternalServerError)
            return
        }

        w.Header().Set("Content-Type", "application/json")
        if _, err := w.Write(encodedMetadata); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
    })
}

func responseAsJSON(upstreamIDPs oidc.UpstreamIdentityProvidersLister) ([]byte, error) {
    r := response{
        IDPs: []identityProviderResponse{},
    }

    // The cache of IDPs could change at any time, so always recalculate the list.
    for _, provider := range upstreamIDPs.GetLDAPIdentityProviders() {
        r.IDPs = append(r.IDPs, identityProviderResponse{Name: provider.GetName(), Type: idpDiscoveryTypeLDAP})
    }
    for _, provider := range upstreamIDPs.GetOIDCIdentityProviders() {
        r.IDPs = append(r.IDPs, identityProviderResponse{Name: provider.GetName(), Type: idpDiscoveryTypeOIDC})
    }

    // Nobody likes an API that changes the results unnecessarily. :)
    sort.SliceStable(r.IDPs, func(i, j int) bool {
        return r.IDPs[i].Name < r.IDPs[j].Name
    })

    var b bytes.Buffer
    encodeErr := json.NewEncoder(&b).Encode(&r)
    encodedMetadata := b.Bytes()

    return encodedMetadata, encodeErr
}
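For reference (IDP names hypothetical), the handler above would serve a body like `{"pinniped_identity_providers":[{"name":"my-ldap-idp","type":"ldap"},{"name":"my-oidc-idp","type":"oidc"}]}`, sorted stably by name, as the test below verifies.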
internal/oidc/idpdiscovery/idp_discovery_handler_test.go (new file, 126 lines)
@@ -0,0 +1,126 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package idpdiscovery

import (
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/stretchr/testify/require"

    "go.pinniped.dev/internal/oidc"
    "go.pinniped.dev/internal/oidc/provider"
    "go.pinniped.dev/internal/testutil/oidctestutil"
)

func TestIDPDiscovery(t *testing.T) {
    tests := []struct {
        name string

        method string
        path   string

        wantStatus                 int
        wantContentType            string
        wantFirstResponseBodyJSON  interface{}
        wantSecondResponseBodyJSON interface{}
        wantBodyString             string
    }{
        {
            name:            "happy path",
            method:          http.MethodGet,
            path:            "/some/path" + oidc.WellKnownEndpointPath,
            wantStatus:      http.StatusOK,
            wantContentType: "application/json",
            wantFirstResponseBodyJSON: &response{
                IDPs: []identityProviderResponse{
                    {Name: "a-some-ldap-idp", Type: "ldap"},
                    {Name: "a-some-oidc-idp", Type: "oidc"},
                    {Name: "x-some-idp", Type: "ldap"},
                    {Name: "x-some-idp", Type: "oidc"},
                    {Name: "z-some-ldap-idp", Type: "ldap"},
                    {Name: "z-some-oidc-idp", Type: "oidc"},
                },
            },
            wantSecondResponseBodyJSON: &response{
                IDPs: []identityProviderResponse{
                    {Name: "some-other-ldap-idp-1", Type: "ldap"},
                    {Name: "some-other-ldap-idp-2", Type: "ldap"},
                    {Name: "some-other-oidc-idp-1", Type: "oidc"},
                    {Name: "some-other-oidc-idp-2", Type: "oidc"},
                },
            },
        },
        {
            name:            "bad method",
            method:          http.MethodPost,
            path:            oidc.WellKnownEndpointPath,
            wantStatus:      http.StatusMethodNotAllowed,
            wantContentType: "text/plain; charset=utf-8",
            wantBodyString:  "Method not allowed (try GET)\n",
        },
    }
    for _, test := range tests {
        test := test
        t.Run(test.name, func(t *testing.T) {
            idpLister := oidctestutil.NewUpstreamIDPListerBuilder().
                WithOIDC(&oidctestutil.TestUpstreamOIDCIdentityProvider{Name: "z-some-oidc-idp"}).
                WithOIDC(&oidctestutil.TestUpstreamOIDCIdentityProvider{Name: "x-some-idp"}).
                WithLDAP(&oidctestutil.TestUpstreamLDAPIdentityProvider{Name: "a-some-ldap-idp"}).
                WithOIDC(&oidctestutil.TestUpstreamOIDCIdentityProvider{Name: "a-some-oidc-idp"}).
                WithLDAP(&oidctestutil.TestUpstreamLDAPIdentityProvider{Name: "z-some-ldap-idp"}).
                WithLDAP(&oidctestutil.TestUpstreamLDAPIdentityProvider{Name: "x-some-idp"}).
                Build()

            handler := NewHandler(idpLister)
            req := httptest.NewRequest(test.method, test.path, nil)
            rsp := httptest.NewRecorder()
            handler.ServeHTTP(rsp, req)

            require.Equal(t, test.wantStatus, rsp.Code)

            require.Equal(t, test.wantContentType, rsp.Header().Get("Content-Type"))

            if test.wantFirstResponseBodyJSON != nil {
                wantJSON, err := json.Marshal(test.wantFirstResponseBodyJSON)
                require.NoError(t, err)
                require.JSONEq(t, string(wantJSON), rsp.Body.String())
            }

            if test.wantBodyString != "" {
                require.Equal(t, test.wantBodyString, rsp.Body.String())
            }

            // Change the list of IDPs in the cache.
            idpLister.SetLDAPIdentityProviders([]provider.UpstreamLDAPIdentityProviderI{
                &oidctestutil.TestUpstreamLDAPIdentityProvider{Name: "some-other-ldap-idp-1"},
                &oidctestutil.TestUpstreamLDAPIdentityProvider{Name: "some-other-ldap-idp-2"},
            })
            idpLister.SetOIDCIdentityProviders([]provider.UpstreamOIDCIdentityProviderI{
                &oidctestutil.TestUpstreamOIDCIdentityProvider{Name: "some-other-oidc-idp-1"},
                &oidctestutil.TestUpstreamOIDCIdentityProvider{Name: "some-other-oidc-idp-2"},
            })

            // Make the same request to the same handler instance again, and expect different results.
            rsp = httptest.NewRecorder()
            handler.ServeHTTP(rsp, req)

            require.Equal(t, test.wantStatus, rsp.Code)

            require.Equal(t, test.wantContentType, rsp.Header().Get("Content-Type"))

            if test.wantFirstResponseBodyJSON != nil {
                wantJSON, err := json.Marshal(test.wantSecondResponseBodyJSON)
                require.NoError(t, err)
                require.JSONEq(t, string(wantJSON), rsp.Body.String())
            }

            if test.wantBodyString != "" {
                require.Equal(t, test.wantBodyString, rsp.Body.String())
            }
        })
    }
}
@@ -24,6 +24,7 @@ const (
    TokenEndpointPath    = "/oauth2/token" //nolint:gosec // ignore lint warning that this is a credential
    CallbackEndpointPath = "/callback"
    JWKSEndpointPath     = "/jwks.json"
+   PinnipedIDPsPath     = "/pinniped_identity_providers"
)

const (
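In other words, each FederationDomain issuer now serves the IDP discovery document at `<issuer>/pinniped_identity_providers`; for a hypothetical issuer `https://supervisor.example.com/issuer`, that is `https://supervisor.example.com/issuer/pinniped_identity_providers`, as the manager wiring below shows.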
@@ -16,6 +16,7 @@ import (
    "go.pinniped.dev/internal/oidc/csrftoken"
    "go.pinniped.dev/internal/oidc/discovery"
    "go.pinniped.dev/internal/oidc/dynamiccodec"
+   "go.pinniped.dev/internal/oidc/idpdiscovery"
    "go.pinniped.dev/internal/oidc/jwks"
    "go.pinniped.dev/internal/oidc/provider"
    "go.pinniped.dev/internal/oidc/token"

@@ -106,6 +107,8 @@ func (m *Manager) SetProviders(federationDomains ...*provider.FederationDomainIs
    m.providerHandlers[(issuerHostWithPath + oidc.JWKSEndpointPath)] = jwks.NewHandler(issuer, m.dynamicJWKSProvider)

+   m.providerHandlers[(issuerHostWithPath + oidc.PinnipedIDPsPath)] = idpdiscovery.NewHandler(m.upstreamIDPs)

    m.providerHandlers[(issuerHostWithPath + oidc.AuthorizationEndpointPath)] = auth.NewHandler(
        issuer,
        m.upstreamIDPs,
@@ -7,6 +7,7 @@ import (
    "context"
    "crypto/ecdsa"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/http/httptest"

@@ -52,6 +53,8 @@ func TestManager(t *testing.T) {
    issuer2DifferentCaseHostname = "https://exAmPlE.Com/some/path/more/deeply/nested/path"
    issuer2KeyID                 = "issuer2-key"
    upstreamIDPAuthorizationURL  = "https://test-upstream.com/auth"
+   upstreamIDPName              = "test-idp"
+   upstreamIDPType              = "oidc"
    downstreamClientID           = "pinniped-cli"
    downstreamRedirectURL        = "http://127.0.0.1:12345/callback"

@@ -68,7 +71,7 @@ func TestManager(t *testing.T) {
        return req
    }

-   requireDiscoveryRequestToBeHandled := func(requestIssuer, requestURLSuffix, expectedIssuerInResponse string) {
+   requireDiscoveryRequestToBeHandled := func(requestIssuer, requestURLSuffix, expectedIssuer string) {
        recorder := httptest.NewRecorder()

        subject.ServeHTTP(recorder, newGetRequest(requestIssuer+oidc.WellKnownEndpointPath+requestURLSuffix))

@@ -82,7 +85,25 @@ func TestManager(t *testing.T) {
        parsedDiscoveryResult := discovery.Metadata{}
        err = json.Unmarshal(responseBody, &parsedDiscoveryResult)
        r.NoError(err)
-       r.Equal(expectedIssuerInResponse, parsedDiscoveryResult.Issuer)
+       r.Equal(expectedIssuer, parsedDiscoveryResult.Issuer)
+       r.Equal(parsedDiscoveryResult.PinnipedIDPsEndpoint, expectedIssuer+oidc.PinnipedIDPsPath)
    }

+   requirePinnipedIDPsDiscoveryRequestToBeHandled := func(requestIssuer, requestURLSuffix, expectedIDPName, expectedIDPType string) {
+       recorder := httptest.NewRecorder()
+
+       subject.ServeHTTP(recorder, newGetRequest(requestIssuer+oidc.PinnipedIDPsPath+requestURLSuffix))
+
+       r.False(fallbackHandlerWasCalled)
+
+       // Minimal check to ensure that the right IDP discovery endpoint was called
+       r.Equal(http.StatusOK, recorder.Code)
+       responseBody, err := ioutil.ReadAll(recorder.Body)
+       r.NoError(err)
+       r.Equal(
+           fmt.Sprintf(`{"pinniped_identity_providers":[{"name":"%s","type":"%s"}]}`+"\n", expectedIDPName, expectedIDPType),
+           string(responseBody),
+       )
+   }

    requireAuthorizationRequestToBeHandled := func(requestIssuer, requestURLSuffix, expectedRedirectLocationPrefix string) (string, string) {

@@ -222,7 +243,7 @@ func TestManager(t *testing.T) {
    parsedUpstreamIDPAuthorizationURL, err := url.Parse(upstreamIDPAuthorizationURL)
    r.NoError(err)
    idpLister := oidctestutil.NewUpstreamIDPListerBuilder().WithOIDC(&oidctestutil.TestUpstreamOIDCIdentityProvider{
-       Name:             "test-idp",
+       Name:             upstreamIDPName,
        ClientID:         "test-client-id",
        AuthorizationURL: *parsedUpstreamIDPAuthorizationURL,
        Scopes:           []string{"test-scope"},

@@ -293,6 +314,15 @@ func TestManager(t *testing.T) {
    requireDiscoveryRequestToBeHandled(issuer2DifferentCaseHostname, "", issuer2)
    requireDiscoveryRequestToBeHandled(issuer2DifferentCaseHostname, "?some=query", issuer2)

+   requirePinnipedIDPsDiscoveryRequestToBeHandled(issuer1, "", upstreamIDPName, upstreamIDPType)
+   requirePinnipedIDPsDiscoveryRequestToBeHandled(issuer2, "", upstreamIDPName, upstreamIDPType)
+   requirePinnipedIDPsDiscoveryRequestToBeHandled(issuer2, "?some=query", upstreamIDPName, upstreamIDPType)
+
+   // Hostnames are case-insensitive, so test that we can handle that.
+   requirePinnipedIDPsDiscoveryRequestToBeHandled(issuer1DifferentCaseHostname, "", upstreamIDPName, upstreamIDPType)
+   requirePinnipedIDPsDiscoveryRequestToBeHandled(issuer2DifferentCaseHostname, "", upstreamIDPName, upstreamIDPType)
+   requirePinnipedIDPsDiscoveryRequestToBeHandled(issuer2DifferentCaseHostname, "?some=query", upstreamIDPName, upstreamIDPType)

    issuer1JWKS := requireJWKSRequestToBeHandled(issuer1, "", issuer1KeyID)
    issuer2JWKS := requireJWKSRequestToBeHandled(issuer2, "", issuer2KeyID)
    requireJWKSRequestToBeHandled(issuer2, "?some=query", issuer2KeyID)
@@ -1,9 +1,16 @@
-// Copyright 2020 the Pinniped contributors. All Rights Reserved.
+// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package testutil

-import "io"
+import (
+   "io"
+   "io/ioutil"
+   "os"
+   "testing"
+
+   "github.com/stretchr/testify/require"
+)

// ErrorWriter implements io.Writer by returning a fixed error.
type ErrorWriter struct {

@@ -13,3 +20,19 @@ type ErrorWriter struct {
var _ io.Writer = &ErrorWriter{}

func (e *ErrorWriter) Write([]byte) (int, error) { return 0, e.ReturnError }

func WriteStringToTempFile(t *testing.T, filename string, fileBody string) *os.File {
    t.Helper()
    f, err := ioutil.TempFile("", filename)
    require.NoError(t, err)
    deferMe := func() {
        err := os.Remove(f.Name())
        require.NoError(t, err)
    }
    t.Cleanup(deferMe)
    _, err = f.WriteString(fileBody)
    require.NoError(t, err)
    err = f.Close()
    require.NoError(t, err)
    return f
}
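A hypothetical usage of the new helper (not from the diff): `caFile := testutil.WriteStringToTempFile(t, "test-ca-*.pem", string(caPEM))` gives a test a real file on disk whose removal is registered via `t.Cleanup`; the `*` in the name is expanded by `ioutil.TempFile` into a random suffix.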
@@ -9,6 +9,7 @@ import (
    "encoding/base64"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net/url"
    "os"

@@ -21,6 +22,7 @@ import (
    "time"

    coreosoidc "github.com/coreos/go-oidc/v3/oidc"
    "github.com/creack/pty"
    "github.com/stretchr/testify/require"
    authorizationv1 "k8s.io/api/authorization/v1"
    corev1 "k8s.io/api/core/v1"

@@ -30,6 +32,7 @@ import (
    configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1"
    idpv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/idp/v1alpha1"
    "go.pinniped.dev/internal/certauthority"
    "go.pinniped.dev/internal/here"
    "go.pinniped.dev/internal/oidc"
    "go.pinniped.dev/internal/testutil"
    "go.pinniped.dev/pkg/oidcclient"
@@ -92,24 +95,6 @@ func TestE2EFullIntegration(t *testing.T) {
        configv1alpha1.SuccessFederationDomainStatusCondition,
    )

-   // Create upstream OIDC provider and wait for it to become ready.
-   library.CreateTestOIDCIdentityProvider(t, idpv1alpha1.OIDCIdentityProviderSpec{
-       Issuer: env.SupervisorUpstreamOIDC.Issuer,
-       TLS: &idpv1alpha1.TLSSpec{
-           CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamOIDC.CABundle)),
-       },
-       AuthorizationConfig: idpv1alpha1.OIDCAuthorizationConfig{
-           AdditionalScopes: env.SupervisorUpstreamOIDC.AdditionalScopes,
-       },
-       Claims: idpv1alpha1.OIDCClaims{
-           Username: env.SupervisorUpstreamOIDC.UsernameClaim,
-           Groups:   env.SupervisorUpstreamOIDC.GroupsClaim,
-       },
-       Client: idpv1alpha1.OIDCClient{
-           SecretName: library.CreateClientCredsSecret(t, env.SupervisorUpstreamOIDC.ClientID, env.SupervisorUpstreamOIDC.ClientSecret).Name,
-       },
-   }, idpv1alpha1.PhaseReady)
-
    // Create a JWTAuthenticator that will validate the tokens from the downstream issuer.
    clusterAudience := "test-cluster-" + library.RandHex(t, 8)
    authenticator := library.CreateTestJWTAuthenticator(ctx, t, authv1alpha.JWTAuthenticatorSpec{
@@ -118,158 +103,314 @@ func TestE2EFullIntegration(t *testing.T) {
        TLS:      &authv1alpha.TLSSpec{CertificateAuthorityData: testCABundleBase64},
    })

-   // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster.
-   library.CreateTestClusterRoleBinding(t,
-       rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: env.SupervisorUpstreamOIDC.Username},
-       rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
-   )
-   library.WaitForUserToHaveAccess(t, env.SupervisorUpstreamOIDC.Username, []string{}, &authorizationv1.ResourceAttributes{
-       Verb:     "get",
-       Group:    "",
-       Version:  "v1",
-       Resource: "namespaces",
+   // Add an OIDC upstream IDP and try using it to authenticate during kubectl commands.
+   t.Run("with Supervisor OIDC upstream IDP", func(t *testing.T) {
+       expectedUsername := env.SupervisorUpstreamOIDC.Username
+       expectedGroups := env.SupervisorUpstreamOIDC.ExpectedGroups
+
+       // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster.
+       library.CreateTestClusterRoleBinding(t,
+           rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: expectedUsername},
+           rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
+       )
+       library.WaitForUserToHaveAccess(t, expectedUsername, []string{}, &authorizationv1.ResourceAttributes{
+           Verb:     "get",
+           Group:    "",
+           Version:  "v1",
+           Resource: "namespaces",
+       })

        // Create upstream OIDC provider and wait for it to become ready.
        library.CreateTestOIDCIdentityProvider(t, idpv1alpha1.OIDCIdentityProviderSpec{
            Issuer: env.SupervisorUpstreamOIDC.Issuer,
            TLS: &idpv1alpha1.TLSSpec{
                CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamOIDC.CABundle)),
            },
            AuthorizationConfig: idpv1alpha1.OIDCAuthorizationConfig{
                AdditionalScopes: env.SupervisorUpstreamOIDC.AdditionalScopes,
            },
            Claims: idpv1alpha1.OIDCClaims{
                Username: env.SupervisorUpstreamOIDC.UsernameClaim,
                Groups:   env.SupervisorUpstreamOIDC.GroupsClaim,
            },
            Client: idpv1alpha1.OIDCClient{
                SecretName: library.CreateClientCredsSecret(t, env.SupervisorUpstreamOIDC.ClientID, env.SupervisorUpstreamOIDC.ClientSecret).Name,
            },
        }, idpv1alpha1.PhaseReady)

        // Use a specific session cache for this test.
        sessionCachePath := tempDir + "/oidc-test-sessions.yaml"

        kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{
            "get", "kubeconfig",
            "--concierge-api-group-suffix", env.APIGroupSuffix,
            "--concierge-authenticator-type", "jwt",
            "--concierge-authenticator-name", authenticator.Name,
            "--oidc-skip-browser",
            "--oidc-ca-bundle", testCABundlePath,
            "--oidc-session-cache", sessionCachePath,
        })

// Run "kubectl get namespaces" which should trigger a browser login via the plugin.
|
||||
start := time.Now()
|
||||
kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
|
||||
kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
|
||||
stderrPipe, err := kubectlCmd.StderrPipe()
|
||||
require.NoError(t, err)
|
||||
stdoutPipe, err := kubectlCmd.StdoutPipe()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("starting kubectl subprocess")
|
||||
require.NoError(t, kubectlCmd.Start())
|
||||
t.Cleanup(func() {
|
||||
err := kubectlCmd.Wait()
|
||||
t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode())
|
||||
stdout, stdoutErr := ioutil.ReadAll(stdoutPipe)
|
||||
if stdoutErr != nil {
|
||||
stdout = []byte("<error reading stdout: " + stdoutErr.Error() + ">")
|
||||
}
|
||||
stderr, stderrErr := ioutil.ReadAll(stderrPipe)
|
||||
if stderrErr != nil {
|
||||
stderr = []byte("<error reading stderr: " + stderrErr.Error() + ">")
|
||||
}
|
||||
require.NoErrorf(t, err, "kubectl process did not exit cleanly, stdout/stderr: %q/%q", string(stdout), string(stderr))
|
||||
})
|
||||
|
||||
// Start a background goroutine to read stderr from the CLI and parse out the login URL.
|
||||
loginURLChan := make(chan string)
|
||||
spawnTestGoroutine(t, func() (err error) {
|
||||
defer func() {
|
||||
closeErr := stderrPipe.Close()
|
||||
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
err = fmt.Errorf("stderr stream closed with error: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderrPipe))
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not read login URL line from stderr: %w", err)
|
||||
}
|
||||
const prompt = "Please log in: "
|
||||
if !strings.HasPrefix(line, prompt) {
|
||||
return fmt.Errorf("expected %q to have prefix %q", line, prompt)
|
||||
}
|
||||
loginURLChan <- strings.TrimPrefix(line, prompt)
|
||||
return readAndExpectEmpty(reader)
|
||||
})
|
||||
|
||||
// Start a background goroutine to read stdout from kubectl and return the result as a string.
|
||||
kubectlOutputChan := make(chan string)
|
||||
spawnTestGoroutine(t, func() (err error) {
|
||||
defer func() {
|
||||
closeErr := stdoutPipe.Close()
|
||||
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
err = fmt.Errorf("stdout stream closed with error: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
output, err := ioutil.ReadAll(stdoutPipe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.Logf("kubectl output:\n%s\n", output)
|
||||
kubectlOutputChan <- string(output)
|
||||
return nil
|
||||
})
|
||||
|
||||
// Wait for the CLI to print out the login URL and open the browser to it.
|
||||
t.Logf("waiting for CLI to output login URL")
|
||||
var loginURL string
|
||||
select {
|
||||
case <-time.After(1 * time.Minute):
|
||||
require.Fail(t, "timed out waiting for login URL")
|
||||
case loginURL = <-loginURLChan:
|
||||
}
|
||||
t.Logf("navigating to login page")
|
||||
require.NoError(t, page.Navigate(loginURL))
|
||||
|
||||
// Expect to be redirected to the upstream provider and log in.
|
||||
browsertest.LoginToUpstream(t, page, env.SupervisorUpstreamOIDC)
|
||||
|
||||
// Expect to be redirected to the localhost callback.
|
||||
t.Logf("waiting for redirect to callback")
|
||||
browsertest.WaitForURL(t, page, regexp.MustCompile(`\Ahttp://127\.0\.0\.1:[0-9]+/callback\?.+\z`))
|
||||
|
||||
// Wait for the "pre" element that gets rendered for a `text/plain` page, and
|
||||
// assert that it contains the success message.
|
||||
t.Logf("verifying success page")
|
||||
browsertest.WaitForVisibleElements(t, page, "pre")
|
||||
msg, err := page.First("pre").Text()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "you have been logged in and may now close this tab", msg)
|
||||
|
||||
// Expect the CLI to output a list of namespaces in JSON format.
|
||||
t.Logf("waiting for kubectl to output namespace list JSON")
|
||||
var kubectlOutput string
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
require.Fail(t, "timed out waiting for kubectl output")
|
||||
case kubectlOutput = <-kubectlOutputChan:
|
||||
}
|
||||
require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned, got %q", kubectlOutput)
|
||||
t.Logf("first kubectl command took %s", time.Since(start).String())
|
||||
|
||||
requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
|
||||
downstream,
|
||||
kubeconfigPath,
|
||||
sessionCachePath,
|
||||
pinnipedExe,
|
||||
expectedUsername,
|
||||
expectedGroups,
|
||||
)
|
||||
})
|
||||
|
||||
-   // Use a specific session cache for this test.
-   sessionCachePath := tempDir + "/sessions.yaml"
+   // Add an LDAP upstream IDP and try using it to authenticate during kubectl commands.
+   t.Run("with Supervisor LDAP upstream IDP", func(t *testing.T) {
+       expectedUsername := env.SupervisorUpstreamLDAP.TestUserMailAttributeValue
+       expectedGroups := []string{} // LDAP groups are not implemented yet

-   // Run "pinniped get kubeconfig" to get a kubeconfig YAML.
-   kubeconfigYAML, stderr := runPinnipedCLI(t, nil, pinnipedExe, "get", "kubeconfig",
-       "--concierge-api-group-suffix", env.APIGroupSuffix,
-       "--concierge-authenticator-type", "jwt",
-       "--concierge-authenticator-name", authenticator.Name,
-       "--oidc-skip-browser",
-       "--oidc-ca-bundle", testCABundlePath,
-       "--oidc-session-cache", sessionCachePath,
-   )
-   t.Logf("stderr output from 'pinniped get kubeconfig':\n%s\n\n", stderr)
-   t.Logf("test kubeconfig:\n%s\n\n", kubeconfigYAML)
+       // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster.
+       library.CreateTestClusterRoleBinding(t,
+           rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: expectedUsername},
+           rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
+       )
+       library.WaitForUserToHaveAccess(t, expectedUsername, []string{}, &authorizationv1.ResourceAttributes{
+           Verb:     "get",
+           Group:    "",
+           Version:  "v1",
+           Resource: "namespaces",
+       })

-   restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML)
-   require.NotNil(t, restConfig.ExecProvider)
-   require.Equal(t, []string{"login", "oidc"}, restConfig.ExecProvider.Args[:2])
-   kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml")
-   require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))
+       // Put the bind service account's info into a Secret.
+       bindSecret := library.CreateTestSecret(t, env.SupervisorNamespace, "ldap-service-account", corev1.SecretTypeBasicAuth,
+           map[string]string{
+               corev1.BasicAuthUsernameKey: env.SupervisorUpstreamLDAP.BindUsername,
+               corev1.BasicAuthPasswordKey: env.SupervisorUpstreamLDAP.BindPassword,
+           },
+       )

-   // Run "kubectl get namespaces" which should trigger a browser login via the plugin.
-   start := time.Now()
+       // Create upstream LDAP provider and wait for it to become ready.
+       library.CreateTestLDAPIdentityProvider(t, idpv1alpha1.LDAPIdentityProviderSpec{
+           Host: env.SupervisorUpstreamLDAP.Host,
+           TLS: &idpv1alpha1.TLSSpec{
+               CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(env.SupervisorUpstreamLDAP.CABundle)),
+           },
+           Bind: idpv1alpha1.LDAPIdentityProviderBind{
+               SecretName: bindSecret.Name,
+           },
+           UserSearch: idpv1alpha1.LDAPIdentityProviderUserSearch{
+               Base:   env.SupervisorUpstreamLDAP.UserSearchBase,
+               Filter: "",
+               Attributes: idpv1alpha1.LDAPIdentityProviderUserSearchAttributes{
+                   Username: env.SupervisorUpstreamLDAP.TestUserMailAttributeName,
+                   UID:      env.SupervisorUpstreamLDAP.TestUserUniqueIDAttributeName,
+               },
+           },
+       }, idpv1alpha1.LDAPPhaseReady)
+
+       // Use a specific session cache for this test.
+       sessionCachePath := tempDir + "/ldap-test-sessions.yaml"
+
+       kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{
+           "get", "kubeconfig",
+           "--concierge-api-group-suffix", env.APIGroupSuffix,
+           "--concierge-authenticator-type", "jwt",
+           "--concierge-authenticator-name", authenticator.Name,
+           "--oidc-session-cache", sessionCachePath,
+       })

// Run "kubectl get namespaces" which should trigger an LDAP-style login CLI prompt via the plugin.
|
||||
start := time.Now()
|
||||
kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
|
||||
kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
|
||||
ptyFile, err := pty.Start(kubectlCmd)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the subprocess to print the username prompt, then type the user's username.
|
||||
readFromFileUntilStringIsSeen(t, ptyFile, "Username: ")
|
||||
_, err = ptyFile.WriteString(expectedUsername + "\n")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the subprocess to print the password prompt, then type the user's password.
|
||||
readFromFileUntilStringIsSeen(t, ptyFile, "Password: ")
|
||||
_, err = ptyFile.WriteString(env.SupervisorUpstreamLDAP.TestUserPassword + "\n")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Read all of the remaining output from the subprocess until EOF.
|
||||
remainingOutput, err := ioutil.ReadAll(ptyFile)
|
||||
require.NoError(t, err)
|
||||
require.Greaterf(t, len(strings.Split(string(remainingOutput), "\n")), 2, "expected some namespaces to be returned, got %q", string(remainingOutput))
|
||||
t.Logf("first kubectl command took %s", time.Since(start).String())
|
||||
|
||||
requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
|
||||
downstream,
|
||||
kubeconfigPath,
|
||||
sessionCachePath,
|
||||
pinnipedExe,
|
||||
expectedUsername,
|
||||
expectedGroups,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
func readFromFileUntilStringIsSeen(t *testing.T, f *os.File, until string) string {
    readFromFile := ""

    library.RequireEventuallyWithoutError(t, func() (bool, error) {
        someOutput, foundEOF := readAvailableOutput(t, f)
        readFromFile += someOutput
        if strings.Contains(readFromFile, until) {
            return true, nil // found it! finished.
        }
        if foundEOF {
            return false, fmt.Errorf("reached EOF of subcommand's output without seeing expected string %q", until)
        }
        return false, nil // keep waiting and reading
    }, 1*time.Minute, 1*time.Second)

    return readFromFile
}

func readAvailableOutput(t *testing.T, r io.Reader) (string, bool) {
    buf := make([]byte, 1024)
    n, err := r.Read(buf)
    if err != nil {
        if err == io.EOF {
            return string(buf[:n]), true
        } else {
            require.NoError(t, err)
        }
    }
    return string(buf[:n]), false
}

func requireUserCanUseKubectlWithoutAuthenticatingAgain(
    ctx context.Context,
    t *testing.T,
    env *library.TestEnv,
    downstream *configv1alpha1.FederationDomain,
    kubeconfigPath string,
    sessionCachePath string,
    pinnipedExe string,
    expectedUsername string,
    expectedGroups []string,
) {
    // Run kubectl, which should work without any prompting for authentication.
    kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
    kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
    stderrPipe, err := kubectlCmd.StderrPipe()
    require.NoError(t, err)
    stdoutPipe, err := kubectlCmd.StdoutPipe()
    require.NoError(t, err)

    t.Logf("starting kubectl subprocess")
    require.NoError(t, kubectlCmd.Start())
    t.Cleanup(func() {
        err := kubectlCmd.Wait()
        t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode())
        stdout, stdoutErr := ioutil.ReadAll(stdoutPipe)
        if stdoutErr != nil {
            stdout = []byte("<error reading stdout: " + stdoutErr.Error() + ">")
        }
        stderr, stderrErr := ioutil.ReadAll(stderrPipe)
        if stderrErr != nil {
            stderr = []byte("<error reading stderr: " + stderrErr.Error() + ">")
        }
        require.NoErrorf(t, err, "kubectl process did not exit cleanly, stdout/stderr: %q/%q", string(stdout), string(stderr))
    })

    // Start a background goroutine to read stderr from the CLI and parse out the login URL.
    loginURLChan := make(chan string)
    spawnTestGoroutine(t, func() (err error) {
        defer func() {
            closeErr := stderrPipe.Close()
            if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
                return
            }
            if err == nil {
                err = fmt.Errorf("stderr stream closed with error: %w", closeErr)
            }
        }()

        reader := bufio.NewReader(library.NewLoggerReader(t, "stderr", stderrPipe))
        line, err := reader.ReadString('\n')
        if err != nil {
            return fmt.Errorf("could not read login URL line from stderr: %w", err)
        }
        const prompt = "Please log in: "
        if !strings.HasPrefix(line, prompt) {
            return fmt.Errorf("expected %q to have prefix %q", line, prompt)
        }
        loginURLChan <- strings.TrimPrefix(line, prompt)
        return readAndExpectEmpty(reader)
    })

    // Start a background goroutine to read stdout from kubectl and return the result as a string.
    kubectlOutputChan := make(chan string)
    spawnTestGoroutine(t, func() (err error) {
        defer func() {
            closeErr := stdoutPipe.Close()
            if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
                return
            }
            if err == nil {
                err = fmt.Errorf("stdout stream closed with error: %w", closeErr)
            }
        }()
        output, err := ioutil.ReadAll(stdoutPipe)
        if err != nil {
            return err
        }
        t.Logf("kubectl output:\n%s\n", output)
        kubectlOutputChan <- string(output)
        return nil
    })

    // Wait for the CLI to print out the login URL and open the browser to it.
    t.Logf("waiting for CLI to output login URL")
    var loginURL string
    select {
    case <-time.After(1 * time.Minute):
        require.Fail(t, "timed out waiting for login URL")
    case loginURL = <-loginURLChan:
    }
    t.Logf("navigating to login page")
    require.NoError(t, page.Navigate(loginURL))

    // Expect to be redirected to the upstream provider and log in.
    browsertest.LoginToUpstream(t, page, env.SupervisorUpstreamOIDC)

    // Expect to be redirected to the localhost callback.
    t.Logf("waiting for redirect to callback")
    browsertest.WaitForURL(t, page, regexp.MustCompile(`\Ahttp://127\.0\.0\.1:[0-9]+/callback\?.+\z`))

    // Wait for the "pre" element that gets rendered for a `text/plain` page, and
    // assert that it contains the success message.
    t.Logf("verifying success page")
    browsertest.WaitForVisibleElements(t, page, "pre")
    msg, err := page.First("pre").Text()
    require.NoError(t, err)
    require.Equal(t, "you have been logged in and may now close this tab", msg)

    // Expect the CLI to output a list of namespaces in JSON format.
    t.Logf("waiting for kubectl to output namespace list JSON")
    var kubectlOutput string
    select {
    case <-time.After(10 * time.Second):
        require.Fail(t, "timed out waiting for kubectl output")
    case kubectlOutput = <-kubectlOutputChan:
    }
    require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned, got %q", kubectlOutput)
    t.Logf("first kubectl command took %s", time.Since(start).String())

    // Run kubectl again, which should work with no browser interaction.
    kubectlCmd2 := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
    kubectlCmd2.Env = append(os.Environ(), env.ProxyEnv()...)
-   start = time.Now()
-   kubectlOutput2, err := kubectlCmd2.CombinedOutput()
+   startTime := time.Now()
+   kubectlOutput2, err := kubectlCmd.CombinedOutput()
    require.NoError(t, err)
    require.Greaterf(t, len(bytes.Split(kubectlOutput2, []byte("\n"))), 2, "expected some namespaces to be returned again")
-   t.Logf("second kubectl command took %s", time.Since(start).String())
+   t.Logf("second kubectl command took %s", time.Since(startTime).String())

-   // probe our cache for the current ID token as a proxy for a whoami API
+   // Probe our cache for the current ID token as a proxy for a whoami API.
    cache := filesession.New(sessionCachePath, filesession.WithErrorReporter(func(err error) {
        require.NoError(t, err)
    }))
@@ -285,49 +426,52 @@ func TestE2EFullIntegration(t *testing.T) {
    require.NotNil(t, token)

    idTokenClaims := token.IDToken.Claims
-   require.Equal(t, env.SupervisorUpstreamOIDC.Username, idTokenClaims[oidc.DownstreamUsernameClaim])
+   require.Equal(t, expectedUsername, idTokenClaims[oidc.DownstreamUsernameClaim])

    // The groups claim in the file ends up as an []interface{}, so adjust our expectation to match.
-   expectedGroups := make([]interface{}, 0, len(env.SupervisorUpstreamOIDC.ExpectedGroups))
-   for _, g := range env.SupervisorUpstreamOIDC.ExpectedGroups {
-       expectedGroups = append(expectedGroups, g)
+   expectedGroupsAsEmptyInterfaces := make([]interface{}, 0, len(expectedGroups))
+   for _, g := range expectedGroups {
+       expectedGroupsAsEmptyInterfaces = append(expectedGroupsAsEmptyInterfaces, g)
    }
-   require.Equal(t, expectedGroups, idTokenClaims[oidc.DownstreamGroupsClaim])
+   require.Equal(t, expectedGroupsAsEmptyInterfaces, idTokenClaims[oidc.DownstreamGroupsClaim])

-   // confirm we are the right user according to Kube
    expectedYAMLGroups := func() string {
        var b strings.Builder
-       for _, g := range env.SupervisorUpstreamOIDC.ExpectedGroups {
+       for _, g := range expectedGroups {
            b.WriteString("\n")
            b.WriteString(` - `)
            b.WriteString(g)
        }
        return b.String()
    }()

+   // Confirm we are the right user according to Kube by calling the whoami API.
    kubectlCmd3 := exec.CommandContext(ctx, "kubectl", "create", "-f", "-", "-o", "yaml", "--kubeconfig", kubeconfigPath)
    kubectlCmd3.Env = append(os.Environ(), env.ProxyEnv()...)
-   kubectlCmd3.Stdin = strings.NewReader(`
-apiVersion: identity.concierge.` + env.APIGroupSuffix + `/v1alpha1
-kind: WhoAmIRequest
-`)
+   kubectlCmd3.Stdin = strings.NewReader(here.Docf(`
+       apiVersion: identity.concierge.%s/v1alpha1
+       kind: WhoAmIRequest
+   `, env.APIGroupSuffix))

    kubectlOutput3, err := kubectlCmd3.CombinedOutput()
    require.NoError(t, err)
-   require.Equal(t,
-       `apiVersion: identity.concierge.`+env.APIGroupSuffix+`/v1alpha1
-kind: WhoAmIRequest
-metadata:
-  creationTimestamp: null
-spec: {}
-status:
-  kubernetesUserInfo:
-    user:
-      groups:`+expectedYAMLGroups+`
-      - system:authenticated
-      username: `+env.SupervisorUpstreamOIDC.Username+`
-`,
+   require.Equal(t, here.Docf(`
+       apiVersion: identity.concierge.%s/v1alpha1
+       kind: WhoAmIRequest
+       metadata:
+         creationTimestamp: null
+       spec: {}
+       status:
+         kubernetesUserInfo:
+           user:
+             groups:%s
+             - system:authenticated
+             username: %s
+   `, env.APIGroupSuffix, expectedYAMLGroups, expectedUsername),
        string(kubectlOutput3))

-   expectedGroupsPlusAuthenticated := append([]string{}, env.SupervisorUpstreamOIDC.ExpectedGroups...)
+   expectedGroupsPlusAuthenticated := append([]string{}, expectedGroups...)
    expectedGroupsPlusAuthenticated = append(expectedGroupsPlusAuthenticated, "system:authenticated")
    // Validate that `pinniped whoami` returns the correct identity.
    assertWhoami(

@@ -336,7 +480,24 @@ status:
        true,
        pinnipedExe,
        kubeconfigPath,
-       env.SupervisorUpstreamOIDC.Username,
+       expectedUsername,
        expectedGroupsPlusAuthenticated,
    )
}

func runPinnipedGetKubeconfig(t *testing.T, env *library.TestEnv, pinnipedExe string, tempDir string, pinnipedCLICommand []string) string {
    // Run "pinniped get kubeconfig" to get a kubeconfig YAML.
    envVarsWithProxy := append(os.Environ(), env.ProxyEnv()...)
    kubeconfigYAML, stderr := runPinnipedCLI(t, envVarsWithProxy, pinnipedExe, pinnipedCLICommand...)
    t.Logf("stderr output from 'pinniped get kubeconfig':\n%s\n\n", stderr)
    t.Logf("test kubeconfig:\n%s\n\n", kubeconfigYAML)

    restConfig := library.NewRestConfigFromKubeconfig(t, kubeconfigYAML)
    require.NotNil(t, restConfig.ExecProvider)
    require.Equal(t, []string{"login", "oidc"}, restConfig.ExecProvider.Args[:2])

    kubeconfigPath := filepath.Join(tempDir, "kubeconfig.yaml")
    require.NoError(t, ioutil.WriteFile(kubeconfigPath, []byte(kubeconfigYAML), 0600))

    return kubeconfigPath
}

@@ -483,7 +483,8 @@ func requireWellKnownEndpointIsWorking(t *testing.T, supervisorScheme, superviso
        "response_types_supported": ["code"],
        "claims_supported": ["groups"],
        "subject_types_supported": ["public"],
-       "id_token_signing_alg_values_supported": ["ES256"]
+       "id_token_signing_alg_values_supported": ["ES256"],
+       "pinniped_idps": []
    }`)
    expectedJSON := fmt.Sprintf(expectedResultTemplate, issuerName, issuerName, issuerName, issuerName)
|
Loading…
Reference in New Issue
Block a user