Merge branch 'main' of github.com:vmware-tanzu/pinniped into active-directory-identity-provider
commit 474266f918
@@ -1,14 +1,23 @@
-./.*
-# Allow .git to get copied into the container so we can use it in hack/get-ldflags.sh during build.
-!/.git
-./*.md
-./*.yaml
-./apis
-./deploy
-./Dockerfile
-./generated/1.1*
-./internal/mocks
-./LICENSE
-./site/
-./test
-**/*_test.go
+# This is effectively a copy of the .gitignore file.
+# The whole git repo, including the .git directory, should get copied into the Docker build context,
+# to enable the use of hack/get-ldflags.sh inside the Dockerfile.
+# When you change the .gitignore file, please consider also changing this file.
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# GoLand
+.idea
+
+# MacOS Desktop Services Store
+.DS_Store

.gitignore (vendored): 6 changed lines

@@ -1,3 +1,6 @@
+# When you change this file, please consider also changing the .dockerignore file.
+# See comments at the top of .dockerignore for more information.
+
 # Binaries for programs and plugins
 *.exe
 *.exe~
@@ -11,9 +14,6 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out

-# Dependency directories (remove the comment below to include it)
-# vendor/
-
 # GoLand
 .idea


@@ -3,7 +3,7 @@
 # Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0

-FROM golang:1.16.5 as build-env
+FROM golang:1.16.6 as build-env

 WORKDIR /work
 COPY . .

go.mod: 14 changed lines

@@ -34,14 +34,14 @@ require (
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 golang.org/x/term v0.0.0-20210503060354-a79de5458b56
 gopkg.in/square/go-jose.v2 v2.6.0
-k8s.io/api v0.21.2
-k8s.io/apimachinery v0.21.2
-k8s.io/apiserver v0.21.2
-k8s.io/client-go v0.21.2
-k8s.io/component-base v0.21.2
+k8s.io/api v0.21.3
+k8s.io/apimachinery v0.21.3
+k8s.io/apiserver v0.21.3
+k8s.io/client-go v0.21.3
+k8s.io/component-base v0.21.3
 k8s.io/gengo v0.0.0-20210203185629-de9496dff47b
-k8s.io/klog/v2 v2.9.0
-k8s.io/kube-aggregator v0.21.2
+k8s.io/klog/v2 v2.10.0
+k8s.io/kube-aggregator v0.21.3
 k8s.io/utils v0.0.0-20210521133846-da695404a2bc
 sigs.k8s.io/yaml v1.2.0
 )

go.sum: 34 changed lines

@@ -1779,17 +1779,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
-k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y=
-k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
-k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc=
-k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
-k8s.io/apiserver v0.21.2 h1:vfGLD8biFXHzbcIEXyW3652lDwkV8tZEFJAaS2iuJlw=
-k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw=
-k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0=
-k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
-k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U=
-k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4=
-k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc=
+k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ=
+k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
+k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII=
+k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
+k8s.io/apiserver v0.21.3 h1:QxAgE1ZPQG5cPlHScHTnLxP9H/kU3zjH1Vnd8G+n5OI=
+k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU=
+k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg=
+k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
+k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
+k8s.io/component-base v0.21.3 h1:4WuuXY3Npa+iFfi2aDRiOz+anhNvRfye0859ZgfC5Og=
+k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/gengo v0.0.0-20210203185629-de9496dff47b h1:bAU8IlrMA6KbP0dIg/sVSJn95pDCUHDZx0DpTGrf2v4=
@@ -1797,10 +1797,10 @@ k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
-k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/kube-aggregator v0.21.2 h1:G7pZL3ajCxU+DUmePW/N+UTkBFtWWx0CmXgVPFQjEtE=
-k8s.io/kube-aggregator v0.21.2/go.mod h1:7NgmUXJziySAJ7GxMRBBwcJay7MLUoxms31fw/ICpYk=
+k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE=
+k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/kube-aggregator v0.21.3 h1:jS/6ZZGPCkBQhzGGusAd2St+KP/FtQBCXOCOo3H7/U4=
+k8s.io/kube-aggregator v0.21.3/go.mod h1:9OIUuR5KIsNZYP/Xsh4HBsaqbS7ICJpRz3XSKtKajRc=
 k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
 k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -1818,8 +1818,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19 h1:0jaDAAxtqIrrqas4vtTqxct4xS5kHfRNycTRLTyJmVM=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

@@ -8,9 +8,11 @@ import (
 "crypto/tls"
 "crypto/x509"
 "encoding/base64"
+"encoding/json"
 "encoding/pem"
 "fmt"
 "net"
+"sort"
 "strings"
 "time"

@@ -53,6 +55,7 @@ const (
 caCrtKey = "ca.crt"
 caKeyKey = "ca.key"
 appLabelKey = "app"
+annotationKeysKey = "credentialissuer.pinniped.dev/annotation-keys"
 )

 type impersonatorConfigController struct {
@@ -521,34 +524,93 @@ func (c *impersonatorConfigController) ensureClusterIPServiceIsStopped(ctx conte
 return utilerrors.FilterOut(err, k8serrors.IsNotFound)
 }

-func (c *impersonatorConfigController) createOrUpdateService(ctx context.Context, service *v1.Service) error {
-log := c.infoLog.WithValues("serviceType", service.Spec.Type, "service", klog.KObj(service))
-existing, err := c.servicesInformer.Lister().Services(c.namespace).Get(service.Name)
+func (c *impersonatorConfigController) createOrUpdateService(ctx context.Context, desiredService *v1.Service) error {
+log := c.infoLog.WithValues("serviceType", desiredService.Spec.Type, "service", klog.KObj(desiredService))
+
+// Prepare to remember which annotation keys were added from the CredentialIssuer spec, both for
+// creates and for updates, in case someone removes a key from the spec in the future. We would like
+// to be able to detect that the missing key means that we should remove the key. This is needed to
+// differentiate it from a key that was added by another actor, which we should not remove.
+// But don't bother recording the requested annotations if there were no annotations requested.
+desiredAnnotationKeys := make([]string, 0, len(desiredService.Annotations))
+for k := range desiredService.Annotations {
+desiredAnnotationKeys = append(desiredAnnotationKeys, k)
+}
+if len(desiredAnnotationKeys) > 0 {
+// Sort them since they come out of the map in no particular order.
+sort.Strings(desiredAnnotationKeys)
+keysJSONArray, err := json.Marshal(desiredAnnotationKeys)
+if err != nil {
+return err // This shouldn't really happen. We should always be able to marshal an array of strings.
+}
+// Save the desired annotations to a bookkeeping annotation.
+desiredService.Annotations[annotationKeysKey] = string(keysJSONArray)
+}
+
+// Get the Service from the informer, and create it if it does not already exist.
+existingService, err := c.servicesInformer.Lister().Services(c.namespace).Get(desiredService.Name)
 if k8serrors.IsNotFound(err) {
 log.Info("creating service for impersonation proxy")
-_, err := c.k8sClient.CoreV1().Services(c.namespace).Create(ctx, service, metav1.CreateOptions{})
+_, err := c.k8sClient.CoreV1().Services(c.namespace).Create(ctx, desiredService, metav1.CreateOptions{})
 return err
 }
 if err != nil {
 return err
 }

-// Update only the specific fields that are meaningfully part of our desired state.
-updated := existing.DeepCopy()
-updated.ObjectMeta.Labels = service.ObjectMeta.Labels
-updated.ObjectMeta.Annotations = service.ObjectMeta.Annotations
-updated.Spec.LoadBalancerIP = service.Spec.LoadBalancerIP
-updated.Spec.Type = service.Spec.Type
-updated.Spec.Selector = service.Spec.Selector
+// The Service already exists, so update only the specific fields that are meaningfully part of our desired state.
+updatedService := existingService.DeepCopy()
+updatedService.ObjectMeta.Labels = desiredService.ObjectMeta.Labels
+updatedService.Spec.LoadBalancerIP = desiredService.Spec.LoadBalancerIP
+updatedService.Spec.Type = desiredService.Spec.Type
+updatedService.Spec.Selector = desiredService.Spec.Selector
+
+// Do not simply overwrite the existing annotations with the desired annotations. Instead, merge-overwrite.
+// Another actor in the system, like a human user or a non-Pinniped controller, might have updated the
+// existing Service's annotations. If they did, then we do not want to overwrite those keys except for
+// the specific keys that are from the CredentialIssuer's spec, because if we overwrite keys belonging
+// to another controller then we could end up infinitely flapping back and forth with the other controller,
+// both updating that annotation on the Service.
+if updatedService.Annotations == nil {
+updatedService.Annotations = map[string]string{}
+}
+for k, v := range desiredService.Annotations {
+updatedService.Annotations[k] = v
+}
+
+// Check if the existing Service contains a record of previous annotations that were added by this controller.
+// Note that in an upgrade, older versions of Pinniped might have created the Service without this bookkeeping annotation.
+oldDesiredAnnotationKeysJSON, foundOldDesiredAnnotationKeysJSON := existingService.Annotations[annotationKeysKey]
+oldDesiredAnnotationKeys := []string{}
+if foundOldDesiredAnnotationKeysJSON {
+_ = json.Unmarshal([]byte(oldDesiredAnnotationKeysJSON), &oldDesiredAnnotationKeys)
+// In the unlikely event that we cannot parse the value of our bookkeeping annotation, just act like it
+// wasn't present and update it to the new value that it should have based on the current desired state.
+}
+
+// Check if any annotations which were previously in the CredentialIssuer spec are now gone from the spec,
+// which means that those now-missing annotations should get deleted.
+for _, oldKey := range oldDesiredAnnotationKeys {
+if _, existsInDesired := desiredService.Annotations[oldKey]; !existsInDesired {
+delete(updatedService.Annotations, oldKey)
+}
+}
+
+// If no annotations were requested, then remove the special bookkeeping annotation which might be
+// leftover from a previous update. During the next update, non-existence will be taken to mean
+// that no annotations were previously requested by the CredentialIssuer spec.
+if len(desiredAnnotationKeys) == 0 {
+delete(updatedService.Annotations, annotationKeysKey)
+}

 // If our updates didn't change anything, we're done.
-if equality.Semantic.DeepEqual(existing, updated) {
+if equality.Semantic.DeepEqual(existingService, updatedService) {
 return nil
 }

 // Otherwise apply the updates.
 c.infoLog.Info("updating service for impersonation proxy")
-_, err = c.k8sClient.CoreV1().Services(c.namespace).Update(ctx, updated, metav1.UpdateOptions{})
+_, err = c.k8sClient.CoreV1().Services(c.namespace).Update(ctx, updatedService, metav1.UpdateOptions{})
 return err
 }

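To make the bookkeeping-annotation approach in `createOrUpdateService` easier to follow outside of the controller, here is a minimal standalone Go sketch of the same idea: record the requested annotation keys in a JSON-encoded bookkeeping annotation, merge the requested annotations over the existing ones, and delete any key that was recorded previously but is no longer requested. The `mergeAnnotations` helper and the `main` function below are illustrative only and are not part of the Pinniped codebase; only the annotation key string is taken from the diff above.

```go
// Minimal standalone sketch (not Pinniped code) of the bookkeeping-annotation merge
// implemented by createOrUpdateService in the diff above.
package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

// The bookkeeping annotation key, copied from the diff above.
const annotationKeysKey = "credentialissuer.pinniped.dev/annotation-keys"

// mergeAnnotations returns the annotations that should end up on the Service, given the
// annotations currently on it and the annotations currently desired by the CredentialIssuer spec.
func mergeAnnotations(existing, desired map[string]string) map[string]string {
	updated := map[string]string{}
	for k, v := range existing {
		updated[k] = v
	}

	// Merge-overwrite so that keys owned by other actors are preserved.
	for k, v := range desired {
		updated[k] = v
	}

	// Read the previous bookkeeping record, tolerating a missing or unparseable value.
	oldKeys := []string{}
	_ = json.Unmarshal([]byte(existing[annotationKeysKey]), &oldKeys)

	// Remove keys that were previously requested but are no longer desired.
	for _, oldKey := range oldKeys {
		if _, stillDesired := desired[oldKey]; !stillDesired {
			delete(updated, oldKey)
		}
	}

	// Record the currently desired keys, or drop the record entirely when nothing is desired.
	if len(desired) == 0 {
		delete(updated, annotationKeysKey)
		return updated
	}
	keys := make([]string, 0, len(desired))
	for k := range desired {
		keys = append(keys, k)
	}
	sort.Strings(keys) // map iteration order is random, so sort for a stable annotation value
	keysJSON, _ := json.Marshal(keys)
	updated[annotationKeysKey] = string(keysJSON)
	return updated
}

func main() {
	existing := map[string]string{
		"unrelated-key":   "kept",
		"old-key":         "previously requested",
		annotationKeysKey: `["old-key"]`,
	}
	desired := map[string]string{"new-key": "now requested"}

	// Prints a map where "old-key" was removed, "unrelated-key" was kept, and the
	// bookkeeping annotation now records ["new-key"].
	fmt.Println(mergeAnnotations(existing, desired))
}
```

The design point worth noting, which the controller comments spell out, is that the controller never blindly replaces the Service's annotations as a whole, so annotations owned by other actors on the same Service are left alone instead of being fought over.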

@@ -785,6 +785,13 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 }
 }

+var addServiceToTrackers = func(service *corev1.Service, clients ...*kubernetesfake.Clientset) {
+for _, client := range clients {
+serviceCopy := service.DeepCopy()
+r.NoError(client.Tracker().Add(serviceCopy))
+}
+}
+
 var deleteServiceFromTracker = func(resourceName string, client *kubernetesfake.Clientset) {
 r.NoError(client.Tracker().Delete(
 schema.GroupVersionResource{Version: "v1", Resource: "services"},
@@ -1644,7 +1651,6 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 })

 when("credentialissuer has service type loadbalancer and custom annotations", func() {
-annotations := map[string]string{"some-annotation-key": "some-annotation-value"}
 it.Before(func() {
 addCredentialIssuerToTrackers(v1alpha1.CredentialIssuer{
 ObjectMeta: metav1.ObjectMeta{Name: credentialIssuerResourceName},
@@ -1653,7 +1659,7 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 Mode: v1alpha1.ImpersonationProxyModeEnabled,
 Service: v1alpha1.ImpersonationProxyServiceSpec{
 Type: v1alpha1.ImpersonationProxyServiceTypeLoadBalancer,
-Annotations: annotations,
+Annotations: map[string]string{"some-annotation-key": "some-annotation-value"},
 },
 },
 },
@@ -1667,7 +1673,10 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 r.Len(kubeAPIClient.Actions(), 3)
 requireNodesListed(kubeAPIClient.Actions()[0])
 lbService := requireLoadBalancerWasCreated(kubeAPIClient.Actions()[1])
-require.Equal(t, lbService.Annotations, annotations)
+require.Equal(t, lbService.Annotations, map[string]string{
+"some-annotation-key": "some-annotation-value",
+"credentialissuer.pinniped.dev/annotation-keys": `["some-annotation-key"]`,
+})
 requireCASecretWasCreated(kubeAPIClient.Actions()[2])
 requireTLSServerIsRunningWithoutCerts()
 requireCredentialIssuer(newPendingStrategyWaitingForLB())
@@ -2386,20 +2395,30 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
 requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)

+// Simulate another actor in the system, like a human user or a non-Pinniped controller,
+// updating the new Service's annotations. The map was nil, so we can overwrite the whole thing,
+lbService.Annotations = map[string]string{
+"annotation-from-unrelated-controller-key": "annotation-from-unrelated-controller-val",
+"my-annotation-key": "my-annotation-from-unrelated-controller-val",
+}
+
 // Simulate the informer cache's background update from its watch.
-addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[1], kubeInformers.Core().V1().Services())
+addObjectToKubeInformerAndWait(lbService, kubeInformers.Core().V1().Services())
 addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[2], kubeInformers.Core().V1().Secrets())
 addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[3], kubeInformers.Core().V1().Secrets())

-// Add annotations to the spec.
-annotations := map[string]string{"my-annotation-key": "my-annotation-val"}
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 4) // no new actions because the controller decides there is nothing to update on the Service
+
+// Add annotations to the CredentialIssuer spec.
+credentialIssuerAnnotations := map[string]string{"my-annotation-key": "my-annotation-val"}
 updateCredentialIssuerInInformerAndWait(credentialIssuerResourceName, v1alpha1.CredentialIssuerSpec{
 ImpersonationProxy: &v1alpha1.ImpersonationProxySpec{
 Mode: v1alpha1.ImpersonationProxyModeEnabled,
 ExternalEndpoint: localhostIP,
 Service: v1alpha1.ImpersonationProxyServiceSpec{
 Type: v1alpha1.ImpersonationProxyServiceTypeLoadBalancer,
-Annotations: annotations,
+Annotations: credentialIssuerAnnotations,
 },
 },
 }, pinnipedInformers.Config().V1alpha1().CredentialIssuers())
@@ -2407,7 +2426,14 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 r.NoError(runControllerSync())
 r.Len(kubeAPIClient.Actions(), 5) // one more item to update the loadbalancer
 lbService = requireLoadBalancerWasUpdated(kubeAPIClient.Actions()[4])
-require.Equal(t, annotations, lbService.Annotations) // now the annotations should exist on the load balancer
+require.Equal(t, map[string]string{
+// Now the CredentialIssuer annotations should be merged on the load balancer.
+// In the unlikely case where keys conflict, the CredentialIssuer value overwrites the other value.
+// Otherwise the annotations from the other actor should not be modified.
+"annotation-from-unrelated-controller-key": "annotation-from-unrelated-controller-val",
+"my-annotation-key": "my-annotation-val",
+"credentialissuer.pinniped.dev/annotation-keys": `["my-annotation-key"]`,
+}, lbService.Annotations)
 requireTLSServerIsRunning(ca, testServerAddr(), nil)
 requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
 requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)
@@ -2447,20 +2473,30 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
 requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)

+// Simulate another actor in the system, like a human user or a non-Pinniped controller,
+// updating the new Service's annotations.
+clusterIPService.Annotations = map[string]string{
+"annotation-from-unrelated-controller-key": "annotation-from-unrelated-controller-val",
+"my-annotation-key": "my-annotation-from-unrelated-controller-val",
+}
+
 // Simulate the informer cache's background update from its watch.
-addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[1], kubeInformers.Core().V1().Services())
+addObjectToKubeInformerAndWait(clusterIPService, kubeInformers.Core().V1().Services())
 addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[2], kubeInformers.Core().V1().Secrets())
 addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[3], kubeInformers.Core().V1().Secrets())

-// Add annotations to the spec.
-annotations := map[string]string{"my-annotation-key": "my-annotation-val"}
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 4) // no new actions because the controller decides there is nothing to update on the Service
+
+// Add annotations to the CredentialIssuer spec.
+credentialIssuerAnnotations := map[string]string{"my-annotation-key": "my-annotation-val"}
 updateCredentialIssuerInInformerAndWait(credentialIssuerResourceName, v1alpha1.CredentialIssuerSpec{
 ImpersonationProxy: &v1alpha1.ImpersonationProxySpec{
 Mode: v1alpha1.ImpersonationProxyModeEnabled,
 ExternalEndpoint: localhostIP,
 Service: v1alpha1.ImpersonationProxyServiceSpec{
 Type: v1alpha1.ImpersonationProxyServiceTypeClusterIP,
-Annotations: annotations,
+Annotations: credentialIssuerAnnotations,
 },
 },
 }, pinnipedInformers.Config().V1alpha1().CredentialIssuers())
@@ -2468,7 +2504,173 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
 r.NoError(runControllerSync())
 r.Len(kubeAPIClient.Actions(), 5) // one more item to update the loadbalancer
 clusterIPService = requireClusterIPWasUpdated(kubeAPIClient.Actions()[4])
-require.Equal(t, annotations, clusterIPService.Annotations) // now the annotations should exist on the load balancer
+require.Equal(t, map[string]string{
+// Now the CredentialIssuer annotations should be merged on the load balancer.
+// In the unlikely case where keys conflict, the CredentialIssuer value overwrites the other value.
+// Otherwise the annotations from the other actor should not be modified.
+"annotation-from-unrelated-controller-key": "annotation-from-unrelated-controller-val",
+"my-annotation-key": "my-annotation-val",
+"credentialissuer.pinniped.dev/annotation-keys": `["my-annotation-key"]`,
+}, clusterIPService.Annotations)
+requireTLSServerIsRunning(ca, testServerAddr(), nil)
+requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
+requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)
+})
+})
+
+when("requesting a load balancer via CredentialIssuer with annotations, then updating the CredentialIssuer annotations to remove one", func() {
+it.Before(func() {
+addSecretToTrackers(signingCASecret, kubeInformerClient)
+addCredentialIssuerToTrackers(v1alpha1.CredentialIssuer{
+ObjectMeta: metav1.ObjectMeta{Name: credentialIssuerResourceName},
+Spec: v1alpha1.CredentialIssuerSpec{
+ImpersonationProxy: &v1alpha1.ImpersonationProxySpec{
+Mode: v1alpha1.ImpersonationProxyModeEnabled,
+ExternalEndpoint: localhostIP,
+Service: v1alpha1.ImpersonationProxyServiceSpec{
+Type: v1alpha1.ImpersonationProxyServiceTypeLoadBalancer,
+Annotations: map[string]string{
+"my-initial-annotation1-key": "my-initial-annotation1-val",
+"my-initial-annotation2-key": "my-initial-annotation2-val",
+"my-initial-annotation3-key": "my-initial-annotation3-val",
+},
+},
+},
+},
+}, pinnipedInformerClient, pinnipedAPIClient)
+addNodeWithRoleToTracker("worker", kubeAPIClient)
+})
+
+it("creates the load balancer with annotations, then removes the removed annotation", func() {
+startInformersAndController()
+
+// Should have started in "enabled" mode with service type load balancer, so one is created.
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 4)
+requireNodesListed(kubeAPIClient.Actions()[0])
+lbService := requireLoadBalancerWasCreated(kubeAPIClient.Actions()[1])
+require.Equal(t, map[string]string{
+"my-initial-annotation1-key": "my-initial-annotation1-val",
+"my-initial-annotation2-key": "my-initial-annotation2-val",
+"my-initial-annotation3-key": "my-initial-annotation3-val",
+"credentialissuer.pinniped.dev/annotation-keys": `["my-initial-annotation1-key","my-initial-annotation2-key","my-initial-annotation3-key"]`,
+}, lbService.Annotations) // there should be some annotations at first
+ca := requireCASecretWasCreated(kubeAPIClient.Actions()[2])
+requireTLSSecretWasCreated(kubeAPIClient.Actions()[3], ca)
+requireTLSServerIsRunning(ca, testServerAddr(), nil)
+requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
+requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)
+
+// Simulate another actor in the system, like a human user or a non-Pinniped controller,
+// updating the new Service to add another annotation.
+lbService.Annotations["annotation-from-unrelated-controller-key"] = "annotation-from-unrelated-controller-val"
+
+// Simulate the informer cache's background update from its watch.
+addObjectToKubeInformerAndWait(lbService, kubeInformers.Core().V1().Services())
+addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[2], kubeInformers.Core().V1().Secrets())
+addObjectFromCreateActionToInformerAndWait(kubeAPIClient.Actions()[3], kubeInformers.Core().V1().Secrets())
+
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 4) // no new actions because the controller decides there is nothing to update on the Service
+
+// Remove one of the annotations from the CredentialIssuer spec.
+updateCredentialIssuerInInformerAndWait(credentialIssuerResourceName, v1alpha1.CredentialIssuerSpec{
+ImpersonationProxy: &v1alpha1.ImpersonationProxySpec{
+Mode: v1alpha1.ImpersonationProxyModeEnabled,
+ExternalEndpoint: localhostIP,
+Service: v1alpha1.ImpersonationProxyServiceSpec{
+Type: v1alpha1.ImpersonationProxyServiceTypeLoadBalancer,
+Annotations: map[string]string{
+"my-initial-annotation1-key": "my-initial-annotation1-val",
+"my-initial-annotation3-key": "my-initial-annotation3-val",
+},
+},
+},
+}, pinnipedInformers.Config().V1alpha1().CredentialIssuers())
+
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 5) // one more item to update the loadbalancer
+lbService = requireLoadBalancerWasUpdated(kubeAPIClient.Actions()[4])
+require.Equal(t, map[string]string{
+// Now the CredentialIssuer annotations should be merged on the load balancer.
+// Since the user removed the "my-initial-annotation2-key" key from the CredentialIssuer spec,
+// it should be removed from the Service.
+// The annotations from the other actor should not be modified.
+"annotation-from-unrelated-controller-key": "annotation-from-unrelated-controller-val",
+"my-initial-annotation1-key": "my-initial-annotation1-val",
+"my-initial-annotation3-key": "my-initial-annotation3-val",
+"credentialissuer.pinniped.dev/annotation-keys": `["my-initial-annotation1-key","my-initial-annotation3-key"]`,
+}, lbService.Annotations)
+requireTLSServerIsRunning(ca, testServerAddr(), nil)
+requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
+requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)
+
+// Remove all the rest of the annotations from the CredentialIssuer spec so there are none remaining.
+updateCredentialIssuerInInformerAndWait(credentialIssuerResourceName, v1alpha1.CredentialIssuerSpec{
+ImpersonationProxy: &v1alpha1.ImpersonationProxySpec{
+Mode: v1alpha1.ImpersonationProxyModeEnabled,
+ExternalEndpoint: localhostIP,
+Service: v1alpha1.ImpersonationProxyServiceSpec{
+Type: v1alpha1.ImpersonationProxyServiceTypeLoadBalancer,
+Annotations: map[string]string{},
+},
+},
+}, pinnipedInformers.Config().V1alpha1().CredentialIssuers())
+
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 6) // one more item to update the loadbalancer
+lbService = requireLoadBalancerWasUpdated(kubeAPIClient.Actions()[5])
+require.Equal(t, map[string]string{
+// Since the user removed all annotations from the CredentialIssuer spec,
+// they should all be removed from the Service, along with the special bookkeeping annotation too.
+// The annotations from the other actor should not be modified.
+"annotation-from-unrelated-controller-key": "annotation-from-unrelated-controller-val",
+}, lbService.Annotations)
+requireTLSServerIsRunning(ca, testServerAddr(), nil)
+requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
+requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)
+})
+})
+
+when("requesting a load balancer via CredentialIssuer, but there is already a load balancer with an invalid bookkeeping annotation value", func() {
+it.Before(func() {
+addSecretToTrackers(signingCASecret, kubeInformerClient)
+addCredentialIssuerToTrackers(v1alpha1.CredentialIssuer{
+ObjectMeta: metav1.ObjectMeta{Name: credentialIssuerResourceName},
+Spec: v1alpha1.CredentialIssuerSpec{
+ImpersonationProxy: &v1alpha1.ImpersonationProxySpec{
+Mode: v1alpha1.ImpersonationProxyModeEnabled,
+ExternalEndpoint: localhostIP,
+Service: v1alpha1.ImpersonationProxyServiceSpec{
+Type: v1alpha1.ImpersonationProxyServiceTypeLoadBalancer,
+Annotations: map[string]string{"some-annotation": "annotation-value"},
+},
+},
+},
+}, pinnipedInformerClient, pinnipedAPIClient)
+addNodeWithRoleToTracker("worker", kubeAPIClient)
+// Add a Service with a messed up bookkeeping annotation.
+loadBalancerService := newLoadBalancerService(loadBalancerServiceName, corev1.ServiceStatus{})
+loadBalancerService.Annotations = map[string]string{
+annotationKeysKey: `["this is not valid json`,
+}
+addServiceToTrackers(loadBalancerService, kubeInformerClient, kubeAPIClient)
+})
+
+it("just acts like the annotation wasn't present since that is better than becoming inoperable", func() {
+startInformersAndController()
+
+// Should have started in "enabled" mode with service type load balancer, so one is created.
+r.NoError(runControllerSync())
+r.Len(kubeAPIClient.Actions(), 4)
+requireNodesListed(kubeAPIClient.Actions()[0])
+lbService := requireLoadBalancerWasUpdated(kubeAPIClient.Actions()[1])
+require.Equal(t, map[string]string{
+"some-annotation": "annotation-value",
+"credentialissuer.pinniped.dev/annotation-keys": `["some-annotation"]`,
+}, lbService.Annotations)
+ca := requireCASecretWasCreated(kubeAPIClient.Actions()[2])
+requireTLSSecretWasCreated(kubeAPIClient.Actions()[3], ca)
 requireTLSServerIsRunning(ca, testServerAddr(), nil)
 requireCredentialIssuer(newSuccessStrategy(localhostIP, ca))
 requireSigningCertProviderHasLoadedCerts(signingCACertPEM, signingCAKeyPEM)

@@ -60,6 +60,11 @@ const (
 defaultLDAPUsernamePrompt = "Username: "
 defaultLDAPPasswordPrompt = "Password: "
+
+// For CLI-based auth, such as with LDAP upstream identity providers, the user may use these environment variables
+// to avoid getting interactively prompted for username and password.
+defaultUsernameEnvVarName = "PINNIPED_USERNAME"
+defaultPasswordEnvVarName = "PINNIPED_PASSWORD" //nolint:gosec // this is not a credential

 httpLocationHeaderName = "Location"

 debugLogLevel = 4
@@ -99,6 +104,7 @@ type handlerState struct {
 generatePKCE func() (pkce.Code, error)
 generateNonce func() (nonce.Nonce, error)
 openURL func(string) error
+getEnv func(key string) string
 listen func(string, string) (net.Listener, error)
 isTTY func(int) bool
 getProvider func(*oauth2.Config, *oidc.Provider, *http.Client) provider.UpstreamOIDCIdentityProviderI
@@ -276,6 +282,7 @@ func Login(issuer string, clientID string, opts ...Option) (*oidctypes.Token, er
 generateNonce: nonce.Generate,
 generatePKCE: pkce.Generate,
 openURL: browser.OpenURL,
+getEnv: os.Getenv,
 listen: net.Listen,
 isTTY: term.IsTerminal,
 getProvider: upstreamoidc.New,
@@ -403,14 +410,10 @@ func (h *handlerState) baseLogin() (*oidctypes.Token, error) {
 // Make a direct call to the authorize endpoint, including the user's username and password on custom http headers,
 // and parse the authcode from the response. Exchange the authcode for tokens. Return the tokens or an error.
 func (h *handlerState) cliBasedAuth(authorizeOptions *[]oauth2.AuthCodeOption) (*oidctypes.Token, error) {
-// Ask the user for their username and password.
-username, err := h.promptForValue(h.ctx, defaultLDAPUsernamePrompt)
+// Ask the user for their username and password, or get them from env vars.
+username, password, err := h.getUsernameAndPassword()
 if err != nil {
-return nil, fmt.Errorf("error prompting for username: %w", err)
-}
-password, err := h.promptForSecret(h.ctx, defaultLDAPPasswordPrompt)
-if err != nil {
-return nil, fmt.Errorf("error prompting for password: %w", err)
+return nil, err
 }

 // Make a callback URL even though we won't be listening on this port, because providing a redirect URL is
@@ -500,6 +503,33 @@ func (h *handlerState) cliBasedAuth(authorizeOptions *[]oauth2.AuthCodeOption) (
 return token, nil
 }

+// Prompt for the user's username and password, or read them from env vars if they are available.
+func (h *handlerState) getUsernameAndPassword() (string, string, error) {
+var err error
+
+username := h.getEnv(defaultUsernameEnvVarName)
+if username == "" {
+username, err = h.promptForValue(h.ctx, defaultLDAPUsernamePrompt)
+if err != nil {
+return "", "", fmt.Errorf("error prompting for username: %w", err)
+}
+} else {
+h.logger.V(debugLogLevel).Info("Pinniped: Read username from environment variable", "name", defaultUsernameEnvVarName)
+}
+
+password := h.getEnv(defaultPasswordEnvVarName)
+if password == "" {
+password, err = h.promptForSecret(h.ctx, defaultLDAPPasswordPrompt)
+if err != nil {
+return "", "", fmt.Errorf("error prompting for password: %w", err)
+}
+} else {
+h.logger.V(debugLogLevel).Info("Pinniped: Read password from environment variable", "name", defaultPasswordEnvVarName)
+}
+
+return username, password, nil
+}
+
 // Open a web browser, or ask the user to open a web browser, to visit the authorize endpoint.
 // Create a localhost callback listener which exchanges the authcode for tokens. Return the tokens or an error.
 func (h *handlerState) webBrowserBasedAuth(authorizeOptions *[]oauth2.AuthCodeOption) (*oidctypes.Token, error) {

@@ -993,7 +993,7 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
 wantErr: "error during authorization code exchange: some authcode exchange or token validation error",
 },
 {
-name: "successful ldap login",
+name: "successful ldap login with prompts for username and password",
 clientID: "test-client-id",
 opt: func(t *testing.T) Option {
 return func(h *handlerState) error {
@@ -1011,6 +1011,9 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
 h.generateState = func() (state.State, error) { return "test-state", nil }
 h.generatePKCE = func() (pkce.Code, error) { return "test-pkce", nil }
 h.generateNonce = func() (nonce.Nonce, error) { return "test-nonce", nil }
+h.getEnv = func(_ string) string {
+return "" // asking for any env var returns empty as if it were unset
+}
 h.promptForValue = func(_ context.Context, promptLabel string) (string, error) {
 require.Equal(t, "Username: ", promptLabel)
 return "some-upstream-username", nil
@@ -1089,6 +1092,117 @@ func TestLogin(t *testing.T) { // nolint:gocyclo
 wantLogs: []string{"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\""},
 wantToken: &testToken,
 },
+{
+name: "successful ldap login with env vars for username and password",
+clientID: "test-client-id",
+opt: func(t *testing.T) Option {
+return func(h *handlerState) error {
+fakeAuthCode := "test-authcode-value"
+
+h.getProvider = func(_ *oauth2.Config, _ *oidc.Provider, _ *http.Client) provider.UpstreamOIDCIdentityProviderI {
+mock := mockUpstream(t)
+mock.EXPECT().
+ExchangeAuthcodeAndValidateTokens(
+gomock.Any(), fakeAuthCode, pkce.Code("test-pkce"), nonce.Nonce("test-nonce"), "http://127.0.0.1:0/callback").
+Return(&testToken, nil)
+return mock
+}
+
+h.generateState = func() (state.State, error) { return "test-state", nil }
+h.generatePKCE = func() (pkce.Code, error) { return "test-pkce", nil }
+h.generateNonce = func() (nonce.Nonce, error) { return "test-nonce", nil }
+h.getEnv = func(key string) string {
+switch key {
+case "PINNIPED_USERNAME":
+return "some-upstream-username"
+case "PINNIPED_PASSWORD":
+return "some-upstream-password"
+default:
+return "" // all other env vars are treated as if they are unset
+}
+}
+h.promptForValue = func(_ context.Context, promptLabel string) (string, error) {
+require.FailNow(t, fmt.Sprintf("saw unexpected prompt from the CLI: %q", promptLabel))
+return "", nil
+}
+h.promptForSecret = func(_ context.Context, promptLabel string) (string, error) {
+require.FailNow(t, fmt.Sprintf("saw unexpected prompt from the CLI: %q", promptLabel))
+return "", nil
+}
+
+cache := &mockSessionCache{t: t, getReturnsToken: nil}
+cacheKey := SessionCacheKey{
+Issuer: successServer.URL,
+ClientID: "test-client-id",
+Scopes: []string{"test-scope"},
+RedirectURI: "http://localhost:0/callback",
+}
+t.Cleanup(func() {
+require.Equal(t, []SessionCacheKey{cacheKey}, cache.sawGetKeys)
+require.Equal(t, []SessionCacheKey{cacheKey}, cache.sawPutKeys)
+require.Equal(t, []*oidctypes.Token{&testToken}, cache.sawPutTokens)
+})
+require.NoError(t, WithSessionCache(cache)(h))
+require.NoError(t, WithCLISendingCredentials()(h))
+require.NoError(t, WithUpstreamIdentityProvider("some-upstream-name", "ldap")(h))
+
+discoveryRequestWasMade := false
+authorizeRequestWasMade := false
+t.Cleanup(func() {
+require.True(t, discoveryRequestWasMade, "should have made an discovery request")
+require.True(t, authorizeRequestWasMade, "should have made an authorize request")
+})
+
+require.NoError(t, WithClient(&http.Client{
+Transport: roundtripper.Func(func(req *http.Request) (*http.Response, error) {
+switch req.URL.Scheme + "://" + req.URL.Host + req.URL.Path {
+case "http://" + successServer.Listener.Addr().String() + "/.well-known/openid-configuration":
+discoveryRequestWasMade = true
+return defaultDiscoveryResponse(req)
+case "http://" + successServer.Listener.Addr().String() + "/authorize":
+authorizeRequestWasMade = true
+require.Equal(t, "some-upstream-username", req.Header.Get("Pinniped-Username"))
+require.Equal(t, "some-upstream-password", req.Header.Get("Pinniped-Password"))
+require.Equal(t, url.Values{
+// This is the PKCE challenge which is calculated as base64(sha256("test-pkce")). For example:
+// $ echo -n test-pkce | shasum -a 256 | cut -d" " -f1 | xxd -r -p | base64 | cut -d"=" -f1
+// VVaezYqum7reIhoavCHD1n2d+piN3r/mywoYj7fCR7g
+"code_challenge": []string{"VVaezYqum7reIhoavCHD1n2d-piN3r_mywoYj7fCR7g"},
+"code_challenge_method": []string{"S256"},
+"response_type": []string{"code"},
+"scope": []string{"test-scope"},
+"nonce": []string{"test-nonce"},
+"state": []string{"test-state"},
+"access_type": []string{"offline"},
+"client_id": []string{"test-client-id"},
+"redirect_uri": []string{"http://127.0.0.1:0/callback"},
+"pinniped_idp_name": []string{"some-upstream-name"},
+"pinniped_idp_type": []string{"ldap"},
+}, req.URL.Query())
+return &http.Response{
+StatusCode: http.StatusFound,
+Header: http.Header{"Location": []string{
+fmt.Sprintf("http://127.0.0.1:0/callback?code=%s&state=test-state", fakeAuthCode),
+}},
+}, nil
+default:
+// Note that "/token" requests should not be made. They are mocked by mocking calls to ExchangeAuthcodeAndValidateTokens().
+require.FailNow(t, fmt.Sprintf("saw unexpected http call from the CLI: %s", req.URL.String()))
+return nil, nil
+}
+}),
+})(h))
+return nil
+}
+},
+issuer: successServer.URL,
+wantLogs: []string{
+"\"level\"=4 \"msg\"=\"Pinniped: Performing OIDC discovery\" \"issuer\"=\"" + successServer.URL + "\"",
+"\"level\"=4 \"msg\"=\"Pinniped: Read username from environment variable\" \"name\"=\"PINNIPED_USERNAME\"",
+"\"level\"=4 \"msg\"=\"Pinniped: Read password from environment variable\" \"name\"=\"PINNIPED_PASSWORD\"",
+},
+wantToken: &testToken,
+},
 {
 name: "with requested audience, session cache hit with valid token, but discovery fails",
 clientID: "test-client-id",
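The new test above asserts a hard-coded `code_challenge` value and documents it with a shell one-liner in a comment. As a cross-check, this small standalone Go program (not part of the commit) derives that challenge from the fixed test PKCE code, i.e. the unpadded base64 URL encoding of sha256("test-pkce"), which is how RFC 7636 defines the S256 method. Note that the comment's one-liner prints the standard base64 alphabet ("+" and "/"), while the query parameter asserted by the test uses the URL-safe alphabet ("-" and "_"); the program below produces the URL-safe form directly.

```go
// Standalone sketch (not from the commit): reproduce the expected S256 code_challenge
// for the fixed PKCE code "test-pkce" used by the test above.
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("test-pkce"))

	// RFC 7636 S256 method: code_challenge = BASE64URL-ENCODE(SHA256(ASCII(code_verifier))),
	// using the URL-safe alphabet with no "=" padding, which should match the value asserted
	// in the test's expected "code_challenge" query parameter.
	fmt.Println(base64.RawURLEncoding.EncodeToString(sum[:]))
}
```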

@@ -1,5 +1,5 @@
 ---
-title: Pinniped How-To Guides
+title: Pinniped how-to guides
 cascade:
 layout: docs
 menu:
@@ -15,6 +15,7 @@ This guide shows you how to use this capability _without_ the Pinniped Superviso
 This is most useful if you have only a single cluster and want to authenticate to it via an existing OIDC provider.

 If you have multiple clusters, you may want to [install]({{< ref "install-supervisor" >}}) and [configure]({{< ref "configure-supervisor" >}}) the Pinniped Supervisor.
+Then you can [configure the Concierge to use the Supervisor for authentication]({{< ref "configure-concierge-supervisor-jwt" >}}).

 ## Prerequisites

@@ -121,7 +122,7 @@ You should see:

 ```sh
 kubectl create clusterrolebinding my-user-admin \
---clusterrole admin \
+--clusterrole edit \
 --user my-username@example.com
 ```

@@ -1,6 +1,6 @@
 ---
 title: Configure the Pinniped Concierge to validate JWT tokens issued by the Pinniped Supervisor
-description: Set up JSON Web Token (JWT) based token authentication on an individual Kubernetes cluster using the Pinniped Supervisor as the OIDC Provider.
+description: Set up JSON Web Token (JWT) based token authentication on an individual Kubernetes cluster using the Pinniped Supervisor as the OIDC provider.
 cascade:
   layout: docs
 menu:
@@ -26,6 +26,9 @@ If you would rather not use the Supervisor, you may want to [configure the Conci
 This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress,
 and that you have [configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).

+It also assumes that you have configured an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor as the source of your user's identities.
+Various examples of configuring these resources can be found in these guides.
+
 It also assumes that you have already [installed the Pinniped Concierge]({{< ref "install-concierge" >}})
 on all the clusters in which you would like to allow users to have a unified identity.

@@ -64,62 +67,6 @@ kubectl apply -f my-supervisor-authenticator.yaml
 Do this on each cluster in which you would like to allow users from that FederationDomain to log in.
 Don't forget to give each cluster a unique `audience` value for security reasons.

-## Generate a kubeconfig file
+## Next steps

-Generate a kubeconfig file for one of the clusters in which you installed and configured the Concierge as described above:
+Next, [log in to your cluster]({{< ref "login" >}})!
-
-```sh
-pinniped get kubeconfig > my-cluster.yaml
-```
-
-This assumes that your current kubeconfig is an admin-level kubeconfig for the cluster, such as the kubeconfig
-that you used to install the Concierge.
-
-This creates a kubeconfig YAML file `my-cluster.yaml`, unique to that cluster, which targets your JWTAuthenticator
-using `pinniped login oidc` as an [ExecCredential plugin](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins).
-This new kubeconfig can be shared with the other users of this cluster. It does not contain any specific
-identity or credentials. When a user uses this new kubeconfig with `kubectl`, the Pinniped plugin will
-prompt them to log in using their own identity.
-
-## Use the kubeconfig file
-
-Use the kubeconfig with `kubectl` to access your cluster:
-
-```sh
-kubectl --kubeconfig my-cluster.yaml get namespaces
-```
-
-You should see:
-
-- The `pinniped login oidc` command is executed automatically by `kubectl`.
-
-- Pinniped directs you to login with whatever identity provider is configured in the Supervisor, either by opening
-  your browser (for upstream OIDC Providers) or by prompting for your username and password (for upstream LDAP providers).
-
-- In your shell, you see your clusters namespaces.
-
-If instead you get an access denied error, you may need to create a ClusterRoleBinding for username of your account
-in the Supervisor's upstream identity provider, for example:
-
-```sh
-kubectl create clusterrolebinding my-user-admin \
-  --clusterrole admin \
-  --user my-username@example.com
-```
-
-Alternatively, you could create role bindings based on the group membership of your users
-in the upstream identity provider, for example:
-
-```sh
-kubectl create clusterrolebinding my-auditors \
-  --clusterrole view \
-  --group auditors
-```
-
-## Other notes
-
-- Pinniped kubeconfig files do not contain secrets and are safe to share between users.
-
-- Temporary session credentials such as ID, access, and refresh tokens are stored in:
-  - `~/.config/pinniped/sessions.yaml` (macOS/Linux)
-  - `%USERPROFILE%/.config/pinniped/sessions.yaml` (Windows).
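One practical way to satisfy the "unique `audience` value" advice in the hunk above is to derive the audience from the cluster name plus a random suffix. This is only an illustrative sketch; the naming scheme is an assumption, and any sufficiently unique string works:

```sh
# Print a unique, hard-to-guess audience value for one cluster,
# e.g. to paste into the JWTAuthenticator in my-supervisor-authenticator.yaml.
echo "my-cluster-name-$(openssl rand -hex 8)"
```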
@@ -112,5 +112,5 @@ You should see:
 If instead you get an access denied error, you may need to create a ClusterRoleBinding for the username/groups returned by your webhook, for example:

 ```sh
-kubectl create clusterrolebinding my-user-admin --clusterrole admin --user my-username
+kubectl create clusterrolebinding my-user-admin --clusterrole edit --user my-username
 ```
@@ -1,5 +1,5 @@
 ---
-title: Configure the Pinniped Supervisor to use GitLab as an OIDC Provider
+title: Configure the Pinniped Supervisor to use GitLab as an OIDC provider
 description: Set up the Pinniped Supervisor to use GitLab login.
 cascade:
   layout: docs
@@ -136,6 +136,7 @@ spec:
   # [...]
 ```

-## Next Steps
+## Next steps

-Now that you have configured the Supervisor to use GitLab, you will want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}}).
+Next, [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}})!
+Then you'll be able to log into those clusters as any of the users from the GitLab directory.
@@ -1,5 +1,5 @@
 ---
-title: Configure the Pinniped Supervisor to use JumpCloud as an LDAP Provider
+title: Configure the Pinniped Supervisor to use JumpCloud as an LDAP provider
 description: Set up the Pinniped Supervisor to use JumpCloud LDAP
 cascade:
   layout: docs
@@ -152,7 +152,7 @@ kubectl describe LDAPIdentityProvider -n pinniped-supervisor jumpcloudldap

 Look at the `status` field. If it was configured correctly, you should see `phase: Ready`.

-## Next Steps
+## Next steps

-Now that you have configured the Supervisor to use JumpCloud LDAP, you will want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}}).
+Next, [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}})!
 Then you'll be able to log into those clusters as any of the users from the JumpCloud directory.
@@ -1,5 +1,5 @@
 ---
-title: Configure the Pinniped Supervisor to use Okta as an OIDC Provider
+title: Configure the Pinniped Supervisor to use Okta as an OIDC provider
 description: Set up the Pinniped Supervisor to use Okta login.
 cascade:
   layout: docs
@@ -108,4 +108,5 @@ Look at the `status` field. If it was configured correctly, you should see `phas

 ## Next steps

-Now that you have configured the Supervisor to use Okta, you will want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}}).
+Next, [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}})!
+Then you'll be able to log into those clusters as any of the users from the Okta directory.
@@ -1,5 +1,5 @@
 ---
-title: Configure the Pinniped Supervisor to use OpenLDAP as an LDAP Provider
+title: Configure the Pinniped Supervisor to use OpenLDAP as an LDAP provider
 description: Set up the Pinniped Supervisor to use OpenLDAP login.
 cascade:
   layout: docs
@@ -22,7 +22,7 @@ cluster using their identity from an OpenLDAP server.
 This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress,
 and that you have [configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).

-## An Example of Deploying OpenLDAP on Kubernetes
+## An example of deploying OpenLDAP on Kubernetes

 *Note: If you already have an OpenLDAP server installed and configured, please skip to the next section to configure the Supervisor.*

@@ -292,7 +292,7 @@ kubectl describe LDAPIdentityProvider -n pinniped-supervisor openldap

 Look at the `status` field. If it was configured correctly, you should see `phase: Ready`.

-## Next Steps
+## Next steps

-Now that you have configured the Supervisor to use OpenLDAP, you will want to [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}}).
+Next, [configure the Concierge to validate JWTs issued by the Supervisor]({{< ref "configure-concierge-supervisor-jwt" >}})!
 Then you'll be able to log into those clusters as any of the users from the OpenLDAP directory.
@@ -163,3 +163,9 @@ You can create the certificate Secrets however you like, for example you could u
 or `kubectl create secret tls`.
 Keep in mind that your users must load some of these endpoints in their web browsers, so the TLS certificates
 should be signed by a certificate authority that is trusted by their browsers.
+
+## Next steps
+
+Next, configure an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor (several examples are available in these guides),
+and [configure the Concierge to use the Supervisor for authentication]({{< ref "configure-concierge-supervisor-jwt" >}})
+on each cluster!
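For the certificate Secrets mentioned in the hunk above, the `kubectl create secret tls` route might look roughly like the sketch below. The Secret name and file paths are hypothetical placeholders, and the namespace assumes the Supervisor was installed into the default `pinniped-supervisor` namespace; use whatever names your FederationDomain and ingress configuration expect:

```sh
# Hypothetical names and paths: create a TLS Secret for the Supervisor's issuer endpoints.
kubectl create secret tls my-federation-domain-tls \
  --namespace pinniped-supervisor \
  --cert ./my-tls-certificate.pem \
  --key ./my-tls-private-key.pem
```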
@@ -44,6 +44,8 @@ Click Open to allow the command to proceed.

 ## Install a specific version via script

+Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number and use it to replace the version number in the URL below.
+
 For example, to install v0.9.2 on Linux/amd64:

 ```sh
@@ -52,4 +54,8 @@ curl -Lso pinniped https://get.pinniped.dev/v0.9.2/pinniped-cli-linux-amd64 \
   && sudo mv pinniped /usr/local/bin/pinniped
 ```

-*Next, [install the Concierge]({{< ref "install-concierge.md" >}})!*
+*Replace v0.9.2 with your preferred version number.*
+
+## Next steps
+
+Next, [install the Supervisor]({{< ref "install-supervisor.md" >}}) and/or [install the Concierge]({{< ref "install-concierge.md" >}})!
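Either installation method above should leave a `pinniped` binary on your `$PATH`. As a quick sanity check (assuming the CLI's `version` subcommand, which these releases are expected to provide), you can run:

```sh
# Confirm the CLI is installed and report which version was installed.
pinniped version
```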
@@ -68,4 +68,8 @@ Pinniped uses [ytt](https://carvel.dev/ytt/) from [Carvel](https://carvel.dev/)

 `ytt --file . | kapp deploy --yes --app pinniped-concierge --diff-changes --file -`

-*Next, configure the Concierge for [JWT]({{< ref "configure-concierge-jwt.md" >}}) or [webhook]({{< ref "configure-concierge-webhook.md" >}}) authentication.*
+## Next steps
+
+Next, configure the Concierge for
+[JWT]({{< ref "configure-concierge-jwt.md" >}}) or [webhook]({{< ref "configure-concierge-webhook.md" >}}) authentication,
+or [configure the Concierge to use the Supervisor for authentication]({{< ref "configure-concierge-supervisor-jwt" >}}).
@@ -67,6 +67,6 @@ Pinniped uses [ytt](https://carvel.dev/ytt/) from [Carvel](https://carvel.dev/)

 `ytt --file . | kapp deploy --yes --app pinniped-supervisor --diff-changes --file -`

-## Next Steps
+## Next steps

-Now that you have installed the Supervisor, you will want to [configure the Supervisor]({{< ref "configure-supervisor" >}}).
+Next, [configure the Supervisor as an OIDC issuer]({{< ref "configure-supervisor" >}})!
site/content/docs/howto/login.md (new file, 138 lines)
@@ -0,0 +1,138 @@
+---
+title: Logging into your cluster using Pinniped
+description: Logging into your Kubernetes cluster using Pinniped for authentication.
+cascade:
+  layout: docs
+menu:
+  docs:
+    name: Log in to a Cluster
+    weight: 500
+    parent: howtos
+---
+
+## Prerequisites
+
+This how-to guide assumes that you have already configured the following Pinniped server-side components within your Kubernetes cluster(s):
+
+1. If you would like to use the Pinniped Supervisor for federated authentication across multiple Kubernetes clusters
+   then you have already:
+   1. [Installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress.
+   1. [Configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).
+   1. Configured an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor as the source of your user's identities.
+      Various examples of configuring these resources can be found in these guides.
+1. In each cluster for which you would like to use Pinniped for authentication, you have [installed the Concierge]({{< ref "install-concierge" >}}).
+1. In each cluster's Concierge, you have configured an authenticator. For example, if you are using the Pinniped Supervisor,
+   then you have configured each Concierge to [use the Supervisor for authentication]({{< ref "configure-concierge-supervisor-jwt" >}}).
+
+You should have also already [installed the `pinniped` command-line]({{< ref "install-cli" >}}) client, which is used to generate Pinniped-compatible kubeconfig files, and is also a `kubectl` plugin to enable the Pinniped-based login flow.
+
+## Overview
+
+1. A cluster admin uses Pinniped to generate a kubeconfig for each cluster, and shares the kubeconfig for each cluster with all users of that cluster.
+1. A cluster user uses `kubectl` with the generated kubeconfig given to them by the cluster admin. `kubectl` interactively prompts the user to log in using their own unique identity.
+
+## Key advantages of using the Pinniped Supervisor
+
+Although you can choose to use Pinniped without using the Pinniped Supervisor, there are several key advantages of choosing to use the Pinniped Supervisor to manage identity across fleets of Kubernetes clusters.
+
+1. A generated kubeconfig for a cluster will be specific for that cluster, however **it will not contain any specific user identity or credentials.
+   This kubeconfig file can be safely shared with all cluster users.** When the user runs `kubectl` commands using this kubeconfig, they will be interactively prompted to log in using their own unique identity from the OIDC or LDAP identity provider configured in the Supervisor.
+
+1. The Supervisor will provide a federated identity across all clusters that use the same `FederationDomain`.
+   The user will be **prompted by `kubectl` to interactively authenticate once per day**, and then will be able to use all clusters
+   from the same `FederationDomain` for the rest of the day without being asked to authenticate again.
+   This federated identity is secure because behind the scenes the Supervisor is issuing very short-lived credentials
+   that are uniquely scoped to each cluster.
+
+1. The Supervisor makes it easy to **bring your own OIDC or LDAP identity provider to act as the source of user identities**.
+   It also allows you to configure how identities and group memberships in the OIDC or LDAP identity provider map to identities
+   and group memberships in the Kubernetes clusters.
+
+## Generate a Pinniped-compatible kubeconfig file
+
+You will need to generate a Pinniped-compatible kubeconfig file for each cluster in which you have installed the Concierge.
+This requires admin-level access to each cluster, so this would typically be performed by the same user who installed the Concierge.
+
+For each cluster, use `pinniped get kubeconfig` to generate the new kubeconfig file for that cluster.
+
+It is typically sufficient to run this command with no arguments, aside from pointing the command at your admin kubeconfig.
+The command uses the [same rules](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)
+as `kubectl` to find your admin kubeconfig:
+
+> "By default, `kubectl` looks for a file named config in the `$HOME/.kube` directory. You can specify other kubeconfig files by setting the `KUBECONFIG` environment variable or by setting the `--kubeconfig` flag."
+
+For example, if your admin `kubeconfig` file were at the path `$HOME/admin-kubeconfig.yaml`, then you could use:
+
+```sh
+pinniped get kubeconfig \
+  --kubeconfig "$HOME/admin-kubeconfig.yaml" > pinniped-kubeconfig.yaml
+```
+
+The new Pinniped-compatible kubeconfig YAML will be output as stdout, and can be redirected to a file.
+
+Various default behaviors of `pinniped get kubeconfig` can be overridden using [its command-line options]({{< ref "cli" >}}).
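As a concrete illustration of overriding those defaults, the flags exercised by this commit's integration tests can also be passed manually. This is only a sketch; the authenticator name and session cache path below are placeholders:

```sh
# Sketch only: explicitly select the JWT authenticator and a non-default session cache location.
pinniped get kubeconfig \
  --kubeconfig "$HOME/admin-kubeconfig.yaml" \
  --concierge-authenticator-type jwt \
  --concierge-authenticator-name my-supervisor-authenticator \
  --oidc-session-cache "$HOME/.config/pinniped/alternate-sessions.yaml" \
  > pinniped-kubeconfig.yaml
```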
+
+## Use the generated kubeconfig with `kubectl` to access the cluster
+
+A cluster user will typically be given a Pinniped-compatible kubeconfig by their cluster admin. They can use this kubeconfig
+with `kubectl` just like any other kubeconfig, as long as they have also installed the `pinniped` CLI tool at the
+same absolute path where it is referenced inside the kubeconfig's YAML. The `pinniped` CLI will act as a `kubectl` plugin
+to manage the user's authentication to the cluster.
+
+For example, if the kubeconfig were saved at `$HOME/pinniped-kubeconfig.yaml`:
+
+```bash
+kubectl get namespaces \
+  --kubeconfig "$HOME/pinniped-kubeconfig.yaml"
+```
+
+This command, when configured to use the Pinniped-compatible kubeconfig, will invoke the `pinniped` CLI behind the scenes
+as an [ExecCredential plugin](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins)
+to authenticate the user to the cluster.
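To see how the generated kubeconfig wires `kubectl` to the `pinniped` CLI, you can print it with a plain `kubectl` command and look for the `exec` stanza under the user entry. This is generic `kubectl` behavior, not a Pinniped-specific feature:

```sh
# Show the generated kubeconfig, including the exec credential plugin configuration.
kubectl config view --kubeconfig "$HOME/pinniped-kubeconfig.yaml"
```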
+
+If the Pinniped Supervisor is used for authentication to that cluster, then the user's authentication experience
+will depend on which type of identity provider was configured.
+
+- For an OIDC identity provider, `kubectl` will open the user's web browser and direct it to the login page of
+  their OIDC Provider. This login flow is controlled by the provider, so it may include two-factor authentication or
+  other features provided by the OIDC Provider.
+
+  If the user's browser is not available, then `kubectl` will instead print a URL which can be visited in a
+  browser (potentially on a different computer) to complete the authentication.
+
+- For an LDAP identity provider, `kubectl` will interactively prompt the user for their username and password at the CLI.
+
+  Alternatively, the user can set the environment variables `PINNIPED_USERNAME` and `PINNIPED_PASSWORD` for the
+  `kubectl` process to avoid the interactive prompts.
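For example, those environment variables can be set just for a single `kubectl` invocation; the username, password, and kubeconfig path here are placeholders:

```sh
# Placeholders: substitute your own LDAP username, password, and kubeconfig path.
PINNIPED_USERNAME='my-username@example.com' \
PINNIPED_PASSWORD='my-ldap-password' \
kubectl get namespaces --kubeconfig "$HOME/pinniped-kubeconfig.yaml"
```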
+
+Once the user completes authentication, the `kubectl` command will automatically continue and complete the user's requested command.
+For the example above, `kubectl` would list the cluster's namespaces.
+
+## Authorization
+
+Pinniped provides authentication (usernames and group memberships) but not authorization. Kubernetes authorization is often
+provided by the [Kubernetes RBAC system](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) on each cluster.
+
+In the example above, if the user gets an access denied error, then they may need authorization to list namespaces.
+For example, an admin could grant the user "edit" access to all cluster resources via the user's username:
+
+```sh
+kubectl create clusterrolebinding my-user-can-edit \
+  --clusterrole edit \
+  --user my-username@example.com
+```
+
+Alternatively, an admin could create role bindings based on the group membership of the users
+in the upstream identity provider, for example:
+
+```sh
+kubectl create clusterrolebinding my-auditors \
+  --clusterrole view \
+  --group auditors
+```
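After creating either kind of binding, `kubectl auth can-i` is a convenient way to confirm the result. This is a generic `kubectl` subcommand, shown here only as an illustrative check:

```sh
# As the user, with the Pinniped-compatible kubeconfig: is listing namespaces now allowed?
kubectl auth can-i list namespaces --kubeconfig "$HOME/pinniped-kubeconfig.yaml"

# Or, as the cluster admin, check on the user's behalf via impersonation.
kubectl auth can-i list namespaces --as my-username@example.com
```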
+
+## Other notes
+
+- Temporary session credentials such as ID, access, and refresh tokens are stored in:
+  - `~/.config/pinniped/sessions.yaml` (macOS/Linux)
+  - `%USERPROFILE%/.config/pinniped/sessions.yaml` (Windows).
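If you want to discard those cached session credentials and force a fresh interactive login (for example, while testing a new identity provider configuration), deleting the session cache file should be enough. This is an inference from the cache location noted above rather than a documented procedure:

```sh
# macOS/Linux: remove the local session cache so the next kubectl command re-authenticates.
rm -f ~/.config/pinniped/sessions.yaml
```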
@@ -22,6 +22,7 @@ import (
     "os"
     "os/exec"
     "path/filepath"
+    "sort"
     "strings"
     "sync"
     "testing"
@@ -1481,6 +1482,52 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 previous, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
 require.NoError(t, err)

+updateServiceAnnotations := func(annotations map[string]string) {
+    require.NoError(t, retry.RetryOnConflict(retry.DefaultRetry, func() error {
+        service, err := adminClient.CoreV1().Services(env.ConciergeNamespace).Get(ctx, impersonationProxyLoadBalancerName(env), metav1.GetOptions{})
+        if err != nil {
+            return err
+        }
+        updated := service.DeepCopy()
+        if updated.Annotations == nil {
+            updated.Annotations = map[string]string{}
+        }
+        // Add/update each requested annotation, without overwriting others that are already there.
+        for k, v := range annotations {
+            updated.Annotations[k] = v
+        }
+        if equality.Semantic.DeepEqual(service, updated) {
+            return nil
+        }
+
+        t.Logf("updating Service with annotations: %v", annotations)
+        _, err = adminClient.CoreV1().Services(env.ConciergeNamespace).Update(ctx, updated, metav1.UpdateOptions{})
+        return err
+    }))
+}
+
+deleteServiceAnnotations := func(annotations map[string]string) {
+    require.NoError(t, retry.RetryOnConflict(retry.DefaultRetry, func() error {
+        service, err := adminClient.CoreV1().Services(env.ConciergeNamespace).Get(ctx, impersonationProxyLoadBalancerName(env), metav1.GetOptions{})
+        if err != nil {
+            return err
+        }
+        updated := service.DeepCopy()
+        if updated.Annotations != nil {
+            for k := range annotations {
+                delete(updated.Annotations, k)
+            }
+        }
+        if equality.Semantic.DeepEqual(service, updated) {
+            return nil
+        }
+
+        t.Logf("updating Service to remove annotations: %v", annotations)
+        _, err = adminClient.CoreV1().Services(env.ConciergeNamespace).Update(ctx, updated, metav1.UpdateOptions{})
+        return err
+    }))
+}
+
 applyCredentialIssuerAnnotations := func(annotations map[string]string) {
     require.NoError(t, retry.RetryOnConflict(retry.DefaultRetry, func() error {
         issuer, err := adminConciergeClient.ConfigV1alpha1().CredentialIssuers().Get(ctx, credentialIssuerName(env), metav1.GetOptions{})
@@ -1505,18 +1552,51 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
         if err != nil {
             return false, err
         }
-        t.Logf("found Service %s of type %s with annotations: %s", service.Name, service.Spec.Type, service.Annotations)
+        t.Logf("found Service %s of type %s with actual annotations %q; expected annotations %q",
+            service.Name, service.Spec.Type, service.Annotations, annotations)
         return equality.Semantic.DeepEqual(service.Annotations, annotations), nil
     }, 30*time.Second, 100*time.Millisecond)
 }

+expectedAnnotations := func(credentialIssuerSpecAnnotations map[string]string, otherAnnotations map[string]string) map[string]string {
+    credentialIssuerSpecAnnotationKeys := []string{}
+    expectedAnnotations := map[string]string{}
+    // Expect the annotations specified on the CredentialIssuer spec to be present.
+    for k, v := range credentialIssuerSpecAnnotations {
+        credentialIssuerSpecAnnotationKeys = append(credentialIssuerSpecAnnotationKeys, k)
+        expectedAnnotations[k] = v
+    }
+    // Aside from the annotations requested on the CredentialIssuer spec, also expect the other annotation to still be there too.
+    for k, v := range otherAnnotations {
+        expectedAnnotations[k] = v
+    }
+    // Also expect the internal bookkeeping annotation to be present. It tracks the requested keys from the spec.
+    // Our controller sorts these keys to make the order in the annotation's value predictable.
+    sort.Strings(credentialIssuerSpecAnnotationKeys)
+    credentialIssuerSpecAnnotationKeysJSON, err := json.Marshal(credentialIssuerSpecAnnotationKeys)
+    require.NoError(t, err)
+    expectedAnnotations["credentialissuer.pinniped.dev/annotation-keys"] = string(credentialIssuerSpecAnnotationKeysJSON)
+    return expectedAnnotations
+}
+
+otherActorAnnotations := map[string]string{
+    "pinniped.dev/test-other-actor-" + testlib.RandHex(t, 8): "test-other-actor-" + testlib.RandHex(t, 8),
+}
+
 // Whatever happens, set the annotations back to the original value and expect the Service to be updated.
 t.Cleanup(func() {
     t.Log("reverting CredentialIssuer back to previous configuration")
+    deleteServiceAnnotations(otherActorAnnotations)
     applyCredentialIssuerAnnotations(previous.Spec.ImpersonationProxy.Service.DeepCopy().Annotations)
-    waitForServiceAnnotations(previous.Spec.ImpersonationProxy.Service.DeepCopy().Annotations)
+    waitForServiceAnnotations(
+        expectedAnnotations(previous.Spec.ImpersonationProxy.Service.DeepCopy().Annotations, map[string]string{}),
+    )
 })

+// Having another actor, like a human or a non-Pinniped controller, add unrelated annotations to the Service
+// should not cause the Pinniped controllers to overwrite those annotations.
+updateServiceAnnotations(otherActorAnnotations)

 // Set a new annotation in the CredentialIssuer spec.impersonationProxy.service.annotations field.
 newAnnotationKey := "pinniped.dev/test-" + testlib.RandHex(t, 8)
 newAnnotationValue := "test-" + testlib.RandHex(t, 8)
@@ -1524,8 +1604,8 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 updatedAnnotations[newAnnotationKey] = newAnnotationValue
 applyCredentialIssuerAnnotations(updatedAnnotations)

-// Expect it to be applied to the Service.
+// Expect them to be applied to the Service.
-waitForServiceAnnotations(updatedAnnotations)
+waitForServiceAnnotations(expectedAnnotations(updatedAnnotations, otherActorAnnotations))
 })

 t.Run("running impersonation proxy with ClusterIP service", func(t *testing.T) {
@@ -249,15 +249,16 @@ func TestE2EFullIntegration(t *testing.T) {
 // It should now be in the "success" state.
 formpostExpectSuccessState(t, page)

-// Expect the CLI to output a list of namespaces in JSON format.
+// Expect the CLI to output a list of namespaces.
-t.Logf("waiting for kubectl to output namespace list JSON")
+t.Logf("waiting for kubectl to output namespace list")
 var kubectlOutput string
 select {
 case <-time.After(10 * time.Second):
     require.Fail(t, "timed out waiting for kubectl output")
 case kubectlOutput = <-kubectlOutputChan:
 }
-require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned, got %q", kubectlOutput)
+requireKubectlGetNamespaceOutput(t, env, kubectlOutput)

 t.Logf("first kubectl command took %s", time.Since(start).String())

 requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
@@ -364,10 +365,11 @@ func TestE2EFullIntegration(t *testing.T) {

 // Read all of the remaining output from the subprocess until EOF.
 t.Logf("waiting for kubectl to output namespace list")
-remainingOutput, _ := ioutil.ReadAll(ptyFile)
+// Read all of the output from the subprocess until EOF.
 // Ignore any errors returned because there is always an error on linux.
-require.Greaterf(t, len(remainingOutput), 0, "expected to get some more output from the kubectl subcommand, but did not")
+kubectlOutputBytes, _ := ioutil.ReadAll(ptyFile)
-require.Greaterf(t, len(strings.Split(string(remainingOutput), "\n")), 2, "expected some namespaces to be returned, got %q", string(remainingOutput))
+requireKubectlGetNamespaceOutput(t, env, string(kubectlOutputBytes))

 t.Logf("first kubectl command took %s", time.Since(start).String())

 requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
@@ -380,8 +382,9 @@ func TestE2EFullIntegration(t *testing.T) {
 )
 })

-// Add an LDAP upstream IDP and try using it to authenticate during kubectl commands.
+// Add an LDAP upstream IDP and try using it to authenticate during kubectl commands
-t.Run("with Supervisor LDAP upstream IDP", func(t *testing.T) {
+// by interacting with the CLI's username and password prompts.
+t.Run("with Supervisor LDAP upstream IDP using username and password prompts", func(t *testing.T) {
     if len(env.ToolsNamespace) == 0 && !env.HasCapability(testlib.CanReachInternetLDAPPorts) {
         t.Skip("LDAP integration test requires connectivity to an LDAP server")
     }
@@ -389,12 +392,130 @@ func TestE2EFullIntegration(t *testing.T) {
 expectedUsername := env.SupervisorUpstreamLDAP.TestUserMailAttributeValue
 expectedGroups := env.SupervisorUpstreamLDAP.TestUserDirectGroupsDNs

+setupClusterForEndToEndLDAPTest(t, expectedUsername, env)
+
+// Use a specific session cache for this test.
+sessionCachePath := tempDir + "/ldap-test-sessions.yaml"
+
+kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{
+    "get", "kubeconfig",
+    "--concierge-api-group-suffix", env.APIGroupSuffix,
+    "--concierge-authenticator-type", "jwt",
+    "--concierge-authenticator-name", authenticator.Name,
+    "--oidc-session-cache", sessionCachePath,
+})
+
+// Run "kubectl get namespaces" which should trigger an LDAP-style login CLI prompt via the plugin.
+start := time.Now()
+kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
+kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
+ptyFile, err := pty.Start(kubectlCmd)
+require.NoError(t, err)
+
+// Wait for the subprocess to print the username prompt, then type the user's username.
+readFromFileUntilStringIsSeen(t, ptyFile, "Username: ")
+_, err = ptyFile.WriteString(expectedUsername + "\n")
+require.NoError(t, err)
+
+// Wait for the subprocess to print the password prompt, then type the user's password.
+readFromFileUntilStringIsSeen(t, ptyFile, "Password: ")
+_, err = ptyFile.WriteString(env.SupervisorUpstreamLDAP.TestUserPassword + "\n")
+require.NoError(t, err)
+
+// Read all of the output from the subprocess until EOF.
+// Ignore any errors returned because there is always an error on linux.
+kubectlOutputBytes, _ := ioutil.ReadAll(ptyFile)
+requireKubectlGetNamespaceOutput(t, env, string(kubectlOutputBytes))
+
+t.Logf("first kubectl command took %s", time.Since(start).String())
+
+requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
+    downstream,
+    kubeconfigPath,
+    sessionCachePath,
+    pinnipedExe,
+    expectedUsername,
+    expectedGroups,
+)
+})
+
+// Add an LDAP upstream IDP and try using it to authenticate during kubectl commands
+// by passing username and password via environment variables, thus avoiding the CLI's username and password prompts.
+t.Run("with Supervisor LDAP upstream IDP using PINNIPED_USERNAME and PINNIPED_PASSWORD env vars", func(t *testing.T) {
+    if len(env.ToolsNamespace) == 0 && !env.HasCapability(testlib.CanReachInternetLDAPPorts) {
+        t.Skip("LDAP integration test requires connectivity to an LDAP server")
+    }
+
+    expectedUsername := env.SupervisorUpstreamLDAP.TestUserMailAttributeValue
+    expectedGroups := env.SupervisorUpstreamLDAP.TestUserDirectGroupsDNs
+
+    setupClusterForEndToEndLDAPTest(t, expectedUsername, env)
+
+    // Use a specific session cache for this test.
+    sessionCachePath := tempDir + "/ldap-test-with-env-vars-sessions.yaml"
+
+    kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{
+        "get", "kubeconfig",
+        "--concierge-api-group-suffix", env.APIGroupSuffix,
+        "--concierge-authenticator-type", "jwt",
+        "--concierge-authenticator-name", authenticator.Name,
+        "--oidc-session-cache", sessionCachePath,
+    })
+
+    // Set up the username and password env vars to avoid the interactive prompts.
+    const usernameEnvVar = "PINNIPED_USERNAME"
+    originalUsername, hadOriginalUsername := os.LookupEnv(usernameEnvVar)
+    t.Cleanup(func() {
+        if hadOriginalUsername {
+            require.NoError(t, os.Setenv(usernameEnvVar, originalUsername))
+        }
+    })
+    require.NoError(t, os.Setenv(usernameEnvVar, expectedUsername))
+    const passwordEnvVar = "PINNIPED_PASSWORD" //nolint:gosec // this is not a credential
+    originalPassword, hadOriginalPassword := os.LookupEnv(passwordEnvVar)
+    t.Cleanup(func() {
+        if hadOriginalPassword {
+            require.NoError(t, os.Setenv(passwordEnvVar, originalPassword))
+        }
+    })
+    require.NoError(t, os.Setenv(passwordEnvVar, env.SupervisorUpstreamLDAP.TestUserPassword))
+
+    // Run "kubectl get namespaces" which should run an LDAP-style login without interactive prompts for username and password.
+    start := time.Now()
+    kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
+    kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
+    ptyFile, err := pty.Start(kubectlCmd)
+    require.NoError(t, err)
+
+    // Read all of the output from the subprocess until EOF.
+    // Ignore any errors returned because there is always an error on linux.
+    kubectlOutputBytes, _ := ioutil.ReadAll(ptyFile)
+    requireKubectlGetNamespaceOutput(t, env, string(kubectlOutputBytes))
+
+    t.Logf("first kubectl command took %s", time.Since(start).String())
+
+    // The next kubectl command should not require auth, so we should be able to run it without these env vars.
+    require.NoError(t, os.Unsetenv(usernameEnvVar))
+    require.NoError(t, os.Unsetenv(passwordEnvVar))
+
+    requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
+        downstream,
+        kubeconfigPath,
+        sessionCachePath,
+        pinnipedExe,
+        expectedUsername,
+        expectedGroups,
+    )
+})
+}
+
+func setupClusterForEndToEndLDAPTest(t *testing.T, username string, env *testlib.TestEnv) {
 // Create a ClusterRoleBinding to give our test user from the upstream read-only access to the cluster.
 testlib.CreateTestClusterRoleBinding(t,
-    rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: expectedUsername},
+    rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: username},
     rbacv1.RoleRef{Kind: "ClusterRole", APIGroup: rbacv1.GroupName, Name: "view"},
 )
-testlib.WaitForUserToHaveAccess(t, expectedUsername, []string{}, &authorizationv1.ResourceAttributes{
+testlib.WaitForUserToHaveAccess(t, username, []string{}, &authorizationv1.ResourceAttributes{
     Verb: "get",
     Group: "",
     Version: "v1",
@@ -434,51 +555,6 @@ func TestE2EFullIntegration(t *testing.T) {
     },
 },
 }, idpv1alpha1.LDAPPhaseReady)
-
-// Use a specific session cache for this test.
-sessionCachePath := tempDir + "/ldap-test-sessions.yaml"
-
-kubeconfigPath := runPinnipedGetKubeconfig(t, env, pinnipedExe, tempDir, []string{
-    "get", "kubeconfig",
-    "--concierge-api-group-suffix", env.APIGroupSuffix,
-    "--concierge-authenticator-type", "jwt",
-    "--concierge-authenticator-name", authenticator.Name,
-    "--oidc-session-cache", sessionCachePath,
-})
-
-// Run "kubectl get namespaces" which should trigger an LDAP-style login CLI prompt via the plugin.
-start := time.Now()
-kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath)
-kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
-ptyFile, err := pty.Start(kubectlCmd)
-require.NoError(t, err)
-
-// Wait for the subprocess to print the username prompt, then type the user's username.
-readFromFileUntilStringIsSeen(t, ptyFile, "Username: ")
-_, err = ptyFile.WriteString(expectedUsername + "\n")
-require.NoError(t, err)
-
-// Wait for the subprocess to print the password prompt, then type the user's password.
-readFromFileUntilStringIsSeen(t, ptyFile, "Password: ")
-_, err = ptyFile.WriteString(env.SupervisorUpstreamLDAP.TestUserPassword + "\n")
-require.NoError(t, err)
-
-// Read all of the remaining output from the subprocess until EOF.
-remainingOutput, _ := ioutil.ReadAll(ptyFile)
-// Ignore any errors returned because there is always an error on linux.
-require.Greaterf(t, len(remainingOutput), 0, "expected to get some more output from the kubectl subcommand, but did not")
-require.Greaterf(t, len(strings.Split(string(remainingOutput), "\n")), 2, "expected some namespaces to be returned, got %q", string(remainingOutput))
-t.Logf("first kubectl command took %s", time.Since(start).String())
-
-requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env,
-    downstream,
-    kubeconfigPath,
-    sessionCachePath,
-    pinnipedExe,
-    expectedUsername,
-    expectedGroups,
-)
-})
 }

 func readFromFileUntilStringIsSeen(t *testing.T, f *os.File, until string) string {
@@ -510,6 +586,19 @@ func readAvailableOutput(t *testing.T, r io.Reader) (string, bool) {
 return string(buf[:n]), false
 }

+func requireKubectlGetNamespaceOutput(t *testing.T, env *testlib.TestEnv, kubectlOutput string) {
+    t.Log("kubectl command output:\n", kubectlOutput)
+    require.Greaterf(t, len(kubectlOutput), 0, "expected to get some more output from the kubectl subcommand, but did not")
+
+    // Should look generally like a list of namespaces, with one namespace listed per line in a table format.
+    require.Greaterf(t, len(strings.Split(kubectlOutput, "\n")), 2, "expected some namespaces to be returned, got %q", kubectlOutput)
+    require.Contains(t, kubectlOutput, fmt.Sprintf("\n%s ", env.ConciergeNamespace))
+    require.Contains(t, kubectlOutput, fmt.Sprintf("\n%s ", env.SupervisorNamespace))
+    if len(env.ToolsNamespace) == 0 {
+        require.Contains(t, kubectlOutput, fmt.Sprintf("\n%s ", env.ToolsNamespace))
+    }
+}

 func requireUserCanUseKubectlWithoutAuthenticatingAgain(
     ctx context.Context,
     t *testing.T,