Merge branch 'main' into proposal_process

Ryan Richard 2022-02-15 10:15:46 -08:00
commit 461c0ae56c
22 changed files with 1287 additions and 443 deletions

View File

@@ -3,6 +3,7 @@
 version: 2
 updates:
   - package-ecosystem: "gomod"
+    open-pull-requests-limit: 100
     directory: "/"
     schedule:
       interval: "daily"

View File

@@ -3,7 +3,7 @@
 # Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
-FROM golang:1.17.6 as build-env
+FROM golang:1.17.7 as build-env
 WORKDIR /work
 COPY . .

View File

@@ -33,16 +33,16 @@ The following table includes the current roadmap for Pinniped. If you have any q
-Last Updated: Sept 2021
+Last Updated: Jan 2022
 |Theme|Description|Timeline|
 |--|--|--|
-|Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works for OIDC |Jan 2022|
-|Improving Security Posture|Supervisor token refresh fails when the upstream user is in an invalid state for LDAP/AD |Jan 2022|
-|Improving Security Posture|Set stricter default TLS versions and Ciphers |Jan 2022|
-|Improving Security Posture|Support FIPS compliant Boring crypto libraries |Feb 2022|
+|Improving Security Posture|Support for refreshing LDAP/AD Group information |Feb 2022|
+|Improving Documentation|Documentation updates for HowTo guides and Workspace ONE IDP |Feb/March 2022|
+|Improving Security Posture|Support FIPS compliant Boring crypto libraries |Feb/March 2022|
 |Multiple IDP support|Support multiple IDPs configured on a single Supervisor|March/April 2022|
 |Improving Security Posture|TLS hardening |March/April 2022|
 |Improving Security Posture|Support Audit logging of security events related to Authentication |April/May 2022|
+|Improving Usability|Support for integrating with UI/Dashboards |June/July 2022|
 |Improving Security Posture|mTLS for Supervisor sessions |Exploring/Ongoing|
 |Improving Security Posture|Key management/rotation for Pinniped components with minimal downtime |Exploring/Ongoing|
 |Improving Security Posture|Support for Session Logout |Exploring/Ongoing|

56
go.mod
View File

@@ -54,8 +54,8 @@ require (
 github.com/gorilla/websocket v1.4.2
 github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531
 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
-github.com/ory/fosite v0.42.0
-github.com/ory/x v0.0.337
+github.com/ory/fosite v0.42.1
+github.com/ory/x v0.0.344
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
 github.com/pkg/errors v0.9.1
 github.com/sclevine/agouti v3.0.0+incompatible
@@ -63,30 +63,30 @@ require (
 github.com/spf13/cobra v1.3.0
 github.com/spf13/pflag v1.0.5
 github.com/stretchr/testify v1.7.0
-github.com/tdewolff/minify/v2 v2.9.29
+github.com/tdewolff/minify/v2 v2.10.0
 go.uber.org/atomic v1.9.0
-golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
-golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d
+golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
 golang.org/x/text v0.3.7
 gopkg.in/square/go-jose.v2 v2.6.0
-k8s.io/api v0.23.2
-k8s.io/apiextensions-apiserver v0.23.2
-k8s.io/apimachinery v0.23.2
-k8s.io/apiserver v0.23.2
-k8s.io/client-go v0.23.2
-k8s.io/component-base v0.23.2
+k8s.io/api v0.23.3
+k8s.io/apiextensions-apiserver v0.23.3
+k8s.io/apimachinery v0.23.3
+k8s.io/apiserver v0.23.3
+k8s.io/client-go v0.23.3
+k8s.io/component-base v0.23.3
 k8s.io/gengo v0.0.0-20211129171323-c02415ce4185
 k8s.io/klog/v2 v2.40.1
-k8s.io/kube-aggregator v0.23.2
-k8s.io/utils v0.0.0-20211208161948-7d6a63dca704
+k8s.io/kube-aggregator v0.23.3
+k8s.io/utils v0.0.0-20220127004650-9b3446523e65
 sigs.k8s.io/yaml v1.3.0
 )
 require (
-cloud.google.com/go/compute v1.1.0 // indirect
+cloud.google.com/go/compute v1.2.0 // indirect
 github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 github.com/Azure/go-autorest/autorest v0.11.24 // indirect
 github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
@@ -114,9 +114,9 @@ require (
 github.com/go-asn1-ber/asn1-ber v1.5.3 // indirect
 github.com/go-openapi/jsonpointer v0.19.5 // indirect
 github.com/go-openapi/jsonreference v0.19.6 // indirect
-github.com/go-openapi/swag v0.19.15 // indirect
+github.com/go-openapi/swag v0.21.1 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
-github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
+github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
 github.com/golang/glog v1.0.0 // indirect
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 github.com/golang/protobuf v1.5.2 // indirect
@@ -145,19 +145,19 @@ require (
 github.com/pelletier/go-toml v1.9.4 // indirect
 github.com/pmezard/go-difflib v1.0.0 // indirect
 github.com/pquerna/cachecontrol v0.1.0 // indirect
-github.com/prometheus/client_golang v1.12.0 // indirect
+github.com/prometheus/client_golang v1.12.1 // indirect
 github.com/prometheus/client_model v0.2.0 // indirect
 github.com/prometheus/common v0.32.1 // indirect
 github.com/prometheus/procfs v0.7.3 // indirect
 github.com/russross/blackfriday/v2 v2.1.0 // indirect
-github.com/spf13/afero v1.8.0 // indirect
+github.com/spf13/afero v1.8.1 // indirect
 github.com/spf13/cast v1.4.1 // indirect
 github.com/spf13/jwalterweatherman v1.1.0 // indirect
 github.com/subosito/gotenv v1.2.0 // indirect
 github.com/tdewolff/parse/v2 v2.5.27 // indirect
-go.etcd.io/etcd/api/v3 v3.5.1 // indirect
-go.etcd.io/etcd/client/pkg/v3 v3.5.1 // indirect
-go.etcd.io/etcd/client/v3 v3.5.1 // indirect
+go.etcd.io/etcd/api/v3 v3.5.2 // indirect
+go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
+go.etcd.io/etcd/client/v3 v3.5.2 // indirect
 go.opentelemetry.io/contrib v0.20.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
@@ -170,22 +170,22 @@ require (
 go.opentelemetry.io/otel/trace v1.2.0 // indirect
 go.opentelemetry.io/proto/otlp v0.10.0 // indirect
 go.uber.org/multierr v1.7.0 // indirect
-go.uber.org/zap v1.20.0 // indirect
+go.uber.org/zap v1.21.0 // indirect
 golang.org/x/mod v0.5.1 // indirect
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
 golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
-golang.org/x/tools v0.1.8 // indirect
+golang.org/x/tools v0.1.9 // indirect
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 google.golang.org/appengine v1.6.7 // indirect
-google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 // indirect
-google.golang.org/grpc v1.43.0 // indirect
+google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068 // indirect
+google.golang.org/grpc v1.44.0 // indirect
 google.golang.org/protobuf v1.27.1 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
-gopkg.in/ini.v1 v1.66.3 // indirect
+gopkg.in/ini.v1 v1.66.4 // indirect
 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 gopkg.in/yaml.v2 v2.4.0 // indirect
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-k8s.io/kube-openapi v0.0.0-20220114203427-a0453230fd26 // indirect
+k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 // indirect
 sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
 sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect

119
go.sum
View File

@@ -41,8 +41,8 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
 cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
-cloud.google.com/go/compute v1.1.0 h1:pyPhehLfZ6pVzRgJmXGYvCY4K7WSWRhVw0AwhgVvS84=
-cloud.google.com/go/compute v1.1.0/go.mod h1:2NIffxgWfORSI7EOYMFatGTfjMLnqrOKBEyYb6NoRgA=
+cloud.google.com/go/compute v1.2.0 h1:EKki8sSdvDU0OO9mAXGwPXOTOgPz2l08R0/IutDH11I=
+cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@@ -262,7 +262,7 @@ github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1
 github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
 github.com/cockroachdb/cockroach-go v0.0.0-20200312223839-f565e4789405/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
 github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
-github.com/cockroachdb/cockroach-go/v2 v2.2.1/go.mod h1:u3MiKYGupPPjkn3ozknpMUpxPaNLTFWAya419/zv6eI=
+github.com/cockroachdb/cockroach-go/v2 v2.2.7/go.mod h1:q4ZRgO6CQpwNyEvEwSxwNrOSVchsmzrBnAv3HuZ3Abc=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
@@ -593,8 +593,9 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
 github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
 github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
@@ -900,8 +901,9 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
+github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/gddo v0.0.0-20180828051604-96d2a289f41e/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
 github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
@@ -1507,8 +1509,8 @@ github.com/ory/dockertest/v3 v3.6.3/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/
 github.com/ory/dockertest/v3 v3.6.5/go.mod h1:iYKQSRlYrt/2s5fJWYdB98kCQG6g/LjBMvzEYii63vg=
 github.com/ory/dockertest/v3 v3.8.1/go.mod h1:wSRQ3wmkz+uSARYMk7kVJFDBGm8x5gSxIhI7NDc+BAQ=
 github.com/ory/fosite v0.29.0/go.mod h1:0atSZmXO7CAcs6NPMI/Qtot8tmZYj04Nddoold4S2h0=
-github.com/ory/fosite v0.42.0 h1:ICAa2d7tR+kS/taYIyMzGKufGViC1bb/QAdOgLxFqlg=
-github.com/ory/fosite v0.42.0/go.mod h1:qggrqm3ZWQF9i2f/d3RLH5mHHPtv44hsiltkVKLsCYo=
+github.com/ory/fosite v0.42.1 h1:h2j+namO6bZIaD4WAFFHx+Cwfc8EDATE5JWQR9NMo6c=
+github.com/ory/fosite v0.42.1/go.mod h1:qggrqm3ZWQF9i2f/d3RLH5mHHPtv44hsiltkVKLsCYo=
 github.com/ory/go-acc v0.0.0-20181118080137-ddc355013f90/go.mod h1:sxnvPCxChFuSmTJGj8FdMupeq1BezCiEpDjTUXQ4hf4=
 github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw=
 github.com/ory/go-acc v0.2.7 h1:VjSz+Xj3LoJRBmAXt9cxuL1lanO/Bvf9ajny5NvwbtM=
@@ -1539,8 +1541,8 @@ github.com/ory/x v0.0.214/go.mod h1:aRl57gzyD4GF0HQCekovXhv0xTZgAgiht3o8eVhsm9Q=
 github.com/ory/x v0.0.250/go.mod h1:jUJaVptu+geeqlb9SyQCogTKj5ztSDIF6APkhbKtwLc=
 github.com/ory/x v0.0.272/go.mod h1:1TTPgJGQutrhI2OnwdrTIHE9ITSf4MpzXFzA/ncTGRc=
 github.com/ory/x v0.0.288/go.mod h1:APpShLyJcVzKw1kTgrHI+j/L9YM+8BRjHlcYObc7C1U=
-github.com/ory/x v0.0.337 h1:YDlmEA43NMJJxezVVAQoqXyyYl/ZZSh9+mA8JSbOECw=
-github.com/ory/x v0.0.337/go.mod h1:VxITg5o/DfPfom76ni5FfFzP66Z+kLvJ/OATJxuT42c=
+github.com/ory/x v0.0.344 h1:ba74mT+d3UNNCdqSq5cTEHsjQc5UHoTjE9e873nNpRM=
+github.com/ory/x v0.0.344/go.mod h1:Ddbu3ecSaNDgxdntdD1gDu3ALG5fWR5AwUB1ILeBUNE=
 github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
 github.com/parnurzeal/gorequest v0.2.15/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -1597,8 +1599,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
-github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -1734,8 +1736,8 @@ github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY52
 github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
 github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
-github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
+github.com/spf13/afero v1.8.1 h1:izYHOT71f9iZ7iq37Uqjael60/vYC6vMtzedudZ0zEk=
+github.com/spf13/afero v1.8.1/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
 github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
@@ -1795,8 +1797,8 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tdewolff/minify/v2 v2.9.29 h1:QMVJaCJzWL0mXS33cX792YD074xz4lOhkyBS8hAzYAY=
-github.com/tdewolff/minify/v2 v2.9.29/go.mod h1:6XAjcHM46pFcRE0eztigFPm0Q+Cxsw8YhEWT+rDkcZM=
+github.com/tdewolff/minify/v2 v2.10.0 h1:ovVAHUcjfGrBDf1EIvsodRUVJiZK/28mMose08B7k14=
+github.com/tdewolff/minify/v2 v2.10.0/go.mod h1:6XAjcHM46pFcRE0eztigFPm0Q+Cxsw8YhEWT+rDkcZM=
 github.com/tdewolff/parse/v2 v2.5.27 h1:PL3LzzXaOpmdrknnOlIeO2muIBHAwiKp6TxN1RbU5gI=
 github.com/tdewolff/parse/v2 v2.5.27/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho=
 github.com/tdewolff/test v1.0.6 h1:76mzYJQ83Op284kMT+63iCNCI7NEERsIN8dLM+RiKr4=
@@ -1897,17 +1899,19 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
 go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.1 h1:v28cktvBq+7vGyJXF8G+rWJmj+1XUmMtqcLnH8hDocM=
 go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/api/v3 v3.5.2 h1:tXok5yLlKyuQ/SXSjtqHc4uzNaMqZi2XsoSPr/LlJXI=
+go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
 go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.1 h1:XIQcHCFSG53bJETYeRJtIxdLv2EWRGxcfzR8lSnTH4E=
 go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE=
+go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
 go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
 go.etcd.io/etcd/client/v2 v2.305.1 h1:vtxYCKWA9x31w0WJj7DdqsHFNjhkigdAnziDtkZb/l4=
 go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
 go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/client/v3 v3.5.1 h1:oImGuV5LGKjCqXdjkMHCyWa5OO1gYKCnC/1sgdfj1Uk=
-go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
+go.etcd.io/etcd/client/v3 v3.5.2 h1:WdnejrUtQC4nCxK0/dLTMqKOB+U5TP/2Ya0BJL+1otA=
+go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
 go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk=
 go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
 go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw=
@@ -1985,8 +1989,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.20.0 h1:N4oPlghZwYG55MlU6LXk/Zp00FVNE9X9wrYO8CEs4lc=
-go.uber.org/zap v1.20.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
+go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
 golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180830192347-182538f80094/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -2038,8 +2042,8 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI=
-golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2 h1:XdAboW3BNMv9ocSCOk/u1MFioZGzCNkiJZ19v9Oe3Ig=
+golang.org/x/crypto v0.0.0-20220210151621-f4118a5b28e2/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2169,8 +2173,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d h1:1n1fc535VhN8SYtD4cDUyNlfpAF2ROMM9+11equK3hs=
-golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2349,8 +2353,9 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -2488,8 +2493,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
 golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
-golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
 golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2543,7 +2548,7 @@ google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUb
 google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
 google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
 google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
-google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg=
+google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -2632,10 +2637,10 @@ google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ6
 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q=
-google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068 h1:pwzFiZfBTH/GjBWz1BcDwMBaHBo8mZvpLa7eBKJpFAk=
+google.golang.org/genproto v0.0.0-20220208230804-65c12eb4c068/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -2672,8 +2677,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
-google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/grpc/examples v0.0.0-20210304020650-930c79186c99/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -2721,8 +2726,8 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.66.3 h1:jRskFVxYaMGAMUbN0UZ7niA9gzL9B49DOqE78vg0k3w=
-gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
+gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@@ -2774,31 +2779,31 @@ howett.net/plist v0.0.0-20201203080718-1454fab16a06/go.mod h1:vMygbs4qMhSZSc4lCU
 k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
 k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/api v0.23.2 h1:62cpzreV3dCuj0hqPi8r4dyWh48ogMcyh+ga9jEGij4=
-k8s.io/api v0.23.2/go.mod h1:sYuDb3flCtRPI8ghn6qFrcK5ZBu2mhbElxRE95qpwlI=
-k8s.io/apiextensions-apiserver v0.23.2 h1:N6CIVAhmF0ahgFKUMDdV/AUyckhUb4nIyVPohPtdUPk=
-k8s.io/apiextensions-apiserver v0.23.2/go.mod h1:9cs7avT6+GfzbB0pambTvH11wcaR85QQg4ovl9s15UU=
+k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38=
+k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
+k8s.io/apiextensions-apiserver v0.23.3 h1:JvPJA7hSEAqMRteveq4aj9semilAZYcJv+9HHFWfUdM=
+k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEUDF+hrpIQJL38=
 k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apimachinery v0.23.2 h1:dBmjCOeYBdg2ibcQxMuUq+OopZ9fjfLIR5taP/XKeTs=
-k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8=
+k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk=
+k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
 k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
 k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
 k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/apiserver v0.23.2 h1:vGFCojjwSLyunapA7FWuzyekml/s0nAsoh4iBpzWzOs=
-k8s.io/apiserver v0.23.2/go.mod h1:Kdt8gafkPev9Gfh+H6lCPbmRu42f7BfhOfHKKa3dtyU=
+k8s.io/apiserver v0.23.3 h1:gWY1DmA0AdAGR/H+Q/1FtyGkFq8xqSaZOw7oLopmO8k=
+k8s.io/apiserver v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4=
 k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
 k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
 k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/client-go v0.23.2 h1:BNbOcxa99jxHH8mM1cPKGIrrKRnCSAfAtyonYGsbFtE=
-k8s.io/client-go v0.23.2/go.mod h1:k3YbsWg6GWdHF1THHTQP88X9RhB1DWPo3Dq7KfU/D1c=
-k8s.io/code-generator v0.23.2/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
+k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs=
+k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE=
+k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
 k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
 k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
 k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/component-base v0.23.2 h1:dAYmUhWIBWO762etTjBEEKtYYHi5CoQInSLtK6LM1Zs=
-k8s.io/component-base v0.23.2/go.mod h1:wS9Z03MO3oJ0RU8bB/dbXTiluGju+SC/F5i660gxB8c=
+k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY=
+k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg=
 k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
 k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
 k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
@@ -2813,18 +2818,18 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4=
 k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-aggregator v0.23.2 h1:6CoZZqNdFc9benrgSJJ0GQGgFtKjI0y3UwlBbioXtc8=
-k8s.io/kube-aggregator v0.23.2/go.mod h1:hoxP4rZREnjCJmrb0pHFPqm7+pkxoFjh8IpXL7OBWRA=
+k8s.io/kube-aggregator v0.23.3 h1:9IP+D+YzIbGor/TArN3pYf9Thj19wYhzLRGRrFaKFSs=
+k8s.io/kube-aggregator v0.23.3/go.mod h1:pt5QJ3QaIdhZzNlUvN5wndbM0LNT4BvhszGkzy2QdFo=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
-k8s.io/kube-openapi v0.0.0-20220114203427-a0453230fd26 h1:2G24ndYyfk0l23ZrGutxb0s9TRe4m1ZjFlcu4cEU1zA=
-k8s.io/kube-openapi v0.0.0-20220114203427-a0453230fd26/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
+k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf h1:M9XBsiMslw2lb2ZzglC0TOkBPK5NQi0/noUrdnoFwUg=
+k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
 k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 h1:ZKMMxTvduyf5WUtREOqg5LiXaN1KO/+0oOQPRFrClpo=
-k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220127004650-9b3446523e65 h1:ONWS0Wgdg5wRiQIAui7L/023aC9+IxrIrydY7l8llsE=
+k8s.io/utils v0.0.0-20220127004650-9b3446523e65/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=

View File

@@ -1,4 +1,4 @@
-// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// Copyright 2021-2022 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 window.onload = () => {
@@ -44,11 +44,22 @@ window.onload = () => {
         responseParams['redirect_uri'].value,
         {
             method: 'POST',
-            mode: 'no-cors',
+            mode: 'no-cors', // in the future, we could change this to "cors" (see comment below)
             headers: {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'},
             body: responseParams['encoded_params'].value,
         })
-        .then(() => clearTimeout(timeout))
-        .then(() => transitionToState('success'))
+        .then(response => {
+            clearTimeout(timeout);
+            // Requests made using "no-cors" mode will hide the real response.status by making it 0
+            // and the real response.ok by making it false.
+            // If the real response was success, then we would like to show the success state.
+            // If the real response was an error, then we wish we could show the manual
+            // state, but we have no way to know that, as long as we are making "no-cors" requests.
+            // For now, show the success status for all responses.
+            // In the future, we could make this request in "cors" mode once old versions of our CLI
+            // which did not handle CORS are upgraded out by our users. That would allow us to use
+            // a conditional statement based on response.ok here to decide which state to transition into.
+            transitionToState('success');
+        })
         .catch(() => transitionToState('manual'));
 };
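The comment in the new code describes a possible follow-up rather than anything done in this commit: once the request can safely be made in "cors" mode, the real response status becomes visible to the script, so the success/manual decision could branch on response.ok. A minimal sketch of that hypothetical future version (same fetch call, different mode and handler; the CLI's callback listener would also have to answer CORS requests):

fetch(responseParams['redirect_uri'].value, {
    method: 'POST',
    mode: 'cors', // assumption: only valid once the CLI callback endpoint handles CORS
    headers: {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'},
    body: responseParams['encoded_params'].value,
})
    .then(response => {
        clearTimeout(timeout);
        // In "cors" mode response.ok reflects the real HTTP status, so a failed POST
        // could fall back to the manual copy/paste state instead of claiming success.
        transitionToState(response.ok ? 'success' : 'manual');
    })
    .catch(() => transitionToState('manual'));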

View File

@@ -30,7 +30,7 @@ var (
 <head>
 <meta charset="UTF-8">
 <style>body{font-family:metropolis-light,Helvetica,sans-serif}h1{font-size:20px}.state{position:absolute;top:100px;left:50%;width:400px;height:80px;margin-top:-40px;margin-left:-200px;font-size:14px;line-height:24px}button{margin:-10px;padding:10px;text-align:left;width:100%;display:inline;border:none;background:0 0;cursor:pointer;transition:all .1s}button:hover{background-color:#eee;transform:scale(1.01)}button:active{background-color:#ddd;transform:scale(.99)}code{display:block;word-wrap:break-word;word-break:break-all;font-size:12px;font-family:monospace;color:#333}.copy-icon{float:left;width:36px;height:36px;margin-top:-3px;margin-right:10px;background-size:contain;background-repeat:no-repeat;background-image:url("data:image/svg+xml,%3Csvg width=%2236%22 height=%2236%22 viewBox=%220 0 36 36%22 xmlns=%22http://www.w3.org/2000/svg%22 xmlns:xlink=%22http://www.w3.org/1999/xlink%22%3E%3Ctitle%3Ecopy-to-clipboard-line%3C/title%3E%3Cpath d=%22M22.6 4H21.55a3.89 3.89.0 00-7.31.0H13.4A2.41 2.41.0 0011 6.4V10H25V6.4A2.41 2.41.0 0022.6 4zM23 8H13V6.25A.25.25.0 0113.25 6h2.69l.12-1.11A1.24 1.24.0 0116.61 4a2 2 0 013.15 1.18l.09.84h2.9a.25.25.0 01.25.25z%22 class=%22clr-i-outline clr-i-outline-path-1%22/%3E%3Cpath d=%22M33.25 18.06H21.33l2.84-2.83a1 1 0 10-1.42-1.42L17.5 19.06l5.25 5.25a1 1 0 00.71.29 1 1 0 00.71-1.7l-2.84-2.84H33.25a1 1 0 000-2z%22 class=%22clr-i-outline clr-i-outline-path-2%22/%3E%3Cpath d=%22M29 16h2V6.68A1.66 1.66.0 0029.35 5H27.08V7H29z%22 class=%22clr-i-outline clr-i-outline-path-3%22/%3E%3Cpath d=%22M29 31H7V7H9V5H6.64A1.66 1.66.0 005 6.67V31.32A1.66 1.66.0 006.65 33H29.36A1.66 1.66.0 0031 31.33V22.06H29z%22 class=%22clr-i-outline clr-i-outline-path-4%22/%3E%3Crect x=%220%22 y=%220%22 width=%2236%22 height=%2236%22 fill-opacity=%220%22/%3E%3C/svg%3E")}@keyframes loader{to{transform:rotate(360deg)}}#loading{content:'';box-sizing:border-box;width:80px;height:80px;margin-top:-40px;margin-left:-40px;border-radius:50%;border:2px solid #fff;border-top-color:#1b3951;animation:loader .6s linear infinite}</style>
-<script>window.onload=()=>{const e=t=>{Array.from(document.querySelectorAll('.state')).forEach(e=>e.hidden=!0);const e=document.getElementById(t);e.hidden=!1,document.title=e.dataset.title,document.getElementById('favicon').setAttribute('href','data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>'+e.dataset.favicon+'</text></svg>')};e('loading'),window.history.replaceState(null,'','./'),document.getElementById('manual-copy-button').onclick=()=>{const e=document.getElementById('manual-copy-button').innerText;navigator.clipboard.writeText(e).then(()=>console.info('copied authorization code '+e+' to clipboard')).catch(t=>console.error('failed to copy code '+e+' to clipboard: '+t))};const n=setTimeout(()=>e('manual'),2e3),t=document.forms[0].elements;fetch(t.redirect_uri.value,{method:'POST',mode:'no-cors',headers:{'Content-Type':'application/x-www-form-urlencoded;charset=UTF-8'},body:t.encoded_params.value}).then(()=>clearTimeout(n)).then(()=>e('success')).catch(()=>e('manual'))}</script>
+<script>window.onload=()=>{const e=t=>{Array.from(document.querySelectorAll(".state")).forEach(e=>e.hidden=!0);const e=document.getElementById(t);e.hidden=!1,document.title=e.dataset.title,document.getElementById("favicon").setAttribute("href","data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>"+e.dataset.favicon+"</text></svg>")};e("loading"),window.history.replaceState(null,'',"./"),document.getElementById("manual-copy-button").onclick=()=>{const e=document.getElementById("manual-copy-button").innerText;navigator.clipboard.writeText(e).then(()=>console.info("copied authorization code "+e+" to clipboard")).catch(t=>console.error("failed to copy code "+e+" to clipboard: "+t))};const n=setTimeout(()=>e("manual"),2e3),t=document.forms[0].elements;fetch(t.redirect_uri.value,{method:"POST",mode:"no-cors",headers:{'Content-Type':"application/x-www-form-urlencoded;charset=UTF-8"},body:t.encoded_params.value}).then(t=>{clearTimeout(n),e("success")}).catch(()=>e("manual"))}</script>
 <link id="favicon" rel="icon"/>
 </head>
 <body>
@ -61,7 +61,7 @@ var (
// It's okay if this changes in the future, but this gives us a chance to eyeball the formatting. // It's okay if this changes in the future, but this gives us a chance to eyeball the formatting.
// Our browser-based integration tests should find any incompatibilities. // Our browser-based integration tests should find any incompatibilities.
testExpectedCSP = `default-src 'none'; ` + testExpectedCSP = `default-src 'none'; ` +
`script-src 'sha256-cjTdJmRvuz5EHNb/cw6pFk9iWyjegU9Ihx7Fb9tlqRg='; ` + `script-src 'sha256-1LS3gM7wTGc0dYXZiqW6HK1LHk74YSG8GsJBC/j1/i8='; ` +
`style-src 'sha256-CtfkX7m8x2UdGYvGgDq+6b6yIAQsASW9pbQK+sG8fNA='; ` + `style-src 'sha256-CtfkX7m8x2UdGYvGgDq+6b6yIAQsASW9pbQK+sG8fNA='; ` +
`img-src data:; ` + `img-src data:; ` +
`connect-src *; ` + `connect-src *; ` +

View File

@ -834,21 +834,68 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req
}() }()
var params url.Values var params url.Values
if h.useFormPost { if h.useFormPost { // nolint:nestif
// Return HTTP 405 for anything that's not a POST. // Return HTTP 405 for anything that's not a POST or an OPTIONS request.
if r.Method != http.MethodPost { if r.Method != http.MethodPost && r.Method != http.MethodOptions {
return httperr.Newf(http.StatusMethodNotAllowed, "wanted POST") h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method)
w.WriteHeader(http.StatusMethodNotAllowed)
return nil // keep listening for more requests
} }
// Parse and pull the response parameters from a application/x-www-form-urlencoded request body. // For POST and OPTIONS requests, calculate the allowed origin for CORS.
issuerURL, parseErr := url.Parse(h.issuer)
if parseErr != nil {
return httperr.Wrap(http.StatusInternalServerError, "invalid issuer url", parseErr)
}
allowOrigin := issuerURL.Scheme + "://" + issuerURL.Host
if r.Method == http.MethodOptions {
// Google Chrome decided that it should do CORS preflight checks for this Javascript form submission POST request.
// See https://developer.chrome.com/blog/private-network-access-preflight/
origin := r.Header.Get("Origin")
if origin == "" {
// The CORS preflight request should have an origin.
h.logger.V(debugLogLevel).Info("Pinniped: Got OPTIONS request without origin header")
w.WriteHeader(http.StatusBadRequest)
return nil // keep listening for more requests
}
h.logger.V(debugLogLevel).Info("Pinniped: Got CORS preflight request from browser", "origin", origin)
// To tell the browser that it is okay to make the real POST request, return the following response.
w.Header().Set("Access-Control-Allow-Origin", allowOrigin)
w.Header().Set("Vary", "*") // supposed to use Vary when Access-Control-Allow-Origin is a specific host
w.Header().Set("Access-Control-Allow-Credentials", "false")
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Private-Network", "true")
// If the browser would like to send some headers on the real request, allow them. Chrome does not
// currently send this header. This is here in case some future browser decides to request permission
// to send specific headers by using Access-Control-Request-Headers.
requestedHeaders := r.Header.Get("Access-Control-Request-Headers")
if requestedHeaders != "" {
w.Header().Set("Access-Control-Allow-Headers", requestedHeaders)
}
w.WriteHeader(http.StatusNoContent)
return nil // keep listening for more requests
} // Otherwise, this is a POST request...
// Parse and pull the response parameters from an application/x-www-form-urlencoded request body.
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
return httperr.Wrap(http.StatusBadRequest, "invalid form", err) return httperr.Wrap(http.StatusBadRequest, "invalid form", err)
} }
params = r.Form params = r.Form
// Allow CORS requests for POST so that, in the future, our Javascript code can be updated to use the fetch
// API's mode "cors" while remaining compatible with older CLI versions, starting with those that set these
// CORS headers. Updating to use CORS would allow our Javascript code (form_post.js) to see the true
// HTTP response status from this endpoint. Note that the POST response does not need to set as many CORS
// headers as the OPTIONS preflight response.
w.Header().Set("Access-Control-Allow-Origin", allowOrigin)
w.Header().Set("Vary", "*") // supposed to use Vary when Access-Control-Allow-Origin is a specific host
} else { } else {
// Return HTTP 405 for anything that's not a GET. // Return HTTP 405 for anything that's not a GET.
if r.Method != http.MethodGet { if r.Method != http.MethodGet {
return httperr.Newf(http.StatusMethodNotAllowed, "wanted GET") h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method)
w.WriteHeader(http.StatusMethodNotAllowed)
return nil // keep listening for more requests
} }
// Pull response parameters from the URL query string. // Pull response parameters from the URL query string.

View File

@ -1825,6 +1825,8 @@ func TestHandlePasteCallback(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
tt := tt tt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
t.Parallel()
h := &handlerState{ h := &handlerState{
callbacks: make(chan callbackResult, 1), callbacks: make(chan callbackResult, 1),
state: state.State("test-state"), state: state.State("test-state"),
@ -1870,58 +1872,157 @@ func TestHandleAuthCodeCallback(t *testing.T) {
method string method string
query string query string
body []byte body []byte
contentType string headers http.Header
opt func(t *testing.T) Option opt func(t *testing.T) Option
wantErr string wantErr string
wantHTTPStatus int wantHTTPStatus int
wantNoCallbacks bool
wantHeaders http.Header
}{ }{
{ {
name: "wrong method", name: "wrong method returns an error but keeps listening",
method: "POST", method: http.MethodPost,
query: "", query: "",
wantErr: "wanted GET", wantNoCallbacks: true,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusMethodNotAllowed, wantHTTPStatus: http.StatusMethodNotAllowed,
}, },
{ {
name: "wrong method for form_post", name: "wrong method for form_post returns an error but keeps listening",
method: "GET", method: http.MethodGet,
query: "", query: "",
opt: withFormPostMode, opt: withFormPostMode,
wantErr: "wanted POST", wantNoCallbacks: true,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusMethodNotAllowed, wantHTTPStatus: http.StatusMethodNotAllowed,
}, },
{ {
name: "invalid form for form_post", name: "invalid form for form_post",
method: "POST", method: http.MethodPost,
query: "", query: "",
contentType: "application/x-www-form-urlencoded", headers: map[string][]string{"Content-Type": {"application/x-www-form-urlencoded"}},
body: []byte(`%`), body: []byte(`%`),
opt: withFormPostMode, opt: withFormPostMode,
wantErr: `invalid form: invalid URL escape "%"`, wantErr: `invalid form: invalid URL escape "%"`,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusBadRequest, wantHTTPStatus: http.StatusBadRequest,
}, },
{ {
name: "invalid state", name: "invalid state",
query: "state=invalid", query: "state=invalid",
wantErr: "missing or invalid state parameter", wantErr: "missing or invalid state parameter",
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusForbidden, wantHTTPStatus: http.StatusForbidden,
}, },
{ {
name: "error code from provider", name: "error code from provider",
query: "state=test-state&error=some_error", query: "state=test-state&error=some_error",
wantErr: `login failed with code "some_error"`, wantErr: `login failed with code "some_error"`,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusBadRequest, wantHTTPStatus: http.StatusBadRequest,
}, },
{ {
name: "error code with a description from provider", name: "error code with a description from provider",
query: "state=test-state&error=some_error&error_description=optional%20error%20description", query: "state=test-state&error=some_error&error_description=optional%20error%20description",
wantErr: `login failed with code "some_error": optional error description`, wantErr: `login failed with code "some_error": optional error description`,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusBadRequest, wantHTTPStatus: http.StatusBadRequest,
}, },
{
name: "in form post mode, invalid issuer url config during CORS preflight request returns an error",
method: http.MethodOptions,
query: "",
headers: map[string][]string{"Origin": {"https://some-origin.com"}},
wantErr: `invalid issuer url: parse "://bad-url": missing protocol scheme`,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusInternalServerError,
opt: func(t *testing.T) Option {
return func(h *handlerState) error {
h.useFormPost = true
h.issuer = "://bad-url"
return nil
}
},
},
{
name: "in form post mode, invalid issuer url config during POST request returns an error",
method: http.MethodPost,
query: "",
headers: map[string][]string{"Origin": {"https://some-origin.com"}},
wantErr: `invalid issuer url: parse "://bad-url": missing protocol scheme`,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusInternalServerError,
opt: func(t *testing.T) Option {
return func(h *handlerState) error {
h.useFormPost = true
h.issuer = "://bad-url"
return nil
}
},
},
{
name: "in form post mode, OPTIONS request missing origin header results in 400 and keeps listener running",
method: http.MethodOptions,
query: "",
opt: withFormPostMode,
wantNoCallbacks: true,
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusBadRequest,
},
{
name: "in form post mode, valid CORS request responds with 204 and CORS headers and keeps listener running",
method: http.MethodOptions,
query: "",
headers: map[string][]string{"Origin": {"https://some-origin.com"}},
wantNoCallbacks: true,
wantHTTPStatus: http.StatusNoContent,
wantHeaders: map[string][]string{
"Access-Control-Allow-Credentials": {"false"},
"Access-Control-Allow-Methods": {"POST, OPTIONS"},
"Access-Control-Allow-Origin": {"https://valid-issuer.com"},
"Vary": {"*"},
"Access-Control-Allow-Private-Network": {"true"},
},
opt: func(t *testing.T) Option {
return func(h *handlerState) error {
h.useFormPost = true
h.issuer = "https://valid-issuer.com/with/some/path"
return nil
}
},
},
{
name: "in form post mode, valid CORS request with Access-Control-Request-Headers responds with 204 and CORS headers including Access-Control-Allow-Headers and keeps listener running",
method: http.MethodOptions,
query: "",
headers: map[string][]string{
"Origin": {"https://some-origin.com"},
"Access-Control-Request-Headers": {"header1, header2, header3"},
},
wantNoCallbacks: true,
wantHTTPStatus: http.StatusNoContent,
wantHeaders: map[string][]string{
"Access-Control-Allow-Credentials": {"false"},
"Access-Control-Allow-Methods": {"POST, OPTIONS"},
"Access-Control-Allow-Origin": {"https://valid-issuer.com"},
"Vary": {"*"},
"Access-Control-Allow-Private-Network": {"true"},
"Access-Control-Allow-Headers": {"header1, header2, header3"},
},
opt: func(t *testing.T) Option {
return func(h *handlerState) error {
h.useFormPost = true
h.issuer = "https://valid-issuer.com/with/some/path"
return nil
}
},
},
{ {
name: "invalid code", name: "invalid code",
query: "state=test-state&code=invalid", query: "state=test-state&code=invalid",
wantErr: "could not complete code exchange: some exchange error", wantErr: "could not complete code exchange: some exchange error",
wantHeaders: map[string][]string{},
wantHTTPStatus: http.StatusBadRequest, wantHTTPStatus: http.StatusBadRequest,
opt: func(t *testing.T) Option { opt: func(t *testing.T) Option {
return func(h *handlerState) error { return func(h *handlerState) error {
@ -1940,6 +2041,8 @@ func TestHandleAuthCodeCallback(t *testing.T) {
{ {
name: "valid", name: "valid",
query: "state=test-state&code=valid", query: "state=test-state&code=valid",
wantHTTPStatus: http.StatusOK,
wantHeaders: map[string][]string{"Content-Type": {"text/plain; charset=utf-8"}},
opt: func(t *testing.T) Option { opt: func(t *testing.T) Option {
return func(h *handlerState) error { return func(h *handlerState) error {
h.oauth2Config = &oauth2.Config{RedirectURL: testRedirectURI} h.oauth2Config = &oauth2.Config{RedirectURL: testRedirectURI}
@ -1957,8 +2060,43 @@ func TestHandleAuthCodeCallback(t *testing.T) {
{ {
name: "valid form_post", name: "valid form_post",
method: http.MethodPost, method: http.MethodPost,
contentType: "application/x-www-form-urlencoded", headers: map[string][]string{"Content-Type": {"application/x-www-form-urlencoded"}},
body: []byte(`state=test-state&code=valid`), body: []byte(`state=test-state&code=valid`),
wantHeaders: map[string][]string{
"Access-Control-Allow-Origin": {"https://valid-issuer.com"},
"Vary": {"*"},
"Content-Type": {"text/plain; charset=utf-8"},
},
wantHTTPStatus: http.StatusOK,
opt: func(t *testing.T) Option {
return func(h *handlerState) error {
h.useFormPost = true
h.oauth2Config = &oauth2.Config{RedirectURL: testRedirectURI}
h.getProvider = func(_ *oauth2.Config, _ *oidc.Provider, _ *http.Client) provider.UpstreamOIDCIdentityProviderI {
mock := mockUpstream(t)
mock.EXPECT().
ExchangeAuthcodeAndValidateTokens(gomock.Any(), "valid", pkce.Code("test-pkce"), nonce.Nonce("test-nonce"), testRedirectURI).
Return(&oidctypes.Token{IDToken: &oidctypes.IDToken{Token: "test-id-token"}}, nil)
return mock
}
return nil
}
},
},
{
name: "valid form_post made with the same origin headers that would be used by a Javascript fetch client using mode=cors",
method: http.MethodPost,
headers: map[string][]string{
"Content-Type": {"application/x-www-form-urlencoded"},
"Origin": {"https://some-origin.com"},
},
body: []byte(`state=test-state&code=valid`),
wantHeaders: map[string][]string{
"Access-Control-Allow-Origin": {"https://valid-issuer.com"},
"Vary": {"*"},
"Content-Type": {"text/plain; charset=utf-8"},
},
wantHTTPStatus: http.StatusOK,
opt: func(t *testing.T) Option { opt: func(t *testing.T) Option {
return func(h *handlerState) error { return func(h *handlerState) error {
h.useFormPost = true h.useFormPost = true
@ -1978,11 +2116,15 @@ func TestHandleAuthCodeCallback(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
tt := tt tt := tt
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
t.Parallel()
h := &handlerState{ h := &handlerState{
callbacks: make(chan callbackResult, 1), callbacks: make(chan callbackResult, 1),
state: state.State("test-state"), state: state.State("test-state"),
pkce: pkce.Code("test-pkce"), pkce: pkce.Code("test-pkce"),
nonce: nonce.Nonce("test-nonce"), nonce: nonce.Nonce("test-nonce"),
logger: testlogger.New(t).Logger,
issuer: "https://valid-issuer.com/with/some/path",
} }
if tt.opt != nil { if tt.opt != nil {
require.NoError(t, tt.opt(t)(h)) require.NoError(t, tt.opt(t)(h))
@ -1998,8 +2140,8 @@ func TestHandleAuthCodeCallback(t *testing.T) {
if tt.method != "" { if tt.method != "" {
req.Method = tt.method req.Method = tt.method
} }
if tt.contentType != "" { if tt.headers != nil {
req.Header.Set("Content-Type", tt.contentType) req.Header = tt.headers
} }
err = h.handleAuthCodeCallback(resp, req) err = h.handleAuthCodeCallback(resp, req)
@ -2012,11 +2154,19 @@ func TestHandleAuthCodeCallback(t *testing.T) {
} }
} else { } else {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.wantHTTPStatus, resp.Code)
} }
if tt.wantHeaders != nil {
require.Equal(t, tt.wantHeaders, resp.Header())
}
gotCallback := false
select { select {
case <-time.After(1 * time.Second): case <-time.After(1 * time.Second):
if !tt.wantNoCallbacks {
require.Fail(t, "timed out waiting to receive from callbacks channel") require.Fail(t, "timed out waiting to receive from callbacks channel")
}
case result := <-h.callbacks: case result := <-h.callbacks:
if tt.wantErr != "" { if tt.wantErr != "" {
require.EqualError(t, result.err, tt.wantErr) require.EqualError(t, result.err, tt.wantErr)
@ -2025,7 +2175,9 @@ func TestHandleAuthCodeCallback(t *testing.T) {
require.NoError(t, result.err) require.NoError(t, result.err)
require.NotNil(t, result.token) require.NotNil(t, result.token)
require.Equal(t, result.token.IDToken.Token, "test-id-token") require.Equal(t, result.token.IDToken.Token, "test-id-token")
gotCallback = true
} }
require.Equal(t, tt.wantNoCallbacks, !gotCallback)
}) })
} }
} }

View File

@ -7,7 +7,7 @@ params:
github_url: "https://github.com/vmware-tanzu/pinniped" github_url: "https://github.com/vmware-tanzu/pinniped"
slack_url: "https://kubernetes.slack.com/messages/pinniped" slack_url: "https://kubernetes.slack.com/messages/pinniped"
community_url: "https://go.pinniped.dev/community" community_url: "https://go.pinniped.dev/community"
latest_version: v0.13.0 latest_version: v0.14.0
pygmentsCodefences: true pygmentsCodefences: true
pygmentsStyle: "pygments" pygmentsStyle: "pygments"
markup: markup:

View File

@ -13,6 +13,10 @@ As a Kubernetes cluster administrator or user, you can learn how Pinniped works,
Have a question, comment, or idea? Please reach out via [GitHub Discussions](https://github.com/vmware-tanzu/pinniped/discussions) or [join the Pinniped community meetings]({{< ref "/community" >}}). Have a question, comment, or idea? Please reach out via [GitHub Discussions](https://github.com/vmware-tanzu/pinniped/discussions) or [join the Pinniped community meetings]({{< ref "/community" >}}).
## New to Pinniped?
- ⚠️ **Start here:** [Learn to use Pinniped for federated authentication to Kubernetes clusters]({{< ref "tutorials/concierge-and-supervisor-demo" >}})
## Background ## Background
{{< docsmenu "background" >}} {{< docsmenu "background" >}}

View File

@ -45,6 +45,8 @@ Pinniped supports the following IDPs.
1. Any [LDAP](https://ldap.com) identity provider. 1. Any [LDAP](https://ldap.com) identity provider.
1. Any Active Directory identity provider (via LDAP).
The The
[`idp.supervisor.pinniped.dev`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-idp-supervisor-pinniped-dev-v1alpha1) [`idp.supervisor.pinniped.dev`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-idp-supervisor-pinniped-dev-v1alpha1)
API group contains the Kubernetes custom resources that configure the Pinniped API group contains the Kubernetes custom resources that configure the Pinniped

View File

@ -26,7 +26,7 @@ If you would rather not use the Supervisor, you may want to [configure the Conci
This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress, This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress,
and that you have [configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}). and that you have [configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).
It also assumes that you have configured an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor as the source of your user's identities. It also assumes that you have configured an `OIDCIdentityProvider`, `LDAPIdentityProvider`, or `ActiveDirectoryIdentityProvider` for the Supervisor as the source of your user's identities.
Various examples of configuring these resources can be found in these guides. Various examples of configuring these resources can be found in these guides.
It also assumes that you have already [installed the Pinniped Concierge]({{< ref "install-concierge" >}}) It also assumes that you have already [installed the Pinniped Concierge]({{< ref "install-concierge" >}})

View File

@ -18,7 +18,7 @@ This how-to guide assumes that you have already configured the following Pinnipe
then you have already: then you have already:
1. [Installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress. 1. [Installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress.
1. [Configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}). 1. [Configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}).
1. Configured an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor as the source of your user's identities. 1. Configured an `OIDCIdentityProvider`, `LDAPIdentityProvider`, or `ActiveDirectoryIdentityProvider` for the Supervisor as the source of your user's identities.
Various examples of configuring these resources can be found in these guides. Various examples of configuring these resources can be found in these guides.
1. In each cluster for which you would like to use Pinniped for authentication, you have [installed the Concierge]({{< ref "install-concierge" >}}). 1. In each cluster for which you would like to use Pinniped for authentication, you have [installed the Concierge]({{< ref "install-concierge" >}}).
1. In each cluster's Concierge, you have configured an authenticator. For example, if you are using the Pinniped Supervisor, 1. In each cluster's Concierge, you have configured an authenticator. For example, if you are using the Pinniped Supervisor,
@ -35,8 +35,13 @@ You should have also already [installed the `pinniped` command-line]({{< ref "in
Although you can choose to use Pinniped without using the Pinniped Supervisor, there are several key advantages of choosing to use the Pinniped Supervisor to manage identity across fleets of Kubernetes clusters. Although you can choose to use Pinniped without using the Pinniped Supervisor, there are several key advantages of choosing to use the Pinniped Supervisor to manage identity across fleets of Kubernetes clusters.
1. The Supervisor makes it easy to **bring your own OIDC, LDAP, or Active Directory identity provider to act as the source of user identities**.
It also allows you to configure how identities and group memberships in the identity provider map to identities
and group memberships in the Kubernetes clusters.
1. A generated kubeconfig for a cluster will be specific for that cluster, however **it will not contain any specific user identity or credentials. 1. A generated kubeconfig for a cluster will be specific for that cluster, however **it will not contain any specific user identity or credentials.
This kubeconfig file can be safely shared with all cluster users.** When the user runs `kubectl` commands using this kubeconfig, they will be interactively prompted to log in using their own unique identity from the OIDC or LDAP identity provider configured in the Supervisor. This kubeconfig file can be safely shared with all cluster users.** When the user runs `kubectl` commands using this kubeconfig,
they will be interactively prompted to log in using their own unique identity from the identity provider configured in the Supervisor.
1. The Supervisor will provide a federated identity across all clusters that use the same `FederationDomain`. 1. The Supervisor will provide a federated identity across all clusters that use the same `FederationDomain`.
The user will be **prompted by `kubectl` to interactively authenticate once per day**, and then will be able to use all clusters The user will be **prompted by `kubectl` to interactively authenticate once per day**, and then will be able to use all clusters
@ -44,10 +49,6 @@ Although you can choose to use Pinniped without using the Pinniped Supervisor, t
This federated identity is secure because behind the scenes the Supervisor is issuing very short-lived credentials This federated identity is secure because behind the scenes the Supervisor is issuing very short-lived credentials
that are uniquely scoped to each cluster. that are uniquely scoped to each cluster.
1. The Supervisor makes it easy to **bring your own OIDC or LDAP identity provider to act as the source of user identities**.
It also allows you to configure how identities and group memberships in the OIDC or LDAP identity provider map to identities
and group memberships in the Kubernetes clusters.
## Generate a Pinniped-compatible kubeconfig file ## Generate a Pinniped-compatible kubeconfig file
You will need to generate a Pinniped-compatible kubeconfig file for each cluster in which you have installed the Concierge. You will need to generate a Pinniped-compatible kubeconfig file for each cluster in which you have installed the Concierge.
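For example, a minimal invocation might look like the following sketch. The file names here are placeholders, and `pinniped get kubeconfig --help` describes the full set of options.
```sh
# A sketch only: "cluster-admin.yaml" is a placeholder for an admin-level
# kubeconfig for one cluster where the Concierge is installed.
pinniped get kubeconfig \
  --kubeconfig cluster-admin.yaml \
  > pinniped-kubeconfig.yaml
```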

View File

@ -1,5 +1,5 @@
--- ---
title: Learn to use the Pinniped Supervisor alongside the Concierge title: Learn to use Pinniped for federated authentication to Kubernetes clusters
description: See how the Pinniped Supervisor streamlines login to multiple Kubernetes clusters. description: See how the Pinniped Supervisor streamlines login to multiple Kubernetes clusters.
cascade: cascade:
layout: docs layout: docs
@ -7,181 +7,126 @@ menu:
docs: docs:
name: Concierge with Supervisor name: Concierge with Supervisor
parent: tutorials parent: tutorials
weight: 1
--- ---
## Prerequisites ## Why Pinniped?
1. A Kubernetes cluster of a type supported by Pinniped Concierge as described in [architecture](/docs/background/architecture). There are many benefits to using the Pinniped Supervisor, Concierge, and CLI components together
to provide Kubernetes authentication.
Don't have a cluster handy? Consider using [kind](https://kind.sigs.k8s.io/) on your local machine. - It's easy to **bring your own OIDC, LDAP, or Active Directory identity provider** to act as the source of user identities.
See below for an example of using kind. A user's identity in the external identity provider becomes their identity in Kubernetes.
All other aspects of Kubernetes that are sensitive to identity, such as authorization policies and audit logging, are then
based on the user identities from your identity provider.
1. A Kubernetes cluster of a type supported by Pinniped Supervisor (this can be the same cluster as the first, or different). - You can **bring identities from your own identity provider into many types of Kubernetes clusters in a consistent way**.
This includes clusters from various vendors run on-prem, and clusters provided as a cloud service by various popular cloud companies.
1. A kubeconfig that has administrator-like privileges on each cluster. - Kubeconfig files **will not contain any specific user identity or credentials, so they can be safely shared**.
1. An external OIDC identity provider to use as the source of identity for Pinniped. - Deep integration with `kubectl` means that when a user runs `kubectl` commands,
they will be **interactively prompted to log in using their own unique identity** from your identity provider.
## Overview - Users will be prompted by `kubectl` to interactively **authenticate only once per day**, and then will be able to
use multiple clusters for the rest of the day without being asked to authenticate again.
Installing and trying Pinniped on any cluster consists of the following general steps. See the next section below - All credentials are short-lived, and refreshed often. Additionally, **frequent checks are made against your identity provider
for a more specific example, including the commands to use for that case. to ensure that the user should continue to have access to the Kubernetes clusters**. For example, within minutes
of locking an Active Directory account, that user will lose access to Kubernetes clusters, even if they were
already logged in.
1. [Install the Supervisor]({{< ref "../howto/install-supervisor" >}}). - A **user can safely be granted high levels of authorization on a cluster**, if needed.
1. Create a Even if they abuse their privilege by capturing the credentials sent by other users to the cluster,
[`FederationDomain`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-config-v1alpha1-federationdomain) they will not be able to use the captured credentials to access other clusters, because all credentials
via the installed Pinniped Supervisor. sent to clusters are uniquely scoped to each individual cluster.
1. Create an
[`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-idp-v1alpha1-oidcidentityprovider)
via the installed Pinniped Supervisor.
1. Install the Pinniped Concierge. See [deploy/concierge/README.md](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/concierge/README.md).
1. Create a
[`JWTAuthenticator`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-concierge-authentication-v1alpha1-jwtauthenticator)
via the installed Pinniped Concierge.
1. [Install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}).
1. Generate a kubeconfig using the Pinniped command-line tool. Run `pinniped get kubeconfig --help` for more information.
1. Run `kubectl` commands using the generated kubeconfig. The Pinniped Supervisor and Concierge are automatically used for authentication during those commands.
## Example of deploying on multiple kind clusters - Pinniped will not interfere with a cluster's original vendor-specific authentication system.
The **original admin-level kubeconfig from a cluster can be privately kept by the cluster's creator** for
bootstrapping and break-glass access purposes.
[kind](https://kind.sigs.k8s.io) is a tool for creating and managing Kubernetes clusters on your local machine - Pinniped is **open source** and will never be tied to any one vendor's authentication system.
which uses Docker containers as the cluster's nodes. This is a convenient way to try out Pinniped on local As Pinniped improves in the future, all your Kubernetes clusters can benefit, regardless of which vendor provided the clusters.
non-production clusters. The code is available on GitHub for any expert to audit, and for any community member to contribute.
The following steps deploy the latest release of Pinniped on kind. They deploy the Pinniped ## What this tutorial will show
Supervisor on one cluster, and the Pinniped Concierge on another cluster. A multi-cluster deployment
strategy is typical for Pinniped. The Pinniped Concierge uses a
[`JWTAuthenticator`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-concierge-authentication-v1alpha1-jwtauthenticator)
to authenticate federated identities from the Supervisor.
1. Install the tools required for the following steps. This tutorial will show:
- A detailed example of how to install and configure a Supervisor with ingress, DNS, TLS, and an external identity provider
- How to install the Concierge onto multiple workload clusters and configure them all to trust identities from the Supervisor
- How an admin can create and distribute kubeconfig files for the workload clusters
- How a developer or devops user can authenticate with kubectl using their identity from the external identity provider,
and how they can securely access all workload clusters for the rest of the day without needing to authenticate again
- [Install kind](https://kind.sigs.k8s.io/docs/user/quick-start/), if not already installed. For example, `brew install kind` on macOS. ## Tutorial background
- kind depends on Docker. If not already installed, [install Docker](https://docs.docker.com/get-docker/), for example `brew cask install docker` on macOS. This tutorial is intended to be a step-by-step example of installing and configuring the Pinniped components
to provide a multi-cluster federated authentication solution. It will show every
command needed to replicate the same setup to allow the reader to follow the same steps themselves.
- This demo requires `kubectl`, which comes with Docker, or can be [installed separately](https://kubernetes.io/docs/tasks/tools/install-kubectl/). A single Pinniped Supervisor can provide authentication for any number of Kubernetes clusters. In a typical deployment:
- This demo requires `openssl`, which is installed on macOS by default, or can be [installed separately](https://www.openssl.org/). - A single Supervisor is deployed on a special cluster where app developers and devops users have no access.
App developers and devops users should have no access at least to the resources in the Supervisor's namespace,
but usually have no access to the whole cluster. For this tutorial, let's call this cluster the *"supervisor cluster"*.
- App developers and devops users can then use their identities provided by the Supervisor to log in to many
clusters where they can manage their apps. For this tutorial, let's call these clusters the *"workload clusters"*.
The Pinniped Concierge component is installed into each workload cluster and is configured to trust the single Supervisor.
The Concierge acts as an in-cluster agent to provide authentication services.
1. Create a new Kubernetes cluster for the Pinniped Supervisor using `kind create cluster --name pinniped-supervisor`. There are many ways to install and configure Pinniped. To make the steps of this tutorial as specific as possible, we
had to make some choices. The choices made for this tutorial were:
1. Create a new Kubernetes cluster for the Pinniped Concierge using `kind create cluster --name pinniped-concierge`. - The Pinniped Supervisor can draw user identities from OIDC identity providers, Active Directory providers (via LDAP),
and generic LDAP providers. In this tutorial we will use Okta as an OIDC identity provider.
Okta offers a free developer account, so any reader should be able to sign up for an Okta
account if they would like to try these steps themselves.
- The Pinniped Supervisor can be installed on any type of Kubernetes cluster. In this tutorial we will
demonstrate the installation process for GKE because any reader should be able to sign up for a Google Cloud
account if they would like to try these steps themselves. We will use separate supervisor and workload clusters.
- The Pinniped Supervisor needs working ingress. There are many ways to configure ingress for apps running on
Kubernetes clusters, as described in the [howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}).
For this tutorial we will use a LoadBalancer Service with a public IP address. This is a simple setup which
allows us to terminate TLS inside the Supervisor app, keeping the connection secure all the way into
the Supervisor app's pods. A corporate installation of the Supervisor might keep it behind the corporate firewall,
but for this tutorial a public IP also allows your desktop (and anyone on the internet) to access the Supervisor's endpoints.
The HTTPS endpoints of a properly configured Supervisor are generally safe to expose publicly, as long as you are not concerned
with denial of service attacks (or have some external protection against such attacks).
- Although it is possible to configure the Supervisor's FederationDomain to use an IP address, it is better to
use a DNS name. There are many ways to manage DNS. For this tutorial, we will use Google Cloud's
[Cloud DNS](https://cloud.google.com/dns) service to register a new hostname for the Supervisor
app's load balancer's public IP address. We won't describe how to prepare Cloud DNS to manage DNS for
the parent domain in this tutorial. This typically involves setting up Cloud DNS's servers as the list of DNS servers
for your domain within your domain registrar. We'll assume that this has already been done.
- For web-based login flows as used by OIDC identity providers, the Pinniped Supervisor needs TLS certificates
that are trusted by the end users' web browsers. There are many ways to create TLS certificates.
There are also several ways to configure the TLS certificates on the Supervisor, as described in the
[docs for configuring the Supervisor]({{< ref "../howto/configure-supervisor" >}}).
For this tutorial we will use [Let's Encrypt](https://letsencrypt.org) with [cert-manager](https://cert-manager.io/docs/),
because any reader could use these services if they would like to try these steps themselves.
- The Pinniped Concierge can be installed in many types of Kubernetes clusters, as described in
[supported Kubernetes clusters]({{< ref "../reference/supported-clusters" >}}). In this tutorial we will
use GKE clusters as our workload clusters, for the same reasons that we are using GKE for the supervisor cluster.
It is worth noting that a Supervisor running on GKE can provide authentication for workload clusters of any supported
Kubernetes type, not only for GKE workload clusters.
- GKE and Google Cloud DNS can be managed in the Google Cloud Console web UI, or via the gcloud CLI. For this tutorial,
we will use the [gcloud CLI](https://cloud.google.com/sdk/docs/quickstart) so we can be as specific as possible.
However, the same steps could be performed via the UI instead.
This tutorial assumes that you have already authenticated with the gcloud CLI as a user who has permission to
run all the gcloud commands used below.
- Pinniped provides authentication, not authorization. Inside Kubernetes, a user authenticated via Pinniped will have a username
and may also have a list of group names. These usernames and group names can be used to create authorization policies using any
Kubernetes authorization system, usually using [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac).
1. Deploy the Pinniped Supervisor with a valid serving certificate and network path. See The details of the steps shown in this tutorial would be different if any of the above choices were made differently,
[deploy/supervisor/README.md](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/supervisor/README.md). however the general concepts at each step would still apply.
For purposes of this demo, the following issuer is used. This issuer is specific to DNS and ## Ready? Let's go!
TLS infrastructure set up for this demo:
```sh ### Install the Pinniped CLI
issuer=https://my-supervisor.demo.pinniped.dev
```
This demo uses a `Secret` named `my-federation-domain-tls` to provide the serving certificate for If you have not already done so, [install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}).
the
[`FederationDomain`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-config-v1alpha1-federationdomain). The
serving certificate `Secret` must be of type `kubernetes.io/tls`.
The CA bundle for this serving
certificate is assumed to be written, base64-encoded, to a file named
`/tmp/pinniped-supervisor-ca-bundle-base64-encoded.pem`.
1. Create a
[`FederationDomain`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-config-v1alpha1-federationdomain)
object to configure the Pinniped Supervisor to issue federated identities.
```sh
cat <<EOF | kubectl create --context kind-pinniped-supervisor --namespace pinniped-supervisor -f -
apiVersion: config.supervisor.pinniped.dev/v1alpha1
kind: FederationDomain
metadata:
name: my-federation-domain
spec:
issuer: $issuer
tls:
secretName: my-federation-domain-tls
EOF
```
1. Create a `Secret` with the external OIDC identity provider OAuth 2.0 client credentials named
`my-oidc-identity-provider-client` in the pinniped-supervisor namespace.
```sh
kubectl create secret generic my-oidc-identity-provider-client \
--context kind-pinniped-supervisor \
--namespace pinniped-supervisor \
--type secrets.pinniped.dev/oidc-client \
--from-literal=clientID=xxx \
--from-literal=clientSecret=yyy
```
1. Create an
[`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-idp-v1alpha1-oidcidentityprovider)
object to configure the Pinniped Supervisor to federate identities from an upstream OIDC identity
provider.
Replace the `issuer` with your external identity provider's issuer and
adjust any other configuration on the spec.
```sh
cat <<EOF | kubectl create --context kind-pinniped-supervisor --namespace pinniped-supervisor -f -
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
name: my-oidc-identity-provider
spec:
issuer: https://dev-zzz.okta.com/oauth2/default
claims:
username: email
authorizationConfig:
additionalScopes: ['email', 'offline_access']
client:
secretName: my-oidc-identity-provider-client
EOF
```
1. Deploy the Pinniped Concierge.
```sh
kubectl apply --context kind-pinniped-concierge \
-f https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml
kubectl apply --context kind-pinniped-concierge \
-f https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml
```
The `install-pinniped-concierge-crds.yaml` file contains the Concierge CustomResourceDefinitions.
These define the custom APIs that you use to configure and interact with the Concierge.
The `install-pinniped-concierge-resources.yaml` file includes the rest of the Concierge resources with default deployment options.
If you would prefer to customize the available options, please see the [Concierge installation guide]({{< ref "../howto/install-concierge" >}})
for instructions on how to deploy using `ytt`.
1. Generate a random audience value for this cluster.
```sh
audience="$(openssl rand -hex 8)"
```
1. Create a
[`JWTAuthenticator`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-concierge-authentication-v1alpha1-jwtauthenticator)
object to configure the Pinniped Concierge to authenticate using the Pinniped Supervisor.
```sh
cat <<EOF | kubectl create --context kind-pinniped-concierge -f -
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
kind: JWTAuthenticator
metadata:
name: my-jwt-authenticator
spec:
issuer: $issuer
audience: $audience
tls:
certificateAuthorityData: $(cat /tmp/pinniped-supervisor-ca-bundle-base64-encoded.pem)
EOF
```
1. Download the latest version of the Pinniped command-line tool for your platform.
On macOS or Linux, you can do this using Homebrew: On macOS or Linux, you can do this using Homebrew:
```sh ```sh
@ -190,50 +135,633 @@ to authenticate federated identities from the Supervisor.
On other platforms, see the [command-line installation guide]({{< ref "../howto/install-cli" >}}) for more details. On other platforms, see the [command-line installation guide]({{< ref "../howto/install-cli" >}}) for more details.
1. Generate a kubeconfig for the current cluster. ### Create some GKE clusters
For the rest of this tutorial, let's assume that your Google Cloud project name and your preferred Google Cloud zone name
are set as environment variables.
```sh
# Replace the values of these variables with your own.
PROJECT="my-gcp-project-name"
ZONE="us-central1-c"
```
Let's create one supervisor cluster and two workload clusters. There are many options to consider here, but for this
tutorial we will use only the most basic options.
```sh
gcloud container clusters create "demo-supervisor-cluster" \
--project "$PROJECT" --zone "$ZONE"
gcloud container clusters create "demo-workload-cluster1" \
--project "$PROJECT" --zone "$ZONE"
gcloud container clusters create "demo-workload-cluster2" \
--project "$PROJECT" --zone "$ZONE"
```
### Get the admin kubeconfigs for each GKE cluster
Most of the following installation and configuration steps are performed using the cluster's admin kubeconfig.
Let's download those kubeconfig files now.
```sh
# Note: KUBECONFIG determines the output location for these commands.
KUBECONFIG="supervisor-admin.yaml" gcloud container clusters get-credentials \
"demo-supervisor-cluster" --project "$PROJECT" --zone "$ZONE"
KUBECONFIG="workload1-admin.yaml" gcloud container clusters get-credentials \
"demo-workload-cluster1" --project "$PROJECT" --zone "$ZONE"
KUBECONFIG="workload2-admin.yaml" gcloud container clusters get-credentials \
"demo-workload-cluster2" --project "$PROJECT" --zone "$ZONE"
```
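As an optional sanity check before continuing, you can confirm that each downloaded admin kubeconfig works:
```sh
kubectl get nodes --kubeconfig supervisor-admin.yaml
kubectl get nodes --kubeconfig workload1-admin.yaml
kubectl get nodes --kubeconfig workload2-admin.yaml
```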
### Decide which hostname and domain or subdomain will be used for the Supervisor
The Pinniped maintainers own the pinniped.dev domain and have already set it up for use with Google Cloud DNS,
so for this tutorial we will call our Supervisor server `demo-supervisor.pinniped.dev`.
### Install the Pinniped Supervisor on the supervisor cluster
There are several installation options described in the
[howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}).
For this tutorial, we will install the latest version using the `kubectl` CLI.
```sh
kubectl apply \
-f https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml \
--kubeconfig supervisor-admin.yaml
```
### Create a LoadBalancer Service for the Supervisor
There are several options for exposing the Supervisor's endpoints outside the cluster, which are described in the
[howto guide for configuring the Supervisor]({{< ref "../howto/configure-supervisor" >}}). For this tutorial,
we will use a public LoadBalancer.
Create a LoadBalancer to expose the Supervisor's endpoints to the public, being careful to only
expose the HTTPS endpoint (not the HTTP endpoint).
```sh
cat <<EOF | kubectl create --kubeconfig supervisor-admin.yaml -f -
apiVersion: v1
kind: Service
metadata:
name: pinniped-supervisor-loadbalancer
namespace: pinniped-supervisor
spec:
type: LoadBalancer
selector:
app: pinniped-supervisor
ports:
- protocol: TCP
port: 443
targetPort: 8443 # 8443 is the TLS port. Do not expose port 8080.
EOF
```
Check for an IP using the following command. The value returned
is the public IP of your LoadBalancer, which will be used
in the steps below. It may take a little time for the LoadBalancer to be assigned a public IP, and this
command will have empty output until then.
```sh
kubectl get service pinniped-supervisor-loadbalancer \
-o jsonpath='{.status.loadBalancer.ingress[*].ip}' \
--namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml
```
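Once an IP address appears, you can optionally capture it into a shell variable for later use. This is only a convenience sketch, and the jsonpath below assumes the LoadBalancer has a single ingress IP. If you do this, you can skip manually setting `PUBLIC_IP` again in the DNS step below.
```sh
# Capture the LoadBalancer's public IP for use in later steps.
PUBLIC_IP="$(kubectl get service pinniped-supervisor-loadbalancer \
  --namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
echo "Supervisor LoadBalancer public IP: $PUBLIC_IP"
```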
### Install and configure cert-manager on the supervisor cluster
Install cert-manager.
```sh
kubectl apply \
-f https://github.com/jetstack/cert-manager/releases/download/v1.5.3/cert-manager.yaml \
--kubeconfig supervisor-admin.yaml
```
Create a GCP service account that cert-manager can use to manage Google Cloud DNS.
cert-manager will need this as part of its process to prove to Let's Encrypt that we own the domain.
```sh
gcloud iam service-accounts create demo-dns-solver \
--display-name "demo-dns-solver" --project "$PROJECT"
gcloud projects add-iam-policy-binding "$PROJECT" \
--member "serviceAccount:demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \
--role roles/dns.admin --condition=None
```
Create and download a key for the new service account, and then put it into a Secret on the cluster.
Be careful with this key, as it allows full control over your Cloud DNS zones.
```sh
gcloud iam service-accounts keys create demo-dns-solver-key.json \
--iam-account "demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \
--project "$PROJECT"
kubectl create secret generic demo-dns-solver-svc-acct \
--namespace pinniped-supervisor --from-file=demo-dns-solver-key.json \
--kubeconfig supervisor-admin.yaml
```
Configure cert-manager to use Let's Encrypt.
```sh
# Replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
# Using @example.com is not allowed and will cause failures.
MY_EMAIL="someone@example.com"
cat <<EOF | kubectl create --kubeconfig supervisor-admin.yaml -f -
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: demo-issuer
namespace: pinniped-supervisor
spec:
acme:
email: "$MY_EMAIL"
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: demo-issuer-account-key
solvers:
- dns01:
cloudDNS:
# The ID of the GCP project.
project: "$PROJECT"
# This is the secret used to access the service account.
serviceAccountSecretRef:
name: demo-dns-solver-svc-acct
key: demo-dns-solver-key.json
EOF
```
### Set up DNS for the Supervisor's public IP
Create a record in Cloud DNS for the public IP of the LoadBalancer created above.
Of course, you would replace these sample argument values with your actual public IP address, DNS zone name, and domain.
```sh
# Replace the values of these variables with your own.
PUBLIC_IP="1.2.3.4"
DNS_ZONE="pinniped-dev"
DNS_NAME="demo-supervisor.pinniped.dev"
gcloud dns record-sets transaction start \
--zone="$DNS_ZONE" --project "$PROJECT"
# Note that the trailing dot is required after $DNS_NAME.
gcloud dns record-sets transaction add "$PUBLIC_IP" \
--name="$DNS_NAME." --ttl="300" --type="A" \
--zone="$DNS_ZONE" --project "$PROJECT"
gcloud dns record-sets transaction execute \
--zone="$DNS_ZONE" --project "$PROJECT"
```
This will take a few moments to move from status "pending" to status "done". Using the change ID that was
output from the previous command (e.g. "87"), you can check the status with this command.
```sh
# Replace the example ID "87" with the actual ID.
gcloud dns record-sets changes describe "87" \
--zone "$DNS_ZONE" --project "$PROJECT"
```
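You can also check from your own workstation whether the new record has started resolving, assuming a DNS lookup tool such as `nslookup` or `dig` is available locally. Public resolvers may take a little longer to pick up the record.
```sh
nslookup "$DNS_NAME"
# or, equivalently:
dig +short "$DNS_NAME"
```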
### Ask cert-manager to create a TLS certificate as a Secret
```sh
cat <<EOF | kubectl create --kubeconfig supervisor-admin.yaml -f -
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: supervisor-tls-cert-request
namespace: pinniped-supervisor
spec:
secretName: supervisor-tls-cert
issuerRef:
# The cert-manager Issuer created in the step above.
name: demo-issuer
dnsNames:
- "$DNS_NAME"
EOF
```
Wait for the Secret to get created. This may take a few minutes. Use the following command to see if it exists.
```sh
kubectl get secret supervisor-tls-cert \
--namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml
```
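Alternatively, you can block until cert-manager marks the Certificate as ready. This is a sketch that assumes the cert-manager version installed above, which sets a `Ready` condition on the Certificate resource.
```sh
kubectl wait --for=condition=Ready certificate/supervisor-tls-cert-request \
  --namespace pinniped-supervisor --timeout=10m \
  --kubeconfig supervisor-admin.yaml
```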
### Configure a FederationDomain in the Pinniped Supervisor
The Supervisor should be configured to have a [FederationDomain](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#federationdomain), which, under the hood:
- Acts as an OIDC provider to the Pinniped CLI, creating a consistent interface for the CLI to use regardless
of which protocol the Supervisor is using to talk to the external identity provider
- Also acts as an OIDC provider to the workload cluster's Concierge component, which will receive JWT tokens
from the CLI and cryptographically validate that they were issued by the Supervisor
Create the FederationDomain.
```sh
cat <<EOF | kubectl create --kubeconfig supervisor-admin.yaml -f -
apiVersion: config.supervisor.pinniped.dev/v1alpha1
kind: FederationDomain
metadata:
name: demo-federation-domain
namespace: pinniped-supervisor
spec:
# You can choose an arbitrary path for the issuer URL.
issuer: "https://$DNS_NAME/demo-issuer"
tls:
# The name of the secretName from the cert-manager Certificate
# resource above.
secretName: supervisor-tls-cert
EOF
```
Check that the DNS, certificate, and FederationDomain are all working together by trying
to fetch one of the FederationDomain's endpoints. If it works, it should return a JSON-formatted discovery response.
Note that it may take a little time for the new DNS entry created above to propagate to your machine.
```sh
curl "https://${DNS_NAME}/demo-issuer/.well-known/openid-configuration"
```
### Create a client (also known as an "app") in the Okta admin UI
In this tutorial we are using Okta as an OIDC identity provider. Refer to the
[howto guides]({{< ref "../howto/" >}}) for examples of using other identity
providers.
The Pinniped Supervisor app will be a client of Okta.
The general steps required to create and configure a client in Okta are:
1. Sign up for Okta if you don't already have an account. They offer a free developer account.
2. Log in to the admin UI of your account.
3. Create a test user with an email and a password. It does not need to be a real email address for the purposes of this tutorial.
4. Create an app in the Okta UI.
1. For more information about creating an app in the Okta UI, see the
[Configure Supervisor With Okta OIDC howto doc]({{< ref "../howto/configure-supervisor-with-okta" >}}).
2. Make sure that the test user is assigned to the app in the app's "Assignments" tab.
3. Add the FederationDomain's callback endpoint to the "Sign-in redirect URIs" list on the app in the UI.
The callback endpoint is the FederationDomain's issuer URL plus `/callback`,
e.g. `https://demo-supervisor.pinniped.dev/demo-issuer/callback`.
4. Get the app's "Okta Domain", "Client ID", and "Client secret" from the UI for use in the next step.
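As an optional sanity check before configuring the Supervisor, you can confirm that your Okta authorization server's OIDC discovery endpoint is reachable. Replace the example Okta Domain below with your own; the `/oauth2/default` authorization server path matches the issuer used in the next step.
```sh
curl "https://dev-123456.okta.com/oauth2/default/.well-known/openid-configuration"
```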
### Configure the Supervisor to use Okta as the external identity provider
Create an [OIDCIdentityProvider](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) and a Secret.
```sh
# Replace the issuer's domain, the client ID, and client secret below.
cat <<EOF | kubectl create --kubeconfig supervisor-admin.yaml -f -
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
namespace: pinniped-supervisor
name: okta
spec:
# This should be the app's "Okta Domain" plus "/oauth2/default".
issuer: https://dev-123456.okta.com/oauth2/default
authorizationConfig:
additionalScopes: [groups, email, offline_access]
claims:
username: email
groups: groups
client:
secretName: okta-client-credentials
---
apiVersion: v1
kind: Secret
metadata:
namespace: pinniped-supervisor
name: okta-client-credentials
type: secrets.pinniped.dev/oidc-client
stringData:
# This should be the app's "Client ID"
clientID: "0oa45dekegIzOlvB17x9"
# This should be the app's "Client secret"
clientSecret: "<redacted>"
EOF
```
To check that the connection to Okta is working, look at the status conditions and status phase of the resource.
It should be in phase "Ready".
```sh
kubectl get OIDCIdentityProvider okta \
--namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml -o yaml
```
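If you'd rather script this check than read the YAML by eye, one option is `kubectl wait`. This is only a sketch: it assumes your local kubectl is v1.23 or newer, which added the jsonpath form of `--for`.
```sh
# Optional: wait up to two minutes for the OIDCIdentityProvider to report phase "Ready".
# Requires kubectl v1.23+ for the jsonpath form of --for.
kubectl wait OIDCIdentityProvider okta \
  --for=jsonpath='{.status.phase}'=Ready \
  --namespace pinniped-supervisor \
  --kubeconfig supervisor-admin.yaml \
  --timeout=2m
```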
### Install and configure the Concierge on the workload clusters
There are several installation options described in the
[howto guide for installing the Concierge]({{< ref "../howto/install-concierge" >}}).
For this tutorial, we will install the latest version using the `kubectl` CLI.
```sh
# Install onto the first workload cluster.
kubectl apply -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
--kubeconfig workload1-admin.yaml
kubectl apply -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
--kubeconfig workload1-admin.yaml
# Install onto the second workload cluster.
kubectl apply -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
--kubeconfig workload2-admin.yaml
kubectl apply -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
--kubeconfig workload2-admin.yaml
```
Configure the Concierge on the first workload cluster to trust the Supervisor's
FederationDomain for authentication by creating a
[JWTAuthenticator](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#jwtauthenticator).
```sh
# The audience value below is an arbitrary value which must uniquely
# identify this cluster. No other workload cluster should use the same value.
# It can have a human-readable component, but part of it should be random
# enough to ensure its uniqueness.
# The command `openssl rand -hex 8` can help in generating random values.
cat <<EOF | kubectl create --kubeconfig workload1-admin.yaml -f -
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
kind: JWTAuthenticator
metadata:
name: demo-supervisor-jwt-authenticator
spec:
# This should be the issuer URL that was declared in the FederationDomain.
issuer: "https://$DNS_NAME/demo-issuer"
# Replace this with your own unique value.
audience: workload1-ed9de33c370981f61e9c
EOF
```
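If you prefer to script the generation of these audience values rather than typing them by hand, one illustrative approach is shown below; the `workload1-` prefix is simply the human-readable naming convention used in this tutorial.
```sh
# Generate a unique audience value for a workload cluster.
echo "workload1-$(openssl rand -hex 8)"
```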
Apply a similar configuration in the other workload cluster with a different
`audience` value.
```sh
cat <<EOF | kubectl create --kubeconfig workload2-admin.yaml -f -
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
kind: JWTAuthenticator
metadata:
name: demo-supervisor-jwt-authenticator
spec:
issuer: "https://$DNS_NAME/demo-issuer"
# Replace this with your own unique value.
audience: workload2-86af71b821afe8d9caf4
EOF
```
### Configure RBAC rules for the developer and devops users
For this tutorial, we will keep the Kubernetes RBAC configuration simple.
We'll use a contrived example of RBAC policies to avoid getting into RBAC policy design discussions.
If one of your Okta users has the email address `walrus@example.com`,
then you could allow that user to [edit](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)
things in a new namespace in one workload cluster,
and [view](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)
most things in the other workload cluster, with the following commands.
```sh
# Create a namespace in the first workload cluster.
kubectl create namespace "dev" \
--kubeconfig workload1-admin.yaml
# Allow the developer to edit everything in the new namespace.
cat <<EOF | kubectl create --kubeconfig workload1-admin.yaml -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: developer-can-edit-dev-ns
namespace: dev
subjects:
- kind: User
name: walrus@example.com
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: edit
apiGroup: rbac.authorization.k8s.io
EOF
# In the second workload cluster, allow the developer
# to view everything in all namespaces.
kubectl create clusterrolebinding developer-can-view \
--clusterrole view \
--user walrus@example.com \
--kubeconfig workload2-admin.yaml
```
RBAC rules can be defined for your users using their usernames and/or their group memberships.
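For example, because the Supervisor is configured above to pass through each user's Okta group memberships, you could bind a role to a whole group instead of an individual user. The group name `developers` below is only a hypothetical example; substitute a group that actually exists in your Okta directory.
```sh
# Hypothetical example: allow everyone in the Okta group "developers"
# to view most things in the first workload cluster.
kubectl create clusterrolebinding okta-developers-can-view \
  --clusterrole view \
  --group developers \
  --kubeconfig workload1-admin.yaml
```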
### Create kubeconfig files for the workload clusters
As the cluster admin, create kubeconfig files for the workload clusters that can be
used by the developer and devops users. These commands should be run using the admin
kubeconfigs of the workload clusters, and they will output the new Pinniped-compatible
kubeconfigs for the workload clusters.
The `--kubeconfig` and `--kubeconfig-context` options, along with the `KUBECONFIG` environment variable,
can help you specify how the command should find the admin kubeconfig for the cluster.
The new Pinniped-compatible kubeconfig will be printed to stdout, so in these examples we will redirect
that to a file.
```sh
pinniped get kubeconfig \
  --kubeconfig workload1-admin.yaml > workload1-developer.yaml

pinniped get kubeconfig \
  --kubeconfig workload2-admin.yaml > workload2-developer.yaml
```
These new kubeconfig files may be distributed to the app developers and devops users who
will be using these workload clusters. They do not contain any particular identity or credential.
As the cluster creator, do not share the admin kubeconfig files with your workload cluster users.
Save the admin kubeconfig files somewhere private and secure for your own future use.
See the [full documentation for the `pinniped get kubeconfig` command]({{< ref "../reference/cli" >}})
for other available optional parameters.
### Optional: Merge the developer kubeconfig files to distribute them as one file
The `kubectl` CLI [can merge kubeconfig files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#merging-kubeconfig-files).
If you wanted to distribute one kubeconfig file instead of one per cluster,
you could choose to merge the Pinniped-compatible kubeconfig files.
```sh
# For this command, KUBECONFIG is treated as a list of input files.
KUBECONFIG="workload1-developer.yaml:workload2-developer.yaml" kubectl \
  config view --flatten -o yaml > all-workload-clusters-developer.yaml
```
The developer who uses the combined kubeconfig file will need to use the standard `kubectl` methods to choose their current context.
For clarity, the steps shown below will continue to use the separate kubeconfig files.
### As a developer or devops user, access the workload clusters by using regular kubectl commands
A developer or devops user who would like to use the workload clusters may do so using kubectl with
the kubeconfig files provided to them by the cluster admin in the previous step.
The kubeconfig files tell kubectl how to invoke the Pinniped CLI as a plugin to aid in authentication.
First, the user will need to install the Pinniped CLI at the same full path that is referenced
inside the kubeconfig file, or edit their own copy of the kubeconfig file so that the path matches
where they have locally installed the Pinniped CLI.
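One quick way to see which path the kubeconfig expects is to look at its `exec` stanza, which names the command that kubectl will invoke; the `grep` below is just a convenience, and opening the file in an editor works equally well.
```sh
# Show the exec plugin configuration, including the expected path to the pinniped CLI.
grep -A 10 'exec:' workload1-developer.yaml
```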
Then the developer can run any kubectl command using a kubeconfig file
that was provided to them by the cluster admin. For example, let's run a command against the first workload cluster.
```sh
kubectl get namespaces --kubeconfig workload1-developer.yaml
```
The first time this command is run, it will open their default web browser and redirect them to Okta for login.
After successfully logging in to Okta, for example as the user `walrus@example.com`, the kubectl command will
continue and will try to list the namespaces.
The user's identity in Kubernetes (username and group memberships) came from Okta, through Pinniped.
Oops! This results in an RBAC error similar to
`Error from server (Forbidden): namespaces is forbidden: User "walrus@example.com" cannot list resource "namespaces" in API group "" at the cluster scope`.
Recall that in the first workload cluster, the user only has RBAC permissions in the `dev` namespace.
Let's try again, but this time we will list something in the `dev` namespace.
```sh
kubectl get serviceaccounts --namespace dev \
  --kubeconfig workload1-developer.yaml
```
This will successfully list the default service account in the `dev` namespace.
That same developer user can access all other workload clusters in a similar fashion. For example,
let's run a command against the second workload cluster. Recall that the developer is allowed
to read everything in the second workload cluster.
```sh
kubectl get namespaces --kubeconfig workload2-developer.yaml
```
Even though you are accessing a different cluster, the web browser will not open again.
You do not need to interactively sign in again for the rest of the day to access
any workload cluster within the same FederationDomain.
Behind the scenes, Pinniped is performing token refreshes and token exchanges
on behalf of the user to create a short-lived, cluster-scoped token to access
this new workload cluster using the same identity from Okta.
Note that users can use any of kubectl's supported means of providing kubeconfig information to kubectl.
They are not limited to only using the `--kubeconfig` flag. For example, they could set the `KUBECONFIG`
environment variable instead.
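For instance, exporting `KUBECONFIG` once in the shell session removes the need to repeat the flag on every command.
```sh
# Equivalent to passing --kubeconfig on each kubectl invocation.
export KUBECONFIG="$PWD/workload1-developer.yaml"
kubectl get serviceaccounts --namespace dev
```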
For more information about logging in to workload clusters, see the [howto doc about login]({{< ref "../howto/login" >}}).
### Whoami
Not sure what identity you're using on the cluster? Pinniped has a convenient feature to help out with that.
```sh
pinniped whoami --kubeconfig workload2-developer.yaml
```
The output will include your username and group names, and will look similar to the following.
```
Current cluster info:
Name: gke_your_project_us-central1-c_demo-workload-cluster2-pinniped
URL: https://1.2.3.4
Current user info:
Username: walrus@example.com
Groups: Everyone, developers, system:authenticated
```
## What we've learned
This tutorial showed:
- A detailed example of how to install and configure a Supervisor with ingress, DNS, TLS, and an external identity provider
- How to install the Concierge onto multiple workload clusters and configure them all to trust identities from the Supervisor
- How an admin can create and distribute kubeconfig files for the workload clusters
- How a developer or devops user can authenticate with kubectl using their identity from the external identity provider,
and how they can securely access all workload clusters for the rest of the day without needing to authenticate again
## Removing the resources created in this tutorial
If you would like to delete the resources created in this tutorial, you can use the following commands.
```sh
# To uninstall the Pinniped Supervisor app and all related configuration
# (including the GCP load balancer):
kubectl delete \
-f "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml" \
--kubeconfig supervisor-admin.yaml \
--ignore-not-found
# To uninstall cert-manager (assuming you already ran the above command):
kubectl delete -f \
"https://github.com/jetstack/cert-manager/releases/download/v1.5.3/cert-manager.yaml" \
--kubeconfig supervisor-admin.yaml
# To uninstall the Pinniped Concierge apps and all related configuration:
kubectl delete -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
--kubeconfig workload1-admin.yaml
kubectl delete -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
--kubeconfig workload1-admin.yaml
kubectl delete -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
--kubeconfig workload2-admin.yaml
kubectl delete -f \
"https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
--kubeconfig workload2-admin.yaml
# To delete the GKE clusters entirely:
gcloud container clusters delete "demo-supervisor-cluster" \
--project "$PROJECT" --zone "$ZONE" --quiet
gcloud container clusters delete "demo-workload-cluster1" \
--project "$PROJECT" --zone "$ZONE" --quiet
gcloud container clusters delete "demo-workload-cluster2" \
--project "$PROJECT" --zone "$ZONE" --quiet
# To delete the DNS entry for the Supervisor:
gcloud dns record-sets transaction start \
--zone="$DNS_ZONE" --project "$PROJECT"
gcloud dns record-sets transaction remove "$PUBLIC_IP" \
--name="$DNS_NAME." --ttl="300" --type="A" \
--zone="$DNS_ZONE" --project "$PROJECT"
gcloud dns record-sets transaction execute \
--zone="$DNS_ZONE" --project "$PROJECT"
# To delete the service account we created for cert-manager:
gcloud projects remove-iam-policy-binding "$PROJECT" \
--member "serviceAccount:demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \
--role roles/dns.admin --condition=None
gcloud iam service-accounts delete \
"demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \
--project "$PROJECT" --quiet
```

View File

@ -10,6 +10,30 @@ menu:
weight: 100 weight: 100
--- ---
## Overview
This tutorial shows how to use the Pinniped Concierge on Kubernetes clusters.
If you would like to learn how to use the Pinniped Supervisor and Concierge together to
provide federated identity with a single sign-on user experience to many Kubernetes clusters,
please instead see this other tutorial:
- [Concierge with Supervisor: a complete example of every step, demonstrated using GKE clusters]({{< ref "concierge-and-supervisor-demo" >}})
Installing and trying the Pinniped Concierge on any cluster consists of the following general steps. See the next section below
for a more specific example of installing onto a local kind cluster, including the exact commands to use for that case.
1. [Install the Concierge]({{< ref "../howto/install-concierge" >}}).
1. [Install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}).
1. Configure the Concierge with a
[JWT]({{< ref "../howto/configure-concierge-jwt" >}}) or
[webhook]({{< ref "../howto/configure-concierge-webhook" >}}) authenticator.
1. Generate a kubeconfig using the Pinniped command-line tool (run `pinniped get kubeconfig --help` for more information).
1. Run `kubectl` commands using the generated kubeconfig. The Pinniped Concierge will automatically be used for authentication during those commands.
Please be aware that using the Concierge without the Supervisor is an advanced use case, not the typical use case.
For example, the Supervisor issues cluster-scoped credentials that cannot be replayed against other clusters,
so using the Concierge without the Supervisor removes that protection. You might have designed another system to provide
that protection, but if not then please carefully consider the security implications.
## Prerequisites ## Prerequisites
1. A Kubernetes cluster of a type supported by Pinniped as described in [architecture](/docs/background/architecture). 1. A Kubernetes cluster of a type supported by Pinniped as described in [architecture](/docs/background/architecture).
@ -27,21 +51,6 @@ menu:
1. A kubeconfig where the current context points to the cluster and has administrator-like 1. A kubeconfig where the current context points to the cluster and has administrator-like
privileges on that cluster. privileges on that cluster.
## Overview
Installing and trying the Pinniped Concierge on any cluster consists of the following general steps. See the next section below
for a more specific example of installing onto a local kind cluster, including the exact commands to use for that case.
1. [Install the Concierge]({{< ref "../howto/install-concierge" >}}).
1. [Install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}).
1. Configure the Concierge with a
[JWT]({{< ref "../howto/configure-concierge-jwt" >}}) or
[webhook]({{< ref "../howto/configure-concierge-webhook" >}}) authenticator.
1. Generate a kubeconfig using the Pinniped command-line tool (run `pinniped get kubeconfig --help` for more information).
1. Run `kubectl` commands using the generated kubeconfig.
The Pinniped Concierge will automatically be used for authentication during those commands.
## Example of deploying on kind ## Example of deploying on kind
[kind](https://kind.sigs.k8s.io) is a tool for creating and managing Kubernetes clusters on your local machine [kind](https://kind.sigs.k8s.io) is a tool for creating and managing Kubernetes clusters on your local machine

View File

@ -8,10 +8,25 @@ layout: section
<h1>Resources</h1> <h1>Resources</h1>
</div> </div>
</div> </div>
<div class="wrapper resources"> <div class="wrapper resources">
<h2>Resources about Pinniped, such as videos, podcasts, and community articles</h2> <h2>Resources about Pinniped, such as videos, podcasts, and community articles</h2>
<div class="grid three"> <div class="grid three">
<div class="col">
<a href="https://pinniped.dev/docs/tutorials/concierge-and-supervisor-demo/">
<div class="icon">
<img src="/img/logo.svg"/>
</div>
<div class="content">
<p class="strong">Pinniped Tutorial:</p>
<p>Learn to use Pinniped for federated authentication to Kubernetes clusters</p>
</div>
</a>
</div>
<div class="col"> <div class="col">
<a href="https://github.com/vmware-tanzu/pinniped"> <a href="https://github.com/vmware-tanzu/pinniped">
<div class="icon"> <div class="icon">
@ -30,7 +45,8 @@ layout: section
</div> </div>
<div class="content"> <div class="content">
<p class="strong">Community Meetings and Demos</p> <p class="strong">Community Meetings and Demos</p>
<p>We have a YouTube playlist for our Pinniped community meetings and demos, <a href="https://go.pinniped.dev/community/youtube">check it out here</a>.</p> <p>We have a YouTube playlist for our Pinniped community meetings and demos, <a
href="https://go.pinniped.dev/community/youtube">check it out here</a>.</p>
</div> </div>
</div> </div>
@ -39,7 +55,9 @@ layout: section
<iframe class="embed-responsive-item" src="https://www.youtube.com/embed/OCkTnElNE9M"></iframe> <iframe class="embed-responsive-item" src="https://www.youtube.com/embed/OCkTnElNE9M"></iframe>
</div> </div>
<div class="content"> <div class="content">
<p><a href="https://www.youtube.com/watch?v=OCkTnElNE9M&feature=emb_logo">KubeCon + CloudNativeCon North America 2021: Everything Wrong with K8s Authentication and How We Worked Around It with Mo Khan & Margo Crawford</a></p> <p><a href="https://www.youtube.com/watch?v=OCkTnElNE9M&feature=emb_logo">KubeCon + CloudNativeCon North
America 2021: Everything Wrong with K8s Authentication and How We Worked Around It with Mo Khan &
Margo Crawford</a></p>
</div> </div>
</div> </div>
@ -48,9 +66,11 @@ layout: section
<iframe class="embed-responsive-item" src="https://www.youtube.com/embed/2fI_XOGEoIU"></iframe> <iframe class="embed-responsive-item" src="https://www.youtube.com/embed/2fI_XOGEoIU"></iframe>
</div> </div>
<div class="content"> <div class="content">
<p><a href="https://www.youtube.com/watch?v=2fI_XOGEoIU&feature=emb_logo">Pinniped: A Unified Framework for User Authentication to Kubernetes Cluster with Mo Khan & Anjali Telang</a></p> <p><a href="https://www.youtube.com/watch?v=2fI_XOGEoIU&feature=emb_logo">Pinniped: A Unified Framework
for User Authentication to Kubernetes Cluster with Mo Khan & Anjali Telang</a></p>
</div> </div>
</div> </div>
<div class="col"> <div class="col">
<div class="embed-responsive"> <div class="embed-responsive">
<iframe class="embed-responsive-item" <iframe class="embed-responsive-item"
@ -65,7 +85,7 @@ layout: section
<p> <p>
<a href="https://www.cncf.io/online-programs/cncf-live-webinar-easy-secure-kubernetes-authentication-with-pinniped/" <a href="https://www.cncf.io/online-programs/cncf-live-webinar-easy-secure-kubernetes-authentication-with-pinniped/"
target="_blank"> target="_blank">
CNCF Live Webinar: Easy, Secure Kubernetes Authentication With Pinniped - August 24, 2021 CNCF Live Webinar: Easy, Secure Kubernetes Authentication With Pinniped - Matt Moyer - August 24, 2021
[VIDEO] [VIDEO]
</a> </a>
<a href="https://docs.google.com/presentation/d/1euA62C7SHQpHewPqPaxTvNEKdvyNOdU9MDWh3YN3NvY/edit?usp=sharing" <a href="https://docs.google.com/presentation/d/1euA62C7SHQpHewPqPaxTvNEKdvyNOdU9MDWh3YN3NvY/edit?usp=sharing"

View File

@ -23,7 +23,8 @@
{{ partial "footer" . }} {{ partial "footer" . }}
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script> <script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
<script type="text/javascript"> docsearch({ <script type="text/javascript"> docsearch({
apiKey: '8f37841a145124eb42a0e3249d49a3c5', appId: 'FYQE1VJB02',
apiKey: '826ac6c64f20d5e00a72a0599cf76177',
indexName: 'pinniped', indexName: 'pinniped',
inputSelector: '.docsearch-input', inputSelector: '.docsearch-input',
debug: false // Set debug to true if you want to inspect the dropdown debug: false // Set debug to true if you want to inspect the dropdown

View File

@ -1,4 +1,4 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. // Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package integration package integration
@ -7,7 +7,6 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -293,32 +292,21 @@ func runPinnipedLoginOIDC(
t.Logf("starting CLI subprocess") t.Logf("starting CLI subprocess")
require.NoError(t, cmd.Start()) require.NoError(t, cmd.Start())
t.Cleanup(func() { t.Cleanup(func() {
err := cmd.Wait() err := cmd.Wait() // handles closing of file descriptors
t.Logf("CLI subprocess exited with code %d", cmd.ProcessState.ExitCode()) t.Logf("CLI subprocess exited with code %d", cmd.ProcessState.ExitCode())
require.NoErrorf(t, err, "CLI process did not exit cleanly") require.NoErrorf(t, err, "CLI process did not exit cleanly")
}) })
// Start a background goroutine to read stderr from the CLI and parse out the login URL. // Start a background goroutine to read stderr from the CLI and parse out the login URL.
loginURLChan := make(chan string) loginURLChan := make(chan string, 1)
spawnTestGoroutine(t, func() (err error) { spawnTestGoroutine(ctx, t, func() error {
t.Helper()
defer func() {
closeErr := stderr.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
if err == nil {
err = fmt.Errorf("stderr stream closed with error: %w", closeErr)
}
}()
reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderr)) reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderr))
scanner := bufio.NewScanner(reader) scanner := bufio.NewScanner(reader)
for scanner.Scan() { for scanner.Scan() {
loginURL, err := url.Parse(strings.TrimSpace(scanner.Text())) loginURL, err := url.Parse(strings.TrimSpace(scanner.Text()))
if err == nil && loginURL.Scheme == "https" { if err == nil && loginURL.Scheme == "https" {
loginURLChan <- loginURL.String() loginURLChan <- loginURL.String() // this channel is buffered so this will not block
return nil return nil
} }
} }
@ -327,23 +315,14 @@ func runPinnipedLoginOIDC(
}) })
// Start a background goroutine to read stdout from the CLI and parse out an ExecCredential. // Start a background goroutine to read stdout from the CLI and parse out an ExecCredential.
credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential) credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential, 1)
spawnTestGoroutine(t, func() (err error) { spawnTestGoroutine(ctx, t, func() error {
defer func() {
closeErr := stdout.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
if err == nil {
err = fmt.Errorf("stdout stream closed with error: %w", closeErr)
}
}()
reader := bufio.NewReader(testlib.NewLoggerReader(t, "stdout", stdout)) reader := bufio.NewReader(testlib.NewLoggerReader(t, "stdout", stdout))
var out clientauthenticationv1beta1.ExecCredential var out clientauthenticationv1beta1.ExecCredential
if err := json.NewDecoder(reader).Decode(&out); err != nil { if err := json.NewDecoder(reader).Decode(&out); err != nil {
return fmt.Errorf("could not read ExecCredential from stdout: %w", err) return fmt.Errorf("could not read ExecCredential from stdout: %w", err)
} }
credOutputChan <- out credOutputChan <- out // this channel is buffered so this will not block
return readAndExpectEmpty(reader) return readAndExpectEmpty(reader)
}) })
@ -398,11 +377,33 @@ func readAndExpectEmpty(r io.Reader) (err error) {
return nil return nil
} }
func spawnTestGoroutine(t *testing.T, f func() error) { // Note: Callers should ensure that f eventually returns, otherwise this helper will leak a go routine.
func spawnTestGoroutine(ctx context.Context, t *testing.T, f func() error) {
t.Helper() t.Helper()
var eg errgroup.Group var eg errgroup.Group
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, eg.Wait(), "background goroutine failed") egCh := make(chan error, 1) // do not block the go routine from exiting even after the select has completed
go func() {
egCh <- eg.Wait()
}()
leewayCh := make(chan struct{})
go func() {
<-ctx.Done()
// give f up to 30 seconds after the context is canceled to return
// this prevents "race" conditions where f is orchestrated via the same context
time.Sleep(30 * time.Second)
close(leewayCh)
}()
select {
case <-leewayCh:
t.Errorf("background goroutine hung: %v", ctx.Err())
case err := <-egCh:
require.NoError(t, err, "background goroutine failed")
}
}) })
eg.Go(f) eg.Go(f)
} }

View File

@ -19,6 +19,7 @@ import (
"regexp" "regexp"
"sort" "sort"
"strings" "strings"
"sync/atomic"
"testing" "testing"
"time" "time"
@ -29,6 +30,7 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
authv1alpha "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1" authv1alpha "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1"
configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1" configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1"
@ -49,7 +51,7 @@ import (
func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo
env := testlib.IntegrationEnv(t) env := testlib.IntegrationEnv(t)
ctx, cancelFunc := context.WithTimeout(context.Background(), 15*time.Minute) ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancelFunc() defer cancelFunc()
// Build pinniped CLI. // Build pinniped CLI.
@ -106,6 +108,9 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo
// Add an OIDC upstream IDP and try using it to authenticate during kubectl commands. // Add an OIDC upstream IDP and try using it to authenticate during kubectl commands.
t.Run("with Supervisor OIDC upstream IDP and automatic flow", func(t *testing.T) { t.Run("with Supervisor OIDC upstream IDP and automatic flow", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
t.Cleanup(cancel)
// Start a fresh browser driver because we don't want to share cookies between the various tests in this file. // Start a fresh browser driver because we don't want to share cookies between the various tests in this file.
page := browsertest.Open(t) page := browsertest.Open(t)
@ -157,48 +162,58 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo
// Run "kubectl get namespaces" which should trigger a browser login via the plugin. // Run "kubectl get namespaces" which should trigger a browser login via the plugin.
start := time.Now() start := time.Now()
kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) kubectlCmd := exec.CommandContext(testCtx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath, "-v", "6")
kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...)
stderrPipe, err := kubectlCmd.StderrPipe()
// Wrap the stdout and stderr pipes with TeeReaders which will copy each incremental read to an
// in-memory buffer, so we can have the full output available to us at the end.
originalStderrPipe, err := kubectlCmd.StderrPipe()
require.NoError(t, err) require.NoError(t, err)
stdoutPipe, err := kubectlCmd.StdoutPipe() originalStdoutPipe, err := kubectlCmd.StdoutPipe()
require.NoError(t, err) require.NoError(t, err)
var stderrPipeBuf, stdoutPipeBuf bytes.Buffer
stderrPipe := io.TeeReader(originalStderrPipe, &stderrPipeBuf)
stdoutPipe := io.TeeReader(originalStdoutPipe, &stdoutPipeBuf)
t.Logf("starting kubectl subprocess") t.Logf("starting kubectl subprocess")
require.NoError(t, kubectlCmd.Start()) require.NoError(t, kubectlCmd.Start())
t.Cleanup(func() { t.Cleanup(func() {
err := kubectlCmd.Wait() // Consume readers so that the tee buffers will contain all the output so far.
_, stdoutReadAllErr := readAllCtx(testCtx, stdoutPipe)
_, stderrReadAllErr := readAllCtx(testCtx, stderrPipe)
// Note that Wait closes the stdout/stderr pipes, so we don't need to close them ourselves.
waitErr := kubectlCmd.Wait()
t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode()) t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode())
stdout, stdoutErr := ioutil.ReadAll(stdoutPipe)
if stdoutErr != nil { // Upon failure, print the full output so far of the kubectl command.
stdout = []byte("<error reading stdout: " + stdoutErr.Error() + ">") var testAlreadyFailedErr error
if t.Failed() {
testAlreadyFailedErr = errors.New("test failed prior to clean up function")
} }
stderr, stderrErr := ioutil.ReadAll(stderrPipe) cleanupErrs := utilerrors.NewAggregate([]error{waitErr, stdoutReadAllErr, stderrReadAllErr, testAlreadyFailedErr})
if stderrErr != nil {
stderr = []byte("<error reading stderr: " + stderrErr.Error() + ">") if cleanupErrs != nil {
t.Logf("kubectl stdout was:\n----start of stdout\n%s\n----end of stdout", stdoutPipeBuf.String())
t.Logf("kubectl stderr was:\n----start of stderr\n%s\n----end of stderr", stderrPipeBuf.String())
} }
require.NoErrorf(t, err, "kubectl process did not exit cleanly, stdout/stderr: %q/%q", string(stdout), string(stderr)) require.NoErrorf(t, cleanupErrs, "kubectl process did not exit cleanly and/or the test failed. "+
"Note: if kubectl's first call to the Pinniped CLI results in the Pinniped CLI returning an error, "+
"then kubectl may call the Pinniped CLI again, which may hang because it will wait for the user "+
"to finish the login. This test will kill the kubectl process after a timeout. In this case, the "+
" kubectl output printed above will include multiple prompts for the user to enter their authcode.",
)
}) })
// Start a background goroutine to read stderr from the CLI and parse out the login URL. // Start a background goroutine to read stderr from the CLI and parse out the login URL.
loginURLChan := make(chan string) loginURLChan := make(chan string, 1)
spawnTestGoroutine(t, func() (err error) { spawnTestGoroutine(testCtx, t, func() error {
defer func() {
closeErr := stderrPipe.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
if err == nil {
err = fmt.Errorf("stderr stream closed with error: %w", closeErr)
}
}()
reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderrPipe)) reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderrPipe))
scanner := bufio.NewScanner(reader) scanner := bufio.NewScanner(reader)
for scanner.Scan() { for scanner.Scan() {
loginURL, err := url.Parse(strings.TrimSpace(scanner.Text())) loginURL, err := url.Parse(strings.TrimSpace(scanner.Text()))
if err == nil && loginURL.Scheme == "https" { if err == nil && loginURL.Scheme == "https" {
loginURLChan <- loginURL.String() loginURLChan <- loginURL.String() // this channel is buffered so this will not block
return nil return nil
} }
} }
@ -206,23 +221,14 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo
}) })
// Start a background goroutine to read stdout from kubectl and return the result as a string. // Start a background goroutine to read stdout from kubectl and return the result as a string.
kubectlOutputChan := make(chan string) kubectlOutputChan := make(chan string, 1)
spawnTestGoroutine(t, func() (err error) { spawnTestGoroutine(testCtx, t, func() error {
defer func() { output, err := readAllCtx(testCtx, stdoutPipe)
closeErr := stdoutPipe.Close()
if closeErr == nil || errors.Is(closeErr, os.ErrClosed) {
return
}
if err == nil {
err = fmt.Errorf("stdout stream closed with error: %w", closeErr)
}
}()
output, err := ioutil.ReadAll(stdoutPipe)
if err != nil { if err != nil {
return err return err
} }
t.Logf("kubectl output:\n%s\n", output) t.Logf("kubectl output:\n%s\n", output)
kubectlOutputChan <- string(output) kubectlOutputChan <- string(output) // this channel is buffered so this will not block
return nil return nil
}) })
@ -234,7 +240,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo
require.Fail(t, "timed out waiting for login URL") require.Fail(t, "timed out waiting for login URL")
case loginURL = <-loginURLChan: case loginURL = <-loginURLChan:
} }
t.Logf("navigating to login page") t.Logf("navigating to login page: %q", loginURL)
require.NoError(t, page.Navigate(loginURL)) require.NoError(t, page.Navigate(loginURL))
// Expect to be redirected to the upstream provider and log in. // Expect to be redirected to the upstream provider and log in.
@ -260,7 +266,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo
t.Logf("first kubectl command took %s", time.Since(start).String()) t.Logf("first kubectl command took %s", time.Since(start).String())
requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env, requireUserCanUseKubectlWithoutAuthenticatingAgain(testCtx, t, env,
downstream, downstream,
kubeconfigPath, kubeconfigPath,
sessionCachePath, sessionCachePath,
@ -1177,3 +1183,42 @@ func getSecretNameFromSignature(t *testing.T, signature string, typeLabel string
signatureAsValidName := strings.ToLower(b32.EncodeToString(signatureBytes)) signatureAsValidName := strings.ToLower(b32.EncodeToString(signatureBytes))
return fmt.Sprintf("pinniped-storage-%s-%s", typeLabel, signatureAsValidName) return fmt.Sprintf("pinniped-storage-%s-%s", typeLabel, signatureAsValidName)
} }
func readAllCtx(ctx context.Context, r io.Reader) ([]byte, error) {
errCh := make(chan error, 1)
data := &atomic.Value{}
go func() { // copied from io.ReadAll and modified to use the atomic.Value above
b := make([]byte, 0, 512)
data.Store(string(b)) // cast to string to make a copy of the byte slice
for {
if len(b) == cap(b) {
// Add more capacity (let append pick how much).
b = append(b, 0)[:len(b)]
data.Store(string(b)) // cast to string to make a copy of the byte slice
}
n, err := r.Read(b[len(b):cap(b)])
b = b[:len(b)+n]
data.Store(string(b)) // cast to string to make a copy of the byte slice
if err != nil {
if err == io.EOF {
err = nil
}
errCh <- err
return
}
}
}()
select {
case <-ctx.Done():
b, _ := data.Load().(string)
return nil, fmt.Errorf("failed to complete read all: %w, data read so far:\n%q", ctx.Err(), b)
case err := <-errCh:
b, _ := data.Load().(string)
if len(b) == 0 {
return nil, err
}
return []byte(b), err
}
}

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Pinniped contributors. All Rights Reserved. // Copyright 2021-2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package integration package integration
@ -60,6 +60,10 @@ func TestFormPostHTML_Parallel(t *testing.T) {
// //
// This case is fairly unlikely in practice, and if the CLI encounters // This case is fairly unlikely in practice, and if the CLI encounters
// an error it can also expose it via stderr anyway. // an error it can also expose it via stderr anyway.
//
// In the future, we could change the Javascript code to use mode 'cors'
// because we have upgraded our CLI callback endpoint to handle CORS,
// and then we could change this to formpostExpectManualState().
formpostExpectSuccessState(t, page) formpostExpectSuccessState(t, page)
}) })
@ -109,6 +113,19 @@ func formpostCallbackServer(t *testing.T) (string, func(*testing.T, url.Values))
results := make(chan url.Values) results := make(chan url.Values)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// 404 for any other requests aside from POSTs. We do not need to support CORS preflight OPTIONS
// requests for this test because both the web page and the callback are on 127.0.0.1 (same origin).
if r.Method != http.MethodPost {
t.Logf("test callback server got unexpeted request method")
w.WriteHeader(http.StatusNotFound)
return
}
// Allow CORS requests. This will be needed for this test in the future if we change
// the Javascript code from using mode 'no-cors' to instead use mode 'cors'. At the
// moment it should be ignored by the browser.
w.Header().Set("Access-Control-Allow-Origin", "*")
assert.NoError(t, r.ParseForm()) assert.NoError(t, r.ParseForm())
// Extract only the POST parameters (r.Form also contains URL query parameters). // Extract only the POST parameters (r.Form also contains URL query parameters).

View File

@ -1,4 +1,4 @@
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. // Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// Package browsertest provides integration test helpers for our browser-based tests. // Package browsertest provides integration test helpers for our browser-based tests.
@ -119,7 +119,7 @@ func LoginToUpstream(t *testing.T, page *agouti.Page, upstream testlib.TestOIDCU
{ {
Name: "Okta", Name: "Okta",
IssuerPattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.+\z`), IssuerPattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.+\z`),
LoginPagePattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.+\z`), LoginPagePattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.*\z`),
UsernameSelector: "input#okta-signin-username", UsernameSelector: "input#okta-signin-username",
PasswordSelector: "input#okta-signin-password", PasswordSelector: "input#okta-signin-password",
LoginButtonSelector: "input#okta-signin-submit", LoginButtonSelector: "input#okta-signin-submit",