From 0dd3b4069423ae2f393d5bb662f5e6c13b752573 Mon Sep 17 00:00:00 2001 From: anjalitelang <49958114+anjaltelang@users.noreply.github.com> Date: Mon, 31 Jan 2022 12:13:18 -0500 Subject: [PATCH 01/24] Update ROADMAP.md --- ROADMAP.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROADMAP.md b/ROADMAP.md index 6b7eb097..e5bda2e4 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -33,7 +33,7 @@ The following table includes the current roadmap for Pinniped. If you have any q -Last Updated: Sept 2021 +Last Updated: Jan 2022 |Theme|Description|Timeline| |--|--|--| |Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works for OIDC |Jan 2022| From 7c246784dc009364d57eae0bed2b980d106c6acb Mon Sep 17 00:00:00 2001 From: anjalitelang <49958114+anjaltelang@users.noreply.github.com> Date: Thu, 3 Feb 2022 08:57:47 -0500 Subject: [PATCH 02/24] Update ROADMAP.md Updated roadmap to reflect changes planned for v0.14 release and beyond. --- ROADMAP.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index e5bda2e4..fcf193b8 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -36,13 +36,13 @@ The following table includes the current roadmap for Pinniped. If you have any q Last Updated: Jan 2022 |Theme|Description|Timeline| |--|--|--| -|Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works for OIDC |Jan 2022| -|Improving Security Posture|Supervisor token refresh fails when the upstream user is in an invalid state for LDAP/AD |Jan 2022| -|Improving Security Posture|Set stricter default TLS versions and Ciphers |Jan 2022| -|Improving Security Posture|Support FIPS compliant Boring crypto libraries |Feb 2022| +|Improving Security Posture|Support for refreshing LDAP/AD Group information |Feb 2022| +|Improving Documentation|Documentation updates for HowTo guides and Workspace ONE IDP |Feb/March 2022| +|Improving Security Posture|Support FIPS compliant Boring crypto libraries |Feb/March 2022| |Multiple IDP support|Support multiple IDPs configured on a single Supervisor|March/April 2022| |Improving Security Posture|TLS hardening |March/April 2022| |Improving Security Posture|Support Audit logging of security events related to Authentication |April/May 2022| +|Improving Usability|Support for integrating with UI/Dashboards |June/July 2022| |Improving Security Posture|mTLS for Supervisor sessions |Exploring/Ongoing| |Improving Security Posture|Key management/rotation for Pinniped components with minimal downtime |Exploring/Ongoing| |Improving Security Posture|Support for Session Logout |Exploring/Ongoing| From 7b97f1533e3010f278b14dac823b294e5f746969 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Fri, 4 Feb 2022 16:57:37 -0800 Subject: [PATCH 03/24] Add CORS request handling to CLI's localhost listener This is to support the new changes in Google Chrome v98 which now performs CORS preflight requests for the Javascript form submission on the Supervisor's login page, even though the form is being submitted to a localhost listener. 
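To see what the CLI's listener must now handle, the preflight that Chrome sends can be simulated with a small Go program like the following (a sketch only, not part of the patch; the listener port and the issuer origin shown here are hypothetical):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Build an OPTIONS request shaped like a Chrome v98+ Private Network Access preflight.
	req, err := http.NewRequest(http.MethodOptions, "http://127.0.0.1:48095/callback", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Origin", "https://supervisor.example.com") // hypothetical Supervisor issuer origin
	req.Header.Set("Access-Control-Request-Method", "POST")
	req.Header.Set("Access-Control-Request-Private-Network", "true")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // e.g. nothing listening on the hypothetical port
	}
	defer resp.Body.Close()

	// The CLI should answer 204 with CORS headers so that the browser proceeds to the real POST.
	fmt.Println(resp.Status)
	for _, h := range []string{"Access-Control-Allow-Origin", "Access-Control-Allow-Methods", "Access-Control-Allow-Private-Network"} {
		fmt.Printf("%s: %s\n", h, resp.Header.Get(h))
	}
}
```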
--- pkg/oidcclient/login.go | 37 +++++++++- pkg/oidcclient/login_test.go | 132 +++++++++++++++++++++++++++++------ 2 files changed, 143 insertions(+), 26 deletions(-) diff --git a/pkg/oidcclient/login.go b/pkg/oidcclient/login.go index d9364689..223e7fb2 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -834,10 +834,41 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req }() var params url.Values - if h.useFormPost { + if h.useFormPost { // nolint:nestif + if r.Method == http.MethodOptions { + // Google Chrome decided that it should do CORS preflight checks for this Javascript form submission POST request. + // See https://developer.chrome.com/blog/private-network-access-preflight/ + origin := r.Header.Get("Origin") + if origin == "" { + // The CORS preflight request should have an origin. + h.logger.V(debugLogLevel).Info("Pinniped: Got OPTIONS request without origin header") + w.WriteHeader(http.StatusBadRequest) + return nil // keep listening for more requests + } + h.logger.V(debugLogLevel).Info("Pinniped: Got CORS preflight request from browser", "origin", origin) + issuerURL, parseErr := url.Parse(h.issuer) + if parseErr != nil { + return httperr.Wrap(http.StatusInternalServerError, "invalid issuer url", parseErr) + } + // To tell the browser that it is okay to make the real POST request, return the following response. + w.Header().Set("Access-Control-Allow-Origin", issuerURL.Scheme+"://"+issuerURL.Host) + w.Header().Set("Access-Control-Allow-Credentials", "false") + w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS") + w.Header().Set("Access-Control-Allow-Private-Network", "true") + // If the browser would like to send some headers on the real request, allow them. Chrome doesn't + // currently send this header at the moment. This is in case some browser in the future decides to + // request to be allowed to send specific headers by using Access-Control-Request-Headers. + requestedHeaders := r.Header.Get("Access-Control-Request-Headers") + if requestedHeaders != "" { + w.Header().Set("Access-Control-Allow-Headers", requestedHeaders) + } + w.WriteHeader(http.StatusNoContent) + return nil // keep listening for more requests + } + // Return HTTP 405 for anything that's not a POST. if r.Method != http.MethodPost { - return httperr.Newf(http.StatusMethodNotAllowed, "wanted POST") + return httperr.Newf(http.StatusMethodNotAllowed, "wanted POST but got %s", r.Method) } // Parse and pull the response parameters from a application/x-www-form-urlencoded request body. @@ -848,7 +879,7 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req } else { // Return HTTP 405 for anything that's not a GET. if r.Method != http.MethodGet { - return httperr.Newf(http.StatusMethodNotAllowed, "wanted GET") + return httperr.Newf(http.StatusMethodNotAllowed, "wanted GET but got %s", r.Method) } // Pull response parameters from the URL query string. 
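The login_test.go changes below drive this handler through net/http/httptest. Stripped of the table-driven machinery, the pattern looks roughly like this (a standalone sketch using a stand-in handler, not the real handlerState):

```go
package sketch

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"
)

// corsPreflight is a stand-in with the same shape as the OPTIONS branch above.
func corsPreflight(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Origin") == "" {
		w.WriteHeader(http.StatusBadRequest) // preflights must carry an Origin header
		return
	}
	w.Header().Set("Access-Control-Allow-Origin", "https://valid-issuer.com")
	w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
	w.Header().Set("Access-Control-Allow-Private-Network", "true")
	w.WriteHeader(http.StatusNoContent)
}

func TestPreflightSketch(t *testing.T) {
	resp := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodOptions, "/callback", nil)
	req.Header.Set("Origin", "https://some-origin.com")

	corsPreflight(resp, req)

	require.Equal(t, http.StatusNoContent, resp.Code)
	require.Equal(t, "POST, OPTIONS", resp.Header().Get("Access-Control-Allow-Methods"))
}
```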
diff --git a/pkg/oidcclient/login_test.go b/pkg/oidcclient/login_test.go index 8ee920d7..bae18b49 100644 --- a/pkg/oidcclient/login_test.go +++ b/pkg/oidcclient/login_test.go @@ -1825,6 +1825,8 @@ func TestHandlePasteCallback(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + t.Parallel() + h := &handlerState{ callbacks: make(chan callbackResult, 1), state: state.State("test-state"), @@ -1866,35 +1868,38 @@ func TestHandleAuthCodeCallback(t *testing.T) { } } tests := []struct { - name string - method string - query string - body []byte - contentType string - opt func(t *testing.T) Option - wantErr string - wantHTTPStatus int + name string + method string + query string + body []byte + headers http.Header + opt func(t *testing.T) Option + + wantErr string + wantHTTPStatus int + wantNoCallbacks bool + wantHeaders http.Header }{ { name: "wrong method", - method: "POST", + method: http.MethodPost, query: "", - wantErr: "wanted GET", + wantErr: "wanted GET but got POST", wantHTTPStatus: http.StatusMethodNotAllowed, }, { name: "wrong method for form_post", - method: "GET", + method: http.MethodGet, query: "", opt: withFormPostMode, - wantErr: "wanted POST", + wantErr: "wanted POST but got GET", wantHTTPStatus: http.StatusMethodNotAllowed, }, { name: "invalid form for form_post", - method: "POST", + method: http.MethodPost, query: "", - contentType: "application/x-www-form-urlencoded", + headers: map[string][]string{"Content-Type": {"application/x-www-form-urlencoded"}}, body: []byte(`%`), opt: withFormPostMode, wantErr: `invalid form: invalid URL escape "%"`, @@ -1918,6 +1923,75 @@ func TestHandleAuthCodeCallback(t *testing.T) { wantErr: `login failed with code "some_error": optional error description`, wantHTTPStatus: http.StatusBadRequest, }, + { + name: "in form post mode, invalid issuer url config during CORS preflight request returns an error", + method: http.MethodOptions, + query: "", + headers: map[string][]string{"Origin": {"https://some-origin.com"}}, + wantErr: `invalid issuer url: parse "://bad-url": missing protocol scheme`, + wantHTTPStatus: http.StatusInternalServerError, + opt: func(t *testing.T) Option { + return func(h *handlerState) error { + h.useFormPost = true + h.issuer = "://bad-url" + return nil + } + }, + }, + { + name: "in form post mode, options request missing origin header results in 400 and keeps listener running", + method: http.MethodOptions, + query: "", + opt: withFormPostMode, + wantNoCallbacks: true, + wantHTTPStatus: http.StatusBadRequest, + }, + { + name: "in form post mode, valid CORS request responds with 204 and CORS headers and keeps listener running", + method: http.MethodOptions, + query: "", + headers: map[string][]string{"Origin": {"https://some-origin.com"}}, + wantNoCallbacks: true, + wantHTTPStatus: http.StatusNoContent, + wantHeaders: map[string][]string{ + "Access-Control-Allow-Credentials": {"false"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Origin": {"https://valid-issuer.com"}, + "Access-Control-Allow-Private-Network": {"true"}, + }, + opt: func(t *testing.T) Option { + return func(h *handlerState) error { + h.useFormPost = true + h.issuer = "https://valid-issuer.com/with/some/path" + return nil + } + }, + }, + { + name: "in form post mode, valid CORS request with Access-Control-Request-Headers responds with 204 and CORS headers including Access-Control-Allow-Headers and keeps listener running", + method: http.MethodOptions, + query: "", + headers: 
map[string][]string{ + "Origin": {"https://some-origin.com"}, + "Access-Control-Request-Headers": {"header1, header2, header3"}, + }, + wantNoCallbacks: true, + wantHTTPStatus: http.StatusNoContent, + wantHeaders: map[string][]string{ + "Access-Control-Allow-Credentials": {"false"}, + "Access-Control-Allow-Methods": {"POST, OPTIONS"}, + "Access-Control-Allow-Origin": {"https://valid-issuer.com"}, + "Access-Control-Allow-Private-Network": {"true"}, + "Access-Control-Allow-Headers": {"header1, header2, header3"}, + }, + opt: func(t *testing.T) Option { + return func(h *handlerState) error { + h.useFormPost = true + h.issuer = "https://valid-issuer.com/with/some/path" + return nil + } + }, + }, { name: "invalid code", query: "state=test-state&code=invalid", @@ -1938,8 +2012,9 @@ func TestHandleAuthCodeCallback(t *testing.T) { }, }, { - name: "valid", - query: "state=test-state&code=valid", + name: "valid", + query: "state=test-state&code=valid", + wantHTTPStatus: http.StatusOK, opt: func(t *testing.T) Option { return func(h *handlerState) error { h.oauth2Config = &oauth2.Config{RedirectURL: testRedirectURI} @@ -1955,10 +2030,11 @@ func TestHandleAuthCodeCallback(t *testing.T) { }, }, { - name: "valid form_post", - method: http.MethodPost, - contentType: "application/x-www-form-urlencoded", - body: []byte(`state=test-state&code=valid`), + name: "valid form_post", + method: http.MethodPost, + headers: map[string][]string{"Content-Type": {"application/x-www-form-urlencoded"}}, + body: []byte(`state=test-state&code=valid`), + wantHTTPStatus: http.StatusOK, opt: func(t *testing.T) Option { return func(h *handlerState) error { h.useFormPost = true @@ -1978,11 +2054,14 @@ func TestHandleAuthCodeCallback(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + t.Parallel() + h := &handlerState{ callbacks: make(chan callbackResult, 1), state: state.State("test-state"), pkce: pkce.Code("test-pkce"), nonce: nonce.Nonce("test-nonce"), + logger: testlogger.New(t).Logger, } if tt.opt != nil { require.NoError(t, tt.opt(t)(h)) @@ -1998,8 +2077,8 @@ func TestHandleAuthCodeCallback(t *testing.T) { if tt.method != "" { req.Method = tt.method } - if tt.contentType != "" { - req.Header.Set("Content-Type", tt.contentType) + if tt.headers != nil { + req.Header = tt.headers } err = h.handleAuthCodeCallback(resp, req) @@ -2012,11 +2091,18 @@ func TestHandleAuthCodeCallback(t *testing.T) { } } else { require.NoError(t, err) + require.Equal(t, tt.wantHTTPStatus, resp.Code) + } + + if tt.wantHeaders != nil { + require.Equal(t, tt.wantHeaders, resp.Header()) } select { case <-time.After(1 * time.Second): - require.Fail(t, "timed out waiting to receive from callbacks channel") + if !tt.wantNoCallbacks { + require.Fail(t, "timed out waiting to receive from callbacks channel") + } case result := <-h.callbacks: if tt.wantErr != "" { require.EqualError(t, result.err, tt.wantErr) From 2b93fdf357befef44f755655920d5891812f6af2 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 7 Feb 2022 11:57:54 -0800 Subject: [PATCH 04/24] Fix a bug in the e2e tests When the test was going to fail, a goroutine would accidentally block on writing to an unbuffered channel, and the spawnTestGoroutine helper would wait for that goroutine to end on cleanup, causing the test to hang forever while it was trying to fail. 
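The failure mode is easy to reproduce in isolation. In the sketch below (not the actual test code), the goroutine's send on an unbuffered channel blocks forever once the receiver has timed out and walked away, so anything that waits for the goroutine hangs too; giving the channel a capacity of one, as this patch does, lets the send always complete:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ch := make(chan string) // the fix in this patch: make(chan string, 1)
	done := make(chan struct{})

	go func() {
		defer close(done)
		time.Sleep(50 * time.Millisecond) // simulate slow work, e.g. waiting on subprocess output
		ch <- "result"                    // with an unbuffered channel, this blocks forever after the timeout below
	}()

	select {
	case v := <-ch:
		fmt.Println("got:", v)
	case <-time.After(10 * time.Millisecond):
		fmt.Println("timed out; abandoning the channel") // the test was "trying to fail" here
	}

	select {
	case <-done:
		fmt.Println("goroutine finished cleanly") // happens once the channel is buffered
	case <-time.After(200 * time.Millisecond):
		fmt.Println("goroutine is still blocked on its send") // what the cleanup ended up waiting on
	}
}
```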
--- test/integration/cli_test.go | 11 ++++++----- test/integration/e2e_test.go | 8 ++++---- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/test/integration/cli_test.go b/test/integration/cli_test.go index dd833414..3c16d11f 100644 --- a/test/integration/cli_test.go +++ b/test/integration/cli_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. +// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package integration @@ -299,7 +299,7 @@ func runPinnipedLoginOIDC( }) // Start a background goroutine to read stderr from the CLI and parse out the login URL. - loginURLChan := make(chan string) + loginURLChan := make(chan string, 1) spawnTestGoroutine(t, func() (err error) { t.Helper() defer func() { @@ -318,7 +318,7 @@ func runPinnipedLoginOIDC( for scanner.Scan() { loginURL, err := url.Parse(strings.TrimSpace(scanner.Text())) if err == nil && loginURL.Scheme == "https" { - loginURLChan <- loginURL.String() + loginURLChan <- loginURL.String() // this channel is buffered so this will not block return nil } } @@ -327,7 +327,7 @@ func runPinnipedLoginOIDC( }) // Start a background goroutine to read stdout from the CLI and parse out an ExecCredential. - credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential) + credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential, 1) spawnTestGoroutine(t, func() (err error) { defer func() { closeErr := stdout.Close() @@ -343,7 +343,7 @@ func runPinnipedLoginOIDC( if err := json.NewDecoder(reader).Decode(&out); err != nil { return fmt.Errorf("could not read ExecCredential from stdout: %w", err) } - credOutputChan <- out + credOutputChan <- out // this channel is buffered so this will not block return readAndExpectEmpty(reader) }) @@ -398,6 +398,7 @@ func readAndExpectEmpty(r io.Reader) (err error) { return nil } +// Note: Callers should ensure that f eventually returns, otherwise this helper will hang forever in t.Cleanup. func spawnTestGoroutine(t *testing.T, f func() error) { t.Helper() var eg errgroup.Group diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index 4f334541..f84a6395 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -181,7 +181,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo }) // Start a background goroutine to read stderr from the CLI and parse out the login URL. - loginURLChan := make(chan string) + loginURLChan := make(chan string, 1) spawnTestGoroutine(t, func() (err error) { defer func() { closeErr := stderrPipe.Close() @@ -198,7 +198,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo for scanner.Scan() { loginURL, err := url.Parse(strings.TrimSpace(scanner.Text())) if err == nil && loginURL.Scheme == "https" { - loginURLChan <- loginURL.String() + loginURLChan <- loginURL.String() // this channel is buffered so this will not block return nil } } @@ -206,7 +206,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo }) // Start a background goroutine to read stdout from kubectl and return the result as a string. 
- kubectlOutputChan := make(chan string) + kubectlOutputChan := make(chan string, 1) spawnTestGoroutine(t, func() (err error) { defer func() { closeErr := stdoutPipe.Close() if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { return } if err == nil { err = fmt.Errorf("stdout stream closed with error: %w", closeErr) } }() @@ -222,7 +222,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo return err } t.Logf("kubectl output:\n%s\n", output) - kubectlOutputChan <- string(output) + kubectlOutputChan <- string(output) // this channel is buffered so this will not block return nil }) From 3c7e387137a880b3cffed05083e467ec6f4f6719 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 7 Feb 2022 13:32:31 -0800 Subject: [PATCH 05/24] Keep the CLI localhost listener running after requests with wrong verb In case some future browser change sends a new kind of request to our CLI, ignore it by returning StatusMethodNotAllowed and continuing to listen. --- pkg/oidcclient/login.go | 10 +++++++--- pkg/oidcclient/login_test.go | 25 ++++++++++++----------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/pkg/oidcclient/login.go index 223e7fb2..ca2335bc 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -868,10 +868,12 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req // Return HTTP 405 for anything that's not a POST. if r.Method != http.MethodPost { - return httperr.Newf(http.StatusMethodNotAllowed, "wanted POST but got %s", r.Method) + h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method) + w.WriteHeader(http.StatusMethodNotAllowed) + return nil // keep listening for more requests } - // Parse and pull the response parameters from a application/x-www-form-urlencoded request body. + // Parse and pull the response parameters from an application/x-www-form-urlencoded request body. if err := r.ParseForm(); err != nil { return httperr.Wrap(http.StatusBadRequest, "invalid form", err) } @@ -879,7 +881,9 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req } else { // Return HTTP 405 for anything that's not a GET. if r.Method != http.MethodGet { - return httperr.Newf(http.StatusMethodNotAllowed, "wanted GET but got %s", r.Method) + h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method) + w.WriteHeader(http.StatusMethodNotAllowed) + return nil // keep listening for more requests } // Pull response parameters from the URL query string. 
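The contract in this handler is that returning nil keeps the localhost listener alive, while returning an error (or delivering a callback result) ends the login attempt. A rough sketch of that convention, using simplified types rather than the real Pinniped ones:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// handlerFunc mirrors the convention above: nil means "keep listening",
// while a non-nil error would be delivered as a terminal callback result.
type handlerFunc func(w http.ResponseWriter, r *http.Request) error

func main() {
	var callback handlerFunc = func(w http.ResponseWriter, r *http.Request) error {
		if r.Method != http.MethodGet {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return nil // ignore unexpected verbs and keep listening
		}
		w.WriteHeader(http.StatusOK)
		return nil
	}

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_ = callback(w, r) // the real code sends any non-nil error to a callbacks channel
	}))
	defer server.Close()

	// An unexpected verb gets a 405, but the listener stays up for the real GET that follows.
	req, _ := http.NewRequest(http.MethodDelete, server.URL, nil)
	resp1, _ := http.DefaultClient.Do(req)
	defer resp1.Body.Close()
	resp2, _ := http.Get(server.URL)
	defer resp2.Body.Close()
	fmt.Println(resp1.StatusCode, resp2.StatusCode) // 405 200
}
```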
diff --git a/pkg/oidcclient/login_test.go b/pkg/oidcclient/login_test.go index bae18b49..a7c765c8 100644 --- a/pkg/oidcclient/login_test.go +++ b/pkg/oidcclient/login_test.go @@ -1881,19 +1881,19 @@ func TestHandleAuthCodeCallback(t *testing.T) { wantHeaders http.Header }{ { - name: "wrong method", - method: http.MethodPost, - query: "", - wantErr: "wanted GET but got POST", - wantHTTPStatus: http.StatusMethodNotAllowed, + name: "wrong method returns an error but keeps listening", + method: http.MethodPost, + query: "", + wantNoCallbacks: true, + wantHTTPStatus: http.StatusMethodNotAllowed, }, { - name: "wrong method for form_post", - method: http.MethodGet, - query: "", - opt: withFormPostMode, - wantErr: "wanted POST but got GET", - wantHTTPStatus: http.StatusMethodNotAllowed, + name: "wrong method for form_post returns an error but keeps listening", + method: http.MethodGet, + query: "", + opt: withFormPostMode, + wantNoCallbacks: true, + wantHTTPStatus: http.StatusMethodNotAllowed, }, { name: "invalid form for form_post", @@ -2098,6 +2098,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { require.Equal(t, tt.wantHeaders, resp.Header()) } + gotCallback := false select { case <-time.After(1 * time.Second): if !tt.wantNoCallbacks { @@ -2111,7 +2112,9 @@ func TestHandleAuthCodeCallback(t *testing.T) { require.NoError(t, result.err) require.NotNil(t, result.token) require.Equal(t, result.token.IDToken.Token, "test-id-token") + gotCallback = true } + require.Equal(t, tt.wantNoCallbacks, !gotCallback) }) } } From aa56f174dba281e9a7af1047e9787070b27c836f Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 7 Feb 2022 16:17:38 -0800 Subject: [PATCH 06/24] Capture and print the full kubectl output in an e2e test upon failure --- test/integration/e2e_test.go | 57 ++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index f84a6395..7aaa11c6 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -29,6 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" authv1alpha "go.pinniped.dev/generated/latest/apis/concierge/authentication/v1alpha1" configv1alpha1 "go.pinniped.dev/generated/latest/apis/supervisor/config/v1alpha1" @@ -159,40 +160,41 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo start := time.Now() kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) - stderrPipe, err := kubectlCmd.StderrPipe() + + // Wrap the stdout and stderr pipes with TeeReaders which will copy each incremental read to an + // in-memory buffer, so we can have the full output available to us at the end. + originalStderrPipe, err := kubectlCmd.StderrPipe() require.NoError(t, err) - stdoutPipe, err := kubectlCmd.StdoutPipe() + originalStdoutPipe, err := kubectlCmd.StdoutPipe() require.NoError(t, err) + var stderrPipeBuf, stdoutPipeBuf bytes.Buffer + stderrPipe := io.TeeReader(originalStderrPipe, &stderrPipeBuf) + stdoutPipe := io.TeeReader(originalStdoutPipe, &stdoutPipeBuf) t.Logf("starting kubectl subprocess") require.NoError(t, kubectlCmd.Start()) t.Cleanup(func() { - err := kubectlCmd.Wait() + // Consume readers so that the tee buffers will contain all the output so far. 
+ _, stdoutReadAllErr := ioutil.ReadAll(stdoutPipe) + _, stderrReadAllErr := ioutil.ReadAll(stderrPipe) + + // Note that Wait closes the stdout/stderr pipes, so we don't need to close them ourselves. + waitErr := kubectlCmd.Wait() t.Logf("kubectl subprocess exited with code %d", kubectlCmd.ProcessState.ExitCode()) - stdout, stdoutErr := ioutil.ReadAll(stdoutPipe) - if stdoutErr != nil { - stdout = []byte("") + + // Upon failure, print the full output so far of the kubectl command. + var testAlreadyFailedErr error + if t.Failed() { + testAlreadyFailedErr = errors.New("test failed prior to clean up function") } - stderr, stderrErr := ioutil.ReadAll(stderrPipe) - if stderrErr != nil { - stderr = []byte("") - } - require.NoErrorf(t, err, "kubectl process did not exit cleanly, stdout/stderr: %q/%q", string(stdout), string(stderr)) + cleanupErrs := utilerrors.NewAggregate([]error{waitErr, stdoutReadAllErr, stderrReadAllErr, testAlreadyFailedErr}) + require.NoErrorf(t, cleanupErrs, "kubectl process did not exit cleanly and/or the test failed\nstdout: %q\nstderr: %q", + stdoutPipeBuf.String(), stderrPipeBuf.String()) }) // Start a background goroutine to read stderr from the CLI and parse out the login URL. loginURLChan := make(chan string, 1) - spawnTestGoroutine(t, func() (err error) { - defer func() { - closeErr := stderrPipe.Close() - if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { - return - } - if err == nil { - err = fmt.Errorf("stderr stream closed with error: %w", closeErr) - } - }() - + spawnTestGoroutine(t, func() error { reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderrPipe)) scanner := bufio.NewScanner(reader) for scanner.Scan() { @@ -207,16 +209,7 @@ func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo // Start a background goroutine to read stdout from kubectl and return the result as a string. kubectlOutputChan := make(chan string, 1) - spawnTestGoroutine(t, func() (err error) { - defer func() { - closeErr := stdoutPipe.Close() - if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { - return - } - if err == nil { - err = fmt.Errorf("stdout stream closed with error: %w", closeErr) - } - }() + spawnTestGoroutine(t, func() error { output, err := ioutil.ReadAll(stdoutPipe) if err != nil { return err From 6781bfd7d8d017190e83441c6365e8bbd0434888 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 7 Feb 2022 16:21:23 -0800 Subject: [PATCH 07/24] Fix JS bug: form post UI shows manual copy/paste UI upon failed callback When the POST to the CLI's localhost callback endpoint results in a non-2XX status code, then treat that as a failed login attempt and automatically show the manual copy/paste UI. --- internal/oidc/provider/formposthtml/form_post.js | 13 ++++++++++--- .../oidc/provider/formposthtml/formposthtml_test.go | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/internal/oidc/provider/formposthtml/form_post.js b/internal/oidc/provider/formposthtml/form_post.js index 4c0eb7df..57a18725 100644 --- a/internal/oidc/provider/formposthtml/form_post.js +++ b/internal/oidc/provider/formposthtml/form_post.js @@ -1,4 +1,4 @@ -// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// Copyright 2021-2022 the Pinniped contributors. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 window.onload = () => { @@ -48,7 +48,14 @@ window.onload = () => { headers: {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}, body: responseParams['encoded_params'].value, }) - .then(() => clearTimeout(timeout)) - .then(() => transitionToState('success')) + .then(response => { + clearTimeout(timeout); + if (response.ok) { + transitionToState('success'); + } else { + // Got non-2XX http response status. + transitionToState('manual'); + } + }) .catch(() => transitionToState('manual')); }; diff --git a/internal/oidc/provider/formposthtml/formposthtml_test.go b/internal/oidc/provider/formposthtml/formposthtml_test.go index b09c0d7b..0a6a30ec 100644 --- a/internal/oidc/provider/formposthtml/formposthtml_test.go +++ b/internal/oidc/provider/formposthtml/formposthtml_test.go @@ -30,7 +30,7 @@ var ( - + @@ -61,7 +61,7 @@ var ( // It's okay if this changes in the future, but this gives us a chance to eyeball the formatting. // Our browser-based integration tests should find any incompatibilities. testExpectedCSP = `default-src 'none'; ` + - `script-src 'sha256-cjTdJmRvuz5EHNb/cw6pFk9iWyjegU9Ihx7Fb9tlqRg='; ` + + `script-src 'sha256-Lon+X41NoXuVGPqi3LsAPmBqlDmwbu3lGhQii7/Zjrc='; ` + `style-src 'sha256-CtfkX7m8x2UdGYvGgDq+6b6yIAQsASW9pbQK+sG8fNA='; ` + `img-src data:; ` + `connect-src *; ` + From 0431a072ae255439df40f7d4fa3092755a3914c3 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 7 Feb 2022 16:26:39 -0800 Subject: [PATCH 08/24] Remove an unnecessary nolint comment --- test/integration/e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index 7aaa11c6..65d687c6 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -47,7 +47,7 @@ import ( ) // TestE2EFullIntegration tests a full integration scenario that combines the supervisor, concierge, and CLI. -func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo +func TestE2EFullIntegration(t *testing.T) { env := testlib.IntegrationEnv(t) ctx, cancelFunc := context.WithTimeout(context.Background(), 15*time.Minute) defer cancelFunc() // Build pinniped CLI. From 1388183bf1f6e34b9ecb0d1861168460ec373ad9 Mon Sep 17 00:00:00 2001 From: Mo Khan Date: Mon, 7 Feb 2022 20:53:03 -0500 Subject: [PATCH 09/24] TestE2EFullIntegration: reduce timeout This causes the test to time out before Concourse terminates the entire test run. --- test/integration/e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index 65d687c6..6129f7af 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -50,7 +50,7 @@ func TestE2EFullIntegration(t *testing.T) { env := testlib.IntegrationEnv(t) - ctx, cancelFunc := context.WithTimeout(context.Background(), 15*time.Minute) + ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute) defer cancelFunc() // Build pinniped CLI. 
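Note why formposthtml_test.go changes in lockstep with form_post.js in patch 07 above (and again in patch 13 below): the login page's Content-Security-Policy pins `script-src` to the base64-encoded SHA-256 digest of the exact script bytes, so any edit to the Javascript yields a new hash. Computing such a CSP hash source looks roughly like this (a sketch, not the project's actual helper):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// cspScriptHash returns a CSP hash source for the given inline script bytes,
// per the Content Security Policy spec: base64(SHA-256(script)).
func cspScriptHash(script []byte) string {
	sum := sha256.Sum256(script)
	return fmt.Sprintf("'sha256-%s'", base64.StdEncoding.EncodeToString(sum[:]))
}

func main() {
	js := []byte("window.onload = () => { /* ... */ };") // stand-in for the form_post.js contents
	fmt.Printf("script-src %s\n", cspScriptHash(js))
}
```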
From 8ee461ae8a4ee2b0db0690a7afcc4927aef98e71 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Tue, 8 Feb 2022 10:27:26 -0500 Subject: [PATCH 10/24] e2e_test: handle hung go routines and readers Signed-off-by: Monis Khan --- test/integration/cli_test.go | 54 +++++++++++++++---------------- test/integration/e2e_test.go | 61 ++++++++++++++++++++++++++++++------ 2 files changed, 79 insertions(+), 36 deletions(-) diff --git a/test/integration/cli_test.go b/test/integration/cli_test.go index 3c16d11f..263c9154 100644 --- a/test/integration/cli_test.go +++ b/test/integration/cli_test.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -293,25 +292,14 @@ func runPinnipedLoginOIDC( t.Logf("starting CLI subprocess") require.NoError(t, cmd.Start()) t.Cleanup(func() { - err := cmd.Wait() + err := cmd.Wait() // handles closing of file descriptors t.Logf("CLI subprocess exited with code %d", cmd.ProcessState.ExitCode()) require.NoErrorf(t, err, "CLI process did not exit cleanly") }) // Start a background goroutine to read stderr from the CLI and parse out the login URL. loginURLChan := make(chan string, 1) - spawnTestGoroutine(t, func() (err error) { - t.Helper() - defer func() { - closeErr := stderr.Close() - if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { - return - } - if err == nil { - err = fmt.Errorf("stderr stream closed with error: %w", closeErr) - } - }() - + spawnTestGoroutine(ctx, t, func() error { reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderr)) scanner := bufio.NewScanner(reader) @@ -328,16 +316,7 @@ func runPinnipedLoginOIDC( // Start a background goroutine to read stdout from the CLI and parse out an ExecCredential. credOutputChan := make(chan clientauthenticationv1beta1.ExecCredential, 1) - spawnTestGoroutine(t, func() (err error) { - defer func() { - closeErr := stdout.Close() - if closeErr == nil || errors.Is(closeErr, os.ErrClosed) { - return - } - if err == nil { - err = fmt.Errorf("stdout stream closed with error: %w", closeErr) - } - }() + spawnTestGoroutine(ctx, t, func() error { reader := bufio.NewReader(testlib.NewLoggerReader(t, "stdout", stdout)) var out clientauthenticationv1beta1.ExecCredential if err := json.NewDecoder(reader).Decode(&out); err != nil { @@ -398,12 +377,33 @@ func readAndExpectEmpty(r io.Reader) (err error) { return nil } -// Note: Callers should ensure that f eventually returns, otherwise this helper will hang forever in t.Cleanup. -func spawnTestGoroutine(t *testing.T, f func() error) { +// Note: Callers should ensure that f eventually returns, otherwise this helper will leak a go routine. 
+func spawnTestGoroutine(ctx context.Context, t *testing.T, f func() error) { t.Helper() + var eg errgroup.Group t.Cleanup(func() { - require.NoError(t, eg.Wait(), "background goroutine failed") + egCh := make(chan error, 1) // do not block the go routine from exiting even after the select has completed + go func() { + egCh <- eg.Wait() + }() + + leewayCh := make(chan struct{}) + go func() { + <-ctx.Done() + // give f up to 30 seconds after the context is canceled to return + // this prevents "race" conditions where f is orchestrated via the same context + time.Sleep(30 * time.Second) + close(leewayCh) + }() + + select { + case <-leewayCh: + t.Errorf("background goroutine hung: %v", ctx.Err()) + + case err := <-egCh: + require.NoError(t, err, "background goroutine failed") + } }) eg.Go(f) } diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index 6129f7af..b4b5f494 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -19,6 +19,7 @@ import ( "regexp" "sort" "strings" + "sync/atomic" "testing" "time" @@ -50,7 +51,7 @@ import ( func TestE2EFullIntegration(t *testing.T) { env := testlib.IntegrationEnv(t) - ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Minute) + ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Minute) defer cancelFunc() // Build pinniped CLI. @@ -107,6 +108,9 @@ func TestE2EFullIntegration(t *testing.T) { // Add an OIDC upstream IDP and try using it to authenticate during kubectl commands. t.Run("with Supervisor OIDC upstream IDP and automatic flow", func(t *testing.T) { + testCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) + t.Cleanup(cancel) + // Start a fresh browser driver because we don't want to share cookies between the various tests in this file. page := browsertest.Open(t) @@ -158,7 +162,7 @@ func TestE2EFullIntegration(t *testing.T) { // Run "kubectl get namespaces" which should trigger a browser login via the plugin. start := time.Now() - kubectlCmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) + kubectlCmd := exec.CommandContext(testCtx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) // Wrap the stdout and stderr pipes with TeeReaders which will copy each incremental read to an @@ -175,8 +179,8 @@ func TestE2EFullIntegration(t *testing.T) { require.NoError(t, kubectlCmd.Start()) t.Cleanup(func() { // Consume readers so that the tee buffers will contain all the output so far. - _, stdoutReadAllErr := ioutil.ReadAll(stdoutPipe) - _, stderrReadAllErr := ioutil.ReadAll(stderrPipe) + _, stdoutReadAllErr := readAllCtx(testCtx, stdoutPipe) + _, stderrReadAllErr := readAllCtx(testCtx, stderrPipe) // Note that Wait closes the stdout/stderr pipes, so we don't need to close them ourselves. waitErr := kubectlCmd.Wait() @@ -194,7 +198,7 @@ func TestE2EFullIntegration(t *testing.T) { // Start a background goroutine to read stderr from the CLI and parse out the login URL. loginURLChan := make(chan string, 1) - spawnTestGoroutine(t, func() error { + spawnTestGoroutine(testCtx, t, func() error { reader := bufio.NewReader(testlib.NewLoggerReader(t, "stderr", stderrPipe)) scanner := bufio.NewScanner(reader) for scanner.Scan() { @@ -209,8 +213,8 @@ func TestE2EFullIntegration(t *testing.T) { // Start a background goroutine to read stdout from kubectl and return the result as a string. 
kubectlOutputChan := make(chan string, 1) - spawnTestGoroutine(t, func() error { - output, err := ioutil.ReadAll(stdoutPipe) + spawnTestGoroutine(testCtx, t, func() error { + output, err := readAllCtx(testCtx, stdoutPipe) if err != nil { return err } @@ -227,7 +231,7 @@ func TestE2EFullIntegration(t *testing.T) { require.Fail(t, "timed out waiting for login URL") case loginURL = <-loginURLChan: } - t.Logf("navigating to login page") + t.Logf("navigating to login page: %q", loginURL) require.NoError(t, page.Navigate(loginURL)) // Expect to be redirected to the upstream provider and log in. @@ -253,7 +257,7 @@ func TestE2EFullIntegration(t *testing.T) { t.Logf("first kubectl command took %s", time.Since(start).String()) - requireUserCanUseKubectlWithoutAuthenticatingAgain(ctx, t, env, + requireUserCanUseKubectlWithoutAuthenticatingAgain(testCtx, t, env, downstream, kubeconfigPath, sessionCachePath, @@ -1170,3 +1174,42 @@ func getSecretNameFromSignature(t *testing.T, signature string, typeLabel string signatureAsValidName := strings.ToLower(b32.EncodeToString(signatureBytes)) return fmt.Sprintf("pinniped-storage-%s-%s", typeLabel, signatureAsValidName) } + +func readAllCtx(ctx context.Context, r io.Reader) ([]byte, error) { + errCh := make(chan error, 1) + data := &atomic.Value{} + go func() { // copied from io.ReadAll and modified to use the atomic.Value above + b := make([]byte, 0, 512) + data.Store(string(b)) // cast to string to make a copy of the byte slice + for { + if len(b) == cap(b) { + // Add more capacity (let append pick how much). + b = append(b, 0)[:len(b)] + data.Store(string(b)) // cast to string to make a copy of the byte slice + } + n, err := r.Read(b[len(b):cap(b)]) + b = b[:len(b)+n] + data.Store(string(b)) // cast to string to make a copy of the byte slice + if err != nil { + if err == io.EOF { + err = nil + } + errCh <- err + return + } + } + }() + + select { + case <-ctx.Done(): + b, _ := data.Load().(string) + return nil, fmt.Errorf("failed to complete read all: %w, data read so far:\n%q", ctx.Err(), b) + + case err := <-errCh: + b, _ := data.Load().(string) + if len(b) == 0 { + return nil, err + } + return []byte(b), err + } +} From cd825c5e5172e00107f973265a7ab4e354e71e14 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Tue, 8 Feb 2022 13:00:49 -0800 Subject: [PATCH 11/24] Use "-v6" for kubectl for an e2e test so we can get more failure output --- test/integration/e2e_test.go | 15 ++++++++++++--- test/testlib/browsertest/browsertest.go | 4 ++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index b4b5f494..38317adb 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -162,7 +162,7 @@ func TestE2EFullIntegration(t *testing.T) { // Run "kubectl get namespaces" which should trigger a browser login via the plugin. start := time.Now() - kubectlCmd := exec.CommandContext(testCtx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath) + kubectlCmd := exec.CommandContext(testCtx, "kubectl", "get", "namespace", "--kubeconfig", kubeconfigPath, "-v", "6") kubectlCmd.Env = append(os.Environ(), env.ProxyEnv()...) 
// Wrap the stdout and stderr pipes with TeeReaders which will copy each incremental read to an @@ -192,8 +192,17 @@ testAlreadyFailedErr = errors.New("test failed prior to clean up function") } cleanupErrs := utilerrors.NewAggregate([]error{waitErr, stdoutReadAllErr, stderrReadAllErr, testAlreadyFailedErr}) - require.NoErrorf(t, cleanupErrs, "kubectl process did not exit cleanly and/or the test failed\nstdout: %q\nstderr: %q", - stdoutPipeBuf.String(), stderrPipeBuf.String()) + + if cleanupErrs != nil { + t.Logf("kubectl stdout was:\n----start of stdout\n%s\n----end of stdout", stdoutPipeBuf.String()) + t.Logf("kubectl stderr was:\n----start of stderr\n%s\n----end of stderr", stderrPipeBuf.String()) + } + require.NoErrorf(t, cleanupErrs, "kubectl process did not exit cleanly and/or the test failed. "+ + "Note: if kubectl's first call to the Pinniped CLI results in the Pinniped CLI returning an error, "+ + "then kubectl may call the Pinniped CLI again, which may hang because it will wait for the user "+ + "to finish the login. This test will kill the kubectl process after a timeout. In this case, the "+ + "kubectl output printed above will include multiple prompts for the user to enter their authcode.", + ) }) // Start a background goroutine to read stderr from the CLI and parse out the login URL. diff --git a/test/testlib/browsertest/browsertest.go b/test/testlib/browsertest/browsertest.go index f2dc0702..c94850d2 100644 --- a/test/testlib/browsertest/browsertest.go +++ b/test/testlib/browsertest/browsertest.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved. +// Copyright 2020-2022 the Pinniped contributors. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package browsertest provides integration test helpers for our browser-based tests. @@ -119,7 +119,7 @@ func LoginToUpstream(t *testing.T, page *agouti.Page, upstream testlib.TestOIDCU { Name: "Okta", IssuerPattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.+\z`), - LoginPagePattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.+\z`), + LoginPagePattern: regexp.MustCompile(`\Ahttps://.+\.okta\.com/.*\z`), UsernameSelector: "input#okta-signin-username", PasswordSelector: "input#okta-signin-password", LoginButtonSelector: "input#okta-signin-submit", From 29368e8242110026ab13c203aa6f3ed56cc25653 Mon Sep 17 00:00:00 2001 From: Mo Khan Date: Tue, 8 Feb 2022 16:31:04 -0500 Subject: [PATCH 12/24] Make the linter happy --- test/integration/e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/e2e_test.go b/test/integration/e2e_test.go index 38317adb..2af62cc2 100644 --- a/test/integration/e2e_test.go +++ b/test/integration/e2e_test.go @@ -48,7 +48,7 @@ import ( ) // TestE2EFullIntegration tests a full integration scenario that combines the supervisor, concierge, and CLI. 
-func TestE2EFullIntegration(t *testing.T) { +func TestE2EFullIntegration(t *testing.T) { // nolint:gocyclo env := testlib.IntegrationEnv(t) ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Minute) From 5d79d4b9dc3a2ce7918f581d178b5a763ec516ab Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Tue, 8 Feb 2022 17:30:48 -0800 Subject: [PATCH 13/24] Fix form_post.js mistake from recent commit; Better CORS on callback --- .../oidc/provider/formposthtml/form_post.js | 18 +++-- .../formposthtml/formposthtml_test.go | 4 +- pkg/oidcclient/login.go | 38 ++++++---- pkg/oidcclient/login_test.go | 71 +++++++++++++++++-- test/integration/formposthtml_test.go | 19 ++++- 5 files changed, 123 insertions(+), 27 deletions(-) diff --git a/internal/oidc/provider/formposthtml/form_post.js b/internal/oidc/provider/formposthtml/form_post.js index 57a18725..9e4b0203 100644 --- a/internal/oidc/provider/formposthtml/form_post.js +++ b/internal/oidc/provider/formposthtml/form_post.js @@ -44,18 +44,22 @@ window.onload = () => { responseParams['redirect_uri'].value, { method: 'POST', - mode: 'no-cors', + mode: 'no-cors', // in the future, we could change this to "cors" (see comment below) headers: {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}, body: responseParams['encoded_params'].value, }) .then(response => { clearTimeout(timeout); - if (response.ok) { - transitionToState('success'); - } else { - // Got non-2XX http response status. - transitionToState('manual'); - } + // Requests made using "no-cors" mode will hide the real response.status by making it 0 + // and the real response.ok by making it false. + // If the real response was success, then we would like to show the success state. + // If the real response was an error, then we wish we could show the manual + // state, but we have no way to know that, as long as we are making "no-cors" requests. + // For now, show the success status for all responses. + // In the future, we could make this request in "cors" mode once old versions of our CLI + // which did not handle CORS are upgraded out by our users. That would allow us to use + // a conditional statement based on response.ok here to decide which state to transition into. + transitionToState('success'); }) .catch(() => transitionToState('manual')); }; diff --git a/internal/oidc/provider/formposthtml/formposthtml_test.go b/internal/oidc/provider/formposthtml/formposthtml_test.go index 0a6a30ec..afeb2755 100644 --- a/internal/oidc/provider/formposthtml/formposthtml_test.go +++ b/internal/oidc/provider/formposthtml/formposthtml_test.go @@ -30,7 +30,7 @@ var ( - + @@ -61,7 +61,7 @@ var ( // It's okay if this changes in the future, but this gives us a chance to eyeball the formatting. // Our browser-based integration tests should find any incompatibilities. testExpectedCSP = `default-src 'none'; ` + - `script-src 'sha256-Lon+X41NoXuVGPqi3LsAPmBqlDmwbu3lGhQii7/Zjrc='; ` + + `script-src 'sha256-P1dCaXS9frmkvGZ/cH/UljR70IOH963lmfptEgcn9j8='; ` + `style-src 'sha256-CtfkX7m8x2UdGYvGgDq+6b6yIAQsASW9pbQK+sG8fNA='; ` + `img-src data:; ` + `connect-src *; ` + diff --git a/pkg/oidcclient/login.go b/pkg/oidcclient/login.go index ca2335bc..afda042a 100644 --- a/pkg/oidcclient/login.go +++ b/pkg/oidcclient/login.go @@ -835,6 +835,20 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req var params url.Values if h.useFormPost { // nolint:nestif + // Return HTTP 405 for anything that's not a POST or an OPTIONS request. 
+ if r.Method != http.MethodPost && r.Method != http.MethodOptions { + h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method) + w.WriteHeader(http.StatusMethodNotAllowed) + return nil // keep listening for more requests + } + + // For POST and OPTIONS requests, calculate the allowed origin for CORS. + issuerURL, parseErr := url.Parse(h.issuer) + if parseErr != nil { + return httperr.Wrap(http.StatusInternalServerError, "invalid issuer url", parseErr) + } + allowOrigin := issuerURL.Scheme + "://" + issuerURL.Host + if r.Method == http.MethodOptions { // Google Chrome decided that it should do CORS preflight checks for this Javascript form submission POST request. // See https://developer.chrome.com/blog/private-network-access-preflight/ @@ -846,12 +860,9 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req return nil // keep listening for more requests } h.logger.V(debugLogLevel).Info("Pinniped: Got CORS preflight request from browser", "origin", origin) - issuerURL, parseErr := url.Parse(h.issuer) - if parseErr != nil { - return httperr.Wrap(http.StatusInternalServerError, "invalid issuer url", parseErr) - } // To tell the browser that it is okay to make the real POST request, return the following response. - w.Header().Set("Access-Control-Allow-Origin", issuerURL.Scheme+"://"+issuerURL.Host) + w.Header().Set("Access-Control-Allow-Origin", allowOrigin) + w.Header().Set("Vary", "*") // supposed to use Vary when Access-Control-Allow-Origin is a specific host w.Header().Set("Access-Control-Allow-Credentials", "false") w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS") w.Header().Set("Access-Control-Allow-Private-Network", "true") @@ -864,20 +875,21 @@ func (h *handlerState) handleAuthCodeCallback(w http.ResponseWriter, r *http.Req } w.WriteHeader(http.StatusNoContent) return nil // keep listening for more requests - } - - // Return HTTP 405 for anything that's not a POST. - if r.Method != http.MethodPost { - h.logger.V(debugLogLevel).Info("Pinniped: Got unexpected request on callback listener", "method", r.Method) - w.WriteHeader(http.StatusMethodNotAllowed) - return nil // keep listening for more requests - } + } // Otherwise, this is a POST request... // Parse and pull the response parameters from an application/x-www-form-urlencoded request body. if err := r.ParseForm(); err != nil { return httperr.Wrap(http.StatusBadRequest, "invalid form", err) } params = r.Form + + // Allow CORS requests for POST so in the future our Javascript code can be updated to use the fetch API's + // mode "cors", and still be compatible with older CLI versions starting with those that have this code + // for CORS headers. Updating to use CORS would allow our Javascript code (form_post.js) to see the true + // http response status from this endpoint. Note that the POST response does not need to set as many CORS + // headers as the OPTIONS preflight response. + w.Header().Set("Access-Control-Allow-Origin", allowOrigin) + w.Header().Set("Vary", "*") // supposed to use Vary when Access-Control-Allow-Origin is a specific host } else { // Return HTTP 405 for anything that's not a GET. 
if r.Method != http.MethodGet { diff --git a/pkg/oidcclient/login_test.go b/pkg/oidcclient/login_test.go index a7c765c8..fe6a8ce5 100644 --- a/pkg/oidcclient/login_test.go +++ b/pkg/oidcclient/login_test.go @@ -1885,6 +1885,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { method: http.MethodPost, query: "", wantNoCallbacks: true, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusMethodNotAllowed, }, { @@ -1893,6 +1894,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { query: "", opt: withFormPostMode, wantNoCallbacks: true, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusMethodNotAllowed, }, { @@ -1903,24 +1905,28 @@ func TestHandleAuthCodeCallback(t *testing.T) { body: []byte(`%`), opt: withFormPostMode, wantErr: `invalid form: invalid URL escape "%"`, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusBadRequest, }, { name: "invalid state", query: "state=invalid", wantErr: "missing or invalid state parameter", + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusForbidden, }, { name: "error code from provider", query: "state=test-state&error=some_error", wantErr: `login failed with code "some_error"`, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusBadRequest, }, { name: "error code with a description from provider", query: "state=test-state&error=some_error&error_description=optional%20error%20description", wantErr: `login failed with code "some_error": optional error description`, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusBadRequest, }, { @@ -1929,6 +1935,23 @@ func TestHandleAuthCodeCallback(t *testing.T) { query: "", headers: map[string][]string{"Origin": {"https://some-origin.com"}}, wantErr: `invalid issuer url: parse "://bad-url": missing protocol scheme`, + wantHeaders: map[string][]string{}, + wantHTTPStatus: http.StatusInternalServerError, + opt: func(t *testing.T) Option { + return func(h *handlerState) error { + h.useFormPost = true + h.issuer = "://bad-url" + return nil + } + }, + }, + { + name: "in form post mode, invalid issuer url config during POST request returns an error", + method: http.MethodPost, + query: "", + headers: map[string][]string{"Origin": {"https://some-origin.com"}}, + wantErr: `invalid issuer url: parse "://bad-url": missing protocol scheme`, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusInternalServerError, opt: func(t *testing.T) Option { return func(h *handlerState) error { @@ -1944,6 +1967,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { query: "", opt: withFormPostMode, wantNoCallbacks: true, + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusBadRequest, }, { @@ -1957,6 +1981,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { "Access-Control-Allow-Credentials": {"false"}, "Access-Control-Allow-Methods": {"POST, OPTIONS"}, "Access-Control-Allow-Origin": {"https://valid-issuer.com"}, + "Vary": {"*"}, "Access-Control-Allow-Private-Network": {"true"}, }, opt: func(t *testing.T) Option { @@ -1981,6 +2006,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { "Access-Control-Allow-Credentials": {"false"}, "Access-Control-Allow-Methods": {"POST, OPTIONS"}, "Access-Control-Allow-Origin": {"https://valid-issuer.com"}, + "Vary": {"*"}, "Access-Control-Allow-Private-Network": {"true"}, "Access-Control-Allow-Headers": {"header1, header2, header3"}, }, @@ -1996,6 +2022,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { name: "invalid code", query: "state=test-state&code=invalid", wantErr: "could not complete code 
exchange: some exchange error", + wantHeaders: map[string][]string{}, wantHTTPStatus: http.StatusBadRequest, opt: func(t *testing.T) Option { return func(h *handlerState) error { @@ -2015,6 +2042,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { name: "valid", query: "state=test-state&code=valid", wantHTTPStatus: http.StatusOK, + wantHeaders: map[string][]string{"Content-Type": {"text/plain; charset=utf-8"}}, opt: func(t *testing.T) Option { return func(h *handlerState) error { h.oauth2Config = &oauth2.Config{RedirectURL: testRedirectURI} @@ -2030,10 +2058,44 @@ func TestHandleAuthCodeCallback(t *testing.T) { }, }, { - name: "valid form_post", - method: http.MethodPost, - headers: map[string][]string{"Content-Type": {"application/x-www-form-urlencoded"}}, - body: []byte(`state=test-state&code=valid`), + name: "valid form_post", + method: http.MethodPost, + headers: map[string][]string{"Content-Type": {"application/x-www-form-urlencoded"}}, + body: []byte(`state=test-state&code=valid`), + wantHeaders: map[string][]string{ + "Access-Control-Allow-Origin": {"https://valid-issuer.com"}, + "Vary": {"*"}, + "Content-Type": {"text/plain; charset=utf-8"}, + }, + wantHTTPStatus: http.StatusOK, + opt: func(t *testing.T) Option { + return func(h *handlerState) error { + h.useFormPost = true + h.oauth2Config = &oauth2.Config{RedirectURL: testRedirectURI} + h.getProvider = func(_ *oauth2.Config, _ *oidc.Provider, _ *http.Client) provider.UpstreamOIDCIdentityProviderI { + mock := mockUpstream(t) + mock.EXPECT(). + ExchangeAuthcodeAndValidateTokens(gomock.Any(), "valid", pkce.Code("test-pkce"), nonce.Nonce("test-nonce"), testRedirectURI). + Return(&oidctypes.Token{IDToken: &oidctypes.IDToken{Token: "test-id-token"}}, nil) + return mock + } + return nil + } + }, + }, + { + name: "valid form_post made with the same origin headers that would be used by a Javascript fetch client using mode=cors", + method: http.MethodPost, + headers: map[string][]string{ + "Content-Type": {"application/x-www-form-urlencoded"}, + "Origin": {"https://some-origin.com"}, + }, + body: []byte(`state=test-state&code=valid`), + wantHeaders: map[string][]string{ + "Access-Control-Allow-Origin": {"https://valid-issuer.com"}, + "Vary": {"*"}, + "Content-Type": {"text/plain; charset=utf-8"}, + }, wantHTTPStatus: http.StatusOK, opt: func(t *testing.T) Option { return func(h *handlerState) error { @@ -2062,6 +2124,7 @@ func TestHandleAuthCodeCallback(t *testing.T) { pkce: pkce.Code("test-pkce"), nonce: nonce.Nonce("test-nonce"), logger: testlogger.New(t).Logger, + issuer: "https://valid-issuer.com/with/some/path", } if tt.opt != nil { require.NoError(t, tt.opt(t)(h)) diff --git a/test/integration/formposthtml_test.go b/test/integration/formposthtml_test.go index d845a60c..d891488e 100644 --- a/test/integration/formposthtml_test.go +++ b/test/integration/formposthtml_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 the Pinniped contributors. All Rights Reserved. +// Copyright 2021-2022 the Pinniped contributors. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package integration @@ -60,6 +60,10 @@ func TestFormPostHTML_Parallel(t *testing.T) { // // This case is fairly unlikely in practice, and if the CLI encounters // an error it can also expose it via stderr anyway. + // + // In the future, we could change the Javascript code to use mode 'cors' + // because we have upgraded our CLI callback endpoint to handle CORS, + // and then we could change this to formpostExpectManualState(). 
formpostExpectSuccessState(t, page) }) @@ -109,6 +113,19 @@ func formpostCallbackServer(t *testing.T) (string, func(*testing.T, url.Values)) results := make(chan url.Values) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 404 for any other requests aside from POSTs. We do not need to support CORS preflight OPTIONS + // requests for this test because both the web page and the callback are on 127.0.0.1 (same origin). + if r.Method != http.MethodPost { + t.Logf("test callback server got unexpected request method") + w.WriteHeader(http.StatusNotFound) + return + } + + // Allow CORS requests. This will be needed for this test in the future if we change + // the Javascript code from using mode 'no-cors' to instead use mode 'cors'. At the + // moment it should be ignored by the browser. + w.Header().Set("Access-Control-Allow-Origin", "*") + assert.NoError(t, r.ParseForm()) // Extract only the POST parameters (r.Form also contains URL query parameters). From d728c89ba680a6fe88a2546221ebc91a3e3fa702 Mon Sep 17 00:00:00 2001 From: Nanci Lancaster Date: Fri, 4 Feb 2022 12:59:44 -0600 Subject: [PATCH 14/24] updated search functionality of docs on site Signed-off-by: Nanci Lancaster --- site/themes/pinniped/layouts/_default/baseof.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/site/themes/pinniped/layouts/_default/baseof.html b/site/themes/pinniped/layouts/_default/baseof.html index e9a0e599..c6dff211 100644 --- a/site/themes/pinniped/layouts/_default/baseof.html +++ b/site/themes/pinniped/layouts/_default/baseof.html @@ -23,7 +23,8 @@ {{ partial "footer" . }} + 
--- site/content/docs/_index.md | 4 + site/content/docs/background/architecture.md | 2 + .../configure-concierge-supervisor-jwt.md | 2 +- site/content/docs/howto/login.md | 13 +- .../concierge-and-supervisor-demo.md | 765 ++++++++++++++---- .../concierge-and-supervisor-gke-demo.md | 462 ----------- .../docs/tutorials/concierge-only-demo.md | 36 +- site/content/resources/_index.html | 92 ++- 8 files changed, 680 insertions(+), 696 deletions(-) delete mode 100644 site/content/docs/tutorials/concierge-and-supervisor-gke-demo.md diff --git a/site/content/docs/_index.md b/site/content/docs/_index.md index e297ecf3..d78b655e 100644 --- a/site/content/docs/_index.md +++ b/site/content/docs/_index.md @@ -13,6 +13,10 @@ As a Kubernetes cluster administrator or user, you can learn how Pinniped works, Have a question, comment, or idea? Please reach out via [GitHub Discussions](https://github.com/vmware-tanzu/pinniped/discussions) or [join the Pinniped community meetings]({{< ref "/community" >}}). +## New to Pinniped? + +- ⚠️ **Start here:** [Learn to use Pinniped for federated authentication to Kubernetes clusters]({{< ref "tutorials/concierge-and-supervisor-demo" >}}) + ## Background {{< docsmenu "background" >}} diff --git a/site/content/docs/background/architecture.md b/site/content/docs/background/architecture.md index 6991e2bb..e1bf72af 100644 --- a/site/content/docs/background/architecture.md +++ b/site/content/docs/background/architecture.md @@ -45,6 +45,8 @@ Pinniped supports the following IDPs. 1. Any [LDAP](https://ldap.com) identity provider. +1. Any Active Directory identity provider (via LDAP). + The [`idp.supervisor.pinniped.dev`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-idp-supervisor-pinniped-dev-v1alpha1) API group contains the Kubernetes custom resources that configure the Pinniped diff --git a/site/content/docs/howto/configure-concierge-supervisor-jwt.md b/site/content/docs/howto/configure-concierge-supervisor-jwt.md index c7ee2418..bc15b3a9 100644 --- a/site/content/docs/howto/configure-concierge-supervisor-jwt.md +++ b/site/content/docs/howto/configure-concierge-supervisor-jwt.md @@ -26,7 +26,7 @@ If you would rather not use the Supervisor, you may want to [configure the Conci This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress, and that you have [configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}). -It also assumes that you have configured an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor as the source of your user's identities. +It also assumes that you have configured an `OIDCIdentityProvider`, `LDAPIdentityProvider`, or `ActiveDirectoryIdentityProvider` for the Supervisor as the source of your user's identities. Various examples of configuring these resources can be found in these guides. It also assumes that you have already [installed the Pinniped Concierge]({{< ref "install-concierge" >}}) diff --git a/site/content/docs/howto/login.md b/site/content/docs/howto/login.md index edc3329a..45cfb3ad 100644 --- a/site/content/docs/howto/login.md +++ b/site/content/docs/howto/login.md @@ -18,7 +18,7 @@ This how-to guide assumes that you have already configured the following Pinnipe then you have already: 1. [Installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}) with working ingress. 1. 
[Configured a FederationDomain to issue tokens for your downstream clusters]({{< ref "configure-supervisor" >}}). - 1. Configured an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor as the source of your user's identities. + 1. Configured an `OIDCIdentityProvider`, `LDAPIdentityProvider`, or `ActiveDirectoryIdentityProvider` for the Supervisor as the source of your user's identities. Various examples of configuring these resources can be found in these guides. 1. In each cluster for which you would like to use Pinniped for authentication, you have [installed the Concierge]({{< ref "install-concierge" >}}). 1. In each cluster's Concierge, you have configured an authenticator. For example, if you are using the Pinniped Supervisor, @@ -35,8 +35,13 @@ You should have also already [installed the `pinniped` command-line]({{< ref "in Although you can choose to use Pinniped without using the Pinniped Supervisor, there are several key advantages of choosing to use the Pinniped Supervisor to manage identity across fleets of Kubernetes clusters. +1. The Supervisor makes it easy to **bring your own OIDC, LDAP, or Active Directory identity provider to act as the source of user identities**. + It also allows you to configure how identities and group memberships in the identity provider map to identities + and group memberships in the Kubernetes clusters. + 1. A generated kubeconfig for a cluster will be specific for that cluster, however **it will not contain any specific user identity or credentials. - This kubeconfig file can be safely shared with all cluster users.** When the user runs `kubectl` commands using this kubeconfig, they will be interactively prompted to log in using their own unique identity from the OIDC or LDAP identity provider configured in the Supervisor. + This kubeconfig file can be safely shared with all cluster users.** When the user runs `kubectl` commands using this kubeconfig, + they will be interactively prompted to log in using their own unique identity from the identity provider configured in the Supervisor. 1. The Supervisor will provide a federated identity across all clusters that use the same `FederationDomain`. The user will be **prompted by `kubectl` to interactively authenticate once per day**, and then will be able to use all clusters @@ -44,10 +49,6 @@ Although you can choose to use Pinniped without using the Pinniped Supervisor, t This federated identity is secure because behind the scenes the Supervisor is issuing very short-lived credentials that are uniquely scoped to each cluster. -1. The Supervisor makes it easy to **bring your own OIDC or LDAP identity provider to act as the source of user identities**. - It also allows you to configure how identities and group memberships in the OIDC or LDAP identity provider map to identities - and group memberships in the Kubernetes clusters. - ## Generate a Pinniped-compatible kubeconfig file You will need to generate a Pinniped-compatible kubeconfig file for each cluster in which you have installed the Concierge. 
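+A minimal sketch of that step, assuming hypothetical file names (the flags
+follow the `pinniped get kubeconfig` examples used later in this series):
+
+```sh
+# Generate a Pinniped-compatible kubeconfig for one cluster, starting from an
+# admin-level kubeconfig for that cluster. File names here are hypothetical.
+pinniped get kubeconfig \
+  --kubeconfig cluster-admin.yaml > cluster-developer.yaml
+
+# The generated file contains no user-specific credentials, so it can be
+# shared with all of the cluster's users.
+kubectl get namespaces --kubeconfig cluster-developer.yaml
+```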
diff --git a/site/content/docs/tutorials/concierge-and-supervisor-demo.md b/site/content/docs/tutorials/concierge-and-supervisor-demo.md index a8b4def5..8668bff7 100644 --- a/site/content/docs/tutorials/concierge-and-supervisor-demo.md +++ b/site/content/docs/tutorials/concierge-and-supervisor-demo.md @@ -1,5 +1,5 @@ --- -title: Learn to use the Pinniped Supervisor alongside the Concierge +title: Learn to use Pinniped for federated authentication to Kubernetes clusters description: See how the Pinniped Supervisor streamlines login to multiple Kubernetes clusters. cascade: layout: docs @@ -7,233 +7,646 @@ menu: docs: name: Concierge with Supervisor parent: tutorials + weight: 1 --- -## Prerequisites +## Why Pinniped? -1. A Kubernetes cluster of a type supported by Pinniped Concierge as described in [architecture](/docs/background/architecture). +There are many benefits to using the Pinniped Supervisor, Concierge, and CLI components together +to provide Kubernetes authentication. - Don't have a cluster handy? Consider using [kind](https://kind.sigs.k8s.io/) on your local machine. - See below for an example of using kind. +- It's easy to **bring your own OIDC, LDAP, or Active Directory identity provider** to act as the source of user identities. + A user's identity in the external identity provider becomes their identity in Kubernetes. + All other aspects of Kubernetes that are sensitive to identity, such as authorization policies and audit logging, are then + based on the user identities from your identity provider. -1. A Kubernetes cluster of a type supported by Pinniped Supervisor (this can be the same cluster as the first, or different). +- You can **bring identities from your own identity provider into many types of Kubernetes clusters in a consistent way**. + This includes clusters from various vendors run on-prem, and clusters provided as a cloud service by various popular cloud companies. -1. A kubeconfig that has administrator-like privileges on each cluster. +- Kubeconfig files **will not contain any specific user identity or credentials, so they can be safely shared**. -1. An external OIDC identity provider to use as the source of identity for Pinniped. +- Deep integration with `kubectl` means that when a user runs `kubectl` commands, + they will be **interactively prompted to log in using their own unique identity** from your identity provider. -## Overview +- Users will be prompted by `kubectl` to interactively **authenticate only once per day**, and then will be able to + use multiple clusters for the rest of the day without being asked to authenticate again. -Installing and trying Pinniped on any cluster consists of the following general steps. See the next section below -for a more specific example, including the commands to use for that case. +- All credentials are short-lived, and refreshed often. Additionally, **frequent checks are made against your identity provider + to ensure that the user should continue to have access to the Kubernetes clusters**. For example, within minutes + of locking an Active Directory account, that user will lose access to Kubernetes clusters, even if they were + already logged in. -1. [Install the Supervisor]({{< ref "../howto/install-supervisor" >}}). -1. Create a - [`FederationDomain`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-config-v1alpha1-federationdomain) - via the installed Pinniped Supervisor. -1. 
Create an - [`OIDCIdentityProvider`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-idp-v1alpha1-oidcidentityprovider) - via the installed Pinniped Supervisor. -1. Install the Pinniped Concierge. See [deploy/concierge/README.md](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/concierge/README.md). -1. Create a - [`JWTAuthenticator`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-concierge-authentication-v1alpha1-jwtauthenticator) - via the installed Pinniped Concierge. -1. [Install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}). -1. Generate a kubeconfig using the Pinniped command-line tool. Run `pinniped get kubeconfig --help` for more information. -1. Run `kubectl` commands using the generated kubeconfig. The Pinniped Supervisor and Concierge are automatically used for authentication during those commands. +- A **user can safely be granted high levels of authorization on a cluster**, if needed. + Even if they abuse their privilege by capturing the credentials sent by other users to the cluster, + they will not be able to use the captured credentials to access other clusters, because all credentials + sent to clusters are uniquely scoped to each individual cluster. -## Example of deploying on multiple kind clusters +- Pinniped will not interfere with a cluster's original vendor-specific authentication system. + The **original admin-level kubeconfig from a cluster can be privately kept by the cluster's creator** for + bootstrapping and break-glass access purposes. -[kind](https://kind.sigs.k8s.io) is a tool for creating and managing Kubernetes clusters on your local machine -which uses Docker containers as the cluster's nodes. This is a convenient way to try out Pinniped on local -non-production clusters. +- Pinniped is **open source** and will never be tied to any one vendor's authentication system. + As Pinniped improves in the future, all your Kubernetes clusters can benefit, regardless of which vendor provided the clusters. + The code is available on GitHub for any expert to audit, and for any community member to contribute. -The following steps deploy the latest release of Pinniped on kind. They deploy the Pinniped -Supervisor on one cluster, and the Pinniped Concierge on another cluster. A multi-cluster deployment -strategy is typical for Pinniped. The Pinniped Concierge uses a -[`JWTAuthenticator`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-concierge-authentication-v1alpha1-jwtauthenticator) -to authenticate federated identities from the Supervisor. +## What this tutorial will show -1. Install the tools required for the following steps. 
+This tutorial will show: +- A detailed example of how to install and configure a Supervisor with ingress, DNS, TLS, and an external identity provider +- How to install the Concierge onto multiple workload clusters and configure them all to trust identities from the Supervisor +- How an admin can create and distribute kubeconfig files for the workload clusters +- How a developer or devops user can authenticate with kubectl using their identity from the external identity provider, + and how they can securely access all workload clusters for the rest of the day without needing to authenticate again - - [Install kind](https://kind.sigs.k8s.io/docs/user/quick-start/), if not already installed. For example, `brew install kind` on macOS. +## Tutorial background - - kind depends on Docker. If not already installed, [install Docker](https://docs.docker.com/get-docker/), for example `brew cask install docker` on macOS. +This tutorial is intended to be a step-by-step example of installing and configuring the Pinniped components +to provide a multi-cluster federated authentication solution. It will show every +command needed to replicate the same setup to allow the reader to follow the same steps themselves. - - This demo requires `kubectl`, which comes with Docker, or can be [installed separately](https://kubernetes.io/docs/tasks/tools/install-kubectl/). +A single Pinniped Supervisor can provide authentication for any number of Kubernetes clusters. In a typical deployment: - - This demo requires `openssl`, which is installed on macOS by default, or can be [installed separately](https://www.openssl.org/). +- A single Supervisor is deployed on a special cluster where app developers and devops users have no access. + App developers and devops users should have no access at least to the resources in the Supervisor's namespace, + but usually have no access to the whole cluster. For this tutorial, let's call this cluster the "supervisor cluster". +- App developers and devops users can then use their identities provided by the Supervisor to log in to many + clusters where they can manage their apps. For this tutorial, let's call these clusters the "workload clusters". + The Pinniped Concierge component is installed into each workload cluster and is configured to trust the single Supervisor. + The Concierge acts as an in-cluster agent to provide authentication services. -1. Create a new Kubernetes cluster for the Pinniped Supervisor using `kind create cluster --name pinniped-supervisor`. +There are many ways to install and configure Pinniped. To make the steps of this tutorial as specific as possible, we +had to make some choices. The choices made for this tutorial were: -1. Create a new Kubernetes cluster for the Pinniped Concierge using `kind create cluster --name pinniped-concierge`. +- The Pinniped Supervisor can draw user identities from OIDC identity providers, Active Directory providers (via LDAP), + and generic LDAP providers. In this tutorial we will use Okta as an OIDC identity provider. + Okta offers a free developer account, so any reader should be able to sign up for an Okta + account if they would like to try these steps themselves. +- The Pinniped Supervisor can be installed on any type of Kubernetes cluster. In this tutorial we will + demonstrate the installation process for GKE because any reader should be able to sign up for a Google Cloud + account if they would like to try these steps themselves. We will use separate supervisor and workload clusters. 
+- The Pinniped Supervisor needs working ingress. There are many ways to configure ingress for apps running on
+  Kubernetes clusters, as described in the [howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}).
+  For this tutorial we will use a LoadBalancer Service with a public IP address. This is a simple setup which
+  allows us to terminate TLS inside the Supervisor app, keeping the connection secure all the way into
+  the Supervisor app's pods. A corporate installation of the Supervisor might keep it behind the corporate firewall,
+  but for this tutorial a public IP also allows your desktop (and anyone on the internet) to access the Supervisor's endpoints.
+  The HTTPS endpoints of a properly configured Supervisor are generally safe to expose publicly, as long as you are not concerned
+  with denial of service attacks (or have some external protection against such attacks).
+- Although it is possible to configure the Supervisor's FederationDomain to use an IP address, it is better to
+  use a DNS name. There are many ways to manage DNS. For this tutorial, we will use Google Cloud's
+  [Cloud DNS](https://cloud.google.com/dns) service to register a new hostname for the Supervisor
+  app's load balancer's public IP address. We won't describe how to prepare Cloud DNS to manage DNS for
+  the parent domain in this tutorial. This typically involves setting up Cloud DNS's servers as the list of DNS servers
+  for your domain within your domain registrar. We'll assume that this has already been done.
+- For web-based login flows as used by OIDC identity providers, the Pinniped Supervisor needs TLS certificates
+  that are trusted by the end users' web browsers. There are many ways to create TLS certificates.
+  There are also several ways to configure the TLS certificates on the Supervisor, as described in the
+  [docs for configuring the Supervisor]({{< ref "../howto/configure-supervisor" >}}).
+  For this tutorial we will use [Let's Encrypt](https://letsencrypt.org) with [cert-manager](https://cert-manager.io/docs/),
+  because any reader could use these services if they would like to try these steps themselves.
+- The Pinniped Concierge can be installed in many types of Kubernetes clusters, as described in
+  [supported Kubernetes clusters]({{< ref "../reference/supported-clusters" >}}). In this tutorial we will
+  use GKE clusters as our workload clusters, for the same reasons that we are using GKE for the supervisor cluster.
+  It is worth noting that a Supervisor running on GKE can provide authentication for workload clusters of any supported
+  Kubernetes type, not only for GKE workload clusters.
+- GKE and Google Cloud DNS can be managed in the Google Cloud Console web UI, or via the gcloud CLI. For this tutorial,
+  we will use the [gcloud CLI](https://cloud.google.com/sdk/docs/quickstart) so we can be as specific as possible.
+  However, the same steps could be performed via the UI instead.
+  This tutorial assumes that you have already authenticated with the gcloud CLI as a user who has permission to
+  run all the gcloud commands used below.
+- Pinniped provides authentication, not authorization. Inside Kubernetes, a user authenticated via Pinniped will have a username
+  and may also have a list of group names. These usernames and group names can be used to create authorization policies using any
+  Kubernetes authorization system, usually using [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac).

-1.
Deploy the Pinniped Supervisor with a valid serving certificate and network path. See - [deploy/supervisor/README.md](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/supervisor/README.md). +The details of the steps shown in this tutorial would be different if any of the above choices were made differently, +however the general concepts at each step would still apply. - For purposes of this demo, the following issuer is used. This issuer is specific to DNS and - TLS infrastructure set up for this demo: +### Install the Pinniped CLI - ```sh - issuer=https://my-supervisor.demo.pinniped.dev - ``` +If you have not already done so, [install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}). - This demo uses a `Secret` named `my-federation-domain-tls` to provide the serving certificate for - the - [`FederationDomain`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-config-v1alpha1-federationdomain). The - serving certificate `Secret` must be of type `kubernetes.io/tls`. +On macOS or Linux, you can do this using Homebrew: - The CA bundle for this serving - certificate is assumed to be written, base64-encoded, to a file named - `/tmp/pinniped-supervisor-ca-bundle-base64-encoded.pem`. +```sh +brew install vmware-tanzu/pinniped/pinniped-cli +``` -1. Create a - [`FederationDomain`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-supervisor-config-v1alpha1-federationdomain) - object to configure the Pinniped Supervisor to issue federated identities. +On other platforms, see the [command-line installation guide]({{< ref "../howto/install-cli" >}}) for more details. - ```sh - cat <}}/install-pinniped-concierge-crds.yaml - kubectl apply --context kind-pinniped-concierge \ - -f https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml - ``` +### Get the admin kubeconfigs for each GKE cluster - The `install-pinniped-concierge-crds.yaml` file contains the Concierge CustomResourceDefinitions. - These define the custom APIs that you use to configure and interact with the Concierge. +Most of the following installation and configuration steps are performed using the cluster's admin kubeconfig. +Let's download those kubeconfig files now. - The `install-pinniped-concierge-resources.yaml` file includes the rest of the Concierge resources with default deployment options. - If you would prefer to customize the available options, please see the [Concierge installation guide]({{< ref "../howto/install-concierge" >}}) - for instructions on how to deploy using `ytt`. +```sh +# Note: KUBECONFIG determines the output location for these commands. -1. Generate a random audience value for this cluster. +KUBECONFIG="supervisor-admin.yaml" gcloud container clusters get-credentials \ + "demo-supervisor-cluster" --project "$PROJECT" --zone "$ZONE" - ```sh - audience="$(openssl rand -hex 8)" - ``` +KUBECONFIG="workload1-admin.yaml" gcloud container clusters get-credentials \ + "demo-workload-cluster1" --project "$PROJECT" --zone "$ZONE" -1. Create a - [`JWTAuthenticator`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#k8s-api-go-pinniped-dev-generated-1-19-apis-concierge-authentication-v1alpha1-jwtauthenticator) - object to configure the Pinniped Concierge to authenticate using the Pinniped Supervisor. 
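+# Optional sanity check (an aside, not part of the original steps): confirm
+# that an admin kubeconfig works by running any read-only command against
+# that cluster, for example:
+kubectl get nodes --kubeconfig workload1-admin.yaml
+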
+KUBECONFIG="workload2-admin.yaml" gcloud container clusters get-credentials \ + "demo-workload-cluster2" --project "$PROJECT" --zone "$ZONE" +``` - ```sh - cat <}}) for more details. +There are several installation options described in the +[howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}). +For this tutorial, we will install the latest version using the `kapp` CLI. -1. Generate a kubeconfig for the current cluster. +```sh +kapp deploy --app pinniped-supervisor \ + --file https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml \ + --yes --kubeconfig supervisor-admin.yaml +``` - ```sh - pinniped get kubeconfig \ - --kubeconfig-context kind-pinniped-concierge \ - > /tmp/pinniped-kubeconfig - ``` +### Create a LoadBalancer Service for the Supervisor -1. Try using the generated kubeconfig to issue arbitrary `kubectl` commands. The `pinniped` command-line tool - opens a browser page that can be used to login to the external OIDC identity provider configured earlier. +Create a LoadBalancer to expose the Supervisor service to the public, being careful to only +expose the HTTPS endpoint (not the HTTP endpoint). - ```sh - kubectl --kubeconfig /tmp/pinniped-kubeconfig get pods -n pinniped-concierge - ``` +```sh +cat <}}) for examples of using other identity +providers. + +The Pinniped Supervisor app will be a client of Okta. +The general steps required to create and configure a client in Okta are: + +1. Sign up for Okta if you don't already have an account. They offer a free developer account. +2. Login to the admin UI of your account. +3. Create a test user with an email and a password. It does not need to be a real email address for the purposes of this tutorial. +4. Create an app in the Okta UI. + 1. For more information about creating an app in the Okta UI, see the + [Configure Supervisor With Okta OIDC howto doc]({{< ref "../howto/configure-supervisor-with-okta" >}}). + 2. Make sure that the test user is assigned to the app in the app's "Assignments" tab. + 3. Add the FederationDomain's callback endpoint to the "Sign-in redirect URIs" list on the app in the UI. + The callback endpoint is the FederationDomain's issuer URL plus `/callback`, + e.g. `https://demo-supervisor.pinniped.dev/demo-issuer/callback`. + 4. Get the app's "Okta Domain", "Client ID", and "Client secret" from the UI for use in the next step. + +### Configure the Supervisor to use Okta as the external identity provider + +Create an [OIDCIdentityProvider](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#oidcidentityprovider) and a Secret. + +```sh +# Replace the issuer's domain, the client ID, and client secret below. +cat <" +EOF +``` + +To check that the connection to Okta is working, look at the status conditions and status phase of the resource. +It should be in phase "Ready". + +```sh +kubectl get OIDCIdentityProvider okta \ + --namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml -o yaml +``` + +### Install and configure the Concierge on the workload clusters + +There are several installation options described in the +[howto guide for installing the Concierge]({{< ref "../howto/install-concierge" >}}). +For this tutorial, we will install the latest version using the `kapp` CLI. 
+ +```sh +kapp deploy --app pinniped-concierge \ + --file https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge.yaml \ + --yes --kubeconfig workload1-admin.yaml +``` + +Configure the Concierge on the first workload cluster to trust the Supervisor's +FederationDomain for authentication by creating a +[JWTAuthenticator](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#jwtauthenticator). + +```sh +# The audience value below is an arbitrary value which must uniquely +# identify this cluster. No other workload cluster should use the same value. +# It can have a human-readable component, but part of it should be random +# enough to ensure its uniqueness. +# The command `openssl rand -hex 8` can help in generating random values. +cat < workload1-developer.yaml + +pinniped get kubeconfig \ + --kubeconfig-context workload2-cluster \ + --kubeconfig workload2-admin.yaml > workload2-developer.yaml +``` + +These new kubeconfig files may be distributed to the app developers and devops users who +will be using these workload clusters. They do not contain any particular identity or credential. + +As the cluster creator, do not share the admin kubeconfig files with your workload cluster users. +Save the admin kubeconfig files somewhere private and secure for your own future use. + +See the [full documentation for the `pinniped get kubeconfig` command]({{< ref "../reference/cli" >}}) +for other available optional parameters. + +### As a developer or devops user, access the workload clusters by using regular kubectl commands + +A developer or devops user who would like to use the workload clusters may do so using kubectl with +the kubeconfig files provided to them by the cluster admin in the previous step. + +The kubeconfig files tell kubectl how to invoke the Pinniped CLI as a plugin to aid in authentication. +First, the user will need to install the Pinniped CLI at the same full path where it is referenced +inside the kubeconfig file. Or, they can adjust the full path to the Pinniped CLI inside +their own copy of the kubeconfig file, to make it match where they have locally installed the Pinniped CLI. + +Then the developer can run any kubectl command using a kubeconfig file +that was provided to them by the cluster admin. For example, let's run a command against the first workload cluster. + +```sh +kubectl get namespaces --kubeconfig workload1-developer.yaml +``` + +The first time this command is run, it will open their default web browser and redirect them to Okta for login. +After successfully logging in to Okta, for example as the user `walrus@example.com`, the kubectl command will +continue and will list the namespaces. +The user's identity in Kubernetes (username and group memberships) came from Okta, through Pinniped. + +That same developer user can access all other workload clusters in a similar fashion. For example, +let's run a command against the second workload cluster. + +```sh +kubectl get namespaces --kubeconfig workload2-developer.yaml +``` + +This time, the command will list namespaces immediately. +Even though you are accessing a different cluster, the web browser will not open again. +You do not need to interactively sign in again for the rest of the day to access +any workload cluster within the same FederationDomain. +Behind the scenes, Pinniped is performing token refreshes and token exchanges +on behalf of the user to create a short-lived, cluster-scoped token to access +this new workload cluster using the same identity from Okta. 
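+One way to see this shared identity directly (an aside; the `pinniped whoami`
+command appears later in this series, and the kubeconfig file names match the
+ones generated above):
+
+```sh
+# Print the username and group memberships that each cluster sees for the
+# currently cached session.
+pinniped whoami --kubeconfig workload1-developer.yaml
+pinniped whoami --kubeconfig workload2-developer.yaml
+```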
+ +If the user did not have RBAC permissions to perform the requested action, then they would see an error +from kubectl similar to +`Error from server (Forbidden): namespaces is forbidden: User "walrus@example.com" cannot list resource "namespaces" in API group "" `. + +Note that users can use any of kubectl's supported means of providing kubeconfig information to kubectl. +They are not limited to only using the `--kubeconfig` flag. For example, they could set the `KUBECONFIG` +environment variable instead. + +For more information about logging in to workload clusters, see the [howto doc about login]({{< ref "../howto/login" >}}). + +## What we've learned + +This tutorial showed: +- A detailed example of how to install and configure a Supervisor with ingress, DNS, TLS, and an external identity provider +- How to install the Concierge onto multiple workload clusters and configure them all to trust identities from the Supervisor +- How an admin can create and distribute kubeconfig files for the workload clusters +- How a developer or devops user can authenticate with kubectl using their identity from the external identity provider, + and how they can securely access all workload clusters for the rest of the day without needing to authenticate again + +## Removing the resources created in this tutorial + +If you would like to delete the resources created in this tutorial, you can use the following commands. + +```sh +# To uninstall the Pinniped Supervisor app and all related configuration: +kapp delete --app pinniped-supervisor --yes --kubeconfig supervisor-admin.yaml + +# To uninstall the Pinniped Concierge apps and all related configuration: +kapp delete --app pinniped-concierge --yes --kubeconfig workload1-admin.yaml + +kapp delete --app pinniped-concierge --yes --kubeconfig workload2-admin.yaml + +# To delete the GKE clusters entirely: +gcloud container clusters delete "demo-supervisor-cluster" \ + --project "$PROJECT" --zone "$ZONE" --quiet + +gcloud container clusters delete "demo-workload-cluster1" \ + --project "$PROJECT" --zone "$ZONE" --quiet + +gcloud container clusters delete "demo-workload-cluster2" \ + --project "$PROJECT" --zone "$ZONE" --quiet + +# To delete the DNS entry for the Supervisor: +gcloud dns record-sets transaction start \ + --zone="$DNS_ZONE" --project "$PROJECT" + +gcloud dns record-sets transaction remove "$PUBLIC_IP" \ + --name="$DNS_NAME." --ttl="300" --type="A" \ + --zone="$DNS_ZONE" --project "$PROJECT" + +gcloud dns record-sets transaction execute \ + --zone="$DNS_ZONE" --project "$PROJECT" + +# To delete the service account created above for cert-manager: +gcloud iam service-accounts delete \ + "demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \ + --project "$PROJECT" --quiet +``` diff --git a/site/content/docs/tutorials/concierge-and-supervisor-gke-demo.md b/site/content/docs/tutorials/concierge-and-supervisor-gke-demo.md deleted file mode 100644 index 7d240e57..00000000 --- a/site/content/docs/tutorials/concierge-and-supervisor-gke-demo.md +++ /dev/null @@ -1,462 +0,0 @@ ---- -title: Use the Pinniped Supervisor and Concierge for federated login on GKE -description: See how the Pinniped Supervisor streamlines login to multiple Kubernetes clusters. -cascade: -layout: docs -menu: -docs: -name: Concierge with Supervisor on GKE -parent: tutorials ---- - -## Overview - -This tutorial is intended to be a step-by-step example of installing and configuring the Pinniped Supervisor -and Concierge components for a multi-cluster federated authentication solution. 
It will show every -command needed to replicate the same setup to allow the reader to follow the same steps themselves. - -A single Pinniped Supervisor can provide authentication for many Kubernetes clusters. In a typical deployment: - -- A single Supervisor is deployed on a special cluster where app developers and devops users have no access. - App developers and devops users should have no access at least to the resources in the Supervisor's namespace, - but usually have no access to the whole cluster. For this tutorial, let's call this cluster the "supervisor cluster". -- App developers and devops users can then use their identities provided by the Supervisor to log in to many - clusters where they can manage their apps. For this tutorial, let's call these clusters the "workload clusters". - -Choices made for this tutorial: - -- The Pinniped Supervisor can draw user identities from OIDC identity providers, Active Directory providers (via LDAP), - and generic LDAP providers. In this tutorial we will use Okta as an OIDC identity provider. - Okta offers a free developer account, so any reader should be able to sign up for an Okta - account if they would like to try these steps themselves. -- The Pinniped Supervisor can be installed on any type of Kubernetes cluster. In this tutorial we will - demonstrate the installation process for GKE because any reader should be able to sign up for a Google Cloud - account if they would like to try these steps themselves. We will use separate supervisor and workload clusters. -- The Pinniped Supervisor needs working ingress. There are many ways to configure ingress for apps running on - Kubernetes clusters, as described in the [howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}). - For this tutorial we will use a LoadBalancer Service with a public IP address. This is a simple setup which - allows us to terminate TLS inside the Supervisor app, keeping the connection secure all the way into - the Supervisor app's pods. A corporate installation of the Supervisor might keep it behind the corporate firewall, - but for this tutorial a public IP also allows your desktop (and anyone on the internet) to access the Supervisor's endpoints. - The HTTPS endpoints of a properly configured Supervisor are generally safe to expose publicly, as long as you are not concerned - with denial of service attacks (or have some external protection against such attacks). -- Although it is possible to configure the Supervisor's FederationDomain to use an IP address, it is better to - use a DNS name. There are many ways to manage DNS. For this tutorial, we will use Google Cloud's - [Cloud DNS](https://cert-manager.io/docs/) service to register a new subdomain for the Supervisor - app's load balancer's public IP address. We won't describe how to prepare Cloud DNS to manage DNS for - the parent domain in this tutorial. This typically involves setting up Cloud DNS's servers as the list of DNS servers - for your domain within your domain registrar. We'll assume that this has already been done. -- For web-based login flows as used by OIDC identity providers, the Pinniped Supervisor needs TLS certificates - that are trusted by the end users' web browsers. There are many ways to create TLS certificates. - There are also several ways to configure the TLS certificates on the Supervisor, as described in the - [docs for configuring the Supervisor]({{< ref "../howto/configure-supervisor" >}}). 
- For this tutorial we will use [Let's Encrypt](https://letsencrypt.org) with [cert-manager](https://cert-manager.io/docs/), - because any reader could use these services if they would like to try these steps themselves. -- The Pinniped Concierge can be installed in many types of Kubernetes clusters, as described in - [supported Kubernetes clusters]({{< ref "../reference/supported-clusters" >}}). In this tutorial we will - use GKE clusters as our workload clusters, for the same reasons that we are using GKE for the supervisor cluster. - It is worth noting that a Supervisor running on GKE can provide authentication for workload clusters of any supported - Kubernetes type, not only for GKE workload clusters. -- GKE and Google Cloud DNS can be managed in the Google Cloud Console web UI, or via the `gcloud` CLI. For this tutorial, - we will use the [`glcoud` CLI](https://cloud.google.com/sdk/docs/quickstart) so we can be as specific as possible. - However, the same steps could be performed via the UI instead. - This tutorial assumes that you have already authenticated with the `gcloud` CLI. -- Pinniped provides authentication, not authorization. A user authenticated via Pinniped will have a username - and may have a list of group names. These names can be used to create authorization policies using any - Kubernetes authorization system, usually using Kubernetes RBAC. - -The details of the steps shown in this tutorial would be different if any of the above choices were made differently, -however the general concepts at each step would still apply. - -### Install the Pinniped CLI - -If you have not already done so, [install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}). - -### Create some GKE clusters - -For the rest of this tutorial, let's assume that your Google Cloud project name and your favorite Google Cloud zone name -are set as environment variables. - -```sh -export PROJECT="my-gcp-project-name" -export ZONE="us-central1-c" -``` - -Let's create one supervisor cluster and two workload clusters. There are many options to consider here, but for this -tutorial we will use only the most basic options. - -```sh -gcloud container clusters create "demo-supervisor-cluster" --project "$PROJECT" --zone "$ZONE" -gcloud container clusters create "demo-workload-cluster1" --project "$PROJECT" --zone "$ZONE" -gcloud container clusters create "demo-workload-cluster2" --project "$PROJECT" --zone "$ZONE" -``` - -### Get the admin kubeconfigs for each GKE clsuter - -Most of the following installation and configuration steps are performed using the cluster's admin kubeconfig. -Let's download those kubeconfig files now. - -```sh -# The KUBECONFIG variable determines the output location. -KUBECONFIG="supervisor-admin.yaml" gcloud container clusters get-credentials "demo-supervisor-cluster" --project "$PROJECT" --zone "$ZONE" -KUBECONFIG="workload1-admin.yaml" gcloud container clusters get-credentials "demo-workload-cluster1" --project "$PROJECT" --zone "$ZONE" -KUBECONFIG="workload2-admin.yaml" gcloud container clusters get-credentials "demo-workload-cluster2" --project "$PROJECT" --zone "$ZONE" -``` - -### Decide which domain or subdomain will be used for the Supervisor - -The Pinniped maintainers own the pinniped.dev domain and have already set it up for use with Google Cloud DNS, -so for this tutorial we will call our Supervisor server `demo-supervisor.pinniped.dev`. 
- -### Install the Pinniped Supervisor on the supervisor cluster - -There are several installation options described in the -[howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}). -For this tutorial, we will install the latest version using the `kapp` CLI. - -```sh -kapp deploy --app pinniped-supervisor \ - --file https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml \ - --yes --kubeconfig supervisor-admin.yaml -``` - -### Create a LoadBalancer Service for the Supervisor - -Create a LoadBalancer to expose the Supervisor service to the public, being careful to only -expose the HTTPS endpoint (not the HTTP endpoint). - -```sh -cat <}}). - 2. Make sure that the test user is assigned to the app in the app's "Assignments" tab. - 3. Add the FederationDomain's callback endpoint to the "Sign-in redirect URIs" list on the app in the UI. - The callback endpoint is the FederationDomain's issuer URL plus `/callback`, - e.g. `https://demo-supervisor.pinniped.dev/demo-issuer/callback`. - 4. Get the app's "Okta Domain", "Client ID", and "Client secret" from the UI for use in the next step. - -### Configure the Supervisor to use Okta as the identity provider - -Create an OIDCIdentityProvider and a Secret. - -```sh -cat <" -EOF -``` - -To check that the connection to Okta is working, look at the status conditions and status phase of the resource. -It should be in phase "Ready". - -```sh -kubectl get OIDCIdentityProvider okta --namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml -o yaml -``` - -### Install and configure the Concierge on the workload clusters - -There are several installation options described in the -[howto guide for installing the Concierge]({{< ref "../howto/install-concierge" >}}). -For this tutorial, we will install the latest version using the `kapp` CLI. - -```sh -kapp deploy --app pinniped-concierge \ - --file https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge.yaml \ - --yes --kubeconfig workload1-admin.yaml -``` - -Configure the Concierge on the first workload cluster to trust the Supervisor's -FederationDomain for authentication. - -```sh -cat < workload1-developer.yaml -pinniped get kubeconfig --kubeconfig workload2-admin.yaml > workload2-developer.yaml -``` - -These new kubeconfig files may be distributed to the app developers and devops users who -will be using these workload clusters. They do not contain any particular identity or credential. - -### As a developer or devops user, access the workload clusters by using regular kubectl commands - -A developer or devops user who would like to use the workload clusters may do so using kubectl with -the kubeconfig files provided to them by the cluster admin in the previous step. - -First, they will need to install the Pinniped CLI at the same full path where it is referenced -inside the kubeconfig file, or they will need to adjust the full path to the Pinniped CLI in -their own copy of the kubeconfig file. - -Then the developer can run any kubectl command using the `workload1-developer.yaml` kubeconfig file -that was provided to them by the cluster admin. - -```sh -kubectl get namespaces --kubeconfig workload1-developer.yaml -``` - -The first time this command is run, it will open their default web browser and redirect them to Okta for login. -After successfully logging in to Okta, the kubectl command will complete and list the namespaces. -The user's identity in Kubernetes (usernames and group memberships) came from Okta, through Pinniped. 
- -That same developer user can access all other workload clusters in a similar fashion. For example, -they can use the `workload2-developer.yaml` kubeconfig file to access the second workload cluster. - -```sh -kubectl get namespaces --kubeconfig workload2-developer.yaml -``` - -This time the command will list namespace immediately. -Even though the user is accessing a different cluster, the web browser will not open again. -The user does not need to interactively sign in again for the rest of the day to access -any workload cluster within the same FederationDomain. -Behind the scenes, Pinniped is performing token refreshes and token exchanges -on behalf of the user to create a short-lived, cluster-scoped token to access -this new workload cluster using the same identity from Okta. - -### Removing the resources created in this tutorial - -If you would like to delete all the resources created in this tutorial, you can use the following commands. - -```sh -TODO -``` diff --git a/site/content/docs/tutorials/concierge-only-demo.md b/site/content/docs/tutorials/concierge-only-demo.md index e6adfd49..9d012e20 100644 --- a/site/content/docs/tutorials/concierge-only-demo.md +++ b/site/content/docs/tutorials/concierge-only-demo.md @@ -10,6 +10,27 @@ menu: weight: 100 --- +## Overview + +This tutorial shows how to use the Pinniped Concierge on Kubernetes clusters. +If you would like to learn how to use the Pinniped Supervisor and Concierge together to +provided federated identity with a single sign-on user experience to many Kubernetes clusters, +please instead see this other tutorial: +- [Concierge with Supervisor: a complete example of every step, demonstrated using GKE clusters]({{< ref "concierge-and-supervisor-demo" >}}) + +Installing and trying the Pinniped Concierge on any cluster consists of the following general steps. See the next section below +for a more specific example of installing onto a local kind cluster, including the exact commands to use for that case. + +1. [Install the Concierge]({{< ref "../howto/install-concierge" >}}). +1. [Install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}). +1. Configure the Concierge with a + [JWT]({{< ref "../howto/configure-concierge-jwt" >}}) or + [webhook]({{< ref "../howto/configure-concierge-webhook" >}}) authenticator. +1. Generate a kubeconfig using the Pinniped command-line tool (run `pinniped get kubeconfig --help` for more information). +1. Run `kubectl` commands using the generated kubeconfig. + + The Pinniped Concierge is automatically be used for authentication during those commands. + ## Prerequisites 1. A Kubernetes cluster of a type supported by Pinniped as described in [architecture](/docs/background/architecture). @@ -27,21 +48,6 @@ menu: 1. A kubeconfig where the current context points to the cluster and has administrator-like privileges on that cluster. -## Overview - -Installing and trying the Pinniped Concierge on any cluster consists of the following general steps. See the next section below -for a more specific example of installing onto a local kind cluster, including the exact commands to use for that case. - -1. [Install the Concierge]({{< ref "../howto/install-concierge" >}}). -1. [Install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}). -1. Configure the Concierge with a - [JWT]({{< ref "../howto/configure-concierge-jwt" >}}) or - [webhook]({{< ref "../howto/configure-concierge-webhook" >}}) authenticator. -1. 
Generate a kubeconfig using the Pinniped command-line tool (run `pinniped get kubeconfig --help` for more information). -1. Run `kubectl` commands using the generated kubeconfig. - - The Pinniped Concierge is automatically be used for authentication during those commands. - ## Example of deploying on kind [kind](https://kind.sigs.k8s.io) is a tool for creating and managing Kubernetes clusters on your local machine diff --git a/site/content/resources/_index.html b/site/content/resources/_index.html index 3cfc848b..a141bb43 100644 --- a/site/content/resources/_index.html +++ b/site/content/resources/_index.html @@ -8,8 +8,11 @@ layout: section

+ From 05ec8cba8c6511a694c3fe14629db95b54fadc69 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Fri, 11 Feb 2022 17:16:40 -0800 Subject: [PATCH 22/24] Add a new subheading to the tutorial doc --- site/content/docs/tutorials/concierge-and-supervisor-demo.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/site/content/docs/tutorials/concierge-and-supervisor-demo.md b/site/content/docs/tutorials/concierge-and-supervisor-demo.md index 8668bff7..9c711869 100644 --- a/site/content/docs/tutorials/concierge-and-supervisor-demo.md +++ b/site/content/docs/tutorials/concierge-and-supervisor-demo.md @@ -121,6 +121,8 @@ had to make some choices. The choices made for this tutorial were: The details of the steps shown in this tutorial would be different if any of the above choices were made differently, however the general concepts at each step would still apply. +## Ready? Let's go! + ### Install the Pinniped CLI If you have not already done so, [install the Pinniped command-line tool]({{< ref "../howto/install-cli" >}}). From 230e563ab7391060811c2dc75dfb3bdd611a8687 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Mon, 14 Feb 2022 17:23:57 -0800 Subject: [PATCH 23/24] Another draft of the new tutorial guide --- .../concierge-and-supervisor-demo.md | 190 ++++++++++++++---- .../docs/tutorials/concierge-only-demo.md | 7 +- site/content/resources/_index.html | 24 +-- 3 files changed, 168 insertions(+), 53 deletions(-) diff --git a/site/content/docs/tutorials/concierge-and-supervisor-demo.md b/site/content/docs/tutorials/concierge-and-supervisor-demo.md index 9c711869..f545a243 100644 --- a/site/content/docs/tutorials/concierge-and-supervisor-demo.md +++ b/site/content/docs/tutorials/concierge-and-supervisor-demo.md @@ -68,9 +68,9 @@ A single Pinniped Supervisor can provide authentication for any number of Kubern - A single Supervisor is deployed on a special cluster where app developers and devops users have no access. App developers and devops users should have no access at least to the resources in the Supervisor's namespace, - but usually have no access to the whole cluster. For this tutorial, let's call this cluster the "supervisor cluster". + but usually have no access to the whole cluster. For this tutorial, let's call this cluster the *"supervisor cluster"*. - App developers and devops users can then use their identities provided by the Supervisor to log in to many - clusters where they can manage their apps. For this tutorial, let's call these clusters the "workload clusters". + clusters where they can manage their apps. For this tutorial, let's call these clusters the *"workload clusters"*. The Pinniped Concierge component is installed into each workload cluster and is configured to trust the single Supervisor. The Concierge acts as an in-cluster agent to provide authentication services. @@ -181,23 +181,27 @@ KUBECONFIG="workload2-admin.yaml" gcloud container clusters get-credentials \ ### Decide which hostname and domain or subdomain will be used for the Supervisor The Pinniped maintainers own the pinniped.dev domain and have already set it up for use with Google Cloud DNS, -so for this tutorial we will call our Supervisor server demo-supervisor.pinniped.dev. +so for this tutorial we will call our Supervisor server `demo-supervisor.pinniped.dev`. ### Install the Pinniped Supervisor on the supervisor cluster There are several installation options described in the [howto guide for installing the Supervisor]({{< ref "../howto/install-supervisor" >}}). 
-For this tutorial, we will install the latest version using the `kapp` CLI.
+For this tutorial, we will install the latest version using the `kubectl` CLI.
 
 ```sh
-kapp deploy --app pinniped-supervisor \
-  --file https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml \
-  --yes --kubeconfig supervisor-admin.yaml
+kubectl apply \
+  -f https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml \
+  --kubeconfig supervisor-admin.yaml
 ```
 
 ### Create a LoadBalancer Service for the Supervisor
 
-Create a LoadBalancer to expose the Supervisor service to the public, being careful to only
+There are several options for exposing the Supervisor's endpoints outside the cluster, which are described in the
+[howto guide for configuring the Supervisor]({{< ref "../howto/configure-supervisor" >}}). For this tutorial,
+we will use a public LoadBalancer.
+
+Create a LoadBalancer to expose the Supervisor's endpoints to the public, being careful to only
 expose the HTTPS endpoint (not the HTTP endpoint).
 
 ```sh
@@ -218,13 +222,14 @@
 EOF
 ```
 
-It may take a little time for the LoadBalancer to be assigned a public IP.
-Check for an `EXTERNAL-IP` using the following command. The value of the
-`EXTERNAL-IP` is the public IP of you LoadBalancer, which will be used
-in the steps below.
+Check for an IP using the following command. The value returned
+is the public IP of your LoadBalancer, which will be used
+in the steps below. It may take a little time for the LoadBalancer to be assigned a public IP, and this
+command will have empty output until then.
 
 ```sh
 kubectl get service pinniped-supervisor-loadbalancer \
+  -o jsonpath='{.status.loadBalancer.ingress[*].ip}' \
   --namespace pinniped-supervisor --kubeconfig supervisor-admin.yaml
 ```
@@ -251,6 +256,7 @@ gcloud projects add-iam-policy-binding "$PROJECT" \
 ```
 
 Create and download a key for the new service account, and then put it into a Secret on the cluster.
+Be careful with this key as it allows full control over the DNS of your Cloud DNS zones.
 
 ```sh
 gcloud iam service-accounts keys create demo-dns-solver-key.json \
@@ -346,7 +352,7 @@ spec:
 EOF
 ```
 
-Wait for the Secret to get created. Use the following command to see if it exists.
+Wait for the Secret to get created. This may take a few minutes. Use the following command to see if it exists.
 
 ```sh
 kubectl get secret supervisor-tls-cert \
@@ -458,12 +464,26 @@
 
 There are several installation options described in the
 [howto guide for installing the Concierge]({{< ref "../howto/install-concierge" >}}).
-For this tutorial, we will install the latest version using the `kapp` CLI.
+For this tutorial, we will install the latest version using the `kubectl` CLI.
 
 ```sh
+# Install onto the first workload cluster.
+kubectl apply -f \
+  "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
+  --kubeconfig workload1-admin.yaml
+
+kubectl apply -f \
+  "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
+  --kubeconfig workload1-admin.yaml
+
+# Install onto the second workload cluster.
+kubectl apply -f \ + "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \ + --kubeconfig workload2-admin.yaml + +kubectl apply -f \ + "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \ + --kubeconfig workload2-admin.yaml ``` Configure the Concierge on the first workload cluster to trust the Supervisor's @@ -507,19 +527,39 @@ EOF ### Configure RBAC rules for the developer and devops users -For this tutorial, we will keep the Kubernetes RBAC configuration simple. For example, -if one of your Okta users has the email address `walrus@example.com`, +For this tutorial, we will keep the Kubernetes RBAC configuration simple. +We'll use a contrived example of RBAC policies to avoid getting into RBAC policy design discussions. + +If one of your Okta users has the email address `walrus@example.com`, then you could allow that user to [edit](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) -most things in one workload cluster, +things in a new namespace in one workload cluster, and [view](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) most things in the other workload cluster, with the following commands. ```sh -kubectl create clusterrolebinding developer-can-edit \ - --clusterrole edit \ - --user walrus@example.com \ +# Create a namespace in the first workload cluster. +kubectl create namespace "dev" \ --kubeconfig workload1-admin.yaml +# Allow the developer to edit everything in the new namespace. +cat < workload1-developer.yaml pinniped get kubeconfig \ - --kubeconfig-context workload2-cluster \ --kubeconfig workload2-admin.yaml > workload2-developer.yaml ``` @@ -557,6 +598,22 @@ Save the admin kubeconfig files somewhere private and secure for your own future See the [full documentation for the `pinniped get kubeconfig` command]({{< ref "../reference/cli" >}}) for other available optional parameters. +### Optional: Merge the developer kubeconfig files to distribute them as one file + +The `kubectl` CLI [can merge kubeconfig files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#merging-kubeconfig-files). +If you wanted to distribute one kubeconfig file instead of one per cluster, +you could choose to merge the Pinniped-compatible kubeconfig files. + +```sh +# For this command, KUBECONFIG is treated as a list of input files. +KUBECONFIG="workload1-developer.yaml:workload2-developer.yaml" kubectl \ + config view --flatten -o yaml > all-workload-clusters-developer.yaml +``` + +The developer who uses the combined kubeconfig file will need to use the standard `kubectl` methods to choose their current context. + +For clarity, the steps shown below will continue to use the separate kubeconfig files. + ### As a developer or devops user, access the workload clusters by using regular kubectl commands A developer or devops user who would like to use the workload clusters may do so using kubectl with @@ -576,11 +633,24 @@ kubectl get namespaces --kubeconfig workload1-developer.yaml The first time this command is run, it will open their default web browser and redirect them to Okta for login. After successfully logging in to Okta, for example as the user `walrus@example.com`, the kubectl command will -continue and will list the namespaces. +continue and will try to list the namespaces. The user's identity in Kubernetes (username and group memberships) came from Okta, through Pinniped. +Oops! 
 That same developer user can access all other workload clusters in a similar fashion. For example,
-let's run a command against the second workload cluster.
+let's run a command against the second workload cluster. Recall that the developer is allowed
+to read everything in the second workload cluster.
 
 ```sh
 kubectl get namespaces --kubeconfig workload2-developer.yaml
@@ -594,16 +664,34 @@
 Behind the scenes, Pinniped is performing token refreshes and token exchanges
 on behalf of the user to create a short-lived, cluster-scoped token to access
 this new workload cluster using the same identity from Okta.
 
-If the user did not have RBAC permissions to perform the requested action, then they would see an error
-from kubectl similar to
-`Error from server (Forbidden): namespaces is forbidden: User "walrus@example.com" cannot list resource "namespaces" in API group "" `.
-
 Note that users can use any of kubectl's supported means of providing kubeconfig information to kubectl.
 They are not limited to only using the `--kubeconfig` flag. For example, they could set the `KUBECONFIG`
 environment variable instead.
 
 For more information about logging in to workload clusters, see the
 [howto doc about login]({{< ref "../howto/login" >}}).
 
+### Whoami
+
+Not sure what identity you're using on the cluster? Pinniped has a convenient feature to help out with that.
+
+```sh
+pinniped whoami --kubeconfig workload2-developer.yaml
+```
+
+The output will include your username and group names, and will look similar to the following output.
+
+```
+Current cluster info:
+
+Name: gke_your_project_us-central1-c_demo-workload-cluster2-pinniped
+URL: https://1.2.3.4
+
+Current user info:
+
+Username: walrus@example.com
+Groups: Everyone, developers, system:authenticated
+```
+
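+Notice that the user's Okta group memberships (such as `developers`) are part of their Kubernetes
+identity too. As an optional variation on the RBAC setup above, you could bind a role to a whole
+group instead of a single user. This example is only an illustration using the group name shown above.
+
+```sh
+kubectl create clusterrolebinding developers-can-view \
+  --clusterrole view \
+  --group developers \
+  --kubeconfig workload2-admin.yaml
+```
+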
 ## What we've learned
 
 This tutorial showed:
@@ -618,13 +706,33 @@ This tutorial showed:
 If you would like to delete the resources created in this tutorial, you can use the following commands.
 
 ```sh
-# To uninstall the Pinniped Supervisor app and all related configuration:
-kapp delete --app pinniped-supervisor --yes --kubeconfig supervisor-admin.yaml
+# To uninstall the Pinniped Supervisor app and all related configuration
+# (including the GCP load balancer):
+kubectl delete \
+  -f "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml" \
+  --kubeconfig supervisor-admin.yaml
+
+# To uninstall cert-manager (assuming you already ran the above command):
+kubectl delete -f \
+  "https://github.com/jetstack/cert-manager/releases/download/v1.5.3/cert-manager.yaml" \
+  --kubeconfig supervisor-admin.yaml
 
 # To uninstall the Pinniped Concierge apps and all related configuration:
-kapp delete --app pinniped-concierge --yes --kubeconfig workload1-admin.yaml
+kubectl delete -f \
+  "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
+  --kubeconfig workload1-admin.yaml
 
-kapp delete --app pinniped-concierge --yes --kubeconfig workload2-admin.yaml
+kubectl delete -f \
+  "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
+  --kubeconfig workload1-admin.yaml
+
+kubectl delete -f \
+  "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-resources.yaml" \
+  --kubeconfig workload2-admin.yaml
+
+kubectl delete -f \
+  "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-concierge-crds.yaml" \
+  --kubeconfig workload2-admin.yaml
 
 # To delete the GKE clusters entirely:
 gcloud container clusters delete "demo-supervisor-cluster" \
@@ -647,7 +755,11 @@ gcloud dns record-sets transaction remove "$PUBLIC_IP" \
 gcloud dns record-sets transaction execute \
   --zone="$DNS_ZONE" --project "$PROJECT"
 
-# To delete the service account created above for cert-manager:
+# To delete the service account we created for cert-manager:
+gcloud projects remove-iam-policy-binding "$PROJECT" \
+  --member "serviceAccount:demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \
+  --role roles/dns.admin --condition=None
+
 gcloud iam service-accounts delete \
   "demo-dns-solver@$PROJECT.iam.gserviceaccount.com" \
   --project "$PROJECT" --quiet
diff --git a/site/content/docs/tutorials/concierge-only-demo.md b/site/content/docs/tutorials/concierge-only-demo.md
index 9d012e20..ab6d299c 100644
--- a/site/content/docs/tutorials/concierge-only-demo.md
+++ b/site/content/docs/tutorials/concierge-only-demo.md
@@ -27,9 +27,12 @@ for a more specific example of installing onto a local kind cluster, including t
 [JWT]({{< ref "../howto/configure-concierge-jwt" >}}) or
 [webhook]({{< ref "../howto/configure-concierge-webhook" >}})
 authenticator.
 1. Generate a kubeconfig using the Pinniped command-line tool (run `pinniped get kubeconfig --help` for more information).
-1. Run `kubectl` commands using the generated kubeconfig.
+1. Run `kubectl` commands using the generated kubeconfig. The Pinniped Concierge will automatically be used for authentication during those commands, as sketched below.
 
-   The Pinniped Concierge is automatically be used for authentication during those commands.
+Please be aware that using the Concierge without the Supervisor is an advanced use case, not the typical use case.
+For example, the Supervisor issues cluster-scoped credentials that cannot be replayed against other clusters,
+so using the Concierge without the Supervisor removes that protection. You might have designed another system to provide
+that protection, but if not then please carefully consider the security implications.
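+
+A minimal sketch of the flow described in the steps above might look like the following, assuming you
+have already installed the Pinniped CLI and configured an authenticator on the cluster (the kubeconfig
+file name here is only illustrative):
+
+```sh
+# Generate a Pinniped-compatible kubeconfig for this cluster.
+pinniped get kubeconfig > developer.yaml
+
+# Use it like any other kubeconfig; the Concierge authenticates each request.
+kubectl get pods --kubeconfig developer.yaml
+```
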
## Prerequisites diff --git a/site/content/resources/_index.html b/site/content/resources/_index.html index a141bb43..a26648fa 100644 --- a/site/content/resources/_index.html +++ b/site/content/resources/_index.html @@ -15,6 +15,18 @@ layout: section From f728ea743f2d28737dc3baa28de93da73653eece Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Tue, 15 Feb 2022 09:04:47 -0800 Subject: [PATCH 24/24] Add --ignore-not-found to delete Supervisor app command --- site/content/docs/tutorials/concierge-and-supervisor-demo.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/site/content/docs/tutorials/concierge-and-supervisor-demo.md b/site/content/docs/tutorials/concierge-and-supervisor-demo.md index f545a243..394d3166 100644 --- a/site/content/docs/tutorials/concierge-and-supervisor-demo.md +++ b/site/content/docs/tutorials/concierge-and-supervisor-demo.md @@ -710,7 +710,8 @@ If you would like to delete the resources created in this tutorial, you can use # (including the GCP load balancer): kubectl delete \ -f "https://get.pinniped.dev/{{< latestversion >}}/install-pinniped-supervisor.yaml" \ - --kubeconfig supervisor-admin.yaml + --kubeconfig supervisor-admin.yaml \ + --ignore-not-found # To uninstall cert-manager (assuming you already ran the above command): kubectl delete -f \