Merge branch 'main' into 2022-readme

This commit is contained in:
Ryan Richard 2022-03-21 10:44:32 -07:00 committed by GitHub
commit bcfddec0a2
5 changed files with 96 additions and 99 deletions

View File

@@ -33,15 +33,14 @@ The following table includes the current roadmap for Pinniped. If you have any q
Last Updated: Jan 2022
Last Updated: March 2022
|Theme|Description|Timeline|
|--|--|--|
|Improving Security Posture|Support for refreshing LDAP/AD Group information |March 2022|
|Improving Security Posture|Support FIPS compliant Boring crypto libraries |March/April 2022|
|Multiple IDP support|Support multiple IDPs configured on a single Supervisor|April 2022|
|Improving Security Posture|TLS hardening |March/April 2022|
|Improving Security Posture|Support Audit logging of security events related to Authentication |April/May 2022|
|Improving Usability|Support for integrating with UI/Dashboards |June/July 2022|
|Multiple IDP support|Support multiple IDPs configured on a single Supervisor|Exploring/Ongoing|
|Improving Security Posture|mTLS for Supervisor sessions |Exploring/Ongoing|
|Improving Security Posture|Key management/rotation for Pinniped components with minimal downtime |Exploring/Ongoing|
|Improving Security Posture|Support for Session Logout |Exploring/Ongoing|

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
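# Fail fast: exit on any error (-e), treat unset variables as errors (-u), and make a
# pipeline fail when any command in it fails (-o pipefail).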
set -euo pipefail
@@ -9,4 +9,5 @@ ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd "${ROOT}"
# To choose a specific version of kube, add this option to the command below: `--image kindest/node:v1.18.8`.
# To debug the kind config, add this option to the command below: `-v 10`
kind create cluster --config "hack/lib/kind-config/single-node.yaml" --name pinniped
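# For example, the command above with both optional flags applied might look like the following
# (the node image tag is only an illustration; use whichever kindest/node tag matches the Kube
# version you want to test against):
#   kind create cluster --config "hack/lib/kind-config/single-node.yaml" --name pinniped --image kindest/node:v1.23.4 -v 10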

View File

@@ -24,6 +24,11 @@ nodes:
containerPort: 31235
hostPort: 12346
listenAddress: 127.0.0.1
# Kind v0.12.0 ignores kubeadm.k8s.io/v1beta2 for Kube v1.23+ but uses it for older versions of Kube.
# Previous versions of Kind would use kubeadm.k8s.io/v1beta2 for all versions of Kube including 1.23.
# To try to maximize compatibility with various versions of Kind and Kube, define this
# ClusterConfiguration twice and hope that Kind will use the one that it likes for the given version
# of Kube, and ignore the one that it doesn't like. This seems to work, at least for Kind v0.12.0.
kubeadmConfigPatches:
- |
apiVersion: kubeadm.k8s.io/v1beta2
@@ -39,3 +44,10 @@ kubeadmConfigPatches:
# are exercised. For whatever reason, leaving this as false (i.e. use kube-proxy) appears to
# hide some network misconfigurations when used internally by the API server aggregation layer.
enable-aggregator-routing: "true"
- |
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
extraArgs:
# See comment above.
enable-aggregator-routing: "true"
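# Sanity check (not part of this config): one way to see which of the two patches Kind actually
# rendered is to read the generated kubeadm config from inside the node container. This assumes
# Kind's default node naming for a cluster named "pinniped" and Kind's internal config path,
# both of which may change between Kind releases:
#   docker exec pinniped-control-plane cat /kind/kubeadm.conf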

View File

@@ -32,116 +32,122 @@ import (
"go.pinniped.dev/test/testlib"
)
// This test is intended to exercise the supervisor's HTTP port 8080. It can either access it directly via
// the env.SupervisorHTTPAddress setting, or it can access it indirectly through a TLS-enabled Ingress which
// uses the supervisor's port 8080 as its backend via the env.SupervisorHTTPSIngressAddress and
// env.SupervisorHTTPSIngressCABundle settings, or it can exercise it both ways when all of those
// env settings are present.
//
// Talking to the supervisor's port 8443, where the supervisor terminates TLS itself, is
// covered by the other tests in this file.
// TestSupervisorOIDCDiscovery_Disruptive is intended to exercise the supervisor's HTTPS port.
// It can either access it directly via the env.SupervisorHTTPSAddress setting, or it can access
// it indirectly through a TLS-enabled Ingress which uses the supervisor's HTTPS port as its backend
// (via the env.SupervisorHTTPSIngressAddress and env.SupervisorHTTPSIngressCABundle settings),
// or it can exercise it both ways when all of those env settings are present.
// Never run this test in parallel since deleting all federation domains is disruptive, see main_test.go.
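//
// As an illustration only, a test environment that exercises both paths would define values for
// the env settings named above (the corresponding environment variable names, e.g.
// PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS, are inferred here from testlib's naming convention).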
func TestSupervisorOIDCDiscovery_Disruptive(t *testing.T) {
env := testlib.IntegrationEnv(t)
client := testlib.NewSupervisorClientset(t)
kubeClient := testlib.NewKubernetesClientset(t)
ns := env.SupervisorNamespace
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
httpsAddress := env.SupervisorHTTPSAddress
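// The configured address may be "host", "host:port", "ip", or "ip:port". Strip any port, and
// when what remains is an IP address, remember it so it can be added as a SAN on the test
// serving certificate created below.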
var ips []net.IP
if host, _, err := net.SplitHostPort(httpsAddress); err == nil {
httpsAddress = host
}
if ip := net.ParseIP(httpsAddress); ip != nil {
ips = append(ips, ip)
}
temporarilyRemoveAllFederationDomainsAndDefaultTLSCertSecret(ctx, t, ns, defaultTLSCertSecretName(env), client, testlib.NewKubernetesClientset(t))
defaultCA := createTLSCertificateSecret(ctx, t, ns, httpsAddress, ips, defaultTLSCertSecretName(env), kubeClient)
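// The CA that signed this generated serving certificate is what the "direct https" test case
// below trusts when talking to the supervisor's default TLS endpoint.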
tests := []struct {
Name string
Scheme string
Address string
CABundle string
}{
{Scheme: "http", Address: env.SupervisorHTTPAddress},
{Scheme: "https", Address: env.SupervisorHTTPSIngressAddress, CABundle: env.SupervisorHTTPSIngressCABundle},
{Name: "direct https", Scheme: "https", Address: env.SupervisorHTTPSAddress, CABundle: string(defaultCA.Bundle())},
{Name: "ingress https", Scheme: "https", Address: env.SupervisorHTTPSIngressAddress, CABundle: env.SupervisorHTTPSIngressCABundle},
}
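// Run the discovery checks once per configured address; when a case's address is not set in
// the test environment, that case is skipped below.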
for _, test := range tests {
scheme := test.Scheme
addr := test.Address
caBundle := test.CABundle
test := test
t.Run(test.Name, func(t *testing.T) {
scheme := test.Scheme
addr := test.Address
caBundle := test.CABundle
if addr == "" {
// Both cases are not required, so when one is empty skip it.
continue
}
if addr == "" {
// Both cases are not required, so when one is empty skip it.
t.Skip("no address defined")
}
// Test that there is no default discovery endpoint available when there are no FederationDomains.
requireDiscoveryEndpointsAreNotFound(t, scheme, addr, caBundle, fmt.Sprintf("%s://%s", scheme, addr))
// Define several unique issuer strings. Always use https in the issuer name even when we are accessing the http port.
issuer1 := fmt.Sprintf("https://%s/nested/issuer1", addr)
issuer2 := fmt.Sprintf("https://%s/nested/issuer2", addr)
issuer3 := fmt.Sprintf("https://%s/issuer3", addr)
issuer4 := fmt.Sprintf("https://%s/issuer4", addr)
issuer5 := fmt.Sprintf("https://%s/issuer5", addr)
issuer6 := fmt.Sprintf("https://%s/issuer6", addr)
badIssuer := fmt.Sprintf("https://%s/badIssuer?cannot-use=queries", addr)
// When FederationDomains are created in sequence, they each cause a discovery endpoint to appear only for as long as the FederationDomain exists.
config1, jwks1 := requireCreatingFederationDomainCausesDiscoveryEndpointsToAppear(ctx, t, scheme, addr, caBundle, issuer1, client)
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, config1, client, ns, scheme, addr, caBundle, issuer1)
config2, jwks2 := requireCreatingFederationDomainCausesDiscoveryEndpointsToAppear(ctx, t, scheme, addr, caBundle, issuer2, client)
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, config2, client, ns, scheme, addr, caBundle, issuer2)
// The auto-created JWKs were different from each other.
require.NotEqual(t, jwks1.Keys[0]["x"], jwks2.Keys[0]["x"])
require.NotEqual(t, jwks1.Keys[0]["y"], jwks2.Keys[0]["y"])
// When multiple FederationDomains exist at the same time they each serve a unique discovery endpoint.
config3, jwks3 := requireCreatingFederationDomainCausesDiscoveryEndpointsToAppear(ctx, t, scheme, addr, caBundle, issuer3, client)
config4, jwks4 := requireCreatingFederationDomainCausesDiscoveryEndpointsToAppear(ctx, t, scheme, addr, caBundle, issuer4, client)
requireDiscoveryEndpointsAreWorking(t, scheme, addr, caBundle, issuer3, nil) // discovery for issuer3 is still working after issuer4 started working
// The auto-created JWKs were different from each other.
require.NotEqual(t, jwks3.Keys[0]["x"], jwks4.Keys[0]["x"])
require.NotEqual(t, jwks3.Keys[0]["y"], jwks4.Keys[0]["y"])
// Editing a provider to change the issuer name updates the endpoints that are being served.
updatedConfig4 := editFederationDomainIssuerName(t, config4, client, ns, issuer5)
requireDiscoveryEndpointsAreNotFound(t, scheme, addr, caBundle, issuer4)
jwks5 := requireDiscoveryEndpointsAreWorking(t, scheme, addr, caBundle, issuer5, nil)
// The JWK did not change when the issuer name was updated.
require.Equal(t, jwks4.Keys[0], jwks5.Keys[0])
// When they are deleted they stop serving discovery endpoints.
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, config3, client, ns, scheme, addr, caBundle, issuer3)
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, updatedConfig4, client, ns, scheme, addr, caBundle, issuer5)
// When the same issuer is added twice, both issuers are marked as duplicates, and neither provider is serving.
config6Duplicate1, _ := requireCreatingFederationDomainCausesDiscoveryEndpointsToAppear(ctx, t, scheme, addr, caBundle, issuer6, client)
config6Duplicate2 := testlib.CreateTestFederationDomain(ctx, t, issuer6, "", "")
requireStatus(t, client, ns, config6Duplicate1.Name, v1alpha1.DuplicateFederationDomainStatusCondition)
requireStatus(t, client, ns, config6Duplicate2.Name, v1alpha1.DuplicateFederationDomainStatusCondition)
requireDiscoveryEndpointsAreNotFound(t, scheme, addr, caBundle, issuer6)
// If we delete the first duplicate issuer, the second duplicate issuer starts serving.
requireDelete(t, client, ns, config6Duplicate1.Name)
requireWellKnownEndpointIsWorking(t, scheme, addr, caBundle, issuer6, nil)
requireStatus(t, client, ns, config6Duplicate2.Name, v1alpha1.SuccessFederationDomainStatusCondition)
// When we finally delete all issuers, the endpoint should be down.
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, config6Duplicate2, client, ns, scheme, addr, caBundle, issuer6)
// Only test this for http endpoints because https endpoints are going through an Ingress,
// and while it is possible to configure an Ingress to serve multiple hostnames with matching TLS certs
// for each hostname, that is not something that we felt like doing on all of our clusters that we
// run tests against. :)
if scheme == "http" {
// "Host" headers can be used to send requests to discovery endpoints when the public address is different from the issuer name.
issuer7 := "https://some-issuer-host-and-port-that-doesnt-match-public-supervisor-address.com:2684/issuer7"
config7, _ := requireCreatingFederationDomainCausesDiscoveryEndpointsToAppear(ctx, t, scheme, addr, caBundle, issuer7, client)
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, config7, client, ns, scheme, addr, caBundle, issuer7)
}
// When we create a provider with an invalid issuer, the status is set to invalid.
badConfig := testlib.CreateTestFederationDomain(ctx, t, badIssuer, "", "")
requireStatus(t, client, ns, badConfig.Name, v1alpha1.InvalidFederationDomainStatusCondition)
requireDiscoveryEndpointsAreNotFound(t, scheme, addr, caBundle, badIssuer)
requireDeletingFederationDomainCausesDiscoveryEndpointsToDisappear(t, badConfig, client, ns, scheme, addr, caBundle, badIssuer)
})
}
}

View File

@@ -17,27 +17,6 @@ import (
"go.pinniped.dev/test/testlib"
)
// The Supervisor health endpoint is public because that makes it easier
// for users to create an Ingress for the supervisor on platforms like
// GKE where the Ingress wants to perform a health check. It's somewhere
// between inconvenient and impossible to make that Ingress health check
// happen on a private container port at this time.
// This test checks that it is working and that it is public.
func TestSupervisorHealthz(t *testing.T) {
env := testlib.IntegrationEnv(t)
if env.SupervisorHTTPAddress == "" {
t.Skip("PINNIPED_TEST_SUPERVISOR_HTTP_ADDRESS not defined")
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
httpClient := &http.Client{}
httpGet(ctx, t, httpClient, fmt.Sprintf("http://%s/healthz", env.SupervisorHTTPAddress), http.StatusOK, "ok")
}
// Never run this test in parallel since deleting all federation domains and the default TLS secret is disruptive, see main_test.go.
func TestSupervisorHealthzBootstrap_Disruptive(t *testing.T) {
env := testlib.IntegrationEnv(t)