Merge branch 'main' into upstream_refresh

Ryan Richard 2021-10-18 15:35:46 -07:00
commit d68bebeb49
7 changed files with 34 additions and 17 deletions

View File

@@ -3,7 +3,7 @@
 # Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
-FROM golang:1.17.1 as build-env
+FROM golang:1.17.2 as build-env
 WORKDIR /work
 COPY . .
@@ -24,7 +24,7 @@ RUN \
   ln -s /usr/local/bin/pinniped-server /usr/local/bin/local-user-authenticator
 # Use a distroless runtime image with CA certificates, timezone data, and not much else.
-FROM gcr.io/distroless/static:nonroot@sha256:be5d77c62dbe7fedfb0a4e5ec2f91078080800ab1f18358e5f31fcc8faa023c4
+FROM gcr.io/distroless/static:nonroot@sha256:07869abb445859465749913267a8c7b3b02dc4236fbc896e29ae859e4b360851
 # Copy the server binary from the build-env stage.
 COPY --from=build-env /usr/local/bin /usr/local/bin

View File

@@ -36,9 +36,10 @@ The following table includes the current roadmap for Pinniped. If you have any q
 Last Updated: Sept 2021
 |Theme|Description|Timeline|
 |--|--|--|
-|Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works|Sept 2021|
-|Wider Concierge cluster support|Support for OpenShift cluster types in the Concierge|Sept 2021|
+|Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works for OIDC|Oct 2021|
+|Improving Security Posture|Supervisor token refresh fails when the upstream refresh token no longer works for LDAP/AD|Nov 2021|
 |Multiple IDP support|Support multiple IDPs configured on a single Supervisor|Exploring/Ongoing|
+|Wider Concierge cluster support|Support for OpenShift cluster types in the Concierge|Exploring/Ongoing|
 |Identity transforms|Support prefixing, filtering, or performing coarse-grained checks on upstream users and groups|Exploring/Ongoing|
 |CLI SSO|Support Kerberos-based authentication in the CLI|Exploring/Ongoing|
 |Extended IDP support|Support more types of identity providers on the Supervisor|Exploring/Ongoing|

View File

@@ -14,11 +14,11 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     corev1informers "k8s.io/client-go/informers/core/v1"
     "k8s.io/client-go/kubernetes"
-    "k8s.io/klog/v2"
 
     "go.pinniped.dev/internal/constable"
     pinnipedcontroller "go.pinniped.dev/internal/controller"
     "go.pinniped.dev/internal/controllerlib"
+    "go.pinniped.dev/internal/plog"
 )
 
 type certsExpirerController struct {
@@ -74,7 +74,13 @@ func (c *certsExpirerController) Sync(ctx controllerlib.Context) error {
         return fmt.Errorf("failed to get %s/%s secret: %w", c.namespace, c.certsSecretResourceName, err)
     }
     if notFound {
-        klog.Info("certsExpirerController Sync found that the secret does not exist yet or was deleted")
+        plog.Info("secret does not exist yet or was deleted",
+            "controller", ctx.Name,
+            "namespace", c.namespace,
+            "name", c.certsSecretResourceName,
+            "key", c.secretKey,
+            "renewBefore", c.renewBefore.String(),
+        )
         return nil
     }
 
@@ -85,7 +91,17 @@ func (c *certsExpirerController) Sync(ctx controllerlib.Context) error {
 
     certAge := time.Since(notBefore)
     renewDelta := certAge - c.renewBefore
-    klog.Infof("certsExpirerController Sync found a renew delta of %s", renewDelta)
+    plog.Debug("found renew delta",
+        "controller", ctx.Name,
+        "namespace", c.namespace,
+        "name", c.certsSecretResourceName,
+        "key", c.secretKey,
+        "renewBefore", c.renewBefore.String(),
+        "notBefore", notBefore.String(),
+        "notAfter", notAfter.String(),
+        "certAge", certAge.String(),
+        "renewDelta", renewDelta.String(),
+    )
     if renewDelta >= 0 || time.Now().After(notAfter) {
         err := c.k8sClient.
             CoreV1().
@@ -107,9 +123,7 @@ func (c *certsExpirerController) Sync(ctx controllerlib.Context) error {
 }
 
 // getCertBounds returns the NotBefore and NotAfter fields of the TLS
-// certificate in the provided secret, or an error. Not that it expects the
-// provided secret to contain the well-known data keys from this package (see
-// certs_manager.go).
+// certificate in the provided secret, or an error.
 func (c *certsExpirerController) getCertBounds(secret *corev1.Secret) (time.Time, time.Time, error) {
     certPEM := secret.Data[c.secretKey]
     if certPEM == nil {

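The renewal decision that these new structured log fields describe is plain duration arithmetic: the controller deletes the certificate's secret (forcing regeneration) once the cert's age reaches the renewBefore threshold, or once the cert has already expired. A minimal runnable sketch of that check, with hypothetical values; the helper name is illustrative and not part of the controller:

package main

import (
    "fmt"
    "time"
)

// shouldRenew mirrors the decision in the Sync hunk above: renew once
// certAge - renewBefore >= 0, or once the cert is already expired.
func shouldRenew(notBefore, notAfter time.Time, renewBefore time.Duration) bool {
    certAge := time.Since(notBefore)
    renewDelta := certAge - renewBefore
    return renewDelta >= 0 || time.Now().After(notAfter)
}

func main() {
    // Hypothetical one-year cert issued 30 days ago.
    notBefore := time.Now().Add(-30 * 24 * time.Hour)
    notAfter := notBefore.Add(365 * 24 * time.Hour)

    fmt.Println(shouldRenew(notBefore, notAfter, 60*24*time.Hour)) // false: 30d age < 60d threshold
    fmt.Println(shouldRenew(notBefore, notAfter, 7*24*time.Hour))  // true: 30d age >= 7d threshold
}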
View File

@@ -51,7 +51,7 @@ const (
     impersonationProxyPort = 8444
     defaultHTTPSPort = 443
     approximatelyOneHundredYears = 100 * 365 * 24 * time.Hour
-    caCommonName = "Pinniped Impersonation Proxy CA"
+    caCommonName = "Pinniped Impersonation Proxy Serving CA"
     caCrtKey = "ca.crt"
     caKeyKey = "ca.key"
     appLabelKey = "app"

View File

@@ -1055,7 +1055,7 @@ func TestImpersonatorConfigControllerSync(t *testing.T) {
     require.NotNil(t, block)
     caCert, err := x509.ParseCertificate(block.Bytes)
     require.NoError(t, err)
-    require.Equal(t, "Pinniped Impersonation Proxy CA", caCert.Subject.CommonName)
+    require.Equal(t, "Pinniped Impersonation Proxy Serving CA", caCert.Subject.CommonName)
     require.WithinDuration(t, time.Now().Add(-5*time.Minute), caCert.NotBefore, 10*time.Second)
     require.WithinDuration(t, time.Now().Add(100*time.Hour*24*365), caCert.NotAfter, 10*time.Second)
     return createdCertPEM

View File

@@ -148,7 +148,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
         controllerlib.WithInformer,
         controllerlib.WithInitialEvent,
         c.ServingCertDuration,
-        "Pinniped CA",
+        "Pinniped Aggregation CA",
         c.NamesConfig.APIService,
     ),
     singletonWorker,
@@ -285,7 +285,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
         controllerlib.WithInformer,
         controllerlib.WithInitialEvent,
         365*24*time.Hour, // 1 year hard coded value
-        "Pinniped Impersonation Proxy CA",
+        "Pinniped Impersonation Proxy Signer CA",
         "", // optional, means do not give me a serving cert
     ),
     singletonWorker,
@@ -297,7 +297,7 @@ func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
         client.Kubernetes,
         informers.installationNamespaceK8s.Core().V1().Secrets(),
         controllerlib.WithInformer,
-        c.ServingCertRenewBefore,
+        365*24*time.Hour-time.Hour, // 1 year minus 1 hour hard coded value (i.e. wait until the last moment to break the signer)
         apicerts.CACertificateSecretKey,
     ),
     singletonWorker,

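To make these hard-coded values concrete: the signer CA is issued for one year (the 365*24*time.Hour a few lines earlier), and the expirer threshold is one year minus one hour, so the certsExpirerController only rotates the signer during its final hour of validity, which is what the inline comment means by waiting until the last moment. A small standalone sketch of the arithmetic, illustrative only:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Values from the diff above.
    certDuration := 365 * 24 * time.Hour        // signer CA lifetime
    renewBefore := 365*24*time.Hour - time.Hour // expirer threshold

    // The expirer fires once certAge >= renewBefore, so the remaining
    // validity of the old signer when rotation begins is:
    fmt.Println(certDuration - renewBefore) // 1h0m0s
}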
View File

@@ -27,8 +27,10 @@ The Pinniped Concierge has two strategies available to support clusters, under t
    This type of cluster is typically called "self-hosted" because the cluster's control plane is running on nodes that are part of the cluster itself.
    Most managed Kubernetes services do not support this.
-2. Impersonation Proxy: Can be run on any Kubernetes cluster where a `LoadBalancer` service can be created. Most cloud-hosted Kubernetes environments have this
-   capability. The Impersonation Proxy automatically provisions a `LoadBalancer` for ingress to the impersonation endpoint.
+2. Impersonation Proxy: Can be run on any Kubernetes cluster. The default configuration requires that a `LoadBalancer` service can be created. Most cloud-hosted Kubernetes environments have this
+   capability. When `spec.impersonationProxy.mode` is set to `auto`, the Impersonation Proxy automatically provisions a `LoadBalancer` for ingress to the impersonation endpoint. Users who wish to use the impersonation proxy without an automatically
+   configured `LoadBalancer` can do so with an automatically provisioned `ClusterIP` or with a Service that they provision themselves. These options
+   can be configured in the spec of the [`CredentialIssuer`](https://github.com/vmware-tanzu/pinniped/blob/main/generated/1.20/README.adoc#credentialissuer).
 If a cluster is capable of supporting both strategies, the Pinniped CLI will use the
 token credential request API strategy by default.
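Since the new docs text points at the `CredentialIssuer` spec without showing one, here is a hypothetical sketch of what such a configuration might look like. Only `spec.impersonationProxy.mode: auto` is named in the text above; the `apiVersion`, `kind`, metadata, and `service.type` spellings are assumptions to verify against the generated API reference linked in the diff:

# Hypothetical sketch; verify field names against the generated
# CredentialIssuer reference linked above.
apiVersion: config.concierge.pinniped.dev/v1alpha1
kind: CredentialIssuer
metadata:
  name: pinniped-concierge-config
spec:
  impersonationProxy:
    mode: auto        # named in the docs text above
    service:
      type: ClusterIP # assumed spelling for the "automatically provisioned ClusterIP" option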