Allow configuration of supervisor endpoints

This change allows configuration of the http and https listeners
used by the supervisor.

TCP (IPv4 and IPv6, on any interface and port) and Unix domain
socket-based listeners are supported.  Listeners may also be
disabled.

Binding the http listener to TCP addresses other than 127.0.0.1 or
::1 is deprecated.

The deployment now uses https health checks.  The supervisor is
always able to complete a TLS connection because it can fall back to
a bootstrap certificate signed by an in-memory certificate authority.

To support sidecar containers used by service meshes, Unix domain
socket-based listeners include ACLs that allow writes to the socket
file from any runAsUser specified in the pod's containers.

Signed-off-by: Monis Khan <mok@vmware.com>
Monis Khan 2021-12-15 15:48:55 -05:00
parent 01a7978387
commit 1e1789f6d1
20 changed files with 705 additions and 177 deletions


@@ -2,8 +2,17 @@
 #! SPDX-License-Identifier: Apache-2.0
 #@ load("@ytt:data", "data")
-#@ load("@ytt:json", "json")
-#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel")
+#@ load("@ytt:yaml", "yaml")
+#@ load("helpers.lib.yaml",
+#@   "defaultLabel",
+#@   "labels",
+#@   "deploymentPodLabel",
+#@   "namespace",
+#@   "defaultResourceName",
+#@   "defaultResourceNameWithSuffix",
+#@   "getPinnipedConfigMapData",
+#@   "hasUnixNetworkEndpoint",
+#@ )
 #@ load("@ytt:template", "template")
 #@ if not data.values.into_namespace:
@@ -30,14 +39,7 @@ metadata:
   labels: #@ labels()
 data:
   #@yaml/text-templated-strings
-  pinniped.yaml: |
-    apiGroupSuffix: (@= data.values.api_group_suffix @)
-    names:
-      defaultTLSCertificateSecret: (@= defaultResourceNameWithSuffix("default-tls-certificate") @)
-    labels: (@= json.encode(labels()).rstrip() @)
-    (@ if data.values.log_level: @)
-    logLevel: (@= getAndValidateLogLevel() @)
-    (@ end @)
+  pinniped.yaml: #@ yaml.encode(getPinnipedConfigMapData())
 ---
 #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
 apiVersion: v1
@@ -107,6 +109,11 @@ spec:
             - name: podinfo
               mountPath: /etc/podinfo
               readOnly: true
+            #@ if hasUnixNetworkEndpoint():
+            - name: socket
+              mountPath: /pinniped_socket
+              readOnly: false  #! writable to allow for socket use
+            #@ end
           ports:
             - containerPort: 8080
               protocol: TCP
@@ -124,8 +131,8 @@ spec:
           livenessProbe:
             httpGet:
               path: /healthz
-              port: 8080
-              scheme: HTTP
+              port: 8443
+              scheme: HTTPS
             initialDelaySeconds: 2
             timeoutSeconds: 15
             periodSeconds: 10
@@ -133,8 +140,8 @@ spec:
           readinessProbe:
             httpGet:
               path: /healthz
-              port: 8080
-              scheme: HTTP
+              port: 8443
+              scheme: HTTPS
             initialDelaySeconds: 2
             timeoutSeconds: 3
             periodSeconds: 10
@@ -155,6 +162,10 @@ spec:
             - path: "name"
               fieldRef:
                 fieldPath: metadata.name
+        #@ if hasUnixNetworkEndpoint():
+        - name: socket
+          emptyDir: {}
+        #@ end
       #! This will help make sure our multiple pods run on different nodes, making
       #! our deployment "more" "HA".
       affinity:


@@ -44,3 +44,37 @@ _: #@ template.replace(data.values.custom_labels)
 #@   end
 #@   return log_level
 #@ end
+
+#@ def getPinnipedConfigMapData():
+#@   config = {
+#@     "apiGroupSuffix": data.values.api_group_suffix,
+#@     "names": {
+#@       "defaultTLSCertificateSecret": defaultResourceNameWithSuffix("default-tls-certificate"),
+#@     },
+#@     "labels": labels(),
+#@   }
+#@   if data.values.log_level:
+#@     config["logLevel"] = getAndValidateLogLevel()
+#@   end
+#@   if data.values.endpoints:
+#@     config["endpoints"] = data.values.endpoints
+#@   end
+#@   return config
+#@ end
+
+#@ def getattr_safe(val, *args):
+#@   out = None
+#@   for arg in args:
+#@     if not hasattr(val, arg):
+#@       return None
+#@     end
+#@     out = getattr(val, arg)
+#@     val = out
+#@   end
+#@   return out
+#@ end
+
+#@ def hasUnixNetworkEndpoint():
+#@   return getattr_safe(data.values.endpoints, "http", "network") == "unix" or \
+#@     getattr_safe(data.values.endpoints, "https", "network") == "unix"
+#@ end
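For orientation, here is an illustrative sketch (not part of the diff) of what `yaml.encode(getPinnipedConfigMapData())` would render into the ConfigMap's `pinniped.yaml` key, assuming default data values plus an explicit `endpoints` setting; the exact `labels` content depends on the installation's custom labels:

```yaml
apiGroupSuffix: pinniped.dev
names:
  defaultTLSCertificateSecret: pinniped-supervisor-default-tls-certificate
labels:
  app: pinniped-supervisor  # plus any custom labels
endpoints:
  https:
    network: tcp
    address: :8443
```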


@@ -73,3 +73,48 @@ api_group_suffix: pinniped.dev
 #! Optional.
 https_proxy: #! e.g. http://proxy.example.com
 no_proxy: "$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost,.svc,.cluster.local" #! do not proxy Kubernetes endpoints
+
+#! Control the https and http listeners of the Supervisor.
+#!
+#! The schema of this config is as follows:
+#!
+#!   endpoints:
+#!     https:
+#!       network: tcp | unix | disabled
+#!       address: interface:port when network=tcp or /pinniped_socket/socketfile.sock when network=unix
+#!     http:
+#!       network: same as above
+#!       address: same as above
+#!
+#! Setting network to disabled turns off that particular listener.
+#! See https://pkg.go.dev/net#Listen and https://pkg.go.dev/net#Dial for a description of what can be
+#! specified in the address parameter based on the given network parameter. To aid in the use of Unix
+#! domain sockets, a writable empty dir volume is mounted at /pinniped_socket when network is set to "unix".
+#!
+#! The current defaults are:
+#!
+#!   endpoints:
+#!     https:
+#!       network: tcp
+#!       address: :8443
+#!     http:
+#!       network: tcp
+#!       address: :8080
+#!
+#! These defaults mean: bind to all interfaces using TCP. Use port 8443 for https and 8080 for http.
+#! The defaults will change over time. Users should explicitly set this value if they wish to avoid
+#! any changes on upgrade.
+#!
+#! A future version of the Supervisor app may include a breaking change to adjust the default
+#! behavior of the http listener to only listen on 127.0.0.1 (or perhaps even to be disabled).
+#!
+#! Binding the http listener to addresses other than 127.0.0.1 or ::1 is deprecated.
+#!
+#! Unix domain sockets are recommended for integrations with service meshes. Ingresses that terminate
+#! TLS connections at the edge should re-encrypt the data and route traffic to the https listener.
+#!
+#! Changing the port numbers used must be accompanied by matching changes to the Service and Deployment
+#! manifests. Changes to the https listener must be coordinated with the deployment health checks.
+#!
+#! Optional.
+endpoints:
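As a hedged usage sketch of this schema (the socket path matches the one used elsewhere in this commit): to serve http on a Unix domain socket behind a service-mesh sidecar while keeping the default https listener, a user could set:

```yaml
endpoints:
  https:
    network: tcp
    address: :8443
  http:
    network: unix
    address: /pinniped_socket/socketfile.sock
```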

go.mod

@@ -52,6 +52,7 @@ require (
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/securecookie v1.1.1
 	github.com/gorilla/websocket v1.4.2
+	github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
 	github.com/ory/fosite v0.41.0
 	github.com/ory/x v0.0.331
@@ -126,6 +127,7 @@ require (
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
+	github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect

go.sum

@@ -1203,6 +1203,10 @@ github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9q
 github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4=
+github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg=
+github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY=
+github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d/go.mod h1:b+Q3v8Yrg5o15d71PSUraUzYb+jWl6wQMSBXSGS/hv0=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=


@@ -289,6 +289,9 @@ ytt --file . \
   --data-value-yaml 'service_https_nodeport_nodeport=31243' \
   --data-value-yaml 'service_https_clusterip_port=443' \
   >"$manifest"
+# example of how to disable the http endpoint
+# this is left enabled for now because our integration tests still rely on it
+# --data-value-yaml 'endpoints={"http": {"network": "disabled"}}' \

 kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
 kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.


@@ -18,6 +18,12 @@ import (
 	"go.pinniped.dev/internal/plog"
 )

+const (
+	NetworkDisabled = "disabled"
+	NetworkUnix     = "unix"
+	NetworkTCP      = "tcp"
+)
+
 // FromPath loads an Config from a provided local file path, inserts any
 // defaults (from the Config documentation), and verifies that the config is
 // valid (Config documentation).
@@ -50,9 +56,40 @@ func FromPath(path string) (*Config, error) {
 		return nil, fmt.Errorf("validate log level: %w", err)
 	}

+	// support setting this to null or {} or empty in the YAML
+	if config.Endpoints == nil {
+		config.Endpoints = &Endpoints{}
+	}
+
+	maybeSetEndpointDefault(&config.Endpoints.HTTPS, Endpoint{
+		Network: NetworkTCP,
+		Address: ":8443",
+	})
+	maybeSetEndpointDefault(&config.Endpoints.HTTP, Endpoint{
+		Network: NetworkTCP,
+		Address: ":8080",
+	})
+
+	if err := validateEndpoint(*config.Endpoints.HTTPS); err != nil {
+		return nil, fmt.Errorf("validate https endpoint: %w", err)
+	}
+	if err := validateEndpoint(*config.Endpoints.HTTP); err != nil {
+		return nil, fmt.Errorf("validate http endpoint: %w", err)
+	}
+	if err := validateAtLeastOneEnabledEndpoint(*config.Endpoints.HTTPS, *config.Endpoints.HTTP); err != nil {
+		return nil, fmt.Errorf("validate endpoints: %w", err)
+	}
+
 	return &config, nil
 }

+func maybeSetEndpointDefault(endpoint **Endpoint, defaultEndpoint Endpoint) {
+	if *endpoint != nil {
+		return
+	}
+
+	*endpoint = &defaultEndpoint
+}
+
 func maybeSetAPIGroupSuffixDefault(apiGroupSuffix **string) {
 	if *apiGroupSuffix == nil {
 		*apiGroupSuffix = pointer.StringPtr(groupsuffix.PinnipedDefaultSuffix)
@@ -73,3 +110,29 @@ func validateNames(names *NamesConfigSpec) error {
 	}
 	return nil
 }
+
+func validateEndpoint(endpoint Endpoint) error {
+	switch n := endpoint.Network; n {
+	case NetworkTCP, NetworkUnix:
+		if len(endpoint.Address) == 0 {
+			return fmt.Errorf("address must be set with %q network", n)
+		}
+		return nil
+	case NetworkDisabled:
+		if len(endpoint.Address) != 0 {
+			return fmt.Errorf("address set to %q when disabled, should be empty", endpoint.Address)
+		}
+		return nil
+	default:
+		return fmt.Errorf("unknown network %q", n)
+	}
+}
+
+func validateAtLeastOneEnabledEndpoint(endpoints ...Endpoint) error {
+	for _, endpoint := range endpoints {
+		if endpoint.Network != NetworkDisabled {
+			return nil
+		}
+	}
+
+	return constable.Error("all endpoints are disabled")
+}
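To make the validation rules concrete, here is a sketch of `endpoints` stanzas as they would appear in the supervisor's config file, annotated with the outcome `FromPath` would produce for each (these mirror the table-driven tests in the next file):

```yaml
# Valid: at least one listener is enabled, and every enabled
# listener has an address; a disabled listener must omit its address.
endpoints:
  https:
    network: unix
    address: :1234
  http:
    network: disabled

# Invalid variants (each makes FromPath return an error):
#   network: foo                    -> unknown network "foo"
#   network: tcp with no address    -> address must be set with "tcp" network
#   network: disabled, address: wee -> address set to "wee" when disabled, should be empty
#   both listeners disabled         -> all endpoints are disabled
```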


@@ -32,6 +32,12 @@ func TestFromPath(t *testing.T) {
 				  myLabelKey2: myLabelValue2
 				names:
 				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  https:
+				    network: unix
+				    address: :1234
+				  http:
+				    network: disabled
 			`),
 			wantConfig: &Config{
 				APIGroupSuffix: pointer.StringPtr("some.suffix.com"),
@@ -42,6 +48,15 @@ func TestFromPath(t *testing.T) {
 				NamesConfig: NamesConfigSpec{
 					DefaultTLSCertificateSecret: "my-secret-name",
 				},
+				Endpoints: &Endpoints{
+					HTTPS: &Endpoint{
+						Network: "unix",
+						Address: ":1234",
+					},
+					HTTP: &Endpoint{
+						Network: "disabled",
+					},
+				},
 			},
 		},
 		{
@@ -57,8 +72,97 @@ func TestFromPath(t *testing.T) {
 				NamesConfig: NamesConfigSpec{
 					DefaultTLSCertificateSecret: "my-secret-name",
 				},
+				Endpoints: &Endpoints{
+					HTTPS: &Endpoint{
+						Network: "tcp",
+						Address: ":8443",
+					},
+					HTTP: &Endpoint{
+						Network: "tcp",
+						Address: ":8080",
+					},
+				},
 			},
 		},
+		{
+			name: "all endpoints disabled",
+			yaml: here.Doc(`
+				---
+				names:
+				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  https:
+				    network: disabled
+				  http:
+				    network: disabled
+			`),
+			wantError: "validate endpoints: all endpoints are disabled",
+		},
+		{
+			name: "invalid https endpoint",
+			yaml: here.Doc(`
+				---
+				names:
+				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  https:
+				    network: foo
+				  http:
+				    network: disabled
+			`),
+			wantError: `validate https endpoint: unknown network "foo"`,
+		},
+		{
+			name: "invalid http endpoint",
+			yaml: here.Doc(`
+				---
+				names:
+				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  https:
+				    network: disabled
+				  http:
+				    network: bar
+			`),
+			wantError: `validate http endpoint: unknown network "bar"`,
+		},
+		{
+			name: "endpoint disabled with non-empty address",
+			yaml: here.Doc(`
+				---
+				names:
+				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  https:
+				    network: disabled
+				    address: wee
+			`),
+			wantError: `validate https endpoint: address set to "wee" when disabled, should be empty`,
+		},
+		{
+			name: "endpoint tcp with empty address",
+			yaml: here.Doc(`
+				---
+				names:
+				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  http:
+				    network: tcp
+			`),
+			wantError: `validate http endpoint: address must be set with "tcp" network`,
+		},
+		{
+			name: "endpoint unix with empty address",
+			yaml: here.Doc(`
+				---
+				names:
+				  defaultTLSCertificateSecret: my-secret-name
+				endpoints:
+				  https:
+				    network: unix
+			`),
+			wantError: `validate https endpoint: address must be set with "unix" network`,
+		},
 		{
 			name: "Missing defaultTLSCertificateSecret name",
 			yaml: here.Doc(`


@@ -11,9 +11,20 @@ type Config struct {
 	Labels         map[string]string `json:"labels"`
 	NamesConfig    NamesConfigSpec   `json:"names"`
 	LogLevel       plog.LogLevel     `json:"logLevel"`
+	Endpoints      *Endpoints        `json:"endpoints"`
 }

 // NamesConfigSpec configures the names of some Kubernetes resources for the Supervisor.
 type NamesConfigSpec struct {
 	DefaultTLSCertificateSecret string `json:"defaultTLSCertificateSecret"`
 }
+
+type Endpoints struct {
+	HTTPS *Endpoint `json:"https,omitempty"`
+	HTTP  *Endpoint `json:"http,omitempty"`
+}
+
+type Endpoint struct {
+	Network string `json:"network"`
+	Address string `json:"address"`
+}


@@ -101,7 +101,7 @@ type Config struct {
 func PrepareControllers(c *Config) (controllerinit.RunnerBuilder, error) {
 	loginConciergeGroupData, identityConciergeGroupData := groupsuffix.ConciergeAggregatedGroups(c.APIGroupSuffix)

-	dref, deployment, err := deploymentref.New(c.ServerInstallationInfo)
+	dref, deployment, _, err := deploymentref.New(c.ServerInstallationInfo)
 	if err != nil {
 		return nil, fmt.Errorf("cannot create deployment ref: %w", err)
 	}


@@ -9,6 +9,7 @@ import (
 	"time"

 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"

@@ -31,24 +32,24 @@ var getTempClient = func() (kubernetes.Interface, error) {
 	return client.Kubernetes, nil
 }

-func New(podInfo *downward.PodInfo) (kubeclient.Option, *appsv1.Deployment, error) {
+func New(podInfo *downward.PodInfo) (kubeclient.Option, *appsv1.Deployment, *corev1.Pod, error) {
 	tempClient, err := getTempClient()
 	if err != nil {
-		return nil, nil, fmt.Errorf("cannot create temp client: %w", err)
+		return nil, nil, nil, fmt.Errorf("cannot create temp client: %w", err)
 	}

-	deployment, err := getDeployment(tempClient, podInfo)
+	deployment, pod, err := getDeploymentAndPod(tempClient, podInfo)
 	if err != nil {
-		return nil, nil, fmt.Errorf("cannot get deployment: %w", err)
+		return nil, nil, nil, fmt.Errorf("cannot get deployment: %w", err)
 	}

 	// work around stupid behavior of WithoutVersionDecoder.Decode
 	deployment.APIVersion, deployment.Kind = appsv1.SchemeGroupVersion.WithKind("Deployment").ToAPIVersionAndKind()

-	return kubeclient.WithMiddleware(ownerref.New(deployment)), deployment, nil
+	return kubeclient.WithMiddleware(ownerref.New(deployment)), deployment, pod, nil
 }

-func getDeployment(kubeClient kubernetes.Interface, podInfo *downward.PodInfo) (*appsv1.Deployment, error) {
+func getDeploymentAndPod(kubeClient kubernetes.Interface, podInfo *downward.PodInfo) (*appsv1.Deployment, *corev1.Pod, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()

@@ -56,28 +57,28 @@ func getDeployment(kubeClient kubernetes.Interface, podInfo *downward.PodInfo) (
 	pod, err := kubeClient.CoreV1().Pods(ns).Get(ctx, podInfo.Name, metav1.GetOptions{})
 	if err != nil {
-		return nil, fmt.Errorf("could not get pod: %w", err)
+		return nil, nil, fmt.Errorf("could not get pod: %w", err)
 	}

 	podOwner := metav1.GetControllerOf(pod)
 	if podOwner == nil {
-		return nil, fmt.Errorf("pod %s/%s is missing owner", ns, podInfo.Name)
+		return nil, nil, fmt.Errorf("pod %s/%s is missing owner", ns, podInfo.Name)
 	}

 	rs, err := kubeClient.AppsV1().ReplicaSets(ns).Get(ctx, podOwner.Name, metav1.GetOptions{})
 	if err != nil {
-		return nil, fmt.Errorf("could not get replicaset: %w", err)
+		return nil, nil, fmt.Errorf("could not get replicaset: %w", err)
 	}

 	rsOwner := metav1.GetControllerOf(rs)
 	if rsOwner == nil {
-		return nil, fmt.Errorf("replicaset %s/%s is missing owner", ns, podInfo.Name)
+		return nil, nil, fmt.Errorf("replicaset %s/%s is missing owner", ns, podInfo.Name)
 	}

 	d, err := kubeClient.AppsV1().Deployments(ns).Get(ctx, rsOwner.Name, metav1.GetOptions{})
 	if err != nil {
-		return nil, fmt.Errorf("could not get deployment: %w", err)
+		return nil, nil, fmt.Errorf("could not get deployment: %w", err)
 	}

-	return d, nil
+	return d, pod, nil
 }


@@ -31,6 +31,18 @@ func TestNew(t *testing.T) {
 			Name:      "some-name",
 		},
 	}

+	goodPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "some-namespace",
+			Name:      "some-name-rsname-podhash",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					Controller: &troo,
+					Name:       "some-name-rsname",
+				},
+			},
+		},
+	}
+
 	tests := []struct {
 		name            string
 		apiObjects      []runtime.Object
@@ -38,6 +50,7 @@ func TestNew(t *testing.T) {
 		createClientErr error
 		podInfo         *downward.PodInfo
 		wantDeployment  *appsv1.Deployment
+		wantPod         *corev1.Pod
 		wantError       string
 	}{
 		{
@@ -56,24 +69,14 @@ func TestNew(t *testing.T) {
 					},
 				},
 			},
-				&corev1.Pod{
-					ObjectMeta: metav1.ObjectMeta{
-						Namespace: "some-namespace",
-						Name:      "some-name-rsname-podhash",
-						OwnerReferences: []metav1.OwnerReference{
-							{
-								Controller: &troo,
-								Name:       "some-name-rsname",
-							},
-						},
-					},
-				},
+				goodPod,
 			},
 			podInfo: &downward.PodInfo{
 				Namespace: "some-namespace",
 				Name:      "some-name-rsname-podhash",
 			},
 			wantDeployment: goodDeployment,
+			wantPod:        goodPod,
 		},
 		{
 			name: "failed to create client",
@@ -114,7 +117,7 @@ func TestNew(t *testing.T) {
 				return client, test.createClientErr
 			}

-			_, d, err := New(test.podInfo)
+			_, d, p, err := New(test.podInfo)
 			if test.wantError != "" {
 				require.EqualError(t, err, test.wantError)
 				return
@@ -122,6 +125,7 @@ func TestNew(t *testing.T) {
 			require.NoError(t, err)

 			require.Equal(t, test.wantDeployment, d)
+			require.Equal(t, test.wantPod, p)
 		})
 	}
 }


@@ -0,0 +1,66 @@
+// Copyright 2021 the Pinniped contributors. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package server
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"go.uber.org/atomic"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"go.pinniped.dev/internal/certauthority"
+)
+
+// contextKey type is unexported to prevent collisions.
+type contextKey int
+
+const bootstrapKey contextKey = iota
+
+func withBootstrapConnCtx(ctx context.Context, _ net.Conn) context.Context {
+	isBootstrap := atomic.NewBool(false) // safe for concurrent access
+	return context.WithValue(ctx, bootstrapKey, isBootstrap)
+}
+
+func setIsBootstrapConn(ctx context.Context) {
+	isBootstrap, _ := ctx.Value(bootstrapKey).(*atomic.Bool)
+	if isBootstrap == nil {
+		return
+	}
+	isBootstrap.Store(true)
+}
+
+func withBootstrapPaths(handler http.Handler, paths ...string) http.Handler {
+	bootstrapPaths := sets.NewString(paths...)
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		isBootstrap, _ := req.Context().Value(bootstrapKey).(*atomic.Bool)
+
+		if isBootstrap != nil && isBootstrap.Load() && !bootstrapPaths.Has(req.URL.Path) {
+			http.Error(w, "pinniped supervisor has invalid TLS serving certificate configuration", http.StatusInternalServerError)
+			return
+		}
+
+		handler.ServeHTTP(w, req)
+	})
+}
+
+func getBootstrapCert() (*tls.Certificate, error) {
+	const forever = 10 * 365 * 24 * time.Hour
+
+	bootstrapCA, err := certauthority.New("pinniped-supervisor-bootstrap-ca", forever)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create bootstrap CA: %w", err)
+	}
+
+	bootstrapCert, err := bootstrapCA.IssueServerCert([]string{"pinniped-supervisor-bootstrap-cert"}, nil, forever)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create bootstrap cert: %w", err)
+	}
+
+	return bootstrapCert, nil // this is just enough to complete a TLS handshake, trust distribution does not matter
+}


@@ -13,11 +13,13 @@ import (
 	"net/http"
 	"os"
 	"os/signal"
+	"strconv"
 	"strings"
 	"sync"
 	"syscall"
 	"time"

+	"github.com/joshlf/go-acl"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
@@ -61,7 +63,13 @@
 )

 func startServer(ctx context.Context, shutdown *sync.WaitGroup, l net.Listener, handler http.Handler) {
-	server := http.Server{Handler: genericapifilters.WithWarningRecorder(handler)}
+	handler = genericapifilters.WithWarningRecorder(handler)
+	handler = withBootstrapPaths(handler, "/healthz") // only health checks are allowed for bootstrap connections
+
+	server := http.Server{
+		Handler:     handler,
+		ConnContext: withBootstrapConnCtx,
+	}

 	shutdown.Add(1)
 	go func() {
@@ -306,10 +314,11 @@ func startControllers(ctx context.Context, shutdown *sync.WaitGroup, buildContro
 	return nil
 }

+//nolint:funlen
 func runSupervisor(podInfo *downward.PodInfo, cfg *supervisor.Config) error {
 	serverInstallationNamespace := podInfo.Namespace

-	dref, supervisorDeployment, err := deploymentref.New(podInfo)
+	dref, supervisorDeployment, supervisorPod, err := deploymentref.New(podInfo)
 	if err != nil {
 		return fmt.Errorf("cannot create deployment ref: %w", err)
 	}
@@ -387,40 +396,76 @@
 		return err
 	}

-	//nolint: gosec // Intentionally binding to all network interfaces.
-	httpListener, err := net.Listen("tcp", ":8080")
-	if err != nil {
-		return fmt.Errorf("cannot create listener: %w", err)
-	}
-	defer func() { _ = httpListener.Close() }()
-	startServer(ctx, shutdown, httpListener, oidProvidersManager)
-
-	c := ptls.Default(nil)
-	c.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
-		cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
-		defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
-		plog.Debug("GetCertificate called for port 8443",
-			"info.ServerName", info.ServerName,
-			"foundSNICert", cert != nil,
-			"foundDefaultCert", defaultCert != nil,
-		)
-		if cert == nil {
-			cert = defaultCert
-		}
-		return cert, nil
-	}
-
-	//nolint: gosec // Intentionally binding to all network interfaces.
-	httpsListener, err := tls.Listen("tcp", ":8443", c)
-	if err != nil {
-		return fmt.Errorf("cannot create listener: %w", err)
-	}
-	defer func() { _ = httpsListener.Close() }()
-	startServer(ctx, shutdown, httpsListener, oidProvidersManager)
-
-	plog.Debug("supervisor is ready",
-		"httpAddress", httpListener.Addr().String(),
-		"httpsAddress", httpsListener.Addr().String(),
-	)
+	if e := cfg.Endpoints.HTTP; e.Network != supervisor.NetworkDisabled {
+		finishSetupPerms := maybeSetupUnixPerms(e, supervisorPod)
+
+		httpListener, err := net.Listen(e.Network, e.Address)
+		if err != nil {
+			return fmt.Errorf("cannot create http listener with network %q and address %q: %w", e.Network, e.Address, err)
+		}
+
+		if err := finishSetupPerms(); err != nil {
+			return fmt.Errorf("cannot setup http listener permissions for network %q and address %q: %w", e.Network, e.Address, err)
+		}
+
+		defer func() { _ = httpListener.Close() }()
+		startServer(ctx, shutdown, httpListener, oidProvidersManager)
+		plog.Debug("supervisor http listener started", "address", httpListener.Addr().String())
+	}
+
+	if e := cfg.Endpoints.HTTPS; e.Network != supervisor.NetworkDisabled { //nolint:nestif
+		finishSetupPerms := maybeSetupUnixPerms(e, supervisorPod)
+
+		bootstrapCert, err := getBootstrapCert() // generate this in-memory once per process startup
+		if err != nil {
+			return fmt.Errorf("https listener bootstrap error: %w", err)
+		}
+
+		c := ptls.Default(nil)
+		c.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
+			cert := dynamicTLSCertProvider.GetTLSCert(strings.ToLower(info.ServerName))
+
+			defaultCert := dynamicTLSCertProvider.GetDefaultTLSCert()
+
+			if plog.Enabled(plog.LevelTrace) { // minor CPU optimization as this is generally just noise
+				host, port, _ := net.SplitHostPort(info.Conn.LocalAddr().String()) // error is safe to ignore here
+
+				plog.Trace("GetCertificate called",
+					"info.ServerName", info.ServerName,
+					"foundSNICert", cert != nil,
+					"foundDefaultCert", defaultCert != nil,
+					"host", host,
+					"port", port,
+				)
+			}
+
+			if cert == nil {
+				cert = defaultCert
+			}
+
+			if cert == nil {
+				setIsBootstrapConn(info.Context()) // make this connection only work for bootstrap requests
+				cert = bootstrapCert
+			}
+
+			return cert, nil
+		}
+
+		httpsListener, err := tls.Listen(e.Network, e.Address, c)
+		if err != nil {
+			return fmt.Errorf("cannot create https listener with network %q and address %q: %w", e.Network, e.Address, err)
+		}
+
+		if err := finishSetupPerms(); err != nil {
+			return fmt.Errorf("cannot setup https listener permissions for network %q and address %q: %w", e.Network, e.Address, err)
+		}
+
+		defer func() { _ = httpsListener.Close() }()
+		startServer(ctx, shutdown, httpsListener, oidProvidersManager)
+		plog.Debug("supervisor https listener started", "address", httpsListener.Addr().String())
+	}
+
+	plog.Debug("supervisor started")
 	defer plog.Debug("supervisor exiting")

 	shutdown.Wait()
@@ -428,6 +473,37 @@
 	return nil
 }

+func maybeSetupUnixPerms(endpoint *supervisor.Endpoint, pod *corev1.Pod) func() error {
+	if endpoint.Network != supervisor.NetworkUnix {
+		return func() error { return nil }
+	}
+
+	_ = os.Remove(endpoint.Address) // empty dir volumes persist across container crashes
+
+	return func() error {
+		selfUser := int64(os.Getuid())
+		var entries []acl.Entry
+		for _, container := range pod.Spec.Containers {
+			if container.SecurityContext == nil ||
+				container.SecurityContext.RunAsUser == nil ||
+				*container.SecurityContext.RunAsUser == selfUser {
+				continue
+			}
+
+			plog.Debug("adding write permission",
+				"address", endpoint.Address,
+				"uid", *container.SecurityContext.RunAsUser,
+			)
+			entries = append(entries, acl.Entry{
+				Tag:       acl.TagUser,
+				Qualifier: strconv.FormatInt(*container.SecurityContext.RunAsUser, 10),
+				Perms:     2, // write permission
+			})
+		}
+		return acl.Add(endpoint.Address, entries...) // allow all containers in the pod to write to the socket
+	}
+}
+
 func main() error { // return an error instead of klog.Fatal to allow defer statements to run
 	logs.InitLogs()
 	defer logs.FlushLogs()
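As an illustration of what `maybeSetupUnixPerms` does at runtime (a sketch; the sidecar container name and UIDs below are hypothetical): for every container in the pod whose `securityContext.runAsUser` differs from the supervisor's own UID, the supervisor adds a POSIX ACL entry granting that UID write access to the socket file, so a mesh sidecar like the one below can proxy traffic into the socket.

```yaml
spec:
  containers:
  - name: pinniped-supervisor
    securityContext:
      runAsUser: 1001        # the supervisor's own UID; no ACL entry needed
    volumeMounts:
    - name: socket
      mountPath: /pinniped_socket
  - name: mesh-proxy         # hypothetical sidecar injected by a service mesh
    securityContext:
      runAsUser: 1337        # differs from 1001, so it receives a write ACL on the socket
    volumeMounts:
    - name: socket
      mountPath: /pinniped_socket
  volumes:
  - name: socket
    emptyDir: {}
```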


@@ -9,23 +9,55 @@ menu:
     weight: 70
     parent: howtos
 ---

 The Supervisor is an [OpenID Connect (OIDC)](https://openid.net/connect/) issuer that supports connecting a single
-"upstream" identity provider to many "downstream" cluster clients.
+"upstream" identity provider to many "downstream" cluster clients. When a user authenticates, the Supervisor can issue
+[JSON Web Tokens (JWTs)](https://tools.ietf.org/html/rfc7519) that can be [validated by the Pinniped Concierge]({{< ref "configure-concierge-jwt" >}}).

-This guide show you how to use this capability to issue [JSON Web Tokens (JWTs)](https://tools.ietf.org/html/rfc7519) that can be validated by the [Pinniped Concierge]({{< ref "configure-concierge-jwt" >}}).
+This guide explains how to expose the Supervisor's REST endpoints to clients.

-Before starting, you should have the [command-line tool installed]({{< ref "install-cli" >}}) locally and the Concierge [installed]({{< ref "install-concierge" >}}) in your cluster.
+## Prerequisites

-## Expose the Supervisor app as a service
+This how-to guide assumes that you have already [installed the Pinniped Supervisor]({{< ref "install-supervisor" >}}).

-The Supervisor app's endpoints should be exposed as HTTPS endpoints with proper TLS certificates signed by a certificate authority (CA) which is trusted by your user's web browsers.
+## Exposing the Supervisor app's endpoints outside the cluster
+
+The Supervisor app's endpoints should be exposed as HTTPS endpoints with proper TLS certificates signed by a
+certificate authority (CA) which is trusted by your end user's web browsers.
+
+It is recommended that the traffic to these endpoints should be encrypted via TLS all the way into the
+Supervisor pods, even when crossing boundaries that are entirely inside the Kubernetes cluster.
+The credentials and tokens that are handled by these endpoints are too sensitive to transmit without encryption.
+
+In all versions of the Supervisor app so far, there are both HTTP and HTTPS ports available for use by default.
+These ports each host all the Supervisor's endpoints. Unfortunately, this has caused some confusion in the community
+and some blog posts have been written which demonstrate using the HTTP port in such a way that a portion of the traffic's
+path is unencrypted. **Anything which exposes the non-TLS HTTP port outside the Pod should be considered deprecated**.
+A future version of the Supervisor app may include a breaking change to adjust the default behavior of the HTTP port
+to only listen on 127.0.0.1 (or perhaps even to be disabled) to make it more clear that the Supervisor app is not intended
+to receive non-TLS HTTP traffic from outside the Pod.

 Because there are many ways to expose TLS services from a Kubernetes cluster, the Supervisor app leaves this up to the user.
 The most common ways are:

-1. Define an [`Ingress` resource](https://kubernetes.io/docs/concepts/services-networking/ingress/) with TLS certificates.
-   In this case, the ingress terminates TLS. Typically, the ingress then talks plain HTTP to its backend,
-   which would be a NodePort or LoadBalancer Service in front of the HTTP port 8080 of the Supervisor pods.
+- Define a [TCP LoadBalancer Service](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
+
+  In this case, the Service is a layer 4 load balancer which does not terminate TLS, so the Supervisor app needs to be
+  configured with TLS certificates and will terminate the TLS connection itself (see the section about FederationDomain
+  below). The LoadBalancer Service should be configured to use the HTTPS port 443 of the Supervisor pods as its `targetPort`.
+
+  *Warning:* Never expose the Supervisor's HTTP port 8080 to the public. It would not be secure for the OIDC protocol
+  to use HTTP, because the user's secret OIDC tokens would be transmitted across the network without encryption.
+
+- Or, define an [Ingress resource](https://kubernetes.io/docs/concepts/services-networking/ingress/).
+
+  In this case, the [Ingress typically terminates TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls)
+  and then talks plain HTTP to its backend,
+  which would be a NodePort or LoadBalancer Service in front of the HTTP port 8080 of the Supervisor pods.
+  However, because the Supervisor's endpoints deal with sensitive credentials, it is much better if the
+  traffic is encrypted using TLS all the way into the Supervisor's Pods. Some Ingress implementations
+  may support re-encrypting the traffic before sending it to the backend. If your Ingress controller does not
+  support this, then consider using one of the other configurations described here instead of using an Ingress.

   The required configuration of the Ingress is specific to your cluster's Ingress Controller, so please refer to the
   documentation from your Kubernetes provider. If you are using a cluster from a cloud provider, then you'll probably
@@ -35,26 +67,52 @@ The most common ways are:
   [Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/), including
   [Contour](https://projectcontour.io/) and many others.

-1. Or, define a [TCP LoadBalancer Service](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer)
-   which is a layer 4 load balancer and does not terminate TLS. In this case, the Supervisor app needs to be
-   configured with TLS certificates and terminates the TLS connection itself (see the section about FederationDomain
-   below). The LoadBalancer Service should be configured to use the HTTPS port 443 of the Supervisor pods as its `targetPort`.
-
-   *Warning:* do not expose the Supervisor's port 8080 to the public. It would not be secure for the OIDC protocol
-   to use HTTP, because the user's secret OIDC tokens would be transmitted across the network without encryption.
-
-1. Or, expose the Supervisor app using a Kubernetes service mesh technology, for example [Istio](https://istio.io/).
-   Please see the documentation for your service mesh. Generally, the setup would be similar to the previous description
-   for defining an ingress, except the service mesh would probably provide both the ingress with TLS termination
-   and the service.
+- Or, expose the Supervisor app using a Kubernetes service mesh technology (e.g. [Istio](https://istio.io/)).
+
+  In this case, the setup would be similar to the previous description
+  for defining an Ingress, except the service mesh would probably provide both the ingress with TLS termination
+  and the service. Please see the documentation for your service mesh.
+
+  If your service mesh is capable of transparently encrypting traffic all the way into the
+  Supervisor Pods, then you should use that capability. In this case, it may make sense to configure the Supervisor's
+  HTTP port to only listen on a Unix domain socket, such as when the service mesh injects a sidecar container that can
+  securely access the socket from within the same Pod.
+  See the `endpoints` option in [deploy/supervisor/values.yml](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/supervisor/values.yaml)
+  for more information.
+  This would prevent any unencrypted traffic from accidentally being transmitted from outside the Pod into the
+  Supervisor app's HTTP port.
+
+  For example, the following high level steps cover configuring Istio for use with the Supervisor:
+
+  - Update the http listener to use a Unix domain socket,
+    i.e. `--data-value-yaml 'endpoints={"http":{"network":"unix","address":"/pinniped_socket/socketfile.sock"}}'`
+  - Arrange for the Istio sidecar to be injected into the Supervisor app with an appropriate `IstioIngressListener`,
+    i.e. `defaultEndpoint: unix:///pinniped_socket/socketfile.sock`
+  - Mount the socket volume into the Istio sidecar container by including the appropriate annotation on the Supervisor pods,
+    i.e. `sidecar.istio.io/userVolumeMount: '{"socket":{"mountPath":"/pinniped_socket"}}'`
+  - Disable the https listener and update the deployment health checks as desired.
+
+  For service meshes that do not support Unix domain sockets, the http listener should be configured to listen on 127.0.0.1.

-For either of the first two options, if you installed using `ytt` then you can use
-the related `service_*` options from [deploy/supervisor/values.yml](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/supervisor/values.yaml) to create a Service.
-If you installed using `install-supervisor.yaml` then you can create
-the Service separately after installing the Supervisor app. There is no `Ingress` included in the `ytt` templates,
-so if you choose to use an Ingress then you'll need to create that separately after installing the Supervisor app.
+## Creating a Service to expose the Supervisor app's endpoints within the cluster
+
+Now that you've selected a strategy to expose the endpoints outside the cluster, you can choose how to expose
+the endpoints inside the cluster in support of that strategy.
+
+If you've decided to use a LoadBalancer Service then you'll need to create it. On the other hand, if you've decided to
+use an Ingress then you'll need to create a Service which the Ingress can use as its backend. Either way, how you
+create the Service will depend on how you choose to install the Supervisor:
+
+- If you installed using `ytt` then you can use
+  the related `service_*` options from [deploy/supervisor/values.yml](https://github.com/vmware-tanzu/pinniped/blob/main/deploy/supervisor/values.yaml)
+  to create a Service.
+- If you installed using the pre-rendered manifests attached to the Pinniped GitHub releases, then you can create
+  the Service separately after installing the Supervisor app.
+
+There is no Ingress included in either the `ytt` templates or the pre-rendered manifests,
+so if you choose to use an Ingress then you'll need to create the Ingress separately after installing the Supervisor app.

-#### Example: Using a LoadBalancer Service
+### Example: Creating a LoadBalancer Service

 This is an example of creating a LoadBalancer Service to expose port 8443 of the Supervisor app outside the cluster.
@@ -63,70 +121,79 @@ apiVersion: v1
 kind: Service
 metadata:
   name: pinniped-supervisor-loadbalancer
-  # Assuming that this is the namespace where the supervisor was installed. This is the default in install-supervisor.yaml.
+  # Assuming that this is the namespace where the Supervisor was installed.
+  # This is the default.
   namespace: pinniped-supervisor
 spec:
   type: LoadBalancer
   selector:
-    # Assuming that this is how the supervisor pods are labeled. This is the default in install-supervisor.yaml.
+    # Assuming that this is how the Supervisor Pods are labeled.
+    # This is the default.
     app: pinniped-supervisor
   ports:
     - protocol: TCP
       port: 443
-      targetPort: 8443
+      targetPort: 8443 # 8443 is the TLS port. Do not expose port 8080.
 ```

-#### Example: Using a NodePort Service
+### Example: Creating a NodePort Service

-A NodePort Service exposes the app as a port on the nodes of the cluster.
+A NodePort Service exposes the app as a port on the nodes of the cluster. For example, a NodePort Service could also be
+used as the backend of an Ingress.

-This is convenient for use with kind clusters, because kind can
+This is also convenient for use with Kind clusters, because kind can
 [expose node ports as localhost ports on the host machine](https://kind.sigs.k8s.io/docs/user/configuration/#extra-port-mappings)
 without requiring an Ingress, although
-[kind also supports several Ingress Controllers](https://kind.sigs.k8s.io/docs/user/ingress).
-
-A NodePort Service could also be used behind an Ingress which is terminating TLS.
-
-For example:
+[Kind also supports several Ingress Controllers](https://kind.sigs.k8s.io/docs/user/ingress).

 ```yaml
 apiVersion: v1
 kind: Service
 metadata:
   name: pinniped-supervisor-nodeport
-  # Assuming that this is the namespace where the supervisor was installed. This is the default in install-supervisor.yaml.
+  # Assuming that this is the namespace where the Supervisor was installed.
+  # This is the default.
   namespace: pinniped-supervisor
 spec:
   type: NodePort
   selector:
-    # Assuming that this is how the supervisor pods are labeled. This is the default in install-supervisor.yaml.
+    # Assuming that this is how the Supervisor Pods are labeled.
+    # This is the default.
     app: pinniped-supervisor
   ports:
     - protocol: TCP
-      port: 80
-      targetPort: 8080
-      nodePort: 31234 # This is the port that you would forward to the kind host. Or omit this key for a random port.
+      port: 443
+      targetPort: 8443
+      # This is the port that you would forward to the kind host.
+      # Or omit this key for a random port on the node.
+      nodePort: 31234
 ```
-### Configuring the Supervisor to act as an OIDC provider
+## Configuring the Supervisor to act as an OIDC provider

 The Supervisor can be configured as an OIDC provider by creating FederationDomain resources
-in the same namespace where the Supervisor app was installed. For example:
+in the same namespace where the Supervisor app was installed. At least one FederationDomain must be configured
+for the Supervisor to provide its functionality.
+
+Here is an example of a FederationDomain.

 ```yaml
 apiVersion: config.supervisor.pinniped.dev/v1alpha1
 kind: FederationDomain
 metadata:
   name: my-provider
-  # Assuming that this is the namespace where the supervisor was installed. This is the default in install-supervisor.yaml.
+  # Assuming that this is the namespace where the supervisor was installed.
+  # This is the default.
   namespace: pinniped-supervisor
 spec:
-  # The hostname would typically match the DNS name of the public ingress or load balancer for the cluster.
-  # Any path can be specified, which allows a single hostname to have multiple different issuers. The path is optional.
+  # The hostname would typically match the DNS name of the public ingress
+  # or load balancer for the cluster.
+  # Any path can be specified, which allows a single hostname to have
+  # multiple different issuers. The path is optional.
   issuer: https://my-issuer.example.com/any/path

-  # Optionally configure the name of a Secret in the same namespace, of type `kubernetes.io/tls`,
-  # which contains the TLS serving certificate for the HTTPS endpoints served by this OIDC Provider.
+  # Optionally configure the name of a Secret in the same namespace,
+  # of type `kubernetes.io/tls`, which contains the TLS serving certificate
+  # for the HTTPS endpoints served by this OIDC Provider.
   tls:
     secretName: my-tls-cert-secret
 ```
@@ -134,38 +201,41 @@ spec:
 You can create multiple FederationDomains as long as each has a unique issuer string.
 Each FederationDomain can be used to provide access to a set of Kubernetes clusters for a set of user identities.

-#### Configuring TLS for the Supervisor OIDC endpoints
+### Configuring TLS for the Supervisor OIDC endpoints

-If you have terminated TLS outside the app, for example using an Ingress with TLS certificates, then you do not need to
-configure TLS certificates on the FederationDomain.
+If you have terminated TLS outside the app, for example using a service mesh which handles encrypting the traffic for you,
+then you do not need to configure TLS certificates on the FederationDomain. Otherwise, you need to configure the
+Supervisor app to terminate TLS.

-If you are using a LoadBalancer Service to expose the Supervisor app outside your cluster, then you
-also need to configure the Supervisor app to terminate TLS. There are two places to configure TLS certificates:
+There are two places to optionally configure TLS certificates:

 1. Each FederationDomain can be configured with TLS certificates, using the `spec.tls.secretName` field.

-1. The default TLS certificate for all OIDC providers can be configured by creating a Secret called
+1. The default TLS certificate for all FederationDomains can be configured by creating a Secret called
    `pinniped-supervisor-default-tls-certificate` in the same namespace in which the Supervisor was installed.

-The default TLS certificate are used for all OIDC providers which did not declare a `spec.tls.secretName`.
-Also, the `spec.tls.secretName` is ignored for incoming requests to the OIDC endpoints
-that use an IP address as the host, so those requests always present the default TLS certificates
-to the client. When the request includes the hostname, and that hostname matches the hostname of an `Issuer`,
-then the TLS certificate defined by the `spec.tls.secretName` is used. If that issuer did not
-define `spec.tls.secretName` then the default TLS certificate is used. If neither exists,
-then the client gets a TLS error because the server does not present any TLS certificate.
+Each incoming request to the endpoints of the Supervisor may use TLS certificates that were configured in either
+of the above ways. The TLS certificate to present to the client is selected dynamically for each request
+using Server Name Indication (SNI):
+
+- When incoming requests use SNI to specify a hostname, and that hostname matches the hostname
+  of a FederationDomain, and that FederationDomain specifies `spec.tls.secretName`, then the TLS certificate from the
+  `spec.tls.secretName` Secret will be used.
+- Any other request will use the default TLS certificate, if it is specified. This includes any request to a host
+  which is an IP address, because SNI does not work for IP addresses. If the default TLS certificate is not specified,
+  then these requests will fail TLS certificate verification.

 It is recommended that you have a DNS entry for your load balancer or Ingress, and that you configure the
-OIDC provider's `Issuer` using that DNS hostname, and that the TLS certificate for that provider also
+OIDC provider's `issuer` using that DNS hostname, and that the TLS certificate for that provider also
 covers that same hostname.

 You can create the certificate Secrets however you like, for example you could use [cert-manager](https://cert-manager.io/)
-or `kubectl create secret tls`.
+or `kubectl create secret tls`. They must be Secrets of type `kubernetes.io/tls`.

-Keep in mind that your users must load some of these endpoints in their web browsers, so the TLS certificates
+Keep in mind that your end users must load some of these endpoints in their web browsers, so the TLS certificates
 should be signed by a certificate authority that is trusted by their browsers.

 ## Next steps

-Next, configure an `OIDCIdentityProvider` or an `LDAPIdentityProvider` for the Supervisor (several examples are available in these guides),
+Next, configure an OIDCIdentityProvider, ActiveDirectoryIdentityProvider, or an LDAPIdentityProvider for the Supervisor
+(several examples are available in these guides),
 and [configure the Concierge to use the Supervisor for authentication]({{< ref "configure-concierge-supervisor-jwt" >}})
 on each cluster!
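To ground the Istio steps listed in the guide above, here is a hedged sketch of an Istio `Sidecar` resource whose ingress listener (the `IstioIngressListener` referenced in the new doc text) forwards inbound traffic to the Supervisor's Unix domain socket. The port number and resource names are illustrative, not from this commit; consult the Istio documentation for your mesh version.

```yaml
apiVersion: networking.istio.io/v1beta1
kind: Sidecar
metadata:
  name: pinniped-supervisor
  namespace: pinniped-supervisor
spec:
  workloadSelector:
    labels:
      app: pinniped-supervisor
  ingress:
  - port:
      number: 80          # illustrative inbound port on the sidecar
      protocol: HTTP
      name: http
    # forward to the socket created by the Supervisor's unix http listener
    defaultEndpoint: unix:///pinniped_socket/socketfile.sock
```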


@@ -9,13 +9,22 @@ menu:
     weight: 60
     parent: howtos
 ---

 This guide shows you how to install the Pinniped Supervisor, which allows seamless login across one or many Kubernetes clusters.

-You should have a supported Kubernetes cluster with working HTTPS ingress capabilities.
-<!-- TODO: link to support matrix -->
-
 In the examples below, you can replace *{{< latestversion >}}* with your preferred version number.
 You can find a list of Pinniped releases [on GitHub](https://github.com/vmware-tanzu/pinniped/releases).

+## Prerequisites
+
+You should have a Kubernetes cluster with working HTTPS ingress or load balancer capabilities. Unlike the Concierge app, which can
+only run on [supported Kubernetes cluster types]({{< ref "supported-clusters" >}}), the Supervisor app can run on almost any Kubernetes cluster.
+
+The Supervisor app controls authentication to Kubernetes clusters, so access to its settings and internals should be protected carefully.
+Typically, the Supervisor is installed on a secure Kubernetes cluster which is only accessible by administrators,
+separate from the clusters for which it is providing authentication services which are accessible by application
+developers or devops teams.
+
 ## With default options

 ### Using kapp


@@ -316,9 +316,6 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
 	require.NotEmpty(t, supervisorPods.Items, "could not find supervisor pods")
 	supervisorPod := supervisorPods.Items[0]

-	// make sure the supervisor has a default TLS cert during this test so that it can handle a TLS connection
-	createSupervisorDefaultTLSCertificateSecretIfNeeded(ctx, t)
-
 	// Test that the user can perform basic actions through the client with their username and group membership
 	// influencing RBAC checks correctly.
 	t.Run(


@@ -108,9 +108,6 @@ func TestSecureTLSSupervisor(t *testing.T) { // does not run in parallel because
 	ctx, cancel := context.WithCancel(context.Background())
 	t.Cleanup(cancel)

-	// make sure the supervisor has a default TLS cert during this test so that it can handle a TLS connection
-	createSupervisorDefaultTLSCertificateSecretIfNeeded(ctx, t)
-
 	startKubectlPortForward(ctx, t, "10447", "443", env.SupervisorAppName+"-nodeport", env.SupervisorNamespace)

 	stdout, stderr := runNmapSSLEnum(t, "127.0.0.1", 10447)


@ -9,6 +9,7 @@ import (
"crypto/x509" "crypto/x509"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"net" "net"
"net/http" "net/http"
@@ -168,7 +169,7 @@ func TestSupervisorTLSTerminationWithSNI_Disruptive(t *testing.T) {
 	requireStatus(t, pinnipedClient, federationDomain1.Namespace, federationDomain1.Name, v1alpha1.SuccessFederationDomainStatusCondition)
 	// The spec.tls.secretName Secret does not exist, so the endpoints should fail with TLS errors.
-	requireEndpointHasTLSErrorBecauseCertificatesAreNotReady(t, issuer1)
+	requireEndpointHasBootstrapTLSErrorBecauseCertificatesAreNotReady(t, issuer1)
 	// Create the Secret.
 	ca1 := createTLSCertificateSecret(ctx, t, ns, hostname1, nil, certSecretName1, kubeClient)
@@ -189,7 +190,7 @@ func TestSupervisorTLSTerminationWithSNI_Disruptive(t *testing.T) {
 	}))
 	// Then the endpoints should fail with TLS errors again.
-	requireEndpointHasTLSErrorBecauseCertificatesAreNotReady(t, issuer1)
+	requireEndpointHasBootstrapTLSErrorBecauseCertificatesAreNotReady(t, issuer1)
 	// Create a Secret at the updated name.
 	ca1update := createTLSCertificateSecret(ctx, t, ns, hostname1, nil, certSecretName1update, kubeClient)
@@ -252,7 +253,7 @@ func TestSupervisorTLSTerminationWithDefaultCerts_Disruptive(t *testing.T) {
 	requireStatus(t, pinnipedClient, federationDomain1.Namespace, federationDomain1.Name, v1alpha1.SuccessFederationDomainStatusCondition)
 	// There is no default TLS cert and the spec.tls.secretName was not set, so the endpoints should fail with TLS errors.
-	requireEndpointHasTLSErrorBecauseCertificatesAreNotReady(t, issuerUsingIPAddress)
+	requireEndpointHasBootstrapTLSErrorBecauseCertificatesAreNotReady(t, issuerUsingIPAddress)
 	// Create a Secret at the special name which represents the default TLS cert.
 	defaultCA := createTLSCertificateSecret(ctx, t, ns, "cert-hostname-doesnt-matter", []net.IP{ips[0]}, defaultTLSCertSecretName(env), kubeClient)
@@ -320,22 +321,6 @@ func createTLSCertificateSecret(ctx context.Context, t *testing.T, ns string, ho
 	return ca
 }
-func createSupervisorDefaultTLSCertificateSecretIfNeeded(ctx context.Context, t *testing.T) {
-	env := testlib.IntegrationEnv(t)
-	adminClient := testlib.NewKubernetesClientset(t)
-	ns := env.SupervisorNamespace
-	name := defaultTLSCertSecretName(env)
-	_, err := adminClient.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
-	if k8serrors.IsNotFound(err) {
-		_ = createTLSCertificateSecret(ctx, t, ns, "cert-hostname-doesnt-matter", nil, name, adminClient)
-	} else {
-		require.NoError(t, err)
-	}
-}
 func temporarilyRemoveAllFederationDomainsAndDefaultTLSCertSecret(
 	ctx context.Context,
 	t *testing.T,
@@ -425,9 +410,14 @@ func requireEndpointNotFound(t *testing.T, url, host, caBundle string) {
 	}, 2*time.Minute, 200*time.Millisecond)
 }
-func requireEndpointHasTLSErrorBecauseCertificatesAreNotReady(t *testing.T, url string) {
+func requireEndpointHasBootstrapTLSErrorBecauseCertificatesAreNotReady(t *testing.T, url string) {
 	t.Helper()
-	httpClient := newHTTPClient(t, "", nil)
+	httpClient := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // there is no way for us to know the bootstrap CA
+		},
+	}
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
 		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -437,11 +427,18 @@ func requireEndpointHasTLSErrorBecauseCertificatesAreNotReady(t *testing.T, url
 		requireEventually.NoError(err)
 		response, err := httpClient.Do(request)
-		if err == nil {
-			_ = response.Body.Close()
-		}
-		requireEventually.Error(err)
-		requireEventually.EqualError(err, fmt.Sprintf(`Get "%s": remote error: tls: unrecognized name`, url))
+		requireEventually.NoError(err)
+		t.Cleanup(func() {
+			_ = response.Body.Close()
+		})
+		requireEventually.Equal(http.StatusInternalServerError, response.StatusCode)
+		body, err := io.ReadAll(response.Body)
+		requireEventually.NoError(err)
+		requireEventually.Equal("pinniped supervisor has invalid TLS serving certificate configuration\n", string(body))
 	}, 2*time.Minute, 200*time.Millisecond)
 }

View File

@@ -5,6 +5,7 @@ package integration
 import (
 	"context"
+	"crypto/tls"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -32,22 +33,55 @@ func TestSupervisorHealthz(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 	defer cancel()
-	requestHealthEndpoint, err := http.NewRequestWithContext(
+	httpClient := &http.Client{}
+	httpGet(ctx, t, httpClient, fmt.Sprintf("http://%s/healthz", env.SupervisorHTTPAddress), http.StatusOK, "ok")
+}
+
+// Never run this test in parallel since deleting all federation domains and the default TLS secret is disruptive, see main_test.go.
+func TestSupervisorHealthzBootstrap_Disruptive(t *testing.T) {
+	env := testlib.IntegrationEnv(t)
+	pinnipedClient := testlib.NewSupervisorClientset(t)
+	kubeClient := testlib.NewKubernetesClientset(t)
+	ns := env.SupervisorNamespace
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+	defer cancel()
+	temporarilyRemoveAllFederationDomainsAndDefaultTLSCertSecret(ctx, t, ns, defaultTLSCertSecretName(env), pinnipedClient, kubeClient)
+	httpClient := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // there is no way for us to know the bootstrap CA
+		},
+	}
+	const badTLSConfigBody = "pinniped supervisor has invalid TLS serving certificate configuration\n"
+	httpGet(ctx, t, httpClient, fmt.Sprintf("https://%s/healthz", env.SupervisorHTTPSAddress), http.StatusOK, "ok")
+	httpGet(ctx, t, httpClient, fmt.Sprintf("https://%s", env.SupervisorHTTPSAddress), http.StatusInternalServerError, badTLSConfigBody)
+	httpGet(ctx, t, httpClient, fmt.Sprintf("https://%s/nothealthz", env.SupervisorHTTPSAddress), http.StatusInternalServerError, badTLSConfigBody)
+	httpGet(ctx, t, httpClient, fmt.Sprintf("https://%s/healthz/something", env.SupervisorHTTPSAddress), http.StatusInternalServerError, badTLSConfigBody)
+}
+
+func httpGet(ctx context.Context, t *testing.T, client *http.Client, url string, expectedStatus int, expectedBody string) {
+	t.Helper()
+	req, err := http.NewRequestWithContext(
 		ctx,
 		http.MethodGet,
-		fmt.Sprintf("http://%s/healthz", env.SupervisorHTTPAddress),
+		url,
 		nil,
 	)
 	require.NoError(t, err)
-	httpClient := &http.Client{}
-	response, err := httpClient.Do(requestHealthEndpoint) //nolint:bodyclose
+	response, err := client.Do(req) //nolint:bodyclose
 	require.NoError(t, err)
-	require.Equal(t, http.StatusOK, response.StatusCode)
+	require.Equal(t, expectedStatus, response.StatusCode)
 	responseBody, err := ioutil.ReadAll(response.Body)
 	require.NoError(t, err)
 	err = response.Body.Close()
 	require.NoError(t, err)
-	require.Equal(t, "ok", string(responseBody))
+	require.Equal(t, expectedBody, string(responseBody))
 }
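The bootstrap serving behavior pinned down by these tests can also be spot-checked by hand. A minimal sketch, assuming `$SUPERVISOR_ADDR` is the Supervisor's HTTPS address (a hypothetical placeholder); `-k` is required because the bootstrap certificate is signed by an in-memory CA that no client can verify:

```sh
# Hypothetical address of the Supervisor's HTTPS listener.
SUPERVISOR_ADDR="pinniped-supervisor.pinniped-supervisor.svc.cluster.local"

# /healthz completes the TLS handshake with the bootstrap certificate and returns 200 "ok".
curl -k "https://$SUPERVISOR_ADDR/healthz"

# Every other path returns 500 with the body
# "pinniped supervisor has invalid TLS serving certificate configuration"
# until real TLS serving certificates are configured.
curl -k "https://$SUPERVISOR_ADDR/nothealthz"
```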