ContainerImage.Pinniped/deploy/supervisor/deployment.yaml
Ryan Richard 8b7c30cfbd Supervisor listens for HTTPS on port 443 with configurable TLS certs
- TLS certificates can be configured on the OIDCProviderConfig using
  the `secretName` field (see the sketch following the commit details
  below).
- When listening for incoming TLS connections, choose the TLS cert
  based on the SNI hostname of the incoming request.
- Because the SNI information on an incoming request does not include
  the port number of the request, add a validation that
  OIDCProviderConfigs whose issuer hostnames (ignoring port numbers)
  are the same must use the same `secretName`.
- Note that this approach does not yet support requests made to an
  IP address instead of a hostname. Also note that `localhost` is
  considered a hostname by SNI.
- Add port 443 as a container port to the pod spec.
- A new controller watches for TLS secrets and caches them in memory.
  That same in-memory cache is used while servicing incoming connections
  on the TLS port.
- Make it easy to configure port 443 and/or port 80 for various
  Service types using our ytt templates for the supervisor.
- When deploying to kind, add another nodeport and forward it to a
  port on the host to expose our new HTTPS supervisor port to the
  host (a sketch of a matching Service and kind config follows the
  file below).
2020-10-26 17:03:26 -07:00
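
For illustration, here is a rough sketch of what this configuration
might look like from a user's point of view. The apiVersion, the exact
placement of `secretName` within the spec, and all of the names and
hostnames below are assumptions made for the example, not taken from
this commit:

apiVersion: config.pinniped.dev/v1alpha1
kind: OIDCProviderConfig
metadata:
  name: my-provider
  namespace: pinniped-supervisor
spec:
  issuer: https://issuer.example.com
  # Assumed placement; the commit only says the field is named secretName.
  secretName: my-provider-tls
---
apiVersion: v1
kind: Secret
metadata:
  name: my-provider-tls
  namespace: pinniped-supervisor
type: kubernetes.io/tls
data:
  tls.crt: <base64-encoded PEM certificate valid for issuer.example.com>
  tls.key: <base64-encoded PEM private key>

The new controller would pick up a secret like this and cache it in
memory, and the TLS listener would serve its certificate whenever the
SNI hostname of an incoming connection matches issuer.example.com.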

133 lines | 3.8 KiB | YAML

#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix")
#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels: #@ labels()
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ defaultResourceNameWithSuffix("static-config")
  namespace: #@ namespace()
  labels: #@ labels()
data:
  #@yaml/text-templated-strings
  pinniped.yaml: |
    labels: (@= json.encode(labels()).rstrip() @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: #@ namespace()
  labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: #@ defaultResourceName()
  namespace: #@ namespace()
  labels: #@ labels()
spec:
  replicas: #@ data.values.replicas
  selector:
    matchLabels: #@ defaultLabel()
  template:
    metadata:
      labels: #@ defaultLabel()
    spec:
      serviceAccountName: #@ defaultResourceName()
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: #@ defaultResourceName()
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          command: #! override the default entrypoint
            - /usr/local/bin/pinniped-supervisor
          args:
            - /etc/podinfo
            - /etc/config/pinniped.yaml
          resources:
            requests:
              memory: "128Mi"
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
          ports:
            - containerPort: 80
              protocol: TCP
            - containerPort: 443
              protocol: TCP
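          #! Port 80 serves plaintext HTTP, including the /healthz endpoint
          #! used by the probes below; port 443 is the new TLS listener, which
          #! picks a certificate based on each connection's SNI hostname.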
          livenessProbe:
            httpGet:
              path: /healthz
              port: 80
              scheme: HTTP
            initialDelaySeconds: 2
            timeoutSeconds: 15
            periodSeconds: 10
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /healthz
              port: 80
              scheme: HTTP
            initialDelaySeconds: 2
            timeoutSeconds: 3
            periodSeconds: 10
            failureThreshold: 3
      volumes:
        - name: config-volume
          configMap:
            name: #@ defaultResourceNameWithSuffix("static-config")
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
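      #! The podinfo volume above uses the downward API to expose this pod's
      #! labels and namespace as files; the /etc/podinfo argument in the
      #! container spec tells the supervisor where to find them.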
      #! This will help make sure our multiple pods run on different nodes, making
      #! our deployment "more" "HA".
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels: #@ defaultLabel()
                topologyKey: kubernetes.io/hostname
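
To go with the last two bullets in the commit message, here is a hedged
sketch of how the new port might be exposed. The Service name, label
selector, nodePort numbers, and host ports are all made up for
illustration; the real values live in the supervisor's ytt templates
and the kind setup for this project:

apiVersion: v1
kind: Service
metadata:
  name: pinniped-supervisor-nodeport
  namespace: pinniped-supervisor
spec:
  type: NodePort
  selector:
    app: pinniped-supervisor  # illustrative; the real selector uses defaultLabel()
  ports:
    - name: http
      port: 80
      targetPort: 80
      nodePort: 31234  # illustrative value
    - name: https
      port: 443
      targetPort: 443
      nodePort: 31243  # illustrative value

And a kind cluster config that forwards those node ports to the host:

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    extraPortMappings:
      - containerPort: 31234  # the http nodePort above
        hostPort: 12345       # illustrative host port for HTTP
        protocol: TCP
      - containerPort: 31243  # the https nodePort above
        hostPort: 12344       # illustrative host port for HTTPS
        protocol: TCP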