ContainerImage.Pinniped/deploy/deployment.yaml
Andrew Keesler d853cbc7ff
Plumb through ImagePullSecrets to agent pod
Right now the YTT templates assume that the agent pods are going to use the
same image as the main Pinniped deployment, so we can reuse the same logic
for the image pull secrets.

Signed-off-by: Andrew Keesler <akeesler@vmware.com>
2020-09-24 15:52:05 -04:00
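For context: this code path is driven by the image_pull_dockerconfigjson data value at render time. A minimal sketch of a ytt data-values overlay that would exercise it (the base64 payload below is a placeholder, not a real credential):

    #@data/values
    ---
    image_pull_dockerconfigjson: "eyJhdXRocyI6ey4uLn19" #! placeholder: base64 of a real .docker/config.json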


#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels:
    name: #@ data.values.namespace
---
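#! This ServiceAccount is assigned to the Pinniped pods via serviceAccountName in the Deployment below.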
apiVersion: v1
kind: ServiceAccount
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
---
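#! This ConfigMap renders the pinniped.yaml config file from the ytt data values.
#! The Deployment below mounts it at /etc/config and passes --config=/etc/config/pinniped.yaml.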
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ data.values.app_name + "-config"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
data:
  #! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
  #@yaml/text-templated-strings
  pinniped.yaml: |
    discovery:
      url: (@= data.values.discovery_url or "null" @)
    api:
      servingCertificate:
        durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
        renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
    names:
      servingCertificateSecret: (@= data.values.app_name + "-api-tls-serving-certificate" @)
      credentialIssuerConfig: (@= data.values.app_name + "-config" @)
      apiService: (@= data.values.app_name + "-api" @)
    kubeCertAgent:
      namePrefix: (@= data.values.app_name + "-kube-cert-agent-" @)
      (@ if data.values.image_digest: @)
      image: (@= data.values.image_repo + "@" + data.values.image_digest @)
      (@ else: @)
      image: (@= data.values.image_repo + ":" + data.values.image_tag @)
      (@ end @)
      (@ if data.values.image_pull_dockerconfigjson: @)
      imagePullSecrets:
        - image-pull-secret
      (@ end @)
---
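#! Only created when image_pull_dockerconfigjson is set. The name image-pull-secret must match
#! both the Deployment's imagePullSecrets entry and the kubeCertAgent imagePullSecrets list in
#! the ConfigMap above, since the agent pods pull the same image as the main deployment.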
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
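#! The main Pinniped Deployment. It uses the same image reference (digest when provided,
#! otherwise tag) that is written into the kubeCertAgent config above.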
apiVersion: apps/v1
kind: Deployment
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  replicas: #@ data.values.replicas
  selector:
    matchLabels:
      app: #@ data.values.app_name
  template:
    metadata:
      labels:
        app: #@ data.values.app_name
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      serviceAccountName: #@ data.values.app_name
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: pinniped
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          resources:
            requests:
              memory: "128Mi"
          args:
            - --config=/etc/config/pinniped.yaml
            - --downward-api-path=/etc/podinfo
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
          livenessProbe:
            httpGet:
              path: /healthz
              port: 443
              scheme: HTTPS
            initialDelaySeconds: 2
            timeoutSeconds: 15
            periodSeconds: 10
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /healthz
              port: 443
              scheme: HTTPS
            initialDelaySeconds: 2
            timeoutSeconds: 3
            periodSeconds: 10
            failureThreshold: 3
      volumes:
        - name: config-volume
          configMap:
            name: #@ data.values.app_name + "-config"
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master #! Allow running on master nodes too
          effect: NoSchedule
      #! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
      #! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
      #!priorityClassName: system-cluster-critical
      #! This will help make sure our multiple pods run on different nodes, making
      #! our deployment "more" "HA".
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app: #@ data.values.app_name
                topologyKey: kubernetes.io/hostname
---
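#! ClusterIP Service fronting the Pinniped pods on port 443; the APIService below routes to it.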
apiVersion: v1
kind: Service
metadata:
  #! If name is changed, must also change names.apiService in the ConfigMap above
  name: #@ data.values.app_name + "-api"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: ClusterIP
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: 443
      targetPort: 443
---
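#! Registers the login.pinniped.dev/v1alpha1 API group with the Kubernetes aggregation layer
#! and delegates those requests to the ClusterIP Service above.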
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.login.pinniped.dev
  labels:
    app: #@ data.values.app_name
spec:
  version: v1alpha1
  group: login.pinniped.dev
  groupPriorityMinimum: 2500
  versionPriority: 10
  #! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
  service:
    name: #@ data.values.app_name + "-api"
    namespace: #@ data.values.namespace
    port: 443