#! ContainerImage.Pinniped/deploy/deployment.yaml

#@ load("@ytt:data", "data")
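#! This template reads the following data values: namespace, app_name, discovery_url,
#! webhook_url, webhook_ca_bundle, image_repo, image_digest, and image_tag.
#! A minimal sketch of rendering and applying it with ytt (paths and value settings here
#! are illustrative, not a documented workflow):
#!   ytt -f deploy/ -v webhook_url=https://example.com/authenticate | kubectl apply -f -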
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels:
    name: #@ data.values.namespace
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ data.values.app_name + "-service-account"
namespace: #@ data.values.namespace
---
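#! The ConfigMap below embeds the server's config file as a single literal string. The
#! #@yaml/text-templated-strings annotation makes ytt expand the (@= ... @) text templates
#! inside that string instead of parsing the block as structured YAML.
#! With illustrative values (not real defaults), the rendered entry would look roughly like:
#!   placeholder-name.yaml: |
#!     discovery:
#!       url: null
#!     webhook:
#!       url: https://example.com/authenticate
#!       caBundle: <base64-encoded PEM bundle>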
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ data.values.app_name + "-config"
namespace: #@ data.values.namespace
labels:
app: #@ data.values.app_name
data:
#@yaml/text-templated-strings
placeholder-name.yaml: |
discovery:
url: (@= data.values.discovery_url or "null" @)
webhook:
url: (@= data.values.webhook_url @)
caBundle: (@= data.values.webhook_ca_bundle @)
---
#! TODO set up healthy, ready, etc. probes correctly for our deployment
#! TODO set the priority-critical-urgent on our deployment to ask kube to never let it die
#! TODO set resource minimums (e.g. 512MB RAM) on the deployment to make sure we get scheduled onto a reasonable node
apiVersion: apps/v1
kind: Deployment
metadata:
name: #@ data.values.app_name + "-deployment"
namespace: #@ data.values.namespace
labels:
app: #@ data.values.app_name
spec:
replicas: 1 #! TODO more than one replica for high availability, and share the same serving certificate among them (maybe using client-go leader election)
selector:
matchLabels:
app: #@ data.values.app_name
template:
metadata:
labels:
app: #@ data.values.app_name
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
serviceAccountName: #@ data.values.app_name + "-service-account"
containers:
- name: placeholder-name
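          #! When an image digest is provided, reference the image by digest ("repo@sha256:...");
          #! otherwise fall back to the mutable tag reference ("repo:tag").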
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          command:
            - ./placeholder-name-server
          args:
            - --config=/etc/config/placeholder-name.yaml
            - --downward-api-path=/etc/podinfo
            - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
            - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
            - name: k8s-certs
              mountPath: /etc/kubernetes/pki
      volumes:
        - name: config-volume
          configMap:
            name: #@ data.values.app_name + "-config"
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
        - name: k8s-certs
          hostPath:
            path: /etc/kubernetes/pki
            type: DirectoryOrCreate
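      #! The hostPath volume above only contains the cluster CA material on control-plane
      #! nodes, so the nodeSelector and tolerations below pin this pod to (tainted) master nodes.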
#! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
#! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
#! priorityClassName: system-cluster-critical
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: Service
metadata:
  name: placeholder-name-api #! the golang code assumes this specific name as part of the common name during cert generation
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: ClusterIP
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: 443
      targetPort: 443
---
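#! Registers the placeholder.suzerain-io.github.io/v1alpha1 API group with the Kubernetes
#! aggregation layer, routing requests for that group to the placeholder-name-api Service above.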
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.placeholder.suzerain-io.github.io
  labels:
    app: #@ data.values.app_name
spec:
  version: v1alpha1
  group: placeholder.suzerain-io.github.io
  groupPriorityMinimum: 2500 #! TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
  versionPriority: 10 #! TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
  #! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
  service:
    name: placeholder-name-api
    namespace: #@ data.values.namespace
    port: 443
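#! Once deployed, the API registration can be checked (illustrative commands, adjust as needed):
#!   kubectl get apiservice v1alpha1.placeholder.suzerain-io.github.io
#!   kubectl get --raw /apis/placeholder.suzerain-io.github.io/v1alpha1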