#@ load("@ytt:data", "data")

#! Namespace that all of the app's resources below are installed into.
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels:
    name: #@ data.values.namespace
#! ServiceAccount used by the app's pods (referenced by the DaemonSet below).
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: #@ data.values.app_name + "-service-account"
  namespace: #@ data.values.namespace
#! ConfigMap holding the app's config file, mounted into the container at
#! /etc/config (see the DaemonSet's config-volume mount below).
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ data.values.app_name + "-config"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
data:
  #! The block scalar below is itself templated by ytt ((@= @) substitutions),
  #! so it must be annotated with text-templated-strings.
  #@yaml/text-templated-strings
  placeholder-name.yaml: |
    discovery:
      url: (@= data.values.discovery_url or "null" @)
    webhook:
      url: (@= data.values.webhook_url @)
      caBundle: (@= data.values.webhook_ca_bundle @)
#! Optional registry credential, only rendered when a dockerconfigjson value
#! was provided; referenced by the DaemonSet's imagePullSecrets below.
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
type: kubernetes.io/dockerconfigjson
data:
  #! Assumes the data value is already base64-encoded, as Secret `data` requires.
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
#! TODO set up healthy, ready, etc. probes correctly?
#! TODO set resource minimums (e.g. 512MB RAM) to make sure we get scheduled onto a reasonable node?
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  selector:
    matchLabels:
      app: #@ data.values.app_name
  template:
    metadata:
      labels:
        app: #@ data.values.app_name
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      serviceAccountName: #@ data.values.app_name + "-service-account"
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: placeholder-name
          #! Prefer pinning the image by digest when one is provided; fall back to tag.
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          args:
            - --config=/etc/config/placeholder-name.yaml
            - --downward-api-path=/etc/podinfo
            - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
            - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
            - name: k8s-certs
              mountPath: /etc/kubernetes/pki
          livenessProbe:
            httpGet:
              path: /healthz
              port: 443
              scheme: HTTPS
            initialDelaySeconds: 20
            timeoutSeconds: 15
            periodSeconds: 10
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /healthz
              port: 443
              scheme: HTTPS
            initialDelaySeconds: 20
            timeoutSeconds: 3
            periodSeconds: 10
            failureThreshold: 3
      volumes:
        - name: config-volume
          configMap:
            name: #@ data.values.app_name + "-config"
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
        - name: k8s-certs
          hostPath:
            path: /etc/kubernetes/pki
            type: DirectoryOrCreate
      #! Create Pods on all nodes which match this node selector, and not on any other nodes.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master #! Allow running on master nodes.
          effect: NoSchedule
      #! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
      #! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
      #!priorityClassName: system-cluster-critical
#! ClusterIP Service fronting the app's HTTPS port (targeted by the APIService below).
---
apiVersion: v1
kind: Service
metadata:
  #! the golang code assumes this specific name as part of the common name during cert generation
  name: placeholder-name-api
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: ClusterIP
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: 443
      targetPort: 443
#! Registers the app's aggregated API with the Kubernetes API server, routing
#! v1alpha1.placeholder.suzerain-io.github.io requests to the Service above.
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1alpha1.placeholder.suzerain-io.github.io
  labels:
    app: #@ data.values.app_name
spec:
  version: v1alpha1
  group: placeholder.suzerain-io.github.io
  groupPriorityMinimum: 2500 #! TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
  versionPriority: 10 #! TODO what is the right value? https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#apiservicespec-v1beta1-apiregistration-k8s-io
  #! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
  service:
    name: placeholder-name-api
    namespace: #@ data.values.namespace
    port: 443