ContainerImage.Pinniped/deploy/concierge/deployment.yaml

#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
name: #@ data.values.namespace
labels: #@ labels()
#@ end
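#! Assumption (based on the namespace() helper loaded above): when the into_namespace data value
#! is set, the Namespace above is not created and the namespaced resources below are installed
#! into that existing namespace instead.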
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! we need to create this service account before we create the secret
kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount"
secrets: #! make sure the token controller does not create any other secrets
- name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("config")
namespace: #@ namespace()
labels: #@ labels()
data:
#! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
#@yaml/text-templated-strings
pinniped.yaml: |
discovery:
url: (@= data.values.discovery_url or "null" @)
api:
servingCertificate:
durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
apiGroupSuffix: (@= data.values.api_group_suffix @)
names:
servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @)
credentialIssuer: (@= defaultResourceNameWithSuffix("config") @)
apiService: (@= defaultResourceNameWithSuffix("api") @)
impersonationLoadBalancerService: (@= defaultResourceNameWithSuffix("impersonation-proxy-load-balancer") @)
impersonationClusterIPService: (@= defaultResourceNameWithSuffix("impersonation-proxy-cluster-ip") @)
impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @)
impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @)
impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @)
agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @)
labels: (@= json.encode(labels()).rstrip() @)
kubeCertAgent:
namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)
(@ if data.values.kube_cert_agent_image: @)
image: (@= data.values.kube_cert_agent_image @)
(@ else: @)
(@ if data.values.image_digest: @)
image: (@= data.values.image_repo + "@" + data.values.image_digest @)
(@ else: @)
image: (@= data.values.image_repo + ":" + data.values.image_tag @)
(@ end @)
(@ end @)
(@ if data.values.image_pull_dockerconfigjson: @)
imagePullSecrets:
- image-pull-secret
(@ end @)
(@ if data.values.log_level: @)
logLevel: (@= getAndValidateLogLevel() @)
(@ end @)
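#! Illustrative only: the kubeCertAgent.image above renders to "<image_repo>@<image_digest>"
#! when image_digest is set, otherwise to "<image_repo>:<image_tag>", unless the optional
#! kube_cert_agent_image data value overrides it entirely; the angle-bracket placeholders
#! stand for the corresponding data values.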
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
name: image-pull-secret
namespace: #@ namespace()
labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
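#! Note (general Kubernetes behavior, not specific to these templates): because the value above
#! lands under the Secret's data field rather than stringData, image_pull_dockerconfigjson is
#! expected to already be base64-encoded, e.g. the output of "base64 -w 0 ~/.docker/config.json"
#! on Linux.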
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! The more unique deploymentPodLabel() was added more recently so that Services can select these
#! Pods more specifically, without accidentally selecting any other Deployment's Pods (especially
#! the kube cert agent Deployment's Pods).
_: #@ template.replace(deploymentPodLabel())
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
securityContext:
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
imagePullSecrets:
- name: image-pull-secret
#@ end
containers:
- name: #@ defaultResourceName()
#@ if data.values.image_digest:
image: #@ data.values.image_repo + "@" + data.values.image_digest
#@ else:
image: #@ data.values.image_repo + ":" + data.values.image_tag
#@ end
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "100m"
memory: "128Mi"
command:
- pinniped-concierge
- --config=/etc/config/pinniped.yaml
- --downward-api-path=/etc/podinfo
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
- name: impersonation-proxy
mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount
readOnly: true
env:
#@ if data.values.https_proxy:
- name: HTTPS_PROXY
value: #@ data.values.https_proxy
#@ end
#@ if data.values.https_proxy and data.values.no_proxy:
- name: NO_PROXY
value: #@ data.values.no_proxy
#@ end
livenessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 15
periodSeconds: 10
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 3
volumes:
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 100Mi
- name: config-volume
configMap:
name: #@ defaultResourceNameWithSuffix("config")
- name: impersonation-proxy
secret:
secretName: #@ defaultResourceNameWithSuffix("impersonation-proxy")
items: #! make sure our pod does not start until the token controller has a chance to populate the secret
- key: token
path: token
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "name"
fieldRef:
fieldPath: metadata.name
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
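#! Illustrative only: the downward API exposes these items as files under the /etc/podinfo mount,
#! where the "labels" file holds one key="value" pair per line, e.g. app="pinniped-concierge"
#! (label names shown are examples based on the default app_name).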
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master #! Allow running on master nodes too
effect: NoSchedule
#! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
#! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
#!priorityClassName: system-cluster-critical
#! This will help make sure our multiple pods run on different nodes, making
#! our deployment "more" "HA".
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
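#! Because this anti-affinity rule is "preferred" rather than "required", multiple replicas can
#! still be scheduled onto the same node (for example on a single-node cluster); the scheduler
#! merely favors spreading them across hostnames.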
---
apiVersion: v1
kind: Service
metadata:
#! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 8443
---
apiVersion: v1
kind: Service
metadata:
name: #@ defaultResourceNameWithSuffix("proxy")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 8444
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.login.concierge")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("login.concierge")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.identity.concierge")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("identity.concierge")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
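#! Illustrative only: after deployment, the availability of these aggregated APIs can be checked
#! with kubectl (names shown assume the default api_group_suffix of "pinniped.dev"):
#!   kubectl get apiservice v1alpha1.login.concierge.pinniped.dev v1alpha1.identity.concierge.pinniped.dev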
---
apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1"
kind: CredentialIssuer
metadata:
name: #@ defaultResourceNameWithSuffix("config")
labels: #@ labels()
spec:
impersonationProxy:
mode: #@ data.values.impersonation_proxy_spec.mode
#@ if data.values.impersonation_proxy_spec.external_endpoint:
externalEndpoint: #@ data.values.impersonation_proxy_spec.external_endpoint
#@ end
service:
type: #@ data.values.impersonation_proxy_spec.service.type
#@ if data.values.impersonation_proxy_spec.service.load_balancer_ip:
loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip
#@ end
annotations: #@ data.values.impersonation_proxy_spec.service.annotations
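#! Illustrative only: the running Concierge maintains status on this CredentialIssuer, including
#! which credential-issuing strategies are ready, which can be inspected with something like:
#!   kubectl get credentialissuer -o yaml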
---
apiVersion: v1
kind: Secret
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! wait until the SA exists to create this secret so that the token controller does not delete it
#! we have this secret at the end so that kubectl will create the service account first
kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount"
kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
type: kubernetes.io/service-account-token
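#! Note (general Kubernetes behavior): for a Secret of type kubernetes.io/service-account-token,
#! the token controller populates the "token", "ca.crt", and "namespace" keys once the referenced
#! ServiceAccount exists, which is why the change-rule above orders this Secret after the
#! ServiceAccount and why the Deployment mounts only the "token" item.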