2023-09-26 20:16:13 +00:00
|
|
|
#! Copyright 2020-2023 the Pinniped contributors. All Rights Reserved.
|
2020-08-25 01:07:34 +00:00
|
|
|
#! SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
2020-07-07 20:17:34 +00:00
|
|
|
#@ load("@ytt:data", "data")
|
2020-10-15 17:14:23 +00:00
|
|
|
#@ load("@ytt:json", "json")
|
Improve the selectors of Deployments and Services
Fixes #801. The solution is complicated by the fact that the Selector
field of Deployments is immutable. It would have been easy to just
make the Selectors of the main Concierge Deployment, the Kube cert agent
Deployment, and the various Services use more specific labels, but
that would break upgrades. Instead, we make the Pod template labels and
the Service selectors more specific, because those are not immutable, and
then handle the Deployment selectors in a special way.
For the main Concierge and Supervisor Deployments, we cannot change
their selectors, so they remain "app: app_name", and we make other
changes to ensure that only the intended pods are selected. We keep the
original "app" label on those pods and remove the "app" label from the
pods of the Kube cert agent Deployment. By removing it from the Kube
cert agent pods, there is no longer any chance that they will
accidentally get selected by the main Concierge Deployment.
For the Kube cert agent Deployment, we can change the immutable selector
by deleting and recreating the Deployment. The new selector uses only
the unique label that has always been applied to the pods of that
deployment. Upon recreation, these pods no longer have the "app" label,
so they will not be selected by the main Concierge Deployment's
selector.
The selectors of all Services have been updated to use new labels to
more specifically target the intended pods. For the Concierge Services,
this will prevent them from accidentally including the Kube cert agent
pods. For the Supervisor Services, we follow the same convention just
to be consistent and to help future-proof the Supervisor app in case it
ever has a second Deployment added to it.
The selector of the auto-created impersonation proxy Service was
also previously using the "app" label. There is no change to this
Service because that label will now select the correct pods, since
the Kube cert agent pods no longer have that label. It would be possible
to update that selector to use the new more specific label, but then we
would need to invent a way to pass that label into the controller, so
it seemed like more work than was justified.
2021-09-14 20:35:10 +00:00
|
|
|
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
|
|
|
|
#@ load("@ytt:template", "template")
|
2020-07-07 20:17:34 +00:00
|
|
|
|
2020-10-14 22:05:42 +00:00
|
|
|
#! Only create the Namespace resource below when not deploying into a pre-existing namespace.
#@ if not data.values.into_namespace:
|
2020-07-07 20:17:34 +00:00
|
|
|
---
|
|
|
|
apiVersion: v1
|
|
|
|
kind: Namespace
|
|
|
|
metadata:
|
|
|
|
name: #@ data.values.namespace
|
2022-09-15 21:58:15 +00:00
|
|
|
labels:
|
|
|
|
_: #@ template.replace(labels())
|
|
|
|
#! When deploying onto a cluster which has PSAs enabled by default for namespaces,
|
|
|
|
#! effectively disable them for this namespace. The kube-cert-agent Deployment's pod
|
|
|
|
#! created by the Concierge in this namespace needs to be able to perform privileged
|
|
|
|
#! actions. The regular Concierge pod containers created by the Deployment below do
|
|
|
|
#! not need special privileges and are marked as such in their securityContext settings.
|
|
|
|
pod-security.kubernetes.io/enforce: privileged
|
2020-10-14 22:05:42 +00:00
|
|
|
#@ end
|
2020-07-09 16:42:31 +00:00
|
|
|
---
|
|
|
|
apiVersion: v1
|
2020-07-17 21:42:02 +00:00
|
|
|
kind: ServiceAccount
|
|
|
|
metadata:
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceName()
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
2020-07-17 21:42:02 +00:00
|
|
|
---
|
|
|
|
apiVersion: v1
|
2021-05-03 21:31:48 +00:00
|
|
|
kind: ServiceAccount
|
|
|
|
metadata:
|
|
|
|
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
|
|
|
---
|
|
|
|
apiVersion: v1
|
2021-06-09 23:00:54 +00:00
|
|
|
kind: ServiceAccount
|
|
|
|
metadata:
|
|
|
|
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
|
|
|
annotations:
|
|
|
|
#! we need to create this service account before we create the secret
|
|
|
|
kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount"
|
|
|
|
secrets: #! make sure the token controller does not create any other secrets
|
|
|
|
- name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
|
|
|
|
---
|
|
|
|
apiVersion: v1
|
2020-07-09 16:42:31 +00:00
|
|
|
kind: ConfigMap
|
|
|
|
metadata:
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceNameWithSuffix("config")
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
2020-07-09 16:42:31 +00:00
|
|
|
data:
|
Rename many of the resources that are created in Kubernetes by Pinniped
New resource naming conventions:
- Do not repeat the Kind in the name,
e.g. do not call it foo-cluster-role-binding, just call it foo
- Names will generally start with a prefix to identify our component,
so when a user lists all objects of that kind, they can tell to which
component it is related,
e.g. `kubectl get configmaps` would list one named "pinniped-config"
- It should be possible for an operator to make the word "pinniped"
mostly disappear if they choose, by specifying the app_name in
values.yaml, to the extent that is practical (but not from APIService
names because those are hardcoded in golang)
- Each role/clusterrole and its corresponding binding have the same name
- Pinniped resource names that must be known by the server golang code
are passed to the code at run time via ConfigMap, rather than
hardcoded in the golang code. This also allows them to be prepended
with the app_name from values.yaml while creating the ConfigMap.
- Since the CLI `get-kubeconfig` command cannot guess the name of the
CredentialIssuerConfig resource in advance anymore, it lists all
CredentialIssuerConfig in the app's namespace and returns an error
if there is not exactly one found, and then uses that one regardless
of its name
2020-09-18 22:56:50 +00:00
|
|
|
#! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
|
2020-07-09 16:58:28 +00:00
|
|
|
#@yaml/text-templated-strings
|
2020-08-20 17:54:15 +00:00
|
|
|
pinniped.yaml: |
|
2020-08-03 14:17:11 +00:00
|
|
|
discovery:
|
2020-08-03 18:36:08 +00:00
|
|
|
url: (@= data.values.discovery_url or "null" @)
|
2020-08-20 21:13:02 +00:00
|
|
|
api:
|
|
|
|
servingCertificate:
|
|
|
|
durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
|
|
|
|
renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
|
2021-01-19 22:23:06 +00:00
|
|
|
apiGroupSuffix: (@= data.values.api_group_suffix @)
|
2021-11-17 00:43:51 +00:00
|
|
|
# aggregatedAPIServerPort may be set here, although other YAML references to the default port (10250) may also need to be updated
|
2021-11-17 21:27:59 +00:00
|
|
|
# impersonationProxyServerPort may be set here, although other YAML references to the default port (8444) may also need to be updated
|
Rename many of the resources that are created in Kubernetes by Pinniped
New resource naming conventions:
- Do not repeat the Kind in the name,
e.g. do not call it foo-cluster-role-binding, just call it foo
- Names will generally start with a prefix to identify our component,
so when a user lists all objects of that kind, they can tell to which
component it is related,
e.g. `kubectl get configmaps` would list one named "pinniped-config"
- It should be possible for an operator to make the word "pinniped"
mostly disappear if they choose, by specifying the app_name in
values.yaml, to the extent that is practical (but not from APIService
names because those are hardcoded in golang)
- Each role/clusterrole and its corresponding binding have the same name
- Pinniped resource names that must be known by the server golang code
are passed to the code at run time via ConfigMap, rather than
hardcoded in the golang code. This also allows them to be prepended
with the app_name from values.yaml while creating the ConfigMap.
- Since the CLI `get-kubeconfig` command cannot guess the name of the
CredentialIssuerConfig resource in advance anymore, it lists all
CredentialIssuerConfig in the app's namespace and returns an error
if there is not exactly one found, and then uses that one regardless
of its name
2020-09-18 22:56:50 +00:00
|
|
|
names:
|
2020-10-14 22:05:42 +00:00
|
|
|
servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @)
|
2020-11-02 21:39:43 +00:00
|
|
|
credentialIssuer: (@= defaultResourceNameWithSuffix("config") @)
|
2020-10-14 22:05:42 +00:00
|
|
|
apiService: (@= defaultResourceNameWithSuffix("api") @)
|
2021-03-02 17:31:24 +00:00
|
|
|
impersonationLoadBalancerService: (@= defaultResourceNameWithSuffix("impersonation-proxy-load-balancer") @)
|
2021-05-20 21:11:35 +00:00
|
|
|
impersonationClusterIPService: (@= defaultResourceNameWithSuffix("impersonation-proxy-cluster-ip") @)
|
2021-03-02 17:31:24 +00:00
|
|
|
impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @)
|
|
|
|
impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @)
|
2021-03-10 18:30:06 +00:00
|
|
|
impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @)
|
2021-05-03 21:31:48 +00:00
|
|
|
agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @)
|
2020-10-15 17:14:23 +00:00
|
|
|
labels: (@= json.encode(labels()).rstrip() @)
|
2020-09-21 18:16:32 +00:00
|
|
|
kubeCertAgent:
|
2020-10-14 22:05:42 +00:00
|
|
|
namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)
|
2020-10-05 19:53:50 +00:00
|
|
|
(@ if data.values.kube_cert_agent_image: @)
|
|
|
|
image: (@= data.values.kube_cert_agent_image @)
|
|
|
|
(@ else: @)
|
2020-09-21 18:16:32 +00:00
|
|
|
(@ if data.values.image_digest: @)
|
|
|
|
image: (@= data.values.image_repo + "@" + data.values.image_digest @)
|
|
|
|
(@ else: @)
|
|
|
|
image: (@= data.values.image_repo + ":" + data.values.image_tag @)
|
|
|
|
(@ end @)
|
2020-10-05 19:53:50 +00:00
|
|
|
(@ end @)
|
2020-09-24 19:52:05 +00:00
|
|
|
(@ if data.values.image_pull_dockerconfigjson: @)
|
|
|
|
imagePullSecrets:
|
|
|
|
- image-pull-secret
|
|
|
|
(@ end @)
|
2022-04-16 02:43:53 +00:00
|
|
|
(@ if data.values.log_level or data.values.deprecated_log_format: @)
|
|
|
|
log:
|
|
|
|
(@ if data.values.log_level: @)
|
|
|
|
level: (@= getAndValidateLogLevel() @)
|
|
|
|
(@ end @)
|
|
|
|
(@ if data.values.deprecated_log_format: @)
|
|
|
|
format: (@= data.values.deprecated_log_format @)
|
|
|
|
(@ end @)
|
2020-11-11 12:49:46 +00:00
|
|
|
(@ end @)
|
2020-07-07 20:17:34 +00:00
|
|
|
---
|
2020-08-13 00:02:43 +00:00
|
|
|
#! Only create the image pull Secret when a non-empty dockerconfigjson value was provided.
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
|
|
|
apiVersion: v1
|
|
|
|
kind: Secret
|
|
|
|
metadata:
|
|
|
|
name: image-pull-secret
|
2020-10-14 22:05:42 +00:00
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
2020-08-13 20:34:23 +00:00
|
|
|
type: kubernetes.io/dockerconfigjson
|
2020-08-13 00:02:43 +00:00
|
|
|
data:
|
|
|
|
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
|
|
|
|
#@ end
|
|
|
|
---
|
2020-07-07 20:17:34 +00:00
|
|
|
apiVersion: apps/v1
|
2020-08-19 18:21:07 +00:00
|
|
|
kind: Deployment
|
2020-07-07 20:17:34 +00:00
|
|
|
metadata:
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceName()
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
2020-07-07 20:17:34 +00:00
|
|
|
spec:
|
2020-09-04 03:52:01 +00:00
|
|
|
replicas: #@ data.values.replicas
|
2020-07-07 20:17:34 +00:00
|
|
|
selector:
|
Improve the selectors of Deployments and Services
Fixes #801. The solution is complicated by the fact that the Selector
field of Deployments is immutable. It would have been easy to just
make the Selectors of the main Concierge Deployment, the Kube cert agent
Deployment, and the various Services use more specific labels, but
that would break upgrades. Instead, we make the Pod template labels and
the Service selectors more specific, because those are not immutable, and
then handle the Deployment selectors in a special way.
For the main Concierge and Supervisor Deployments, we cannot change
their selectors, so they remain "app: app_name", and we make other
changes to ensure that only the intended pods are selected. We keep the
original "app" label on those pods and remove the "app" label from the
pods of the Kube cert agent Deployment. By removing it from the Kube
cert agent pods, there is no longer any chance that they will
accidentally get selected by the main Concierge Deployment.
For the Kube cert agent Deployment, we can change the immutable selector
by deleting and recreating the Deployment. The new selector uses only
the unique label that has always been applied to the pods of that
deployment. Upon recreation, these pods no longer have the "app" label,
so they will not be selected by the main Concierge Deployment's
selector.
The selectors of all Services have been updated to use new labels to
more specifically target the intended pods. For the Concierge Services,
this will prevent them from accidentally including the Kube cert agent
pods. For the Supervisor Services, we follow the same convention just
to be consistent and to help future-proof the Supervisor app in case it
ever has a second Deployment added to it.
The selector of the auto-created impersonation proxy Service was
also previously using the "app" label. There is no change to this
Service because that label will now select the correct pods, since
the Kube cert agent pods no longer have that label. It would be possible
to update that selector to use the new more specific label, but then we
would need to invent a way to pass that label into the controller, so
it seemed like more work than was justified.
2021-09-14 20:35:10 +00:00
|
|
|
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
|
2020-10-14 22:05:42 +00:00
|
|
|
matchLabels: #@ defaultLabel()
|
2020-07-07 20:17:34 +00:00
|
|
|
template:
|
|
|
|
metadata:
|
Improve the selectors of Deployments and Services
Fixes #801. The solution is complicated by the fact that the Selector
field of Deployments is immutable. It would have been easy to just
make the Selectors of the main Concierge Deployment, the Kube cert agent
Deployment, and the various Services use more specific labels, but
that would break upgrades. Instead, we make the Pod template labels and
the Service selectors more specific, because those are not immutable, and
then handle the Deployment selectors in a special way.
For the main Concierge and Supervisor Deployments, we cannot change
their selectors, so they remain "app: app_name", and we make other
changes to ensure that only the intended pods are selected. We keep the
original "app" label on those pods and remove the "app" label from the
pods of the Kube cert agent Deployment. By removing it from the Kube
cert agent pods, there is no longer any chance that they will
accidentally get selected by the main Concierge Deployment.
For the Kube cert agent Deployment, we can change the immutable selector
by deleting and recreating the Deployment. The new selector uses only
the unique label that has always been applied to the pods of that
deployment. Upon recreation, these pods no longer have the "app" label,
so they will not be selected by the main Concierge Deployment's
selector.
The selectors of all Services have been updated to use new labels to
more specifically target the intended pods. For the Concierge Services,
this will prevent them from accidentally including the Kube cert agent
pods. For the Supervisor Services, we follow the same convention just
to be consistent and to help future-proof the Supervisor app in case it
ever has a second Deployment added to it.
The selector of the auto-created impersonation proxy Service was
also previously using the "app" label. There is no change to this
Service because that label will now select the correct pods, since
the Kube cert agent pods no longer have that label. It would be possible
to update that selector to use the new more specific label, but then we
would need to invent a way to pass that label into the controller, so
it seemed like more work than was justified.
2021-09-14 20:35:10 +00:00
|
|
|
labels:
|
|
|
|
#! This has always included defaultLabel(), which is used by this Deployment's selector.
|
|
|
|
_: #@ template.replace(defaultLabel())
|
|
|
|
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
|
|
|
|
#! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods.
|
|
|
|
_: #@ template.replace(deploymentPodLabel())
|
2020-07-07 20:17:34 +00:00
|
|
|
spec:
|
2020-11-02 16:57:05 +00:00
|
|
|
securityContext:
|
2021-01-13 14:47:39 +00:00
|
|
|
runAsUser: #@ data.values.run_as_user
|
|
|
|
runAsGroup: #@ data.values.run_as_group
|
2020-10-14 22:05:42 +00:00
|
|
|
serviceAccountName: #@ defaultResourceName()
|
2020-08-13 00:02:43 +00:00
|
|
|
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
|
|
|
|
imagePullSecrets:
|
|
|
|
- name: image-pull-secret
|
|
|
|
#@ end
|
2020-07-07 20:17:34 +00:00
|
|
|
containers:
|
2020-10-14 22:05:42 +00:00
|
|
|
- name: #@ defaultResourceName()
|
2020-07-09 17:16:46 +00:00
|
|
|
#@ if data.values.image_digest:
|
2020-07-07 20:17:34 +00:00
|
|
|
image: #@ data.values.image_repo + "@" + data.values.image_digest
|
2020-07-09 17:16:46 +00:00
|
|
|
#@ else:
|
|
|
|
image: #@ data.values.image_repo + ":" + data.values.image_tag
|
|
|
|
#@ end
|
2020-07-09 04:39:56 +00:00
|
|
|
imagePullPolicy: IfNotPresent
|
2021-09-02 21:08:00 +00:00
|
|
|
securityContext:
|
|
|
|
readOnlyRootFilesystem: true
|
2022-09-15 21:58:15 +00:00
|
|
|
runAsNonRoot: true
|
|
|
|
allowPrivilegeEscalation: false
|
|
|
|
capabilities:
|
|
|
|
drop: [ "ALL" ]
|
|
|
|
#! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
|
|
|
|
#! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
|
|
|
|
#! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
|
|
|
|
seccompProfile:
|
|
|
|
type: "RuntimeDefault"
|
2020-08-28 19:18:48 +00:00
|
|
|
resources:
|
|
|
|
requests:
|
2020-11-02 23:47:20 +00:00
|
|
|
cpu: "100m"
|
2020-08-28 19:18:48 +00:00
|
|
|
memory: "128Mi"
|
2020-11-02 19:57:39 +00:00
|
|
|
limits:
|
2020-11-02 23:47:20 +00:00
|
|
|
cpu: "100m"
|
2020-08-28 19:18:48 +00:00
|
|
|
memory: "128Mi"
|
2021-07-26 16:18:43 +00:00
|
|
|
command:
|
|
|
|
- pinniped-concierge
|
2020-08-20 17:54:15 +00:00
|
|
|
- --config=/etc/config/pinniped.yaml
|
2020-07-17 21:42:02 +00:00
|
|
|
- --downward-api-path=/etc/podinfo
|
2020-07-09 16:42:31 +00:00
|
|
|
volumeMounts:
|
2021-07-26 16:18:43 +00:00
|
|
|
- name: tmp
|
|
|
|
mountPath: /tmp
|
2020-07-17 21:42:02 +00:00
|
|
|
- name: config-volume
|
|
|
|
mountPath: /etc/config
|
2021-09-02 21:08:00 +00:00
|
|
|
readOnly: true
|
2020-07-17 21:42:02 +00:00
|
|
|
- name: podinfo
|
|
|
|
mountPath: /etc/podinfo
|
2021-09-02 21:08:00 +00:00
|
|
|
readOnly: true
|
2021-06-09 23:00:54 +00:00
|
|
|
- name: impersonation-proxy
|
|
|
|
mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount
|
2021-09-02 21:08:00 +00:00
|
|
|
readOnly: true
|
2021-08-17 13:44:40 +00:00
|
|
|
env:
|
|
|
|
#@ if data.values.https_proxy:
|
|
|
|
- name: HTTPS_PROXY
|
|
|
|
value: #@ data.values.https_proxy
|
|
|
|
#@ end
|
|
|
|
#@ if data.values.https_proxy and data.values.no_proxy:
|
|
|
|
- name: NO_PROXY
|
|
|
|
value: #@ data.values.no_proxy
|
|
|
|
#@ end
|
2020-08-17 23:44:42 +00:00
|
|
|
livenessProbe:
|
|
|
|
httpGet:
|
|
|
|
path: /healthz
|
2021-11-17 00:43:51 +00:00
|
|
|
port: 10250
|
2020-08-17 23:44:42 +00:00
|
|
|
scheme: HTTPS
|
2020-08-18 16:18:51 +00:00
|
|
|
initialDelaySeconds: 2
|
2020-08-17 23:44:42 +00:00
|
|
|
timeoutSeconds: 15
|
|
|
|
periodSeconds: 10
|
|
|
|
failureThreshold: 5
|
|
|
|
readinessProbe:
|
|
|
|
httpGet:
|
|
|
|
path: /healthz
|
2021-11-17 00:43:51 +00:00
|
|
|
port: 10250
|
2020-08-17 23:44:42 +00:00
|
|
|
scheme: HTTPS
|
2020-08-18 16:18:51 +00:00
|
|
|
initialDelaySeconds: 2
|
2020-08-17 23:44:42 +00:00
|
|
|
timeoutSeconds: 3
|
|
|
|
periodSeconds: 10
|
|
|
|
failureThreshold: 3
|
2020-07-09 16:42:31 +00:00
|
|
|
volumes:
|
2021-07-26 16:18:43 +00:00
|
|
|
- name: tmp
|
|
|
|
emptyDir:
|
|
|
|
medium: Memory
|
|
|
|
sizeLimit: 100Mi
|
2020-07-17 21:42:02 +00:00
|
|
|
- name: config-volume
|
|
|
|
configMap:
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceNameWithSuffix("config")
|
2021-06-09 23:00:54 +00:00
|
|
|
- name: impersonation-proxy
|
|
|
|
secret:
|
|
|
|
secretName: #@ defaultResourceNameWithSuffix("impersonation-proxy")
|
|
|
|
items: #! make sure our pod does not start until the token controller has a chance to populate the secret
|
|
|
|
- key: token
|
|
|
|
path: token
|
2020-07-16 19:24:30 +00:00
|
|
|
- name: podinfo
|
2020-07-17 21:42:02 +00:00
|
|
|
downwardAPI:
|
|
|
|
items:
|
|
|
|
- path: "labels"
|
|
|
|
fieldRef:
|
|
|
|
fieldPath: metadata.labels
|
2021-01-05 22:07:33 +00:00
|
|
|
- path: "name"
|
|
|
|
fieldRef:
|
|
|
|
fieldPath: metadata.name
|
2020-07-17 21:42:02 +00:00
|
|
|
- path: "namespace"
|
|
|
|
fieldRef:
|
|
|
|
fieldPath: metadata.namespace
|
2020-07-24 20:41:51 +00:00
|
|
|
tolerations:
|
2020-08-12 00:55:34 +00:00
|
|
|
- key: CriticalAddonsOnly
|
|
|
|
operator: Exists
|
2022-02-22 19:24:26 +00:00
|
|
|
- key: node-role.kubernetes.io/master #! Allow running on master nodes too (name deprecated by kubernetes 1.20).
|
|
|
|
effect: NoSchedule
|
|
|
|
- key: node-role.kubernetes.io/control-plane #! The new name for these nodes as of Kubernetes 1.24.
|
2020-08-12 00:55:34 +00:00
|
|
|
effect: NoSchedule
|
2020-08-21 15:14:45 +00:00
|
|
|
#! This will help make sure our multiple pods run on different nodes, making
|
|
|
|
#! our deployment "more" "HA".
|
|
|
|
affinity:
|
|
|
|
podAntiAffinity:
|
|
|
|
preferredDuringSchedulingIgnoredDuringExecution:
|
|
|
|
- weight: 50
|
|
|
|
podAffinityTerm:
|
|
|
|
labelSelector:
|
Improve the selectors of Deployments and Services
Fixes #801. The solution is complicated by the fact that the Selector
field of Deployments is immutable. It would have been easy to just
make the Selectors of the main Concierge Deployment, the Kube cert agent
Deployment, and the various Services use more specific labels, but
that would break upgrades. Instead, we make the Pod template labels and
the Service selectors more specific, because those are not immutable, and
then handle the Deployment selectors in a special way.
For the main Concierge and Supervisor Deployments, we cannot change
their selectors, so they remain "app: app_name", and we make other
changes to ensure that only the intended pods are selected. We keep the
original "app" label on those pods and remove the "app" label from the
pods of the Kube cert agent Deployment. By removing it from the Kube
cert agent pods, there is no longer any chance that they will
accidentally get selected by the main Concierge Deployment.
For the Kube cert agent Deployment, we can change the immutable selector
by deleting and recreating the Deployment. The new selector uses only
the unique label that has always been applied to the pods of that
deployment. Upon recreation, these pods no longer have the "app" label,
so they will not be selected by the main Concierge Deployment's
selector.
The selectors of all Services have been updated to use new labels to
more specifically target the intended pods. For the Concierge Services,
this will prevent them from accidentally including the Kube cert agent
pods. For the Supervisor Services, we follow the same convention just
to be consistent and to help future-proof the Supervisor app in case it
ever has a second Deployment added to it.
The selector of the auto-created impersonation proxy Service was
also previously using the "app" label. There is no change to this
Service because that label will now select the correct pods, since
the Kube cert agent pods no longer have that label. It would be possible
to update that selector to use the new more specific label, but then we
would need to invent a way to pass that label into the controller, so
it seemed like more work than was justified.
2021-09-14 20:35:10 +00:00
|
|
|
matchLabels: #@ deploymentPodLabel()
|
2020-08-21 15:14:45 +00:00
|
|
|
topologyKey: kubernetes.io/hostname
|
2020-08-04 23:46:27 +00:00
|
|
|
---
|
|
|
|
apiVersion: v1
|
|
|
|
kind: Service
|
|
|
|
metadata:
|
2020-10-08 23:20:21 +00:00
|
|
|
#! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceNameWithSuffix("api")
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
2021-09-15 20:08:49 +00:00
|
|
|
#! prevent kapp from altering the selector of our services to match kubectl behavior
|
|
|
|
annotations:
|
|
|
|
kapp.k14s.io/disable-default-label-scoping-rules: ""
|
2020-08-04 23:46:27 +00:00
|
|
|
spec:
|
|
|
|
type: ClusterIP
|
Improve the selectors of Deployments and Services
Fixes #801. The solution is complicated by the fact that the Selector
field of Deployments is immutable. It would have been easy to just
make the Selectors of the main Concierge Deployment, the Kube cert agent
Deployment, and the various Services use more specific labels, but
that would break upgrades. Instead, we make the Pod template labels and
the Service selectors more specific, because those are not immutable, and
then handle the Deployment selectors in a special way.
For the main Concierge and Supervisor Deployments, we cannot change
their selectors, so they remain "app: app_name", and we make other
changes to ensure that only the intended pods are selected. We keep the
original "app" label on those pods and remove the "app" label from the
pods of the Kube cert agent Deployment. By removing it from the Kube
cert agent pods, there is no longer any chance that they will
accidentally get selected by the main Concierge Deployment.
For the Kube cert agent Deployment, we can change the immutable selector
by deleting and recreating the Deployment. The new selector uses only
the unique label that has always been applied to the pods of that
deployment. Upon recreation, these pods no longer have the "app" label,
so they will not be selected by the main Concierge Deployment's
selector.
The selectors of all Services have been updated to use new labels to
more specifically target the intended pods. For the Concierge Services,
this will prevent them from accidentally including the Kube cert agent
pods. For the Supervisor Services, we follow the same convention just
to be consistent and to help future-proof the Supervisor app in case it
ever has a second Deployment added to it.
The selector of the auto-created impersonation proxy Service was
also previously using the "app" label. There is no change to this
Service because that label will now select the correct pods, since
the Kube cert agent pods no longer have that label. It would be possible
to update that selector to use the new more specific label, but then we
would need to invent a way to pass that label into the controller, so
it seemed like more work than was justified.
2021-09-14 20:35:10 +00:00
|
|
|
selector: #@ deploymentPodLabel()
|
2020-08-04 23:46:27 +00:00
|
|
|
ports:
|
|
|
|
- protocol: TCP
|
|
|
|
port: 443
|
2021-11-17 00:43:51 +00:00
|
|
|
targetPort: 10250
|
2020-08-04 23:46:27 +00:00
|
|
|
---
|
2021-01-22 18:00:27 +00:00
|
|
|
apiVersion: v1
|
|
|
|
kind: Service
|
|
|
|
metadata:
|
|
|
|
name: #@ defaultResourceNameWithSuffix("proxy")
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
2021-09-15 20:08:49 +00:00
|
|
|
#! prevent kapp from altering the selector of our services to match kubectl behavior
|
|
|
|
annotations:
|
|
|
|
kapp.k14s.io/disable-default-label-scoping-rules: ""
|
2021-01-22 18:00:27 +00:00
|
|
|
spec:
|
|
|
|
type: ClusterIP
|
Improve the selectors of Deployments and Services
Fixes #801. The solution is complicated by the fact that the Selector
field of Deployments is immutable. It would have been easy to just
make the Selectors of the main Concierge Deployment, the Kube cert agent
Deployment, and the various Services use more specific labels, but
that would break upgrades. Instead, we make the Pod template labels and
the Service selectors more specific, because those are not immutable, and
then handle the Deployment selectors in a special way.
For the main Concierge and Supervisor Deployments, we cannot change
their selectors, so they remain "app: app_name", and we make other
changes to ensure that only the intended pods are selected. We keep the
original "app" label on those pods and remove the "app" label from the
pods of the Kube cert agent Deployment. By removing it from the Kube
cert agent pods, there is no longer any chance that they will
accidentally get selected by the main Concierge Deployment.
For the Kube cert agent Deployment, we can change the immutable selector
by deleting and recreating the Deployment. The new selector uses only
the unique label that has always been applied to the pods of that
deployment. Upon recreation, these pods no longer have the "app" label,
so they will not be selected by the main Concierge Deployment's
selector.
The selectors of all Services have been updated to use new labels to
more specifically target the intended pods. For the Concierge Services,
this will prevent them from accidentally including the Kube cert agent
pods. For the Supervisor Services, we follow the same convention just
to be consistent and to help future-proof the Supervisor app in case it
ever has a second Deployment added to it.
The selector of the auto-created impersonation proxy Service was
also previously using the "app" label. There is no change to this
Service because that label will now select the correct pods, since
the Kube cert agent pods no longer have that label. It would be possible
to update that selector to use the new more specific label, but then we
would need to invent a way to pass that label into the controller, so
it seemed like more work than was justified.
2021-09-14 20:35:10 +00:00
|
|
|
selector: #@ deploymentPodLabel()
|
2021-01-22 18:00:27 +00:00
|
|
|
ports:
|
|
|
|
- protocol: TCP
|
|
|
|
port: 443
|
|
|
|
targetPort: 8444
|
|
|
|
---
|
2020-08-04 23:46:27 +00:00
|
|
|
apiVersion: apiregistration.k8s.io/v1
|
|
|
|
kind: APIService
|
2020-09-16 20:00:03 +00:00
|
|
|
metadata:
|
2021-01-19 22:23:06 +00:00
|
|
|
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.login.concierge")
|
2020-10-14 22:05:42 +00:00
|
|
|
labels: #@ labels()
|
2020-09-16 20:00:03 +00:00
|
|
|
spec:
|
|
|
|
version: v1alpha1
|
2021-01-19 22:23:06 +00:00
|
|
|
group: #@ pinnipedDevAPIGroupWithPrefix("login.concierge")
|
2021-02-19 12:47:38 +00:00
|
|
|
groupPriorityMinimum: 9900
|
|
|
|
versionPriority: 15
|
2020-09-16 20:00:03 +00:00
|
|
|
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
|
|
|
|
service:
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceNameWithSuffix("api")
|
|
|
|
namespace: #@ namespace()
|
2020-09-16 20:00:03 +00:00
|
|
|
port: 443
|
2021-02-19 18:21:10 +00:00
|
|
|
---
|
|
|
|
apiVersion: apiregistration.k8s.io/v1
|
|
|
|
kind: APIService
|
|
|
|
metadata:
|
|
|
|
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.identity.concierge")
|
|
|
|
labels: #@ labels()
|
|
|
|
spec:
|
|
|
|
version: v1alpha1
|
|
|
|
group: #@ pinnipedDevAPIGroupWithPrefix("identity.concierge")
|
|
|
|
groupPriorityMinimum: 9900
|
|
|
|
versionPriority: 15
|
2020-09-16 20:00:03 +00:00
|
|
|
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
|
|
|
|
service:
|
2020-10-14 22:05:42 +00:00
|
|
|
name: #@ defaultResourceNameWithSuffix("api")
|
|
|
|
namespace: #@ namespace()
|
2020-09-16 20:00:03 +00:00
|
|
|
port: 443
|
2021-05-17 15:05:42 +00:00
|
|
|
---
|
2021-05-19 21:53:00 +00:00
|
|
|
apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1"
|
2021-05-17 15:05:42 +00:00
|
|
|
kind: CredentialIssuer
|
|
|
|
metadata:
|
2021-05-19 21:53:00 +00:00
|
|
|
name: #@ defaultResourceNameWithSuffix("config")
|
|
|
|
labels: #@ labels()
|
2021-05-17 15:05:42 +00:00
|
|
|
spec:
|
|
|
|
impersonationProxy:
|
2021-05-27 20:36:18 +00:00
|
|
|
mode: #@ data.values.impersonation_proxy_spec.mode
|
|
|
|
#@ if data.values.impersonation_proxy_spec.external_endpoint:
|
|
|
|
externalEndpoint: #@ data.values.impersonation_proxy_spec.external_endpoint
|
|
|
|
#@ end
|
2021-05-17 15:05:42 +00:00
|
|
|
service:
|
2021-06-02 19:48:18 +00:00
|
|
|
type: #@ data.values.impersonation_proxy_spec.service.type
|
2021-05-27 20:36:18 +00:00
|
|
|
#@ if data.values.impersonation_proxy_spec.service.load_balancer_ip:
|
|
|
|
loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip
|
|
|
|
#@ end
|
|
|
|
annotations: #@ data.values.impersonation_proxy_spec.service.annotations
|
2021-06-09 23:00:54 +00:00
|
|
|
---
|
|
|
|
apiVersion: v1
|
|
|
|
kind: Secret
|
|
|
|
metadata:
|
|
|
|
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
|
|
|
|
namespace: #@ namespace()
|
|
|
|
labels: #@ labels()
|
|
|
|
annotations:
|
|
|
|
#! wait until the SA exists to create this secret so that the token controller does not delete it
|
|
|
|
#! we have this secret at the end so that kubectl will create the service account first
|
|
|
|
kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount"
|
|
|
|
kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
|
|
|
|
type: kubernetes.io/service-account-token
|