From 7d9a8f77ff22c24d4cb76fd521c0a08e0c3bc6ca Mon Sep 17 00:00:00 2001 From: "Benjamin A. Petersen" Date: Mon, 28 Aug 2023 17:26:47 -0400 Subject: [PATCH] fix ytt templating error --- deploy_carvel/build.sh | 40 +- .../concierge/config/deployment-HACKED.yaml | 177 --------- .../concierge/config/deployment.yaml | 360 ++++++++++++++++++ deploy_carvel/concierge/package-template.yml | 1 + .../package-repository/.imgpkg/images.yml | 4 +- .../concierge.pinniped.dev/0.25.0.yml | 1 + .../supervisor.pinniped.dev/0.25.0.yml | 1 + deploy_carvel/packagerepository.0.25.0.yml | 9 + .../supervisor/config/deployment-HACKED.yaml | 73 ---- .../supervisor/config/deployment.yaml | 236 ++++++++++++ deploy_carvel/supervisor/package-template.yml | 1 + 11 files changed, 638 insertions(+), 265 deletions(-) delete mode 100644 deploy_carvel/concierge/config/deployment-HACKED.yaml create mode 100644 deploy_carvel/concierge/config/deployment.yaml create mode 100644 deploy_carvel/packagerepository.0.25.0.yml delete mode 100644 deploy_carvel/supervisor/config/deployment-HACKED.yaml create mode 100644 deploy_carvel/supervisor/config/deployment.yaml diff --git a/deploy_carvel/build.sh b/deploy_carvel/build.sh index 835d5b2f..0d0d7581 100755 --- a/deploy_carvel/build.sh +++ b/deploy_carvel/build.sh @@ -36,7 +36,7 @@ echo_yellow "Verify you have a functional kind cluster, otherwise this will fail # ./kind-with-registry.sh # got kapp-controller bits? 
kubectl get customresourcedefinitions -kapp deploy --app kapp-controller --file https://github.com/vmware-tanzu/carvel-kapp-controller/releases/latest/download/release.yml # -y +kapp deploy --app kapp-controller --file https://github.com/vmware-tanzu/carvel-kapp-controller/releases/latest/download/release.yml -y kubectl get customresourcedefinitions # TODO: since I removed the deployments there is not much in the ./imgpkg/images.yaml output @@ -54,6 +54,11 @@ kubectl get customresourcedefinitions PACKAGE_REPO_HOST="benjaminapetersen/pinniped-package-repo" PINNIPED_PACKAGE_VERSION="0.25.0" +# TODO: cp ./deploy/supervisor.... into ./deploy_carvel/supervisor/config... +# TODO: cp ./deploy/concierge.... into ./deploy_carvel/concierge/config... +# -- we should copy this over, yeah? +# NOTE: I did make changes to values.yaml to turn it into a values schema.... + echo "" echo_yellow "cleaning ./package-repository..." rm -rf "./package-repository" @@ -97,13 +102,13 @@ do echo_yellow "pushing package image: ${package_push_repo_location} ..." 
imgpkg push --bundle "${package_push_repo_location}" --file "./${resource_name}" - resource_package_version="${resource_name}.pinniped.dev" echo_yellow "generating ./package-repository/packages/${resource_package_version}/${PINNIPED_PACKAGE_VERSION}.yml" ytt \ --file "${resource_name}/package-template.yml" \ --data-value-file openapi="$(pwd)/${resource_name}/schema-openapi.yml" \ --data-value package_version="${PINNIPED_PACKAGE_VERSION}" \ + --data-value namespace="${resource_name}-ns" \ --data-value package_image_repo="${package_push_repo_location}" > "package-repository/packages/${resource_package_version}/${PINNIPED_PACKAGE_VERSION}.yml" echo_yellow "copying ${resource_name}/metadata.yml to ./package-repository/packages/${resource_name}" @@ -111,7 +116,6 @@ do done - echo_yellow "generating ./package-repository/.imgpkg/images.yml" kbld --file ./package-repository/packages/ --imgpkg-lock-output package-repository/.imgpkg/images.yml package_repository_push_repo_location="${PACKAGE_REPO_HOST}:${PINNIPED_PACKAGE_VERSION}" @@ -122,6 +126,8 @@ echo_yellow "validating imgpkg package bundle contents..." imgpkg pull --bundle "${PACKAGE_REPO_HOST}:${PINNIPED_PACKAGE_VERSION}" --output "/tmp/${PACKAGE_REPO_HOST}:${PINNIPED_PACKAGE_VERSION}" ls -la "/tmp/${PACKAGE_REPO_HOST}:${PINNIPED_PACKAGE_VERSION}" + +echo_yellow "deploying PackageRepository..." PINNIPED_PACKGE_REPOSITORY_NAME="pinniped-package-repository" PINNIPED_PACKGE_REPOSITORY_FILE="packagerepository.${PINNIPED_PACKAGE_VERSION}.yml" echo -n "" > "${PINNIPED_PACKGE_REPOSITORY_FILE}" @@ -139,9 +145,10 @@ EOT # Now, gotta make this work. It'll be interesting if we can... 
-kapp deploy --app "${PINNIPED_PACKGE_REPOSITORY_NAME}" --file "${PINNIPED_PACKGE_REPOSITORY_FILE}" +kapp deploy --app "${PINNIPED_PACKGE_REPOSITORY_NAME}" --file "${PINNIPED_PACKGE_REPOSITORY_FILE}" -y kapp inspect --app "${PINNIPED_PACKGE_REPOSITORY_NAME}" --tree +sleep 2 # TODO: remove # this is just a note to break this up, probably should use a separate ./deploy_stuff.sh file. # at this point, we are "consumers". @@ -152,7 +159,7 @@ echo_green "Package Installation...." echo_yellow "deploying RBAC for use with pinniped PackageInstall..." -# TODO: obviously a mega-role that can do everything is not good. +# TODO: obviously a mega-role that can do everything is not good. we need to scope this down to appropriate things. declare -a arr=("supervisor" "concierge") for resource_name in "${arr[@]}" do @@ -197,16 +204,10 @@ roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: "${PINNIPED_PACKAGE_RBAC_PREFIX}-role-superadmin-dangerous" + EOF kapp deploy --app "${PINNIPED_PACKAGE_RBAC_PREFIX}" --file "${PINNIPED_PACKAGE_RBAC_FILE}" -y - -echo_yellow "verifying RBAC resources created (namespace, serviceaccount, clusterrole, clusterrolebinding)..." -kubectl get ns -A | grep pinniped -kubectl get sa -A | grep pinniped -kubectl get ClusterRole -A | grep pinniped -kubectl get clusterrolebinding -A | grep pinniped - done #FOOBAR="pinniped-package-rbac" @@ -229,8 +230,9 @@ cat > "${PACKAGE_INSTALL_FILE_NAME}" << EOF apiVersion: packaging.carvel.dev/v1alpha1 kind: PackageInstall metadata: + # name, does not have to be versioned, versionSelection.constraints below will handle name: "${resource_name}-package-install" - namespace: "${NAMESPACE}" + namespace: "${NAMESPACE}" # TODO: ---????? is this namespace ok? 
spec: serviceAccountName: "${PINNIPED_PACKAGE_RBAC_PREFIX}-sa-superadmin-dangerous" packageRef: @@ -286,3 +288,15 @@ kubectl get app --all-namespaces # docker pull benjaminapetersen/pinniped-package-repo:latest # docker pull benjaminapetersen/pinniped-package-repo-package-supervisor:0.25.0 # docker pull benjaminapetersen/pinniped-package-repo-package-concierge:0.25.0 + +# echo_yellow "verifying RBAC resources created (namespace, serviceaccount, clusterrole, clusterrolebinding)..." +# kubectl get ns -A | grep pinniped +# kubectl get sa -A | grep pinniped +# kubectl get ClusterRole -A | grep pinniped +# kubectl get clusterrolebinding -A | grep pinniped + + +# stuff +kubectl get PackageRepository -A +kubectl get Package -A +kubectl get PackageInstall -A diff --git a/deploy_carvel/concierge/config/deployment-HACKED.yaml b/deploy_carvel/concierge/config/deployment-HACKED.yaml deleted file mode 100644 index 2b00fc08..00000000 --- a/deploy_carvel/concierge/config/deployment-HACKED.yaml +++ /dev/null @@ -1,177 +0,0 @@ -#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved. -#! SPDX-License-Identifier: Apache-2.0 - -#@ load("@ytt:data", "data") -#@ load("@ytt:json", "json") -#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix") -#@ load("@ytt:template", "template") - -#@ if not data.values.into_namespace: ---- -apiVersion: v1 -kind: Namespace -metadata: - name: #@ data.values.namespace - labels: - _: #@ template.replace(labels()) - #! When deploying onto a cluster which has PSAs enabled by default for namespaces, - #! effectively disable them for this namespace. The kube-cert-agent Deployment's pod - #! created by the Concierge in this namespace needs to be able to perform privileged - #! actions. The regular Concierge pod containers created by the Deployment below do - #! 
not need special privileges and are marked as such in their securityContext settings. - pod-security.kubernetes.io/enforce: privileged -#@ end ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: #@ defaultResourceName() - namespace: #@ namespace() - labels: #@ labels() ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: #@ defaultResourceNameWithSuffix("kube-cert-agent") - namespace: #@ namespace() - labels: #@ labels() ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: #@ defaultResourceNameWithSuffix("impersonation-proxy") - namespace: #@ namespace() - labels: #@ labels() - annotations: - #! we need to create this service account before we create the secret - kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount" -secrets: #! make sure the token controller does not create any other secrets -- name: #@ defaultResourceNameWithSuffix("impersonation-proxy") ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: #@ defaultResourceNameWithSuffix("config") - namespace: #@ namespace() - labels: #@ labels() -data: - #! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below. 
- #@yaml/text-templated-strings - pinniped.yaml: | - discovery: - url: (@= data.values.discovery_url or "null" @) - api: - servingCertificate: - durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @) - renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @) - apiGroupSuffix: (@= data.values.api_group_suffix @) - # aggregatedAPIServerPort may be set here, although other YAML references to the default port (10250) may also need to be updated - # impersonationProxyServerPort may be set here, although other YAML references to the default port (8444) may also need to be updated - names: - servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @) - credentialIssuer: (@= defaultResourceNameWithSuffix("config") @) - apiService: (@= defaultResourceNameWithSuffix("api") @) - impersonationLoadBalancerService: (@= defaultResourceNameWithSuffix("impersonation-proxy-load-balancer") @) - impersonationClusterIPService: (@= defaultResourceNameWithSuffix("impersonation-proxy-cluster-ip") @) - impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @) - impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @) - impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @) - agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @) - labels: (@= json.encode(labels()).rstrip() @) - kubeCertAgent: - namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @) - (@ if data.values.kube_cert_agent_image: @) - image: (@= data.values.kube_cert_agent_image @) - (@ else: @) - (@ if data.values.image_digest: @) - image: (@= data.values.image_repo + "@" + data.values.image_digest @) - (@ else: @) - image: (@= data.values.image_repo + ":" + data.values.image_tag @) - (@ end @) - (@ end @) - (@ if 
data.values.image_pull_dockerconfigjson: @) - imagePullSecrets: - - image-pull-secret - (@ end @) - (@ if data.values.log_level or data.values.deprecated_log_format: @) - log: - (@ if data.values.log_level: @) - level: (@= getAndValidateLogLevel() @) - (@ end @) - (@ if data.values.deprecated_log_format: @) - format: (@= data.values.deprecated_log_format @) - (@ end @) - (@ end @) ---- -#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "": -apiVersion: v1 -kind: Secret -metadata: - name: image-pull-secret - namespace: #@ namespace() - labels: #@ labels() -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson -#@ end ---- -#! THE DEPLOYMENT IS GONE!!! -#! THE DEPLOYMENT IS GONE!!! -#! THE DEPLOYMENT IS GONE!!! For initial prototype, just installing some simple things. -#! THE DEPLOYMENT IS GONE!!! -#! THE DEPLOYMENT IS GONE!!! ---- -#! THE SERVICE IS GONE!!! -#! THE SERVICE IS GONE!!! -#! THE SERVICE IS GONE!!! For initial prototype, just installing some simple things. -#! THE SERVICE IS GONE!!! -#! THE SERVICE IS GONE!!! ---- -#! THE SECOND SERVICE IS GONE!!! -#! THE SECOND SERVICE IS GONE!!! -#! THE SECOND SERVICE IS GONE!!! For initial prototype, just installing some simple things. -#! THE SECOND SERVICE IS GONE!!! -#! THE SECOND SERVICE IS GONE!!! ---- -#! THE API SERVICE IS GONE!!! -#! THE API SERVICE IS GONE!!! -#! THE API SERVICE IS GONE!!! For initial prototype, just installing some simple things. -#! THE API SERVICE IS GONE!!! -#! THE API SERVICE IS GONE!!! ---- -#! THE SECOND API SERVICE IS GONE!!! -#! THE SECOND API SERVICE IS GONE!!! -#! THE SECOND API SERVICE IS GONE!!! For initial prototype, just installing some simple things. -#! THE SECOND API SERVICE IS GONE!!! -#! THE SECOND API SERVICE IS GONE!!! 
---- -apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1" -kind: CredentialIssuer -metadata: - name: #@ defaultResourceNameWithSuffix("config") - labels: #@ labels() -spec: - impersonationProxy: - mode: #@ data.values.impersonation_proxy_spec.mode - #@ if data.values.impersonation_proxy_spec.external_endpoint: - externalEndpoint: #@ data.values.impersonation_proxy_spec.external_endpoint - #@ end - service: - type: #@ data.values.impersonation_proxy_spec.service.type - #@ if data.values.impersonation_proxy_spec.service.load_balancer_ip: - loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip - #@ end - annotations: #@ data.values.impersonation_proxy_spec.service.annotations ---- -apiVersion: v1 -kind: Secret -metadata: - name: #@ defaultResourceNameWithSuffix("impersonation-proxy") - namespace: #@ namespace() - labels: #@ labels() - annotations: - #! wait until the SA exists to create this secret so that the token controller does not delete it - #! we have this secret at the end so that kubectl will create the service account first - kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount" - kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy") -type: kubernetes.io/service-account-token diff --git a/deploy_carvel/concierge/config/deployment.yaml b/deploy_carvel/concierge/config/deployment.yaml new file mode 100644 index 00000000..bc8397cc --- /dev/null +++ b/deploy_carvel/concierge/config/deployment.yaml @@ -0,0 +1,360 @@ +#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved. +#! 
SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ load("@ytt:json", "json") +#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix") +#@ load("@ytt:template", "template") + +#@ if not data.values.into_namespace: +--- +apiVersion: v1 +kind: Namespace +metadata: + name: #@ data.values.namespace + labels: + _: #@ template.replace(labels()) + #! When deploying onto a cluster which has PSAs enabled by default for namespaces, + #! effectively disable them for this namespace. The kube-cert-agent Deployment's pod + #! created by the Concierge in this namespace needs to be able to perform privileged + #! actions. The regular Concierge pod containers created by the Deployment below do + #! not need special privileges and are marked as such in their securityContext settings. + pod-security.kubernetes.io/enforce: privileged +#@ end +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: #@ defaultResourceName() + namespace: #@ namespace() + labels: #@ labels() +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: #@ defaultResourceNameWithSuffix("kube-cert-agent") + namespace: #@ namespace() + labels: #@ labels() +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: #@ defaultResourceNameWithSuffix("impersonation-proxy") + namespace: #@ namespace() + labels: #@ labels() + annotations: + #! we need to create this service account before we create the secret + kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount" +secrets: #! make sure the token controller does not create any other secrets +- name: #@ defaultResourceNameWithSuffix("impersonation-proxy") +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: #@ defaultResourceNameWithSuffix("config") + namespace: #@ namespace() + labels: #@ labels() +data: + #! 
If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below. + #@yaml/text-templated-strings + pinniped.yaml: | + discovery: + url: (@= data.values.discovery_url or "null" @) + api: + servingCertificate: + durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @) + renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @) + apiGroupSuffix: (@= data.values.api_group_suffix @) + # aggregatedAPIServerPort may be set here, although other YAML references to the default port (10250) may also need to be updated + # impersonationProxyServerPort may be set here, although other YAML references to the default port (8444) may also need to be updated + names: + servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @) + credentialIssuer: (@= defaultResourceNameWithSuffix("config") @) + apiService: (@= defaultResourceNameWithSuffix("api") @) + impersonationLoadBalancerService: (@= defaultResourceNameWithSuffix("impersonation-proxy-load-balancer") @) + impersonationClusterIPService: (@= defaultResourceNameWithSuffix("impersonation-proxy-cluster-ip") @) + impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @) + impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @) + impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @) + agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @) + labels: (@= json.encode(labels()).rstrip() @) + kubeCertAgent: + namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @) + (@ if data.values.kube_cert_agent_image: @) + image: (@= data.values.kube_cert_agent_image @) + (@ else: @) + (@ if data.values.image_digest: @) + image: (@= data.values.image_repo + "@" + data.values.image_digest @) + (@ else: @) + image: 
(@= data.values.image_repo + ":" + data.values.image_tag @) + (@ end @) + (@ end @) + (@ if data.values.image_pull_dockerconfigjson: @) + imagePullSecrets: + - image-pull-secret + (@ end @) + (@ if data.values.log_level or data.values.deprecated_log_format: @) + log: + (@ if data.values.log_level: @) + level: (@= getAndValidateLogLevel() @) + (@ end @) + (@ if data.values.deprecated_log_format: @) + format: (@= data.values.deprecated_log_format @) + (@ end @) + (@ end @) +--- +#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "": +apiVersion: v1 +kind: Secret +metadata: + name: image-pull-secret + namespace: #@ namespace() + labels: #@ labels() +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson +#@ end +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: #@ defaultResourceName() + namespace: #@ namespace() + labels: #@ labels() +spec: + replicas: #@ data.values.replicas + selector: + #! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades. + matchLabels: #@ defaultLabel() + template: + metadata: + labels: + #! This has always included defaultLabel(), which is used by this Deployment's selector. + _: #@ template.replace(defaultLabel()) + #! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically + #! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods. 
+ _: #@ template.replace(deploymentPodLabel()) + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + securityContext: + runAsUser: #@ data.values.run_as_user + runAsGroup: #@ data.values.run_as_group + serviceAccountName: #@ defaultResourceName() + #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "": + imagePullSecrets: + - name: image-pull-secret + #@ end + containers: + - name: #@ defaultResourceName() + #@ if data.values.image_digest: + image: #@ data.values.image_repo + "@" + data.values.image_digest + #@ else: + image: #@ data.values.image_repo + ":" + data.values.image_tag + #@ end + imagePullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] + #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a + #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's + #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error. 
+ seccompProfile: + type: "RuntimeDefault" + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "100m" + memory: "128Mi" + command: + - pinniped-concierge + - --config=/etc/config/pinniped.yaml + - --downward-api-path=/etc/podinfo + volumeMounts: + - name: tmp + mountPath: /tmp + - name: config-volume + mountPath: /etc/config + readOnly: true + - name: podinfo + mountPath: /etc/podinfo + readOnly: true + - name: impersonation-proxy + mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount + readOnly: true + env: + #@ if data.values.https_proxy: + - name: HTTPS_PROXY + value: #@ data.values.https_proxy + #@ end + #@ if data.values.https_proxy and data.values.no_proxy: + - name: NO_PROXY + value: #@ data.values.no_proxy + #@ end + livenessProbe: + httpGet: + path: /healthz + port: 10250 + scheme: HTTPS + initialDelaySeconds: 2 + timeoutSeconds: 15 + periodSeconds: 10 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10250 + scheme: HTTPS + initialDelaySeconds: 2 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 3 + volumes: + - name: tmp + emptyDir: + medium: Memory + sizeLimit: 100Mi + - name: config-volume + configMap: + name: #@ defaultResourceNameWithSuffix("config") + - name: impersonation-proxy + secret: + secretName: #@ defaultResourceNameWithSuffix("impersonation-proxy") + items: #! make sure our pod does not start until the token controller has a chance to populate the secret + - key: token + path: token + - name: podinfo + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "name" + fieldRef: + fieldPath: metadata.name + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master #! Allow running on master nodes too (name deprecated by kubernetes 1.20). + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane #! 
The new name for these nodes as of Kubernetes 1.24. + effect: NoSchedule + #! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17, + #! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596). + #!priorityClassName: system-cluster-critical + #! This will help make sure our multiple pods run on different nodes, making + #! our deployment "more" "HA". + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: #@ deploymentPodLabel() + topologyKey: kubernetes.io/hostname +--- +apiVersion: v1 +kind: Service +metadata: + #! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below. + name: #@ defaultResourceNameWithSuffix("api") + namespace: #@ namespace() + labels: #@ labels() + #! prevent kapp from altering the selector of our services to match kubectl behavior + annotations: + kapp.k14s.io/disable-default-label-scoping-rules: "" +spec: + type: ClusterIP + selector: #@ deploymentPodLabel() + ports: + - protocol: TCP + port: 443 + targetPort: 10250 +--- +apiVersion: v1 +kind: Service +metadata: + name: #@ defaultResourceNameWithSuffix("proxy") + namespace: #@ namespace() + labels: #@ labels() + #! prevent kapp from altering the selector of our services to match kubectl behavior + annotations: + kapp.k14s.io/disable-default-label-scoping-rules: "" +spec: + type: ClusterIP + selector: #@ deploymentPodLabel() + ports: + - protocol: TCP + port: 443 + targetPort: 8444 +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.login.concierge") + labels: #@ labels() +spec: + version: v1alpha1 + group: #@ pinnipedDevAPIGroupWithPrefix("login.concierge") + groupPriorityMinimum: 9900 + versionPriority: 15 + #! caBundle: Do not include this key here. 
Starts out null, will be updated/owned by the golang code. + service: + name: #@ defaultResourceNameWithSuffix("api") + namespace: #@ namespace() + port: 443 +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.identity.concierge") + labels: #@ labels() +spec: + version: v1alpha1 + group: #@ pinnipedDevAPIGroupWithPrefix("identity.concierge") + groupPriorityMinimum: 9900 + versionPriority: 15 + #! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code. + service: + name: #@ defaultResourceNameWithSuffix("api") + namespace: #@ namespace() + port: 443 +--- +apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1" +kind: CredentialIssuer +metadata: + name: #@ defaultResourceNameWithSuffix("config") + labels: #@ labels() +spec: + impersonationProxy: + mode: #@ data.values.impersonation_proxy_spec.mode + #@ if data.values.impersonation_proxy_spec.external_endpoint: + externalEndpoint: #@ data.values.impersonation_proxy_spec.external_endpoint + #@ end + service: + type: #@ data.values.impersonation_proxy_spec.service.type + #@ if data.values.impersonation_proxy_spec.service.load_balancer_ip: + loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip + #@ end + annotations: #@ data.values.impersonation_proxy_spec.service.annotations +--- +apiVersion: v1 +kind: Secret +metadata: + name: #@ defaultResourceNameWithSuffix("impersonation-proxy") + namespace: #@ namespace() + labels: #@ labels() + annotations: + #! wait until the SA exists to create this secret so that the token controller does not delete it + #! 
we have this secret at the end so that kubectl will create the service account first + kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount" + kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy") +type: kubernetes.io/service-account-token diff --git a/deploy_carvel/concierge/package-template.yml b/deploy_carvel/concierge/package-template.yml index 96cfd06e..6b351420 100644 --- a/deploy_carvel/concierge/package-template.yml +++ b/deploy_carvel/concierge/package-template.yml @@ -5,6 +5,7 @@ apiVersion: data.packaging.carvel.dev/v1alpha1 kind: Package metadata: name: #@ "concierge.pinniped.dev." + data.values.package_version + namespace: #@ data.values.namespace spec: refName: concierge.pinniped.dev version: #@ data.values.package_version diff --git a/deploy_carvel/package-repository/.imgpkg/images.yml b/deploy_carvel/package-repository/.imgpkg/images.yml index 33c4fd40..1a9a5122 100644 --- a/deploy_carvel/package-repository/.imgpkg/images.yml +++ b/deploy_carvel/package-repository/.imgpkg/images.yml @@ -7,12 +7,12 @@ images: - resolved: tag: 0.25.0 url: benjaminapetersen/pinniped-package-repo-package-concierge:0.25.0 - image: index.docker.io/benjaminapetersen/pinniped-package-repo-package-concierge@sha256:753cf50f06f1d0c12a3ab94186d67a9a136bb5589e1ab23303b27dc064eea788 + image: index.docker.io/benjaminapetersen/pinniped-package-repo-package-concierge@sha256:eb9f9d4ad2690443b7824a183c2a296a744995559285352422f94f2295c4a754 - annotations: kbld.carvel.dev/id: benjaminapetersen/pinniped-package-repo-package-supervisor:0.25.0 kbld.carvel.dev/origins: | - resolved: tag: 0.25.0 url: benjaminapetersen/pinniped-package-repo-package-supervisor:0.25.0 - image: index.docker.io/benjaminapetersen/pinniped-package-repo-package-supervisor@sha256:eed4e31b7bed04f41320b995bdaedfe93b258066666c88df313b1cc6e1a95ff7 + image: 
index.docker.io/benjaminapetersen/pinniped-package-repo-package-supervisor@sha256:95ce6e00bc9eb9becea021c5d8fe0fdeb241fd974c7fc8076220cb24d65650e1 kind: ImagesLock diff --git a/deploy_carvel/package-repository/packages/concierge.pinniped.dev/0.25.0.yml b/deploy_carvel/package-repository/packages/concierge.pinniped.dev/0.25.0.yml index 05cab1ac..c7b6f146 100644 --- a/deploy_carvel/package-repository/packages/concierge.pinniped.dev/0.25.0.yml +++ b/deploy_carvel/package-repository/packages/concierge.pinniped.dev/0.25.0.yml @@ -2,6 +2,7 @@ apiVersion: data.packaging.carvel.dev/v1alpha1 kind: Package metadata: name: concierge.pinniped.dev.0.25.0 + namespace: concierge-ns spec: refName: concierge.pinniped.dev version: 0.25.0 diff --git a/deploy_carvel/package-repository/packages/supervisor.pinniped.dev/0.25.0.yml b/deploy_carvel/package-repository/packages/supervisor.pinniped.dev/0.25.0.yml index 0526f318..9cb200c0 100644 --- a/deploy_carvel/package-repository/packages/supervisor.pinniped.dev/0.25.0.yml +++ b/deploy_carvel/package-repository/packages/supervisor.pinniped.dev/0.25.0.yml @@ -2,6 +2,7 @@ apiVersion: data.packaging.carvel.dev/v1alpha1 kind: Package metadata: name: supervisor.pinniped.dev.0.25.0 + namespace: supervisor-ns spec: refName: supervisor.pinniped.dev version: 0.25.0 diff --git a/deploy_carvel/packagerepository.0.25.0.yml b/deploy_carvel/packagerepository.0.25.0.yml new file mode 100644 index 00000000..6b7b54dc --- /dev/null +++ b/deploy_carvel/packagerepository.0.25.0.yml @@ -0,0 +1,9 @@ +--- +apiVersion: packaging.carvel.dev/v1alpha1 +kind: PackageRepository +metadata: + name: "pinniped-package-repository" +spec: + fetch: + imgpkgBundle: + image: "benjaminapetersen/pinniped-package-repo:0.25.0" diff --git a/deploy_carvel/supervisor/config/deployment-HACKED.yaml b/deploy_carvel/supervisor/config/deployment-HACKED.yaml deleted file mode 100644 index 5c02dd83..00000000 --- a/deploy_carvel/supervisor/config/deployment-HACKED.yaml +++ /dev/null @@ -1,73 
+0,0 @@ -#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved. -#! SPDX-License-Identifier: Apache-2.0 - -#@ load("@ytt:data", "data") -#@ load("@ytt:yaml", "yaml") -#@ load("helpers.lib.yaml", -#@ "defaultLabel", -#@ "labels", -#@ "deploymentPodLabel", -#@ "namespace", -#@ "defaultResourceName", -#@ "defaultResourceNameWithSuffix", -#@ "pinnipedDevAPIGroupWithPrefix", -#@ "getPinnipedConfigMapData", -#@ "hasUnixNetworkEndpoint", -#@ ) -#@ load("@ytt:template", "template") - -#@ if not data.values.into_namespace: ---- -apiVersion: v1 -kind: Namespace -metadata: - name: #@ data.values.namespace - labels: #@ labels() -#@ end ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: #@ defaultResourceName() - namespace: #@ namespace() - labels: #@ labels() ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: #@ defaultResourceNameWithSuffix("static-config") - namespace: #@ namespace() - labels: #@ labels() -data: - #@yaml/text-templated-strings - pinniped.yaml: #@ yaml.encode(getPinnipedConfigMapData()) ---- -#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "": -apiVersion: v1 -kind: Secret -metadata: - name: image-pull-secret - namespace: #@ namespace() - labels: #@ labels() -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson -#@ end ---- -#! THE DEPLOYMENT IS GONE!!! -#! THE DEPLOYMENT IS GONE!!! -#! THE DEPLOYMENT IS GONE!!! For initial prototype, just installing some simple things. -#! THE DEPLOYMENT IS GONE!!! -#! THE DEPLOYMENT IS GONE!!! ---- -#! THE SERVICE IS GONE!!! -#! THE SERVICE IS GONE!!! -#! THE SERVICE IS GONE!!! For initial prototype, just installing some simple things. -#! THE SERVICE IS GONE!!! -#! THE SERVICE IS GONE!!! ---- -#! THE API SERVICE IS GONE!!! -#! THE API SERVICE IS GONE!!! -#! THE API SERVICE IS GONE!!! For initial prototype, just installing some simple things. -#! THE API SERVICE IS GONE!!! -#! 
THE API SERVICE IS GONE!!! diff --git a/deploy_carvel/supervisor/config/deployment.yaml b/deploy_carvel/supervisor/config/deployment.yaml new file mode 100644 index 00000000..30791a1b --- /dev/null +++ b/deploy_carvel/supervisor/config/deployment.yaml @@ -0,0 +1,236 @@ +#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved. +#! SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ load("@ytt:yaml", "yaml") +#@ load("helpers.lib.yaml", +#@ "defaultLabel", +#@ "labels", +#@ "deploymentPodLabel", +#@ "namespace", +#@ "defaultResourceName", +#@ "defaultResourceNameWithSuffix", +#@ "pinnipedDevAPIGroupWithPrefix", +#@ "getPinnipedConfigMapData", +#@ "hasUnixNetworkEndpoint", +#@ ) +#@ load("@ytt:template", "template") + +#@ if not data.values.into_namespace: +--- +apiVersion: v1 +kind: Namespace +metadata: + name: #@ data.values.namespace + labels: #@ labels() +#@ end +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: #@ defaultResourceName() + namespace: #@ namespace() + labels: #@ labels() +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: #@ defaultResourceNameWithSuffix("static-config") + namespace: #@ namespace() + labels: #@ labels() +data: + #@yaml/text-templated-strings + pinniped.yaml: #@ yaml.encode(getPinnipedConfigMapData()) +--- +#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "": +apiVersion: v1 +kind: Secret +metadata: + name: image-pull-secret + namespace: #@ namespace() + labels: #@ labels() +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson +#@ end +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: #@ defaultResourceName() + namespace: #@ namespace() + labels: #@ labels() +spec: + replicas: #@ data.values.replicas + selector: + #! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades. 
+ matchLabels: #@ defaultLabel() + template: + metadata: + labels: + #! This has always included defaultLabel(), which is used by this Deployment's selector. + _: #@ template.replace(defaultLabel()) + #! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically + #! without accidentally selecting pods from any future Deployments which might also want to use the defaultLabel(). + _: #@ template.replace(deploymentPodLabel()) + spec: + securityContext: + runAsUser: #@ data.values.run_as_user + runAsGroup: #@ data.values.run_as_group + serviceAccountName: #@ defaultResourceName() + #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "": + imagePullSecrets: + - name: image-pull-secret + #@ end + containers: + - name: #@ defaultResourceName() + #@ if data.values.image_digest: + image: #@ data.values.image_repo + "@" + data.values.image_digest + #@ else: + image: #@ data.values.image_repo + ":" + data.values.image_tag + #@ end + imagePullPolicy: IfNotPresent + command: + - pinniped-supervisor + - /etc/podinfo + - /etc/config/pinniped.yaml + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] + #! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a + #! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's + #! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error. + seccompProfile: + type: "RuntimeDefault" + resources: + requests: + #! If OIDCClient CRs are being used, then the Supervisor needs enough CPU to run expensive bcrypt + #! operations inside the implementation of the token endpoint for any authcode flows performed by those + #! clients, so for that use case administrators may wish to increase the requests.cpu value to more + #! 
closely align with their anticipated needs. Increasing this value will cause Kubernetes to give more + #! available CPU to this process during times of high CPU contention. By default, don't ask for too much + #! because that would make it impossible to install the Pinniped Supervisor on small clusters. + #! Aside from performing bcrypts at the token endpoint for those clients, the Supervisor is not a + #! particularly CPU-intensive process. + cpu: "100m" #! by default, request one-tenth of a CPU + memory: "128Mi" + limits: + #! By declaring a CPU limit that is not equal to the CPU request value, the Supervisor will be classified + #! by Kubernetes to have "burstable" quality of service. + #! See https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-burstable + #! If OIDCClient CRs are being used, and lots of simultaneous users have active sessions, then it is hard to + #! pre-determine what the CPU limit should be for that use case. Guessing too low would cause the + #! pod's CPU usage to be throttled, resulting in poor performance. Guessing too high would allow clients + #! to cause the usage of lots of CPU resources. Administrators who have a good sense of anticipated usage + #! patterns may choose to set the requests.cpu and limits.cpu differently from these defaults. + cpu: "1000m" #! by default, throttle each pod's usage at 1 CPU + memory: "128Mi" + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + - name: podinfo + mountPath: /etc/podinfo + readOnly: true + #@ if hasUnixNetworkEndpoint(): + - name: socket + mountPath: /pinniped_socket + readOnly: false #!
writable to allow for socket use + #@ end + ports: + - containerPort: 8443 + protocol: TCP + env: + #@ if data.values.https_proxy: + - name: HTTPS_PROXY + value: #@ data.values.https_proxy + #@ end + #@ if data.values.https_proxy and data.values.no_proxy: + - name: NO_PROXY + value: #@ data.values.no_proxy + #@ end + livenessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 2 + timeoutSeconds: 15 + periodSeconds: 10 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 2 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 3 + volumes: + - name: config-volume + configMap: + name: #@ defaultResourceNameWithSuffix("static-config") + - name: podinfo + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "namespace" + fieldRef: + fieldPath: metadata.namespace + - path: "name" + fieldRef: + fieldPath: metadata.name + #@ if hasUnixNetworkEndpoint(): + - name: socket + emptyDir: {} + #@ end + #! This will help make sure our multiple pods run on different nodes, making + #! our deployment "more" "HA". + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: #@ deploymentPodLabel() + topologyKey: kubernetes.io/hostname +--- +apiVersion: v1 +kind: Service +metadata: + #! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below. + name: #@ defaultResourceNameWithSuffix("api") + namespace: #@ namespace() + labels: #@ labels() + #! 
prevent kapp from altering the selector of our services to match kubectl behavior + annotations: + kapp.k14s.io/disable-default-label-scoping-rules: "" +spec: + type: ClusterIP + selector: #@ deploymentPodLabel() + ports: + - protocol: TCP + port: 443 + targetPort: 10250 +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.clientsecret.supervisor") + labels: #@ labels() +spec: + version: v1alpha1 + group: #@ pinnipedDevAPIGroupWithPrefix("clientsecret.supervisor") + groupPriorityMinimum: 9900 + versionPriority: 15 + #! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code. + service: + name: #@ defaultResourceNameWithSuffix("api") + namespace: #@ namespace() + port: 443 diff --git a/deploy_carvel/supervisor/package-template.yml b/deploy_carvel/supervisor/package-template.yml index 150c6419..b4e1f25e 100644 --- a/deploy_carvel/supervisor/package-template.yml +++ b/deploy_carvel/supervisor/package-template.yml @@ -5,6 +5,7 @@ apiVersion: data.packaging.carvel.dev/v1alpha1 kind: Package metadata: name: #@ "supervisor.pinniped.dev." + data.values.package_version + namespace: #@ data.values.namespace spec: refName: supervisor.pinniped.dev version: #@ data.values.package_version