This commit is contained in:
Benjamin A. Petersen 2023-09-25 12:42:01 -04:00
parent 6a847aaec7
commit 96c094c901
No known key found for this signature in database
GPG Key ID: EF6EF83523A4BE46
6 changed files with 0 additions and 3453 deletions

View File

@@ -1,360 +0,0 @@
#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
name: #@ data.values.namespace
labels:
_: #@ template.replace(labels())
#! When deploying onto a cluster which has PSAs enabled by default for namespaces,
#! effectively disable them for this namespace. The kube-cert-agent Deployment's pod
#! created by the Concierge in this namespace needs to be able to perform privileged
#! actions. The regular Concierge pod containers created by the Deployment below do
#! not need special privileges and are marked as such in their securityContext settings.
pod-security.kubernetes.io/enforce: privileged
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! we need to create this service account before we create the secret
kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount"
secrets: #! make sure the token controller does not create any other secrets
- name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("config")
namespace: #@ namespace()
labels: #@ labels()
data:
#! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
#@yaml/text-templated-strings
pinniped.yaml: |
discovery:
url: (@= data.values.discovery_url or "null" @)
api:
servingCertificate:
durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
apiGroupSuffix: (@= data.values.api_group_suffix @)
# aggregatedAPIServerPort may be set here, although other YAML references to the default port (10250) may also need to be updated
# impersonationProxyServerPort may be set here, although other YAML references to the default port (8444) may also need to be updated
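# For example, the defaults mentioned above could be made explicit here:
# aggregatedAPIServerPort: 10250
# impersonationProxyServerPort: 8444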
names:
servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @)
credentialIssuer: (@= defaultResourceNameWithSuffix("config") @)
apiService: (@= defaultResourceNameWithSuffix("api") @)
impersonationLoadBalancerService: (@= defaultResourceNameWithSuffix("impersonation-proxy-load-balancer") @)
impersonationClusterIPService: (@= defaultResourceNameWithSuffix("impersonation-proxy-cluster-ip") @)
impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @)
impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @)
impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @)
agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @)
labels: (@= json.encode(labels()).rstrip() @)
kubeCertAgent:
namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)
(@ if data.values.kube_cert_agent_image: @)
image: (@= data.values.kube_cert_agent_image @)
(@ else: @)
(@ if data.values.image_digest: @)
image: (@= data.values.image_repo + "@" + data.values.image_digest @)
(@ else: @)
image: (@= data.values.image_repo + ":" + data.values.image_tag @)
(@ end @)
(@ end @)
(@ if data.values.image_pull_dockerconfigjson: @)
imagePullSecrets:
- image-pull-secret
(@ end @)
(@ if data.values.log_level or data.values.deprecated_log_format: @)
log:
(@ if data.values.log_level: @)
level: (@= getAndValidateLogLevel() @)
(@ end @)
(@ if data.values.deprecated_log_format: @)
format: (@= data.values.deprecated_log_format @)
(@ end @)
(@ end @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
name: image-pull-secret
namespace: #@ namespace()
labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
#! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods.
_: #@ template.replace(deploymentPodLabel())
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
securityContext:
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
imagePullSecrets:
- name: image-pull-secret
#@ end
containers:
- name: #@ defaultResourceName()
#@ if data.values.image_digest:
image: #@ data.values.image_repo + "@" + data.values.image_digest
#@ else:
image: #@ data.values.image_repo + ":" + data.values.image_tag
#@ end
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop: [ "ALL" ]
#! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
#! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
#! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
seccompProfile:
type: "RuntimeDefault"
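#! For example (illustrative), on a pre-1.19 cluster the rendered manifest could be
#! installed with that flag: kubectl apply --validate=false -f rendered-concierge.yaml
#! (where rendered-concierge.yaml is whatever the ytt output was saved as).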
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "100m"
memory: "128Mi"
command:
- pinniped-concierge
- --config=/etc/config/pinniped.yaml
- --downward-api-path=/etc/podinfo
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
- name: impersonation-proxy
mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount
readOnly: true
env:
#@ if data.values.https_proxy:
- name: HTTPS_PROXY
value: #@ data.values.https_proxy
#@ end
#@ if data.values.https_proxy and data.values.no_proxy:
- name: NO_PROXY
value: #@ data.values.no_proxy
#@ end
livenessProbe:
httpGet:
path: /healthz
port: 10250
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 15
periodSeconds: 10
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 10250
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 3
volumes:
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 100Mi
- name: config-volume
configMap:
name: #@ defaultResourceNameWithSuffix("config")
- name: impersonation-proxy
secret:
secretName: #@ defaultResourceNameWithSuffix("impersonation-proxy")
items: #! make sure our pod does not start until the token controller has a chance to populate the secret
- key: token
path: token
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "name"
fieldRef:
fieldPath: metadata.name
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master #! Allow running on master nodes too (name deprecated by Kubernetes 1.20).
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane #! The new name for these nodes as of Kubernetes 1.24.
effect: NoSchedule
#! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
#! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
#!priorityClassName: system-cluster-critical
#! This will help make sure our multiple pods run on different nodes, making
#! our deployment "more" "HA".
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
#! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 10250
---
apiVersion: v1
kind: Service
metadata:
name: #@ defaultResourceNameWithSuffix("proxy")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 8444
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.login.concierge")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("login.concierge")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.identity.concierge")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("identity.concierge")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
---
apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1"
kind: CredentialIssuer
metadata:
name: #@ defaultResourceNameWithSuffix("config")
labels: #@ labels()
spec:
impersonationProxy:
mode: #@ data.values.impersonation_proxy_spec.mode
#@ if data.values.impersonation_proxy_spec.external_endpoint:
externalEndpoint: #@ data.values.impersonation_proxy_spec.external_endpoint
#@ end
service:
type: #@ data.values.impersonation_proxy_spec.service.type
#@ if data.values.impersonation_proxy_spec.service.load_balancer_ip:
loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip
#@ end
annotations: #@ data.values.impersonation_proxy_spec.service.annotations
---
apiVersion: v1
kind: Secret
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! wait until the SA exists to create this secret so that the token controller does not delete it
#! we have this secret at the end so that kubectl will create the service account first
kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount"
kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
type: kubernetes.io/service-account-token

View File

@@ -1,41 +0,0 @@
#!/bin/bash
APP="pinn-conci"
kapp deploy --app "${APP}" --diff-changes --file <(ytt \
--file concierge/config/authentication.concierge.pinniped.dev_jwtauthenticators.yaml \
--file concierge/config/authentication.concierge.pinniped.dev_webhookauthenticators.yaml \
--file concierge/config/config.concierge.pinniped.dev_credential_issuers.yaml \
--file concierge/config/deployment-HACKED.yaml \
--file concierge/config/helpers.lib.yaml \
--file concierge/config/rbac.yaml \
--file concierge/config/z0_crd_overlay.yaml \
--file concierge/config/values.yaml \
--data-value app_name=pinn-conci \
--data-value namespace=pinn-conci \
--data-value-yaml 'custom_labels={"foo": bar}' \
--data-value replicas=3)
## template the thing
#RENDER_OUTPUT_FILE=$(
#ytt \
# --file concierge/config/helpers.lib.yaml \
# --file concierge/config/deployment.yaml \
# --file concierge/config/service.yaml \
# --file concierge/config/values.yaml \
# --data-value app_name=pinn-super \
# --data-value namespace=pinn-super \
# --data-value-yaml 'custom_labels={"foo": bar}' \
# --data-value replicas=3
#)
#
## view it
#echo "$RENDER_OUTPUT_FILE"
#
## give it to kapp
#kapp deploy \
# --app pinn-super \
# --diff-changes \
# --file <(echo "${RENDER_OUTPUT_FILE}")

View File

@@ -1,66 +0,0 @@
#!/bin/bash
# This file list has to be maintained by hand if this script is used,
# but there must be a way to get ytt to read a directory of files.
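# Note: ytt's --file (-f) flag also accepts a directory path, so something like
#   ytt --file supervisor/config/ --data-value app_name=pinn-super ...
# should pick up all of these files without listing each one
# (untested sketch; overlay and values-file ordering may still need checking).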
#RENDERED_OUTPUT_FILES=$(
#ytt \
# --file supervisor/config/helpers.lib.yaml \
# --file supervisor/config/config.supervisor.pinniped.dev_federationdomains.yaml \
# --file supervisor/config/config.supervisor.pinniped.dev_oidcclients.yaml \
# --file supervisor/config/idp.supervisor.pinniped.dev_activedirectoryidentityproviders.yaml \
# --file supervisor/config/idp.supervisor.pinniped.dev_ldapidentityproviders.yaml \
# --file supervisor/config/idp.supervisor.pinniped.dev_oidcidentityproviders.yaml \
# --file supervisor/config/z0_crd_overlay.yaml \
# --file supervisor/config/rbac.yaml \
# --file supervisor/config/service.yaml \
# --file supervisor/config/deployment.yaml \
# --file supervisor/config/values.yaml \
# --data-value app_name=pinn-super \
# --data-value namespace=pinn-super \
# --data-value-yaml 'custom_labels={"foo": bar}' \
# --data-value replicas=3
#)
#
#echo "${RENDERED_OUTPUT_FILES}"
APP="pinn-super"
kapp deploy --app "${APP}" --diff-changes --file <(ytt \
--file supervisor/config/helpers.lib.yaml \
--file supervisor/config/config.supervisor.pinniped.dev_federationdomains.yaml \
--file supervisor/config/config.supervisor.pinniped.dev_oidcclients.yaml \
--file supervisor/config/idp.supervisor.pinniped.dev_activedirectoryidentityproviders.yaml \
--file supervisor/config/idp.supervisor.pinniped.dev_ldapidentityproviders.yaml \
--file supervisor/config/idp.supervisor.pinniped.dev_oidcidentityproviders.yaml \
--file supervisor/config/z0_crd_overlay.yaml \
--file supervisor/config/rbac.yaml \
--file supervisor/config/service.yaml \
--file supervisor/config/deployment-HACKED.yaml \
--file supervisor/config/values.yaml \
--data-value app_name=pinn-super \
--data-value namespace=pinn-super \
--data-value-yaml 'custom_labels={"foo": bar}' \
--data-value replicas=3)
## template the thing
#RENDER_OUTPUT_FILE=$(
#ytt \
# --file supervisor/config/helpers.lib.yaml \
# --file supervisor/config/deployment.yaml \
# --file supervisor/config/service.yaml \
# --file supervisor/config/values.yaml \
# --data-value app_name=pinn-super \
# --data-value namespace=pinn-super \
# --data-value-yaml 'custom_labels={"foo": bar}' \
# --data-value replicas=3
#)
#
## view it
#echo "$RENDER_OUTPUT_FILE"
#
## give it to kapp
#kapp deploy \
# --app pinn-super \
# --diff-changes \
# --file <(echo "${RENDER_OUTPUT_FILE}")

View File

@@ -1,89 +0,0 @@
# The following env vars should be set before running 'go test -v -count 1 -timeout 0 ./test/integration'
export PINNIPED_TEST_TOOLS_NAMESPACE="tools"
export PINNIPED_TEST_CONCIERGE_NAMESPACE=concierge
export PINNIPED_TEST_CONCIERGE_APP_NAME=pinniped-concierge
export PINNIPED_TEST_CONCIERGE_CUSTOM_LABELS='{myConciergeCustomLabelName: myConciergeCustomLabelValue}'
export PINNIPED_TEST_USER_USERNAME=test-username
export PINNIPED_TEST_USER_GROUPS=test-group-0,test-group-1
export PINNIPED_TEST_USER_TOKEN=test-username:bf1dc425a45f9ee37ccf6f35931a3609
export PINNIPED_TEST_WEBHOOK_ENDPOINT=https://local-user-authenticator.local-user-authenticator.svc/authenticate
export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJyRENDQVZPZ0F3SUJBZ0lRZlR6OG9EQlpuQVJvaTdCS1AySmtYVEFLQmdncWhrak9QUVFEQWpBbU1TUXcKSWdZRFZRUURFeHRzYjJOaGJDMTFjMlZ5TFdGMWRHaGxiblJwWTJGMGIzSWdRMEV3SUJjTk1qTXdPREk1TVRrdwpNekEyV2hnUE1qRXlNekE0TURVeE9UQTRNRFphTUNZeEpEQWlCZ05WQkFNVEcyeHZZMkZzTFhWelpYSXRZWFYwCmFHVnVkR2xqWVhSdmNpQkRRVEJaTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCS0UxN3NwNW8zdTkKK0dKQlFtbHBuay9sU3pTVWdQUHUvY0ltQnpqWFNPMXBrcGMwcU9BRndjRVZxYlg5V05LdFFBMXplOVA5TXBOdgpMKzdJNnNpN0xmMmpZVEJmTUE0R0ExVWREd0VCL3dRRUF3SUNoREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNECkFnWUlLd1lCQlFVSEF3RXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVlRjR0bDZRT24yd3kKSUVPcm00WVFOWWtVOU0wd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ0cvQTluQisyUVpDME82aStmWFhGRm1XYQpQWXMyanNxbU5lbVBBMkdDNVZzQ0lFN2w1L1lNSE8xUHdYQWlwYXRaYjkwUDhaZ2pFc0x0Qi9lb3BLQlpBT0VSCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
export PINNIPED_TEST_SUPERVISOR_NAMESPACE=supervisor
export PINNIPED_TEST_SUPERVISOR_APP_NAME=pinniped-supervisor
export PINNIPED_TEST_SUPERVISOR_CUSTOM_LABELS='{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}'
export PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS="localhost:12344"
export PINNIPED_TEST_PROXY=http://127.0.0.1:12346
export PINNIPED_TEST_LDAP_HOST=ldap.tools.svc.cluster.local
export PINNIPED_TEST_LDAP_STARTTLS_ONLY_HOST=ldapstarttls.tools.svc.cluster.local
export PINNIPED_TEST_LDAP_LDAPS_CA_BUNDLE="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjekNDQVJxZ0F3SUJBZ0lVUmE4OENCQWhwbnpNVmt3bmJtQnJ2RXZQdzdVd0NnWUlLb1pJemowRUF3SXcKR0RFV01CUUdBMVVFQXhNTlVHbHVibWx3WldRZ1ZHVnpkREFlRncweU16QTRNamt4T1RBME1EQmFGdzB5T0RBNApNamN4T1RBME1EQmFNQmd4RmpBVUJnTlZCQU1URFZCcGJtNXBjR1ZrSUZSbGMzUXdXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVN1cWVzRStZM1RwWER1c0lKSUFkUHVQU3N5Q3BzUGVUM3BhYnZHdTIwRlpNYXEKTWZLejJrZFlqenhKNlN4b2lTM3dmSkFwc0VRRU9MV1NTaG51QmlrdG8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVam5Ua3dPc1NhbHVHOXZlcnBtc0VWVGRLCjZZd3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdkeTNUcFA3WUFXaVdaaWV6WFBBVVhLOWNIWDJmUW9GVndFZGIKaGhDSDRib0NJR2trNTg5VzZIcHRUMHFVR0sreG9YbzkzeXA4NDBCcXNHMEtoeW5GV29JTQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_USERNAME="cn=admin,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_PASSWORD=password
export PINNIPED_TEST_LDAP_USERS_SEARCH_BASE="ou=users,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_GROUPS_SEARCH_BASE="ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_USER_DN="cn=pinny,ou=users,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_USER_CN="pinny"
export PINNIPED_TEST_LDAP_USER_PASSWORD=342db8a6d3416ecc99a735f7d00db93d
export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME="uidNumber"
export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE="1000"
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_NAME="mail"
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_VALUE="pinny.ldap@example.com"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_DN="cn=ball-game-players,ou=beach-groups,ou=groups,dc=pinniped,dc=dev;cn=seals,ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_DN="cn=pinnipeds,ou=groups,dc=pinniped,dc=dev;cn=mammals,ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_CN="ball-game-players;seals"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN="ball-game-players-posix;seals-posix"
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_CN="pinnipeds;mammals"
export PINNIPED_TEST_CLI_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjekNDQVJxZ0F3SUJBZ0lVUmE4OENCQWhwbnpNVmt3bmJtQnJ2RXZQdzdVd0NnWUlLb1pJemowRUF3SXcKR0RFV01CUUdBMVVFQXhNTlVHbHVibWx3WldRZ1ZHVnpkREFlRncweU16QTRNamt4T1RBME1EQmFGdzB5T0RBNApNamN4T1RBME1EQmFNQmd4RmpBVUJnTlZCQU1URFZCcGJtNXBjR1ZrSUZSbGMzUXdXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVN1cWVzRStZM1RwWER1c0lKSUFkUHVQU3N5Q3BzUGVUM3BhYnZHdTIwRlpNYXEKTWZLejJrZFlqenhKNlN4b2lTM3dmSkFwc0VRRU9MV1NTaG51QmlrdG8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVam5Ua3dPc1NhbHVHOXZlcnBtc0VWVGRLCjZZd3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdkeTNUcFA3WUFXaVdaaWV6WFBBVVhLOWNIWDJmUW9GVndFZGIKaGhDSDRib0NJR2trNTg5VzZIcHRUMHFVR0sreG9YbzkzeXA4NDBCcXNHMEtoeW5GV29JTQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
export PINNIPED_TEST_CLI_OIDC_CLIENT_ID=pinniped-cli
export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL=http://127.0.0.1:48095/callback
export PINNIPED_TEST_CLI_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_CLI_OIDC_PASSWORD=9306dcb43f0f8d0ccbad3d431c05940d
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjekNDQVJxZ0F3SUJBZ0lVUmE4OENCQWhwbnpNVmt3bmJtQnJ2RXZQdzdVd0NnWUlLb1pJemowRUF3SXcKR0RFV01CUUdBMVVFQXhNTlVHbHVibWx3WldRZ1ZHVnpkREFlRncweU16QTRNamt4T1RBME1EQmFGdzB5T0RBNApNamN4T1RBME1EQmFNQmd4RmpBVUJnTlZCQU1URFZCcGJtNXBjR1ZrSUZSbGMzUXdXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVN1cWVzRStZM1RwWER1c0lKSUFkUHVQU3N5Q3BzUGVUM3BhYnZHdTIwRlpNYXEKTWZLejJrZFlqenhKNlN4b2lTM3dmSkFwc0VRRU9MV1NTaG51QmlrdG8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVam5Ua3dPc1NhbHVHOXZlcnBtc0VWVGRLCjZZd3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdkeTNUcFA3WUFXaVdaaWV6WFBBVVhLOWNIWDJmUW9GVndFZGIKaGhDSDRib0NJR2trNTg5VzZIcHRUMHFVR0sreG9YbzkzeXA4NDBCcXNHMEtoeW5GV29JTQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES="offline_access,email"
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME_CLAIM=email
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_GROUPS_CLAIM=groups
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_ID=pinniped-supervisor
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_SECRET=pinniped-supervisor-secret
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CALLBACK_URL=https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD=9306dcb43f0f8d0ccbad3d431c05940d
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_EXPECTED_GROUPS= # Dex's local user store does not let us configure groups.
export PINNIPED_TEST_API_GROUP_SUFFIX='pinniped.dev'
# PINNIPED_TEST_SHELL_CONTAINER_IMAGE should be a container which includes bash and sleep, used by some tests.
export PINNIPED_TEST_SHELL_CONTAINER_IMAGE="ghcr.io/pinniped-ci-bot/test-kubectl:latest"
# We can't set up an in-cluster Active Directory instance, but
# if you have an Active Directory instance that you wish to run the tests against,
# specify a script to set the AD-related environment variables.
# You will need to set the environment variables that start with "PINNIPED_TEST_AD_"
# found in pinniped/test/testlib/env.go.
if [[ "" != "" ]]; then
source
fi
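# An illustrative sketch of such a script (variable names are examples only; see
# pinniped/test/testlib/env.go for the authoritative list):
#   export PINNIPED_TEST_AD_HOST=ad.example.com
#   export PINNIPED_TEST_AD_BIND_ACCOUNT_USERNAME='admin@ad.example.com'
#   export PINNIPED_TEST_AD_BIND_ACCOUNT_PASSWORD='...'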
read -r -d '' PINNIPED_TEST_CLUSTER_CAPABILITY_YAML << PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF || true
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# The name of the cluster type.
kubernetesDistribution: Kind
# Describe the capabilities of the cluster against which the integration tests will run.
capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: true
# Will the cluster successfully provision a load balancer if requested?
hasExternalLoadBalancerProvider: false
# Does the cluster allow requests without authentication?
# https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests
anonymousAuthenticationSupported: true
# Are LDAP ports on the Internet reachable without interference from network firewalls or proxies?
canReachInternetLDAPPorts: true
PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF
export PINNIPED_TEST_CLUSTER_CAPABILITY_YAML
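# Usage sketch (the filename below is just an example): save this file, then from the
# root of the pinniped repo run
#   source ./integration-test-env.sh && go test -v -count 1 -timeout 0 ./test/integration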

File diff suppressed because it is too large

View File

@@ -1,235 +0,0 @@
#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("@ytt:yaml", "yaml")
#@ load("helpers.lib.yaml",
#@ "defaultLabel",
#@ "labels",
#@ "deploymentPodLabel",
#@ "namespace",
#@ "defaultResourceName",
#@ "defaultResourceNameWithSuffix",
#@ "pinnipedDevAPIGroupWithPrefix",
#@ "getPinnipedConfigMapData",
#@ "hasUnixNetworkEndpoint",
#@ )
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
name: #@ data.values.namespace
labels: #@ labels()
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("static-config")
namespace: #@ namespace()
labels: #@ labels()
data:
#@yaml/text-templated-strings
pinniped.yaml: #@ yaml.encode(getPinnipedConfigMapData())
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
name: image-pull-secret
namespace: #@ namespace()
labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
#! without accidentally selecting pods from any future Deployments which might also want to use the defaultLabel().
_: #@ template.replace(deploymentPodLabel())
spec:
securityContext:
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
imagePullSecrets:
- name: image-pull-secret
#@ end
containers:
- name: #@ defaultResourceName()
#@ if data.values.image_digest:
image: #@ data.values.image_repo + "@" + data.values.image_digest
#@ else:
image: #@ data.values.image_repo + ":" + data.values.image_tag
#@ end
imagePullPolicy: IfNotPresent
command:
- pinniped-supervisor
- /etc/podinfo
- /etc/config/pinniped.yaml
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop: [ "ALL" ]
#! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
#! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
#! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
seccompProfile:
type: "RuntimeDefault"
resources:
requests:
#! If OIDCClient CRs are being used, then the Supervisor needs enough CPU to run expensive bcrypt
#! operations inside the implementation of the token endpoint for any authcode flows performed by those
#! clients, so for that use case administrators may wish to increase the requests.cpu value to more
#! closely align with their anticipated needs. Increasing this value will cause Kubernetes to give more
#! available CPU to this process during times of high CPU contention. By default, don't ask for too much
#! because that would make it impossible to install the Pinniped Supervisor on small clusters.
#! Aside from performing bcrypts at the token endpoint for those clients, the Supervisor is not a
#! particularly CPU-intensive process.
cpu: "100m" #! by default, request one-tenth of a CPU
memory: "128Mi"
limits:
#! By declaring a CPU limit that is not equal to the CPU request value, the Supervisor will be classified
#! by Kubernetes to have "burstable" quality of service.
#! See https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-burstable
#! If OIDCClient CRs are being used, and lots of simultaneous users have active sessions, then it is hard
#! pre-determine what the CPU limit should be for that use case. Guessing too low would cause the
#! pod's CPU usage to be throttled, resulting in poor performance. Guessing too high would allow clients
#! to cause the usage of lots of CPU resources. Administrators who have a good sense of anticipated usage
#! patterns may choose to set the requests.cpu and limits.cpu differently from these defaults.
cpu: "1000m" #! by default, throttle each pod's usage at 1 CPU
memory: "128Mi"
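#! For example (illustrative only), an administrator expecting heavy OIDCClient traffic
#! might edit or overlay these values to something like requests.cpu "500m" and
#! limits.cpu "2000m"; the values above are just conservative defaults.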
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
#@ if hasUnixNetworkEndpoint():
- name: socket
mountPath: /pinniped_socket
readOnly: false #! writable to allow for socket use
#@ end
ports:
- containerPort: 8443
protocol: TCP
env:
#@ if data.values.https_proxy:
- name: HTTPS_PROXY
value: #@ data.values.https_proxy
#@ end
#@ if data.values.https_proxy and data.values.no_proxy:
- name: NO_PROXY
value: #@ data.values.no_proxy
#@ end
livenessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 15
periodSeconds: 10
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 3
volumes:
- name: config-volume
configMap:
name: #@ defaultResourceNameWithSuffix("static-config")
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
- path: "name"
fieldRef:
fieldPath: metadata.name
#@ if hasUnixNetworkEndpoint():
- name: socket
emptyDir: {}
#@ end
#! This will help make sure our multiple pods run on different nodes, making
#! our deployment "more" "HA".
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
#! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 10250
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.clientsecret.supervisor")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("clientsecret.supervisor")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443