revisions to make this work with the prepare-supervisor-on-kind.sh script

Benjamin A. Petersen 2023-09-05 11:16:53 -04:00
parent 3a71252167
commit d99a43bd87
No known key found for this signature in database
GPG Key ID: EF6EF83523A4BE46
18 changed files with 4032 additions and 76 deletions


@ -0,0 +1,360 @@
#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("@ytt:json", "json")
#@ load("helpers.lib.yaml", "defaultLabel", "labels", "deploymentPodLabel", "namespace", "defaultResourceName", "defaultResourceNameWithSuffix", "getAndValidateLogLevel", "pinnipedDevAPIGroupWithPrefix")
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
name: #@ data.values.namespace
labels:
_: #@ template.replace(labels())
#! When deploying onto a cluster which has PSAs enabled by default for namespaces,
#! effectively disable them for this namespace. The kube-cert-agent Deployment's pod
#! created by the Concierge in this namespace needs to be able to perform privileged
#! actions. The regular Concierge pod containers created by the Deployment below do
#! not need special privileges and are marked as such in their securityContext settings.
pod-security.kubernetes.io/enforce: privileged
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("kube-cert-agent")
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! we need to create this service account before we create the secret
kapp.k14s.io/change-group: "impersonation-proxy.concierge.pinniped.dev/serviceaccount"
secrets: #! make sure the token controller does not create any other secrets
- name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("config")
namespace: #@ namespace()
labels: #@ labels()
data:
#! If names.apiService is changed in this ConfigMap, must also change name of the ClusterIP Service resource below.
#@yaml/text-templated-strings
pinniped.yaml: |
discovery:
url: (@= data.values.discovery_url or "null" @)
api:
servingCertificate:
durationSeconds: (@= str(data.values.api_serving_certificate_duration_seconds) @)
renewBeforeSeconds: (@= str(data.values.api_serving_certificate_renew_before_seconds) @)
apiGroupSuffix: (@= data.values.api_group_suffix @)
# aggregatedAPIServerPort may be set here, although other YAML references to the default port (10250) may also need to be updated
# impersonationProxyServerPort may be set here, although other YAML references to the default port (8444) may also need to be updated
names:
servingCertificateSecret: (@= defaultResourceNameWithSuffix("api-tls-serving-certificate") @)
credentialIssuer: (@= defaultResourceNameWithSuffix("config") @)
apiService: (@= defaultResourceNameWithSuffix("api") @)
impersonationLoadBalancerService: (@= defaultResourceNameWithSuffix("impersonation-proxy-load-balancer") @)
impersonationClusterIPService: (@= defaultResourceNameWithSuffix("impersonation-proxy-cluster-ip") @)
impersonationTLSCertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-tls-serving-certificate") @)
impersonationCACertificateSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-ca-certificate") @)
impersonationSignerSecret: (@= defaultResourceNameWithSuffix("impersonation-proxy-signer-ca-certificate") @)
agentServiceAccount: (@= defaultResourceNameWithSuffix("kube-cert-agent") @)
labels: (@= json.encode(labels()).rstrip() @)
kubeCertAgent:
namePrefix: (@= defaultResourceNameWithSuffix("kube-cert-agent-") @)
(@ if data.values.kube_cert_agent_image: @)
image: (@= data.values.kube_cert_agent_image @)
(@ else: @)
(@ if data.values.image_digest: @)
image: (@= data.values.image_repo + "@" + data.values.image_digest @)
(@ else: @)
image: (@= data.values.image_repo + ":" + data.values.image_tag @)
(@ end @)
(@ end @)
(@ if data.values.image_pull_dockerconfigjson: @)
imagePullSecrets:
- image-pull-secret
(@ end @)
(@ if data.values.log_level or data.values.deprecated_log_format: @)
log:
(@ if data.values.log_level: @)
level: (@= getAndValidateLogLevel() @)
(@ end @)
(@ if data.values.deprecated_log_format: @)
format: (@= data.values.deprecated_log_format @)
(@ end @)
(@ end @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
name: image-pull-secret
namespace: #@ namespace()
labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
#! without accidentally selecting any other Deployment's Pods, especially the kube cert agent Deployment's Pods.
_: #@ template.replace(deploymentPodLabel())
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
spec:
securityContext:
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
imagePullSecrets:
- name: image-pull-secret
#@ end
containers:
- name: #@ defaultResourceName()
#@ if data.values.image_digest:
image: #@ data.values.image_repo + "@" + data.values.image_digest
#@ else:
image: #@ data.values.image_repo + ":" + data.values.image_tag
#@ end
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop: [ "ALL" ]
#! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
#! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
#! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
seccompProfile:
type: "RuntimeDefault"
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "100m"
memory: "128Mi"
command:
- pinniped-concierge
- --config=/etc/config/pinniped.yaml
- --downward-api-path=/etc/podinfo
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
- name: impersonation-proxy
mountPath: /var/run/secrets/impersonation-proxy.concierge.pinniped.dev/serviceaccount
readOnly: true
env:
#@ if data.values.https_proxy:
- name: HTTPS_PROXY
value: #@ data.values.https_proxy
#@ end
#@ if data.values.https_proxy and data.values.no_proxy:
- name: NO_PROXY
value: #@ data.values.no_proxy
#@ end
livenessProbe:
httpGet:
path: /healthz
port: 10250
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 15
periodSeconds: 10
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 10250
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 3
volumes:
- name: tmp
emptyDir:
medium: Memory
sizeLimit: 100Mi
- name: config-volume
configMap:
name: #@ defaultResourceNameWithSuffix("config")
- name: impersonation-proxy
secret:
secretName: #@ defaultResourceNameWithSuffix("impersonation-proxy")
items: #! make sure our pod does not start until the token controller has a chance to populate the secret
- key: token
path: token
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "name"
fieldRef:
fieldPath: metadata.name
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master #! Allow running on master nodes too (name deprecated by kubernetes 1.20).
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane #! The new name for these nodes as of Kubernetes 1.24.
effect: NoSchedule
#! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
#! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
#!priorityClassName: system-cluster-critical
#! This will help make sure our multiple pods run on different nodes, making
#! our deployment "more" "HA".
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
#! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 10250
---
apiVersion: v1
kind: Service
metadata:
name: #@ defaultResourceNameWithSuffix("proxy")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 8444
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.login.concierge")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("login.concierge")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.identity.concierge")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("identity.concierge")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
---
apiVersion: #@ pinnipedDevAPIGroupWithPrefix("config.concierge") + "/v1alpha1"
kind: CredentialIssuer
metadata:
name: #@ defaultResourceNameWithSuffix("config")
labels: #@ labels()
spec:
impersonationProxy:
mode: #@ data.values.impersonation_proxy_spec.mode
#@ if data.values.impersonation_proxy_spec.external_endpoint:
externalEndpoint: #@ data.values.impersonation_proxy_spec.external_endpoint
#@ end
service:
type: #@ data.values.impersonation_proxy_spec.service.type
#@ if data.values.impersonation_proxy_spec.service.load_balancer_ip:
loadBalancerIP: #@ data.values.impersonation_proxy_spec.service.load_balancer_ip
#@ end
annotations: #@ data.values.impersonation_proxy_spec.service.annotations
---
apiVersion: v1
kind: Secret
metadata:
name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
namespace: #@ namespace()
labels: #@ labels()
annotations:
#! wait until the SA exists to create this secret so that the token controller does not delete it
#! we have this secret at the end so that kubectl will create the service account first
kapp.k14s.io/change-rule: "upsert after upserting impersonation-proxy.concierge.pinniped.dev/serviceaccount"
kubernetes.io/service-account.name: #@ defaultResourceNameWithSuffix("impersonation-proxy")
type: kubernetes.io/service-account-token
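
A quick way to confirm this manifest came up after deploying is to check the aggregated APIServices and the CredentialIssuer it defines. A minimal sketch, assuming the default api_group_suffix (pinniped.dev) and the app_name/namespace values used by the deploy script below (pinn-conci); the resource names change with different data values:
# Both APIServices should eventually report Available=True once the controller has populated their caBundle.
kubectl get apiservice v1alpha1.login.concierge.pinniped.dev v1alpha1.identity.concierge.pinniped.dev
# The CredentialIssuer status lists each credential-issuing strategy and whether it is healthy.
kubectl get credentialissuers.config.concierge.pinniped.dev pinn-conci-config -o yaml
# The Deployment, Services, and impersonation-proxy Secret all land in the ytt-provided namespace.
kubectl get deploy,svc,secret -n pinn-conci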


@ -0,0 +1,41 @@
#!/bin/bash
APP="pinn-conci"
kapp deploy --app "${APP}" --diff-changes --file <(ytt \
--file concierge/config/authentication.concierge.pinniped.dev_jwtauthenticators.yaml \
--file concierge/config/authentication.concierge.pinniped.dev_webhookauthenticators.yaml \
--file concierge/config/config.concierge.pinniped.dev_credential_issuers.yaml \
--file concierge/config/deployment-HACKED.yaml \
--file concierge/config/helpers.lib.yaml \
--file concierge/config/rbac.yaml \
--file concierge/config/z0_crd_overlay.yaml \
--file concierge/config/values.yaml \
--data-value app_name=pinn-conci \
--data-value namespace=pinn-conci \
--data-value-yaml 'custom_labels={"foo": bar}' \
--data-value replicas=3)
## template the thing
#RENDER_OUTPUT_FILE=$(
#ytt \
# --file concierge/config/helpers.lib.yaml \
# --file concierge/config/deployment.yaml \
# --file concierge/config/service.yaml \
# --file concierge/config/values.yaml \
# --data-value app_name=pinn-conci \
# --data-value namespace=pinn-conci \
# --data-value-yaml 'custom_labels={"foo": bar}' \
# --data-value replicas=3
#)
#
## view it
#echo "$RENDER_OUTPUT_FILE"
#
## give it to kapp
#kapp deploy \
# --app pinn-conci \
# --diff-changes \
# --file <(echo "${RENDER_OUTPUT_FILE}")
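
After a deploy, kapp can also show or tear down everything it grouped under that app name; a small usage sketch with the same APP value as above:
# Show the resources kapp tracks for this app, then remove them all when finished.
kapp inspect --app "pinn-conci" --tree
kapp delete --app "pinn-conci" --yes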


@ -0,0 +1,66 @@
#!/bin/bash
# need to maintain this if used.
# but there must be a way to get ytt to read a directory of files.
#RENDERED_OUTPUT_FILES=$(
#ytt \
# --file supervisor/config/helpers.lib.yaml \
# --file supervisor/config/config.supervisor.pinniped.dev_federationdomains.yaml \
# --file supervisor/config/config.supervisor.pinniped.dev_oidcclients.yaml \
# --file supervisor/config/idp.supervisor.pinniped.dev_activedirectoryidentityproviders.yaml \
# --file supervisor/config/idp.supervisor.pinniped.dev_ldapidentityproviders.yaml \
# --file supervisor/config/idp.supervisor.pinniped.dev_oidcidentityproviders.yaml \
# --file supervisor/config/z0_crd_overlay.yaml \
# --file supervisor/config/rbac.yaml \
# --file supervisor/config/service.yaml \
# --file supervisor/config/deployment.yaml \
# --file supervisor/config/values.yaml \
# --data-value app_name=pinn-super \
# --data-value namespace=pinn-super \
# --data-value-yaml 'custom_labels={"foo": bar}' \
# --data-value replicas=3
#)
#
#echo "${RENDERED_OUTPUT_FILES}"
APP="pinn-super"
kapp deploy --app "${APP}" --diff-changes --file <(ytt \
--file supervisor/config/helpers.lib.yaml \
--file supervisor/config/config.supervisor.pinniped.dev_federationdomains.yaml \
--file supervisor/config/config.supervisor.pinniped.dev_oidcclients.yaml \
--file supervisor/config/idp.supervisor.pinniped.dev_activedirectoryidentityproviders.yaml \
--file supervisor/config/idp.supervisor.pinniped.dev_ldapidentityproviders.yaml \
--file supervisor/config/idp.supervisor.pinniped.dev_oidcidentityproviders.yaml \
--file supervisor/config/z0_crd_overlay.yaml \
--file supervisor/config/rbac.yaml \
--file supervisor/config/service.yaml \
--file supervisor/config/deployment-HACKED.yaml \
--file supervisor/config/values.yaml \
--data-value app_name=pinn-super \
--data-value namespace=pinn-super \
--data-value-yaml 'custom_labels={"foo": bar}' \
--data-value replicas=3)
## template the thing
#RENDER_OUTPUT_FILE=$(
#ytt \
# --file supervisor/config/helpers.lib.yaml \
# --file supervisor/config/deployment.yaml \
# --file supervisor/config/service.yaml \
# --file supervisor/config/values.yaml \
# --data-value app_name=pinn-super \
# --data-value namespace=pinn-super \
# --data-value-yaml 'custom_labels={"foo": bar}' \
# --data-value replicas=3
#)
#
## view it
#echo "$RENDER_OUTPUT_FILE"
#
## give it to kapp
#kapp deploy \
# --app pinn-super \
# --diff-changes \
# --file <(echo "${RENDER_OUTPUT_FILE}")


@ -0,0 +1,89 @@
# The following env vars should be set before running 'go test -v -count 1 -timeout 0 ./test/integration'
export PINNIPED_TEST_TOOLS_NAMESPACE="tools"
export PINNIPED_TEST_CONCIERGE_NAMESPACE=concierge
export PINNIPED_TEST_CONCIERGE_APP_NAME=pinniped-concierge
export PINNIPED_TEST_CONCIERGE_CUSTOM_LABELS='{myConciergeCustomLabelName: myConciergeCustomLabelValue}'
export PINNIPED_TEST_USER_USERNAME=test-username
export PINNIPED_TEST_USER_GROUPS=test-group-0,test-group-1
export PINNIPED_TEST_USER_TOKEN=test-username:bf1dc425a45f9ee37ccf6f35931a3609
export PINNIPED_TEST_WEBHOOK_ENDPOINT=https://local-user-authenticator.local-user-authenticator.svc/authenticate
export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJyRENDQVZPZ0F3SUJBZ0lRZlR6OG9EQlpuQVJvaTdCS1AySmtYVEFLQmdncWhrak9QUVFEQWpBbU1TUXcKSWdZRFZRUURFeHRzYjJOaGJDMTFjMlZ5TFdGMWRHaGxiblJwWTJGMGIzSWdRMEV3SUJjTk1qTXdPREk1TVRrdwpNekEyV2hnUE1qRXlNekE0TURVeE9UQTRNRFphTUNZeEpEQWlCZ05WQkFNVEcyeHZZMkZzTFhWelpYSXRZWFYwCmFHVnVkR2xqWVhSdmNpQkRRVEJaTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCS0UxN3NwNW8zdTkKK0dKQlFtbHBuay9sU3pTVWdQUHUvY0ltQnpqWFNPMXBrcGMwcU9BRndjRVZxYlg5V05LdFFBMXplOVA5TXBOdgpMKzdJNnNpN0xmMmpZVEJmTUE0R0ExVWREd0VCL3dRRUF3SUNoREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNECkFnWUlLd1lCQlFVSEF3RXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVlRjR0bDZRT24yd3kKSUVPcm00WVFOWWtVOU0wd0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ0cvQTluQisyUVpDME82aStmWFhGRm1XYQpQWXMyanNxbU5lbVBBMkdDNVZzQ0lFN2w1L1lNSE8xUHdYQWlwYXRaYjkwUDhaZ2pFc0x0Qi9lb3BLQlpBT0VSCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
export PINNIPED_TEST_SUPERVISOR_NAMESPACE=supervisor
export PINNIPED_TEST_SUPERVISOR_APP_NAME=pinniped-supervisor
export PINNIPED_TEST_SUPERVISOR_CUSTOM_LABELS='{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}'
export PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS="localhost:12344"
export PINNIPED_TEST_PROXY=http://127.0.0.1:12346
export PINNIPED_TEST_LDAP_HOST=ldap.tools.svc.cluster.local
export PINNIPED_TEST_LDAP_STARTTLS_ONLY_HOST=ldapstarttls.tools.svc.cluster.local
export PINNIPED_TEST_LDAP_LDAPS_CA_BUNDLE="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjekNDQVJxZ0F3SUJBZ0lVUmE4OENCQWhwbnpNVmt3bmJtQnJ2RXZQdzdVd0NnWUlLb1pJemowRUF3SXcKR0RFV01CUUdBMVVFQXhNTlVHbHVibWx3WldRZ1ZHVnpkREFlRncweU16QTRNamt4T1RBME1EQmFGdzB5T0RBNApNamN4T1RBME1EQmFNQmd4RmpBVUJnTlZCQU1URFZCcGJtNXBjR1ZrSUZSbGMzUXdXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVN1cWVzRStZM1RwWER1c0lKSUFkUHVQU3N5Q3BzUGVUM3BhYnZHdTIwRlpNYXEKTWZLejJrZFlqenhKNlN4b2lTM3dmSkFwc0VRRU9MV1NTaG51QmlrdG8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVam5Ua3dPc1NhbHVHOXZlcnBtc0VWVGRLCjZZd3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdkeTNUcFA3WUFXaVdaaWV6WFBBVVhLOWNIWDJmUW9GVndFZGIKaGhDSDRib0NJR2trNTg5VzZIcHRUMHFVR0sreG9YbzkzeXA4NDBCcXNHMEtoeW5GV29JTQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_USERNAME="cn=admin,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_PASSWORD=password
export PINNIPED_TEST_LDAP_USERS_SEARCH_BASE="ou=users,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_GROUPS_SEARCH_BASE="ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_USER_DN="cn=pinny,ou=users,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_USER_CN="pinny"
export PINNIPED_TEST_LDAP_USER_PASSWORD=342db8a6d3416ecc99a735f7d00db93d
export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_NAME="uidNumber"
export PINNIPED_TEST_LDAP_USER_UNIQUE_ID_ATTRIBUTE_VALUE="1000"
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_NAME="mail"
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_VALUE="pinny.ldap@example.com"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_DN="cn=ball-game-players,ou=beach-groups,ou=groups,dc=pinniped,dc=dev;cn=seals,ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_DN="cn=pinnipeds,ou=groups,dc=pinniped,dc=dev;cn=mammals,ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_CN="ball-game-players;seals"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_POSIX_GROUPS_CN="ball-game-players-posix;seals-posix"
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_CN="pinnipeds;mammals"
export PINNIPED_TEST_CLI_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjekNDQVJxZ0F3SUJBZ0lVUmE4OENCQWhwbnpNVmt3bmJtQnJ2RXZQdzdVd0NnWUlLb1pJemowRUF3SXcKR0RFV01CUUdBMVVFQXhNTlVHbHVibWx3WldRZ1ZHVnpkREFlRncweU16QTRNamt4T1RBME1EQmFGdzB5T0RBNApNamN4T1RBME1EQmFNQmd4RmpBVUJnTlZCQU1URFZCcGJtNXBjR1ZrSUZSbGMzUXdXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVN1cWVzRStZM1RwWER1c0lKSUFkUHVQU3N5Q3BzUGVUM3BhYnZHdTIwRlpNYXEKTWZLejJrZFlqenhKNlN4b2lTM3dmSkFwc0VRRU9MV1NTaG51QmlrdG8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVam5Ua3dPc1NhbHVHOXZlcnBtc0VWVGRLCjZZd3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdkeTNUcFA3WUFXaVdaaWV6WFBBVVhLOWNIWDJmUW9GVndFZGIKaGhDSDRib0NJR2trNTg5VzZIcHRUMHFVR0sreG9YbzkzeXA4NDBCcXNHMEtoeW5GV29JTQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
export PINNIPED_TEST_CLI_OIDC_CLIENT_ID=pinniped-cli
export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL=http://127.0.0.1:48095/callback
export PINNIPED_TEST_CLI_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_CLI_OIDC_PASSWORD=9306dcb43f0f8d0ccbad3d431c05940d
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE="LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjekNDQVJxZ0F3SUJBZ0lVUmE4OENCQWhwbnpNVmt3bmJtQnJ2RXZQdzdVd0NnWUlLb1pJemowRUF3SXcKR0RFV01CUUdBMVVFQXhNTlVHbHVibWx3WldRZ1ZHVnpkREFlRncweU16QTRNamt4T1RBME1EQmFGdzB5T0RBNApNamN4T1RBME1EQmFNQmd4RmpBVUJnTlZCQU1URFZCcGJtNXBjR1ZrSUZSbGMzUXdXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVN1cWVzRStZM1RwWER1c0lKSUFkUHVQU3N5Q3BzUGVUM3BhYnZHdTIwRlpNYXEKTWZLejJrZFlqenhKNlN4b2lTM3dmSkFwc0VRRU9MV1NTaG51QmlrdG8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBUVl3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVam5Ua3dPc1NhbHVHOXZlcnBtc0VWVGRLCjZZd3dDZ1lJS29aSXpqMEVBd0lEUndBd1JBSWdkeTNUcFA3WUFXaVdaaWV6WFBBVVhLOWNIWDJmUW9GVndFZGIKaGhDSDRib0NJR2trNTg5VzZIcHRUMHFVR0sreG9YbzkzeXA4NDBCcXNHMEtoeW5GV29JTQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES="offline_access,email"
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME_CLAIM=email
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_GROUPS_CLAIM=groups
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_ID=pinniped-supervisor
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CLIENT_SECRET=pinniped-supervisor-secret
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_CALLBACK_URL=https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_PASSWORD=9306dcb43f0f8d0ccbad3d431c05940d
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_EXPECTED_GROUPS= # Dex's local user store does not let us configure groups.
export PINNIPED_TEST_API_GROUP_SUFFIX='pinniped.dev'
# PINNIPED_TEST_SHELL_CONTAINER_IMAGE should be a container which includes bash and sleep, used by some tests.
export PINNIPED_TEST_SHELL_CONTAINER_IMAGE="ghcr.io/pinniped-ci-bot/test-kubectl:latest"
# We can't set up an in-cluster active directory instance, but
# if you have an active directory instance that you wish to run the tests against,
# specify a script to set the ad-related environment variables.
# You will need to set the environment variables that start with "PINNIPED_TEST_AD_"
# found in pinniped/test/testlib/env.go.
if [[ "" != "" ]]; then
source
fi
read -r -d '' PINNIPED_TEST_CLUSTER_CAPABILITY_YAML << PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF || true
# Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# The name of the cluster type.
kubernetesDistribution: Kind
# Describe the capabilities of the cluster against which the integration tests will run.
capabilities:
# Is it possible to borrow the cluster's signing key from the kube API server?
clusterSigningKeyIsAvailable: true
# Will the cluster successfully provision a load balancer if requested?
hasExternalLoadBalancerProvider: false
# Does the cluster allow requests without authentication?
# https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests
anonymousAuthenticationSupported: true
# Are LDAP ports on the Internet reachable without interference from network firewalls or proxies?
canReachInternetLDAPPorts: true
PINNIPED_TEST_CLUSTER_CAPABILITY_YAML_EOF
export PINNIPED_TEST_CLUSTER_CAPABILITY_YAML
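
A minimal sketch of how these variables are consumed, assuming this file is written to /tmp/integration-test-env (the path the other hack scripts reference):
# Load the test environment, then run the integration suite named at the top of this file.
source /tmp/integration-test-env
go test -v -count 1 -timeout 0 ./test/integration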

File diff suppressed because it is too large.


@ -0,0 +1,235 @@
#! Copyright 2020-2022 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
#@ load("@ytt:yaml", "yaml")
#@ load("helpers.lib.yaml",
#@ "defaultLabel",
#@ "labels",
#@ "deploymentPodLabel",
#@ "namespace",
#@ "defaultResourceName",
#@ "defaultResourceNameWithSuffix",
#@ "pinnipedDevAPIGroupWithPrefix",
#@ "getPinnipedConfigMapData",
#@ "hasUnixNetworkEndpoint",
#@ )
#@ load("@ytt:template", "template")
#@ if not data.values.into_namespace:
---
apiVersion: v1
kind: Namespace
metadata:
name: #@ data.values.namespace
labels: #@ labels()
#@ end
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ defaultResourceNameWithSuffix("static-config")
namespace: #@ namespace()
labels: #@ labels()
data:
#@yaml/text-templated-strings
pinniped.yaml: #@ yaml.encode(getPinnipedConfigMapData())
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
name: image-pull-secret
namespace: #@ namespace()
labels: #@ labels()
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: #@ defaultResourceName()
namespace: #@ namespace()
labels: #@ labels()
spec:
replicas: #@ data.values.replicas
selector:
#! In hindsight, this should have been deploymentPodLabel(), but this field is immutable so changing it would break upgrades.
matchLabels: #@ defaultLabel()
template:
metadata:
labels:
#! This has always included defaultLabel(), which is used by this Deployment's selector.
_: #@ template.replace(defaultLabel())
#! More recently added the more unique deploymentPodLabel() so Services can select these Pods more specifically
#! without accidentally selecting pods from any future Deployments which might also want to use the defaultLabel().
_: #@ template.replace(deploymentPodLabel())
spec:
securityContext:
runAsUser: #@ data.values.run_as_user
runAsGroup: #@ data.values.run_as_group
serviceAccountName: #@ defaultResourceName()
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
imagePullSecrets:
- name: image-pull-secret
#@ end
containers:
- name: #@ defaultResourceName()
#@ if data.values.image_digest:
image: #@ data.values.image_repo + "@" + data.values.image_digest
#@ else:
image: #@ data.values.image_repo + ":" + data.values.image_tag
#@ end
imagePullPolicy: IfNotPresent
command:
- pinniped-supervisor
- /etc/podinfo
- /etc/config/pinniped.yaml
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop: [ "ALL" ]
#! seccompProfile was introduced in Kube v1.19. Using it on an older Kube version will result in a
#! kubectl validation error when installing via `kubectl apply`, which can be ignored using kubectl's
#! `--validate=false` flag. Note that installing via `kapp` does not complain about this validation error.
seccompProfile:
type: "RuntimeDefault"
resources:
requests:
#! If OIDCClient CRs are being used, then the Supervisor needs enough CPU to run expensive bcrypt
#! operations inside the implementation of the token endpoint for any authcode flows performed by those
#! clients, so for that use case administrators may wish to increase the requests.cpu value to more
#! closely align with their anticipated needs. Increasing this value will cause Kubernetes to give more
#! available CPU to this process during times of high CPU contention. By default, don't ask for too much
#! because that would make it impossible to install the Pinniped Supervisor on small clusters.
#! Aside from performing bcrypts at the token endpoint for those clients, the Supervisor is not a
#! particularly CPU-intensive process.
cpu: "100m" #! by default, request one-tenth of a CPU
memory: "128Mi"
limits:
#! By declaring a CPU limit that is not equal to the CPU request value, the Supervisor will be classified
#! by Kubernetes to have "burstable" quality of service.
#! See https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-burstable
#! If OIDCClient CRs are being used, and lots of simultaneous users have active sessions, then it is hard
#! pre-determine what the CPU limit should be for that use case. Guessing too low would cause the
#! pod's CPU usage to be throttled, resulting in poor performance. Guessing too high would allow clients
#! to cause the usage of lots of CPU resources. Administrators who have a good sense of anticipated usage
#! patterns may choose to set the requests.cpu and limits.cpu differently from these defaults.
cpu: "1000m" #! by default, throttle each pod's usage at 1 CPU
memory: "128Mi"
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
#@ if hasUnixNetworkEndpoint():
- name: socket
mountPath: /pinniped_socket
readOnly: false #! writable to allow for socket use
#@ end
ports:
- containerPort: 8443
protocol: TCP
env:
#@ if data.values.https_proxy:
- name: HTTPS_PROXY
value: #@ data.values.https_proxy
#@ end
#@ if data.values.https_proxy and data.values.no_proxy:
- name: NO_PROXY
value: #@ data.values.no_proxy
#@ end
livenessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 15
periodSeconds: 10
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 8443
scheme: HTTPS
initialDelaySeconds: 2
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 3
volumes:
- name: config-volume
configMap:
name: #@ defaultResourceNameWithSuffix("static-config")
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "namespace"
fieldRef:
fieldPath: metadata.namespace
- path: "name"
fieldRef:
fieldPath: metadata.name
#@ if hasUnixNetworkEndpoint():
- name: socket
emptyDir: {}
#@ end
#! This will help make sure our multiple pods run on different nodes, making
#! our deployment "more" "HA".
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
podAffinityTerm:
labelSelector:
matchLabels: #@ deploymentPodLabel()
topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
#! If name is changed, must also change names.apiService in the ConfigMap above and spec.service.name in the APIService below.
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
labels: #@ labels()
#! prevent kapp from altering the selector of our services to match kubectl behavior
annotations:
kapp.k14s.io/disable-default-label-scoping-rules: ""
spec:
type: ClusterIP
selector: #@ deploymentPodLabel()
ports:
- protocol: TCP
port: 443
targetPort: 10250
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: #@ pinnipedDevAPIGroupWithPrefix("v1alpha1.clientsecret.supervisor")
labels: #@ labels()
spec:
version: v1alpha1
group: #@ pinnipedDevAPIGroupWithPrefix("clientsecret.supervisor")
groupPriorityMinimum: 9900
versionPriority: 15
#! caBundle: Do not include this key here. Starts out null, will be updated/owned by the golang code.
service:
name: #@ defaultResourceNameWithSuffix("api")
namespace: #@ namespace()
port: 443
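
As with the Concierge, a short verification sketch after deploying, assuming the default api_group_suffix (pinniped.dev) and the pinn-super app_name/namespace used by the supervisor deploy script:
# The clientsecret APIService should eventually report Available=True.
kubectl get apiservice v1alpha1.clientsecret.supervisor.pinniped.dev
# Supervisor Deployment, pods, and the ClusterIP Service defined above.
kubectl get deploy,pods,svc -n pinn-super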


@ -44,6 +44,11 @@ function check_dependency() {
exit 1
fi
}
# TODO: add support for
# Read the env vars output by hack/prepare-for-integration-tests.sh
# source /tmp/integration-test-env
#
#
# Deploy the PackageRepository and Package resources
# Requires a running kind cluster
# Does not configure Pinniped
@ -212,8 +217,8 @@ stringData:
values.yml: |
---
namespace: "${RESOURCE_NAMESPACE}"
-app_name: "${resource_name}-app-installed-via-package"
+app_name: "${resource_name}" # this affects services and things, needs to be just the resource name to match hack scripts
-replicas: 3
+replicas: 1 # keep logs testing easy
EOF
KAPP_CONTROLLER_APP_NAME="${resource_name}-pkginstall"
@ -233,12 +238,31 @@ kubectl get deploy -n supervisor
kubectl get deploy -n concierge
# FLOW:
# kind delete cluster --name pinniped
# ./hack/prepare-for-integration-tests.sh --alternate-deploy-supervisor $(pwd)/deploy_carvel/deploy.sh --alternate-deploy-concierge $(pwd)/deploy_carvel/deploy.sh
# ./hack/prepare-supervisor-on-kind.sh --oidc
#
# TODO:
# - change the namespace to whatever it is in ./hack/prepare-for-integration-tests.sh
# - make a script that can work for $alternate-deploy
# - then run ./hack/prepare-supervisor-on-kind.sh and make sure it works
#
#
# openssl x509 -text -noout -in ./root_ca.crt
#curl --insecure https://127.0.0.1:61759/live
#{
# "kind": "Status",
# "apiVersion": "v1",
# "metadata": {},
# "status": "Failure",
# "message": "forbidden: User \"system:anonymous\" cannot get path \"/live\"",
# "reason": "Forbidden",
# "details": {},
# "code": 403
#}%
#curl --insecure https://127.0.0.1:61759/readyz
#ok%
#
#


@ -30,75 +30,3 @@ echo_blue() {
echo -e "${BLUE}>> $@${DEFAULT}\n" echo -e "${BLUE}>> $@${DEFAULT}\n"
# printf "${BLUE}$@${DEFAULT}" # printf "${BLUE}$@${DEFAULT}"
} }
# borrowed from /tmp/integration-test-env
# TODO: make new scripts work with the old script?
# or how to ensure we can install both
# - the old way, ytt or plain yamls
# - the new way, with the PackageRepository and Packages
# export PINNIPED_TEST_SUPERVISOR_NAMESPACE=supervisor
PINNIPED_TEST_SUPERVISOR_NAMESPACE=default
# export PINNIPED_TEST_PROXY=http://127.0.0.1:12346
PINNIPED_TEST_PROXY=http://127.0.0.1:12346
# from here forward borrowed from ${repo_root}/hack/prepare-supervisor-on-kind.sh
# NOPE! Not running this script, so we have to pull the env vars ourselves
# however, we can run it against another kind cluster and take a look at it to make sure
# we understand what the contents are
# Read the env vars output by hack/prepare-for-integration-tests.sh
# source /tmp/integration-test-env
# Choose some filenames.
root_ca_crt_path=root_ca.crt
root_ca_key_path=root_ca.key
tls_crt_path=tls.crt
tls_key_path=tls.key
# Choose an audience name for the Concierge.
audience="my-workload-cluster-$(openssl rand -hex 4)"
# These settings align with how the Dex redirect URI is configured by hack/prepare-for-integration-tests.sh.
# Note that this hostname can only be resolved inside the cluster, so we will use a web proxy running inside
# the cluster whenever we want to be able to connect to it.
issuer_host="pinniped-supervisor-clusterip.supervisor.svc.cluster.local"
issuer="https://$issuer_host/some/path"
# Create a CA and TLS serving certificates for the Supervisor.
step certificate create \
"Supervisor CA" "$root_ca_crt_path" "$root_ca_key_path" \
--profile root-ca \
--no-password --insecure --force
step certificate create \
"$issuer_host" "$tls_crt_path" "$tls_key_path" \
--profile leaf \
--not-after 8760h \
--ca "$root_ca_crt_path" --ca-key "$root_ca_key_path" \
--no-password --insecure --force
# Put the TLS certificate into a Secret for the Supervisor.
kubectl create secret tls -n "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" my-federation-domain-tls --cert "$tls_crt_path" --key "$tls_key_path" \
--dry-run=client --output yaml | kubectl apply -f -
# Make a FederationDomain using the TLS Secret from above.
cat <<EOF | kubectl apply --namespace "$PINNIPED_TEST_SUPERVISOR_NAMESPACE" -f -
apiVersion: config.supervisor.pinniped.dev/v1alpha1
kind: FederationDomain
metadata:
name: my-federation-domain
spec:
issuer: $issuer
tls:
secretName: my-federation-domain-tls
EOF
echo "Waiting for FederationDomain to initialize..."
# Sleeping is a race, but that's probably good enough for the purposes of this script.
sleep 5
# Test that the federation domain is working before we proceed.
echo "Fetching FederationDomain discovery info..."
echo "$PINNIPED_TEST_PROXY - curl -fLsS --cacert $root_ca_crt_path $issuer/.well-known/openid-configuration"
https_proxy="$PINNIPED_TEST_PROXY" curl -fLsS --cacert "$root_ca_crt_path" "$issuer/.well-known/openid-configuration" | jq .


@ -0,0 +1,85 @@
#!/usr/bin/env bash
#
# This script is intended to be used with:
# - $repo_root/hack/prepare-for-integration-tests.sh --alternate-deploy $(pwd)/deploy_carvel/hack/log-args.sh
# and originated with the following:
# - https://github.com/jvanzyl/pinniped-charts/blob/main/alternate-deploy-helm
# along with this PR to pinniped:
# - https://github.com/vmware-tanzu/pinniped/pull/1028
set -euo pipefail
#
# Helper functions
#
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
log_note "log-args.sh 🐳 🐳 🐳"
# two vars will be received by this script:
# Received: local-user-authenticator
# Received: D00A4537-80F1-4AF2-A3B3-5F20BDBB9AEB
log_note "passed this invocation:"
app=${1}
# tag is fed in from the prepare-for-integration-tests.sh script, just uuidgen to identify a
# specific docker build of the pinniped-server image.
tag=${2}
registry="pinniped.local"
repo="test/build"
registry_repo="$registry/$repo"
if [ "${app}" = "local-user-authenticator" ]; then
log_note "deploy-pachage.sh: local-user-authenticator 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠"
log_note "deploy-pachage.sh: local-user-authenticator 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠"
log_note "deploy-pachage.sh: local-user-authenticator 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠 🐠"
pushd deploy/local-user-authenticator >/dev/null
manifest=/tmp/pinniped-local-user-authenticator.yaml
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
popd >/dev/null
fi
if [ "${app}" = "pinniped-supervisor" ]; then
log_note "deploy-pachage.sh: supervisor 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡"
log_note "deploy-pachage.sh: supervisor 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡"
log_note "deploy-pachage.sh: supervisor 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡 🐡"
fi
if [ "${app}" = "pinniped-concierge" ]; then
log_note "deploy-pachage.sh: concierge 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼"
log_note "deploy-pachage.sh: concierge 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼"
log_note "deploy-pachage.sh: concierge 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼 🪼"
fi


@ -0,0 +1,69 @@
#!/bin/sh
set -o errexit
# default name if not provided
# KIND_CLUSTER_NAME=my-kind-of-cluster ./kind-with-registry.sh
KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:=my-kind-cluster}"
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: ${KIND_CLUSTER_NAME}
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
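
With the registry container wired up this way, images pushed to localhost:5001 on the host are pullable inside the kind cluster under the same name; a quick smoke-test sketch (the busybox image is just an example):
# Push any image through the local registry, then run it inside the cluster.
docker pull busybox:1.36
docker tag busybox:1.36 localhost:5001/busybox:1.36
docker push localhost:5001/busybox:1.36
kubectl run registry-smoke-test --image=localhost:5001/busybox:1.36 --restart=Never -- sleep 60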

deploy_carvel/hack/log-args.sh Executable file

@ -0,0 +1,55 @@
#!/usr/bin/env bash
#
# This script is intended to be used with:
# - $repo_root/hack/prepare-for-integration-tests.sh --alternate-deploy $(pwd)/deploy_carvel/hack/log-args.sh
# and originated with the following:
# - https://github.com/jvanzyl/pinniped-charts/blob/main/alternate-deploy-helm
# along with this PR to pinniped:
# - https://github.com/vmware-tanzu/pinniped/pull/1028
set -euo pipefail
#
# Helper functions
#
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
# two vars will be received by this script:
# Received: local-user-authenticator
# Received: D00A4537-80F1-4AF2-A3B3-5F20BDBB9AEB
app=${1}
# tag is fed in from the prepare-for-integration-tests.sh script, just uuidgen to identify a
# specific docker build of the pinniped-server image.
tag=${2}
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
log_note "log-args.sh >>> script dir: ${SCRIPT_DIR}"
log_note "log-args.sh >>> app: ${app} tag: ${tag}"
# nothing else, this is a test.

deploy_carvel/hack/log-args2.sh Executable file

@ -0,0 +1,53 @@
#!/usr/bin/env bash
#
# This script is intended to be used with:
# - $repo_root/hack/prepare-for-integration-tests.sh --alternate-deploy $(pwd)/deploy_carvel/hack/log-args.sh
# and originated with the following:
# - https://github.com/jvanzyl/pinniped-charts/blob/main/alternate-deploy-helm
# along with this PR to pinniped:
# - https://github.com/vmware-tanzu/pinniped/pull/1028
set -euo pipefail
#
# Helper functions
#
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
# two vars will be received by this script:
# Received: local-user-authenticator
# Received: D00A4537-80F1-4AF2-A3B3-5F20BDBB9AEB
app=${1}
# tag is fed in from the prepare-for-integration-tests.sh script, just uuidgen to identify a
# specific docker build of the pinniped-server image.
tag=${2}
log_note "🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄"
log_note "log-args2.sh >>> app: ${app} tag: ${tag} 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄"
log_note "🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄 🦄"


@ -0,0 +1,99 @@
#!/usr/bin/env bash
#
# This script is intended to be used with:
# - $repo_root/hack/prepare-for-integration-tests.sh --alternate-deploy deploy_carvel/prepare-alt-deploy-with-package.sh
# and originated with the following:
# - https://github.com/jvanzyl/pinniped-charts/blob/main/alternate-deploy-helm
# along with this PR to pinniped:
# - https://github.com/vmware-tanzu/pinniped/pull/1028
set -euo pipefail
#
# Helper functions
#
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "${GREEN}$*${NC}"
else
echo "$*"
fi
}
function log_error() {
RED='\033[0;31m'
NC='\033[0m'
if [[ ${COLORTERM:-unknown} =~ ^(truecolor|24bit)$ ]]; then
echo -e "🙁${RED} Error: $* ${NC}"
else
echo ":( Error: $*"
fi
}
function check_dependency() {
if ! command -v "$1" >/dev/null; then
log_error "Missing dependency..."
log_error "$2"
exit 1
fi
}
# two vars will be received by this script:
# Received: local-user-authenticator
# Received: D00A4537-80F1-4AF2-A3B3-5F20BDBB9AEB
log_note "passed this invocation:"
app=${1}
# tag is fed in from the prepare-for-integration-tests.sh script, just uuidgen to identify a
# specific docker build of the pinniped-server image.
tag=${2}
if [ "${app}" = "local-user-authenticator" ]; then
#
# TODO: continue on from here.
# get this to install correctly, exactly as it did before
# and then do the rest?
# OR TODO: correct the $alternate_deploy issue by creating 3 new flags:
# $alternate_deploy-supervisor
# $alternate_deploy-concierge
# $alternate_deploy-local-user-authenticator
#
# TODO step 1: test to ensure current change did not break the script!
#
log_note "🦄 🦄 🦄 where are we?!?!?"
pwd
log_note "Deploying the local-user-authenticator app to the cluster using kapp..."
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
fi
if [ "${app}" = "pinniped-supervisor" ]; then
# helm upgrade pinniped-supervisor charts/pinniped-supervisor \
# --install \
# --values source/pinniped-supervisor/values-lit.yaml \
# --set image.version=${tag} \
# --namespace supervisor \
# --create-namespace \
# --atomic
# --atomic
log_note "ignoring supervisor, so sad........."
fi
if [ "${app}" = "pinniped-concierge" ]; then
# discovery_url="$(TERM=dumb kubectl cluster-info | awk '/master|control plane/ {print $NF}')"
# helm upgrade pinniped-concierge charts/pinniped-concierge \
# --install \
# --values source/pinniped-concierge/values-lit.yaml \
# --set image.version=${tag} \
# --set config.discovery.url=${discovery_url} \
# --set config.logLevel="debug" \
# --namespace concierge \
# --create-namespace \
# --atomic
log_note "ignoring concierge, so sad........."
fi


@ -0,0 +1,28 @@
---
apiVersion: packaging.carvel.dev/v1alpha1
kind: PackageInstall
metadata:
# name does not have to be versioned; versionSelection.constraints below handles the version
name: concierge-install
namespace: concierge-install-ns
spec:
serviceAccountName: "pinniped-package-rbac-concierge-sa-superadmin-dangerous"
packageRef:
refName: "concierge.pinniped.dev"
versionSelection:
constraints: "0.25.0"
values:
- secretRef:
name: "concierge-package-install-secret"
---
apiVersion: v1
kind: Secret
metadata:
name: "concierge-package-install-secret"
namespace: concierge-install-ns
stringData:
values.yml: |
---
namespace: "concierge"
app_name: "concierge" # this affects services and things, needs to be just the resource name to match hack scripts
replicas: 1 # keep logs testing easy
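
A minimal sketch of applying these resources, assuming the Package named concierge.pinniped.dev is already available on the cluster and that the RBAC manifests in the next file are saved alongside this one (both file names here are placeholders):
# Namespace, ServiceAccount, and RBAC first, then the PackageInstall and its values Secret.
kubectl apply -f concierge-pkg-rbac.yaml
kubectl apply -f concierge-pkg-install.yaml
# kapp-controller reconciles the PackageInstall into an App; wait for Reconcile succeeded on both.
kubectl get packageinstalls.packaging.carvel.dev -n concierge-install-ns
kubectl get apps.kappctrl.k14s.io -n concierge-install-ns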


@ -0,0 +1,37 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: "concierge-install-ns"
---
# ServiceAccount details from the file linked above
apiVersion: v1
kind: ServiceAccount
metadata:
name: "pinniped-package-rbac-concierge-sa-superadmin-dangerous"
namespace: "concierge-install-ns"
# namespace: default # --> sticking to default for everything for now.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "pinniped-package-rbac-concierge-role-superadmin-dangerous"
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["*"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "pinniped-package-rbac-concierge-role-binding-superadmin-dangerous"
subjects:
- kind: ServiceAccount
name: "pinniped-package-rbac-concierge-sa-superadmin-dangerous"
namespace: "concierge-install-ns"
# namespace: default # --> sticking to default for everything for now.
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "pinniped-package-rbac-concierge-role-superadmin-dangerous"


@ -0,0 +1,37 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: "supervisor-install-ns"
---
# ServiceAccount details from the file linked above
apiVersion: v1
kind: ServiceAccount
metadata:
name: "pinniped-package-rbac-supervisor-sa-superadmin-dangerous"
namespace: "supervisor-install-ns"
# namespace: default # --> sticking to default for everything for now.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "pinniped-package-rbac-supervisor-role-superadmin-dangerous"
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["*"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "pinniped-package-rbac-supervisor-role-binding-superadmin-dangerous"
subjects:
- kind: ServiceAccount
name: "pinniped-package-rbac-supervisor-sa-superadmin-dangerous"
namespace: "supervisor-install-ns"
# namespace: default # --> sticking to default for everything for now.
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "pinniped-package-rbac-supervisor-role-superadmin-dangerous"


@ -0,0 +1,28 @@
---
apiVersion: packaging.carvel.dev/v1alpha1
kind: PackageInstall
metadata:
# name does not have to be versioned; versionSelection.constraints below handles the version
name: supervisor-install
namespace: supervisor-install-ns
spec:
serviceAccountName: "pinniped-package-rbac-supervisor-sa-superadmin-dangerous"
packageRef:
refName: "supervisor.pinniped.dev"
versionSelection:
constraints: "0.25.0"
values:
- secretRef:
name: "supervisor-package-install-secret"
---
apiVersion: v1
kind: Secret
metadata:
name: "supervisor-package-install-secret"
namespace: supervisor-install-ns
stringData:
values.yml: |
---
namespace: "supervisor"
app_name: "supervisor" # this affects services and things, needs to be just the resource name to match hack scripts
replicas: 1 # keep logs testing easy


@ -0,0 +1,60 @@
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
kind: OIDCIdentityProvider
metadata:
# namespace: pinniped-supervisor
namespace: supervisor-ns # for this install this is the namespace that I've been using.
name: gitlab
spec:
# Specify the upstream issuer URL.
issuer: https://gitlab.eng.vmware.com
# Specify how to form authorization requests to GitLab.
authorizationConfig:
# GitLab is unusual among OIDC providers in that it returns an
# error if you request the "offline_access" scope during an
# authorization flow, so ask Pinniped to avoid requesting that
# scope when using GitLab by excluding it from this list.
# By listing only "openid" and "email" here, Pinniped will request
# only those scopes from GitLab.
additionalScopes: [openid,email]
# If you would also like to allow your end users to authenticate using
# a password grant, then change this to true. See
# https://docs.gitlab.com/ee/api/oauth2.html#resource-owner-password-credentials-flow
# for more information about using the password grant with GitLab.
allowPasswordGrant: false
# Specify how GitLab claims are mapped to Kubernetes identities.
claims:
# Specify the name of the claim in your GitLab token that will be mapped
# to the "username" claim in downstream tokens minted by the Supervisor.
username: email
# Specify the name of the claim in GitLab that represents the groups
# that the user belongs to. Note that GitLab's "groups" claim comes from
# their "/userinfo" endpoint, not the token.
groups: groups
# Specify the name of the Kubernetes Secret that contains your GitLab
# application's client credentials (created below).
client:
secretName: gitlab-client-credentials
---
apiVersion: v1
kind: Secret
metadata:
# namespace: pinniped-supervisor
namespace: supervisor-ns # for this install this is the namespace that I've been using.
name: gitlab-client-credentials
type: secrets.pinniped.dev/oidc-client
stringData:
# The "Application ID" that you got from GitLab.
clientID: "bbf1c9e13b38642adec54d47a112159549c2de10ae3506086c5af2ff4beb32d6"
# The "Secret" that you got from GitLab.
clientSecret: "16a92c0fdbba5f87a7ea61d6c64a526b5fb838bf436825c98af95459c7c5eeb8"