Allow creation of different Service types in Supervisor ytt templates

- Tiltfile and prepare-for-integration-tests.sh both specify the
  NodePort Service using `--data-value-yaml 'service_nodeport_port=31234'`
- Also rename the namespaces used by the Concierge and Supervisor apps
  when running integration tests locally
This commit is contained in:
Ryan Richard 2020-10-09 16:00:11 -07:00
parent 34549b779b
commit 354b922e48
6 changed files with 97 additions and 87 deletions

View File

@@ -76,8 +76,8 @@ spec:
command: #! override the default entrypoint command: #! override the default entrypoint
- /usr/local/bin/pinniped-supervisor - /usr/local/bin/pinniped-supervisor
args: args:
- /etc/podinfo #! TODO proper flag parsing instead of positional - /etc/podinfo
- /etc/config/pinniped.yaml #! TODO proper flag parsing instead of positional - /etc/config/pinniped.yaml
resources: resources:
requests: requests:
memory: "128Mi" memory: "128Mi"
@@ -86,24 +86,6 @@ spec:
mountPath: /etc/config mountPath: /etc/config
- name: podinfo - name: podinfo
mountPath: /etc/podinfo mountPath: /etc/podinfo
#! livenessProbe:
#! httpGet:
#! path: /healthz
#! port: 443
#! scheme: HTTPS
#! initialDelaySeconds: 2
#! timeoutSeconds: 15
#! periodSeconds: 10
#! failureThreshold: 5
#! readinessProbe:
#! httpGet:
#! path: /healthz
#! port: 443
#! scheme: HTTPS
#! initialDelaySeconds: 2
#! timeoutSeconds: 3
#! periodSeconds: 10
#! failureThreshold: 3
volumes: volumes:
- name: config-volume - name: config-volume
configMap: configMap:
@@ -128,19 +110,3 @@ spec:
matchLabels: matchLabels:
app: #@ data.values.app_name app: #@ data.values.app_name
topologyKey: kubernetes.io/hostname topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
name: #@ data.values.app_name
namespace: #@ data.values.namespace
labels:
app: #@ data.values.app_name
spec:
type: ClusterIP
selector:
app: #@ data.values.app_name
ports:
- protocol: TCP
port: 80
targetPort: 80

View File

@@ -0,0 +1,59 @@
#@ load("@ytt:data", "data")

#! Each Service below is optional: it is rendered only when the corresponding
#! `service_*_port` data value is set (a truthy value). Typically only one of
#! the three is enabled per deployment. All Services select the Supervisor pods
#! via the shared `app` label and forward traffic to container port 80.

#! NodePort Service: exposes the Supervisor on a fixed port of every node.
#! Useful for local/kind clusters where LoadBalancers are unavailable.
#@ if data.values.service_nodeport_port:
---
apiVersion: v1
kind: Service
metadata:
  name: #@ data.values.app_name + "-nodeport"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: NodePort
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
      #! The externally reachable node port, e.g. 31234.
      nodePort: #@ data.values.service_nodeport_port
#@ end

#! ClusterIP Service: exposes the Supervisor inside the cluster only.
#@ if data.values.service_clusterip_port:
---
apiVersion: v1
kind: Service
metadata:
  name: #@ data.values.app_name + "-clusterip"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: ClusterIP
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: #@ data.values.service_clusterip_port
      targetPort: 80
#@ end

#! LoadBalancer Service: requests an external load balancer from the
#! cloud provider (no-op on clusters without a LB implementation).
#@ if data.values.service_loadbalancer_port:
---
apiVersion: v1
kind: Service
metadata:
  name: #@ data.values.app_name + "-loadbalancer"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: LoadBalancer
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: #@ data.values.service_loadbalancer_port
      targetPort: 80
#@ end

View File

@@ -20,3 +20,10 @@ image_tag: latest
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]' #! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional. #! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}} image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
#! Specify how to expose the Supervisor app as a Service.
#! Typically you would set a value for only one of the following.
#! Setting any of these values means that a Service of that type will be created.
service_nodeport_port: #! e.g. 31234
service_loadbalancer_port: #! e.g. 443
service_clusterip_port: #! e.g. 443

View File

@@ -8,15 +8,18 @@ os.putenv('GOARCH', 'amd64')
os.putenv('CGO_ENABLED', '0') os.putenv('CGO_ENABLED', '0')
os.putenv('KUBE_GIT_VERSION', 'v0.0.0') os.putenv('KUBE_GIT_VERSION', 'v0.0.0')
#####################################################################################################
# Compile all of our ./cmd/... binaries. # Compile all of our ./cmd/... binaries.
#
local_resource( local_resource(
'compile', 'compile',
'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...', 'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...',
deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'], deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'],
) )
# #####################################################################################################
# local-user-authenticator app # Local-user-authenticator app
# #
# Build a container image for local-user-authenticator, with live-update enabled. # Build a container image for local-user-authenticator, with live-update enabled.
@@ -37,17 +40,18 @@ k8s_yaml(local([
# Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab. # Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab.
k8s_resource( k8s_resource(
workload='local-user-authenticator', workload='local-user-authenticator', # this is the deployment name
new_name='local-user-auth', new_name='local-user-auth', # this is the name that will appear in the tilt UI
objects=[ objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'local-user-authenticator:namespace', 'local-user-authenticator:namespace',
'local-user-authenticator:serviceaccount', 'local-user-authenticator:serviceaccount',
'local-user-authenticator:role', 'local-user-authenticator:role',
'local-user-authenticator:rolebinding' 'local-user-authenticator:rolebinding',
], ],
) )
# #####################################################################################################
# Supervisor app # Supervisor app
# #
@@ -63,19 +67,23 @@ docker_build_with_restart('image/supervisor', '.',
k8s_yaml(local([ k8s_yaml(local([
'ytt', 'ytt',
'--file', '../../../deploy/supervisor', '--file', '../../../deploy/supervisor',
'--data-value', 'app_name=pinniped-supervisor',
'--data-value', 'namespace=supervisor',
'--data-value', 'image_repo=image/supervisor', '--data-value', 'image_repo=image/supervisor',
'--data-value', 'image_tag=tilt-dev', '--data-value', 'image_tag=tilt-dev',
'--data-value-yaml', 'replicas=1' '--data-value-yaml', 'replicas=1',
'--data-value-yaml', 'service_nodeport_port=31234',
])) ]))
# Collect all the deployed supervisor resources under a "supervisor" resource tab. # Collect all the deployed supervisor resources under a "supervisor" resource tab.
k8s_resource( k8s_resource(
workload='pinniped-supervisor', workload='pinniped-supervisor', # this is the deployment name
new_name='supervisor', new_name='supervisor', # this is the name that will appear in the tilt UI
objects=[ objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'oidcproviderconfigs.config.pinniped.dev:customresourcedefinition', 'oidcproviderconfigs.config.pinniped.dev:customresourcedefinition',
'pinniped-supervisor-static-config:configmap', 'pinniped-supervisor-static-config:configmap',
'pinniped-supervisor:namespace', 'supervisor:namespace',
'pinniped-supervisor:role', 'pinniped-supervisor:role',
'pinniped-supervisor:rolebinding', 'pinniped-supervisor:rolebinding',
'pinniped-supervisor:serviceaccount', 'pinniped-supervisor:serviceaccount',
@@ -90,9 +98,7 @@ docker_build_with_restart('image/concierge', '.',
only=['./build/pinniped-concierge'], only=['./build/pinniped-concierge'],
) )
k8s_yaml('nodeport.yaml') #####################################################################################################
#
# Concierge app # Concierge app
# #
@@ -101,20 +107,21 @@ k8s_yaml(local([
'sh', '-c', 'sh', '-c',
'ytt --file ../../../deploy/concierge ' + 'ytt --file ../../../deploy/concierge ' +
'--data-value app_name=pinniped-concierge ' + '--data-value app_name=pinniped-concierge ' +
'--data-value namespace=integration ' + '--data-value namespace=concierge ' +
'--data-value image_repo=image/concierge ' + '--data-value image_repo=image/concierge ' +
'--data-value image_tag=tilt-dev ' + '--data-value image_tag=tilt-dev ' +
'--data-value kube_cert_agent_image=debian:10.5-slim ' + '--data-value kube_cert_agent_image=debian:10.5-slim ' +
'--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/Kubernetes master/ {print $NF}\') ' + '--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/Kubernetes master/ {print $NF}\') ' +
'--data-value-yaml replicas=1' '--data-value-yaml replicas=1',
])) ]))
# Collect all the deployed local-user-authenticator resources under a "concierge" resource tab. # Collect all the deployed local-user-authenticator resources under a "concierge" resource tab.
k8s_resource( k8s_resource(
workload='pinniped-concierge', workload='pinniped-concierge', # this is the deployment name
new_name='concierge', new_name='concierge', # this is the name that will appear in the tilt UI
objects=[ objects=[
'integration:namespace', # these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'concierge:namespace',
'pinniped-concierge-aggregated-api-server:clusterrole', 'pinniped-concierge-aggregated-api-server:clusterrole',
'pinniped-concierge-aggregated-api-server:clusterrolebinding', 'pinniped-concierge-aggregated-api-server:clusterrolebinding',
'pinniped-concierge-aggregated-api-server:role', 'pinniped-concierge-aggregated-api-server:role',
@@ -135,7 +142,7 @@ k8s_resource(
], ],
) )
# #####################################################################################################
# Finish setting up cluster and creating integration test env file # Finish setting up cluster and creating integration test env file
# #

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: pinniped-supervisor-node-port
namespace: pinniped-supervisor
spec:
type: NodePort
selector:
app: pinniped-supervisor
ports:
- port: 80
targetPort: 80
nodePort: 31234

View File

@@ -200,7 +200,7 @@ kubectl create secret generic "$test_username" \
# Deploy the Pinniped Supervisor # Deploy the Pinniped Supervisor
# #
supervisor_app_name="pinniped-supervisor" supervisor_app_name="pinniped-supervisor"
supervisor_namespace="pinniped-supervisor" supervisor_namespace="supervisor"
if ! tilt_mode; then if ! tilt_mode; then
pushd deploy/supervisor >/dev/null pushd deploy/supervisor >/dev/null
@@ -210,27 +210,11 @@ if ! tilt_mode; then
--data-value "app_name=$supervisor_app_name" \ --data-value "app_name=$supervisor_app_name" \
--data-value "namespace=$supervisor_namespace" \ --data-value "namespace=$supervisor_namespace" \
--data-value "image_repo=$registry_repo" \ --data-value "image_repo=$registry_repo" \
--data-value-yaml 'service_nodeport_port=31234' \
--data-value "image_tag=$tag" >"$manifest" --data-value "image_tag=$tag" >"$manifest"
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest" kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
log_note "Adding NodePort service to expose the Pinniped Supervisor app on the kind node..."
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: ${supervisor_app_name}-node-port
namespace: $supervisor_namespace
spec:
type: NodePort
selector:
app: $supervisor_app_name
ports:
- port: 80
targetPort: 80
nodePort: 31234
EOF
popd >/dev/null popd >/dev/null
fi fi
@@ -238,7 +222,7 @@ fi
# Deploy Pinniped # Deploy Pinniped
# #
concierge_app_name="pinniped-concierge" concierge_app_name="pinniped-concierge"
concierge_namespace="integration" concierge_namespace="concierge"
webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate" webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate"
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')" webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')" discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')"