ContainerImage.Pinniped/hack/lib/tilt/Tiltfile
Ryan Richard 397ec61e57 Specify the supervisor NodePort Service's port and nodePort separately
When using kind we forward the node's port to the host, so we only
really care about the `nodePort` value. For acceptance clusters,
we put an Ingress in front of a NodePort Service, so we only really
care about the `port` value.
2020-10-22 15:37:35 -07:00

187 lines
7.7 KiB
Plaintext

load('ext://restart_process', 'docker_build_with_restart')

# Tilt behavior settings: no snapshots, no analytics, up to 8 parallel updates.
disable_snapshots()
analytics_settings(False)
update_settings(max_parallel_updates=8)

# Environment for the `go build` run by the "compile" resource below:
# produce static linux/amd64 binaries regardless of the host platform.
# (The original set CGO_ENABLED twice; the duplicate line has been removed.)
os.putenv('CGO_ENABLED', '0')
os.putenv('GOOS', 'linux')
os.putenv('GOARCH', 'amd64')
# NOTE(review): presumably consumed by hack/get-ldflags.sh when computing -ldflags — confirm.
os.putenv('KUBE_GIT_VERSION', 'v0.0.0')
#####################################################################################################
# Compile all of our ./cmd/... binaries.
#
# Rebuild every ./cmd/... binary into ./hack/lib/tilt/build whenever any Go source changes.
compile_cmd = 'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...'
compile_deps = ['../../../cmd', '../../../internal', '../../../pkg', '../../../generated']
local_resource('compile', compile_cmd, deps=compile_deps)
#####################################################################################################
# Dex
#
# Render the Dex installation manifest with ytt and apply it to the cluster.
k8s_yaml(local(['ytt', '--file', '../../../test/deploy/dex']))

# Re-render whenever anything under the Dex deploy directory changes.
watch_file('../../../test/deploy/dex')

# Group every deployed Dex object under a single "dex" tab in the tilt UI.
# Listing the extra objects keeps them out of the "uncategorized" tab.
dex_objects = [
    'dex:namespace',
    'dex-config:configmap',
]
k8s_resource(
    workload='dex',  # the deployment name
    objects=dex_objects,
)
#####################################################################################################
# Local-user-authenticator app
#
# Container image for local-user-authenticator. Live-update syncs the freshly compiled
# binary into the running container and restarts the process instead of rebuilding the image.
docker_build_with_restart(
    'image/local-user-auth',
    '.',
    dockerfile='local-user-authenticator.Dockerfile',
    entrypoint=['/usr/local/bin/local-user-authenticator'],
    live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')],
    only=['./build/local-user-authenticator'],
)

# Render the installation manifest with ytt, pointing it at the image built above.
lua_ytt_args = [
    'ytt',
    '--file', '../../../deploy/local-user-authenticator',
    '--data-value', 'image_repo=image/local-user-auth',
    '--data-value', 'image_tag=tilt-dev',
]
k8s_yaml(local(lua_ytt_args))

# Re-render whenever the deploy templates change.
watch_file('../../../deploy/local-user-authenticator')

# Group the deployed objects under a single "local-user-auth" tab in the tilt UI.
k8s_resource(
    workload='local-user-authenticator',  # the deployment name
    new_name='local-user-auth',  # the name shown in the tilt UI
    objects=[
        # objects that would otherwise land in the "uncategorized" tab
        'local-user-authenticator:namespace',
        'local-user-authenticator:serviceaccount',
        'local-user-authenticator:role',
        'local-user-authenticator:rolebinding',
    ],
)
#####################################################################################################
# Supervisor app
#
# Build a container image for supervisor, with live-update enabled.
# Live-update syncs the freshly compiled binary into the running container and
# restarts the process, avoiding a full image rebuild on each code change.
docker_build_with_restart('image/supervisor', '.',
dockerfile='supervisor.Dockerfile',
entrypoint=['/usr/local/bin/pinniped-supervisor'],
live_update=[sync('./build/pinniped-supervisor', '/usr/local/bin/pinniped-supervisor')],
only=['./build/pinniped-supervisor'],
)
# Render the supervisor installation manifest using ytt.
#
# 31234 is the same port number hardcoded in the port forwarding of our kind configuration.
# Don't think that you can just change this!
#
# The Service's `port` and `nodePort` are set separately: when running on kind, the
# node's port is forwarded to the host, so only `nodePort` (31234) matters here;
# the `port` value (80) matters on clusters that put an Ingress in front of the
# NodePort Service instead.
k8s_yaml(local([
'ytt',
'--file', '../../../deploy/supervisor',
'--data-value', 'app_name=pinniped-supervisor',
'--data-value', 'namespace=supervisor',
'--data-value', 'image_repo=image/supervisor',
'--data-value', 'image_tag=tilt-dev',
'--data-value-yaml', 'replicas=1',
'--data-value-yaml', 'service_nodeport_port=80',
'--data-value-yaml', 'service_nodeport_nodeport=31234',
'--data-value-yaml', 'custom_labels={mySupervisorCustomLabelName: mySupervisorCustomLabelValue}',
]))
# Tell tilt to watch all of those files for changes.
watch_file('../../../deploy/supervisor')
# Collect all the deployed supervisor resources under a "supervisor" resource tab.
k8s_resource(
workload='pinniped-supervisor', # this is the deployment name
new_name='supervisor', # this is the name that will appear in the tilt UI
objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'oidcproviderconfigs.config.pinniped.dev:customresourcedefinition',
'pinniped-supervisor-static-config:configmap',
'supervisor:namespace',
'pinniped-supervisor:role',
'pinniped-supervisor:rolebinding',
'pinniped-supervisor:serviceaccount',
],
)
# Container image for the Concierge server. Live-update syncs the freshly compiled
# binary into the running container and restarts the process rather than rebuilding the image.
docker_build_with_restart(
    'image/concierge',
    '.',
    dockerfile='concierge.Dockerfile',
    entrypoint=['/usr/local/bin/pinniped-concierge'],
    live_update=[sync('./build/pinniped-concierge', '/usr/local/bin/pinniped-concierge')],
    only=['./build/pinniped-concierge'],
)
#####################################################################################################
# Concierge app
#
# Render the Concierge server installation manifest using ytt.
#
# Unlike the other apps, this shells out via `sh -c` because discovery_url is computed
# at render time: the awk program extracts the last field (the API server URL) from the
# "Kubernetes master" line of `kubectl cluster-info`, and TERM=dumb keeps that output
# free of terminal escape codes so awk can parse it.
k8s_yaml(local([
'sh', '-c',
'ytt --file ../../../deploy/concierge ' +
'--data-value app_name=pinniped-concierge ' +
'--data-value namespace=concierge ' +
'--data-value image_repo=image/concierge ' +
'--data-value image_tag=tilt-dev ' +
'--data-value kube_cert_agent_image=debian:10.5-slim ' +
'--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/Kubernetes master/ {print $NF}\') ' +
'--data-value-yaml replicas=1 ' +
'--data-value-yaml "custom_labels={myConciergeCustomLabelName: myConciergeCustomLabelValue}"'
]))
# Tell tilt to watch all of those files for changes.
watch_file('../../../deploy/concierge')
# Collect all the deployed Concierge resources under a "concierge" resource tab.
# (The comment previously said "local-user-authenticator" — a copy-paste error.)
k8s_resource(
workload='pinniped-concierge', # this is the deployment name
new_name='concierge', # this is the name that will appear in the tilt UI
objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'concierge:namespace',
'pinniped-concierge-aggregated-api-server:clusterrole',
'pinniped-concierge-aggregated-api-server:clusterrolebinding',
'pinniped-concierge-aggregated-api-server:role',
'pinniped-concierge-aggregated-api-server:rolebinding',
'pinniped-concierge-cluster-info-lister-watcher:role',
'pinniped-concierge-cluster-info-lister-watcher:rolebinding',
'pinniped-concierge-config:configmap',
'pinniped-concierge-create-token-credential-requests:clusterrole',
'pinniped-concierge-create-token-credential-requests:clusterrolebinding',
'pinniped-concierge-extension-apiserver-authentication-reader:rolebinding',
'pinniped-concierge-kube-system-pod-read:role',
'pinniped-concierge-kube-system-pod-read:rolebinding',
'pinniped-concierge:clusterrolebinding',
'pinniped-concierge:serviceaccount',
'credentialissuerconfigs.config.pinniped.dev:customresourcedefinition',
'webhookidentityproviders.idp.pinniped.dev:customresourcedefinition',
'v1alpha1.login.pinniped.dev:apiservice',
],
)
#####################################################################################################
# Finish setting up cluster and creating integration test env file
#
# Once all three apps are up, run the prepare script, which produces the environment
# file consumed by the integration test suite.
prepare_script = '../../prepare-for-integration-tests.sh'
local_resource(
    'test-env',
    'TILT_MODE=yes ' + prepare_script,
    resource_deps=['local-user-auth', 'concierge', 'supervisor'],
    deps=[prepare_script],
)