ContainerImage.Pinniped/hack/lib/tilt/Tiltfile
Andrew Keesler af11d8cd58
Run Tilt images as root for faster reload
Previously, when a *.go file change triggered a Tilt reload, the reload would
take ~13 seconds and we would see this error message in the Tilt logs for each
component.

  Live Update failed with unexpected error:
    command terminated with exit code 2
  Falling back to a full image build + deploy

Now Tilt should reload images much faster (~3 seconds) since we are running
the images as root.

Note! Reloading the Concierge component still takes ~13 seconds, because 2
containers running in the Concierge namespace use the Concierge image: the
main Concierge app and the kube cert agent pod. Tilt can't live-update both of
these at once, so it falls back to a full image build and we see this error
message.

  Will not perform Live Update because:
    Error retrieving container info: can only get container info for a single pod; image target image:image/concierge has 2 pods
  Falling back to a full image build + deploy

Signed-off-by: Andrew Keesler <akeesler@vmware.com>
2021-01-15 11:34:53 -05:00


load('ext://restart_process', 'docker_build_with_restart')
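# The extension above provides docker_build_with_restart(), a wrapper around docker_build()
# that re-runs the container's entrypoint after each live_update sync, so a freshly
# compiled binary takes effect without a full image rebuild.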
disable_snapshots()
analytics_settings(False)
update_settings(max_parallel_updates=8)
os.putenv('CGO_ENABLED', '0')
os.putenv('GOOS', 'linux')
os.putenv('GOARCH', 'amd64')
os.putenv('KUBE_GIT_VERSION', 'v0.0.0')
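# These env vars configure the `go build` run by the 'compile' resource below:
# CGO_ENABLED=0 yields static binaries suitable for linux/amd64 containers, and
# KUBE_GIT_VERSION presumably feeds the version stamping done by hack/get-ldflags.sh.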
#####################################################################################################
# Compile all of our ./cmd/... binaries.
#
local_resource(
    'compile',
    'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...',
    deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'],
)
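# The compiled binaries land in ./hack/lib/tilt/build (./build relative to this
# Tiltfile), which is where the live_update sync() calls below pick them up.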
#####################################################################################################
# Test IDP (Dex + cert generation + squid proxy)
#
# Render the IDP installation manifest using ytt.
k8s_yaml(local(['ytt',
    '--file', '../../../test/deploy/dex',
    '--data-value', 'supervisor_redirect_uri=https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback',
]))
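# supervisor_redirect_uri presumably gets registered in the Dex config as an allowed
# OAuth2 redirect URI for the Supervisor's upstream OIDC client.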
# Tell Tilt to watch all of those files for changes.
watch_file('../../../test/deploy/dex')
k8s_resource(objects=['dex:namespace'], new_name='dex-ns')
k8s_resource(workload='cert-issuer', resource_deps=['dex-ns'], objects=[
    'cert-issuer:serviceaccount',
    'cert-issuer:role',
    'cert-issuer:rolebinding',
])
k8s_resource(workload='proxy', resource_deps=['dex-ns'])
k8s_resource(workload='dex', resource_deps=['dex-ns', 'cert-issuer'], objects=[
    'dex-config:configmap',
])
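# dex waits for cert-issuer, presumably so that its serving certificate exists before
# the dex pod starts.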
#####################################################################################################
# Local-user-authenticator app
#
# Build a container image for local-user-authenticator, with live-update enabled.
docker_build_with_restart('image/local-user-auth', '.',
    dockerfile='local-user-authenticator.Dockerfile',
    entrypoint=['/usr/local/bin/local-user-authenticator'],
    live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')],
    only=['./build/local-user-authenticator'],
)
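# only= restricts the Docker build context to the compiled binary, so unrelated file
# changes don't trigger image rebuilds; the same pattern is used for every image below.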
# Render the local-user-authenticator installation manifest using ytt.
k8s_yaml(local([
    'ytt',
    '--file', '../../../deploy/local-user-authenticator',
    '--data-value', 'image_repo=image/local-user-auth',
    '--data-value', 'image_tag=tilt-dev',
    '--data-value-yaml', 'run_as_user=0',
    '--data-value-yaml', 'run_as_group=0',
]))
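# run_as_user/run_as_group of 0 make the container run as root; per the commit message
# above, this keeps live updates fast (as non-root they failed and fell back to full
# image rebuilds).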
# Tell Tilt to watch all of those files for changes.
watch_file('../../../deploy/local-user-authenticator')
# Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab.
k8s_resource(
    workload='local-user-authenticator', # this is the deployment name
    new_name='local-user-auth', # this is the name that will appear in the Tilt UI
    objects=[
        # these are the objects that would otherwise appear in the "uncategorized" tab in the Tilt UI
        'local-user-authenticator:namespace',
        'local-user-authenticator:serviceaccount',
        'local-user-authenticator:role',
        'local-user-authenticator:rolebinding',
    ],
)
#####################################################################################################
# Supervisor app
#
# Build a container image for supervisor, with live-update enabled.
docker_build_with_restart('image/supervisor', '.',
    dockerfile='supervisor.Dockerfile',
    entrypoint=['/usr/local/bin/pinniped-supervisor'],
    live_update=[sync('./build/pinniped-supervisor', '/usr/local/bin/pinniped-supervisor')],
    only=['./build/pinniped-supervisor'],
)
# Render the supervisor installation manifest using ytt.
#
# 31234 and 31243 are the same port numbers hardcoded in the port forwarding of our kind configuration.
# Don't think that you can just change this!
k8s_yaml(local([
    'ytt',
    '--file', '../../../deploy/supervisor',
    '--data-value', 'app_name=pinniped-supervisor',
    '--data-value', 'namespace=supervisor',
    '--data-value', 'image_repo=image/supervisor',
    '--data-value', 'image_tag=tilt-dev',
    '--data-value', 'log_level=debug',
    '--data-value-yaml', 'replicas=1',
    '--data-value-yaml', 'service_http_nodeport_port=80',
    '--data-value-yaml', 'service_http_nodeport_nodeport=31234',
    '--data-value-yaml', 'service_https_nodeport_port=443',
    '--data-value-yaml', 'service_https_nodeport_nodeport=31243',
    '--data-value-yaml', 'service_https_clusterip_port=443',
    '--data-value-yaml', 'custom_labels={mySupervisorCustomLabelName: mySupervisorCustomLabelValue}',
    '--data-value-yaml', 'run_as_user=0',
    '--data-value-yaml', 'run_as_group=0',
]))
# Tell Tilt to watch all of those files for changes.
watch_file('../../../deploy/supervisor')
# Collect all the deployed supervisor resources under a "supervisor" resource tab.
k8s_resource(
    workload='pinniped-supervisor', # this is the deployment name
    new_name='supervisor', # this is the name that will appear in the Tilt UI
    objects=[
        # these are the objects that would otherwise appear in the "uncategorized" tab in the Tilt UI
        'federationdomains.config.supervisor.pinniped.dev:customresourcedefinition',
        'oidcidentityproviders.idp.supervisor.pinniped.dev:customresourcedefinition',
        'pinniped-supervisor-static-config:configmap',
        'supervisor:namespace',
        'pinniped-supervisor:role',
        'pinniped-supervisor:rolebinding',
        'pinniped-supervisor:serviceaccount',
    ],
)
#####################################################################################################
# Concierge app
#
# Build a container image for the Concierge server, with live-update enabled.
docker_build_with_restart('image/concierge', '.',
    dockerfile='concierge.Dockerfile',
    entrypoint=['/usr/local/bin/pinniped-concierge'],
    live_update=[sync('./build/pinniped-concierge', '/usr/local/bin/pinniped-concierge')],
    only=['./build/pinniped-concierge'],
)
# Render the Concierge server installation manifest using ytt.
k8s_yaml(local([
    'sh', '-c',
    'ytt --file ../../../deploy/concierge ' +
    '--data-value app_name=pinniped-concierge ' +
    '--data-value namespace=concierge ' +
    '--data-value image_repo=image/concierge ' +
    '--data-value image_tag=tilt-dev ' +
    '--data-value kube_cert_agent_image=debian:10.7-slim ' +
    '--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/master|control plane/ {print $NF}\') ' +
    '--data-value log_level=debug ' +
    '--data-value-yaml replicas=1 ' +
    '--data-value-yaml "custom_labels={myConciergeCustomLabelName: myConciergeCustomLabelValue}" ' +
    '--data-value-yaml run_as_user=0 ' +
    '--data-value-yaml run_as_group=0',
]))
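# The sh -c wrapper lets the $(TERM=dumb kubectl cluster-info ...) command substitution
# run at render time to discover the cluster's API server URL for discovery_url.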
# Tell Tilt to watch all of those files for changes.
watch_file('../../../deploy/concierge')
# Collect all the deployed Concierge resources under a "concierge" resource tab.
k8s_resource(
    workload='pinniped-concierge', # this is the deployment name
    new_name='concierge', # this is the name that will appear in the Tilt UI
    objects=[
        # these are the objects that would otherwise appear in the "uncategorized" tab in the Tilt UI
        'concierge:namespace',
        'pinniped-concierge-aggregated-api-server:clusterrole',
        'pinniped-concierge-aggregated-api-server:clusterrolebinding',
        'pinniped-concierge-aggregated-api-server:role',
        'pinniped-concierge-aggregated-api-server:rolebinding',
        'pinniped-concierge-cluster-info-lister-watcher:role',
        'pinniped-concierge-cluster-info-lister-watcher:rolebinding',
        'pinniped-concierge-config:configmap',
        'pinniped-concierge-create-token-credential-requests:clusterrole',
        'pinniped-concierge-create-token-credential-requests:clusterrolebinding',
        'pinniped-concierge-extension-apiserver-authentication-reader:rolebinding',
        'pinniped-concierge-kube-system-pod-read:role',
        'pinniped-concierge-kube-system-pod-read:rolebinding',
        'pinniped-concierge:clusterrolebinding',
        'pinniped-concierge:serviceaccount',
        'credentialissuers.config.concierge.pinniped.dev:customresourcedefinition',
        'webhookauthenticators.authentication.concierge.pinniped.dev:customresourcedefinition',
        'v1alpha1.login.concierge.pinniped.dev:apiservice',
    ],
)
#####################################################################################################
# Finish setting up cluster and creating integration test env file
#
# Collect environment variables needed to run our integration test suite.
local_resource(
    'test-env',
    'TILT_MODE=yes ../../prepare-for-integration-tests.sh',
    resource_deps=['local-user-auth', 'concierge', 'supervisor', 'dex', 'proxy'],
    deps=['../../prepare-for-integration-tests.sh'],
)
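# TILT_MODE=yes presumably tells prepare-for-integration-tests.sh to skip the build and
# deploy work that Tilt already did and just generate the integration test env file.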