Merge pull request #146 from mattmoyer/tilt

Add Tilt-based local dev workflow.
This commit is contained in:
Matt Moyer 2020-10-06 15:19:29 -05:00 committed by GitHub
commit 79c07f3e21
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 425 additions and 80 deletions

3
.gitignore vendored
View File

@ -16,3 +16,6 @@
# goland # goland
.idea .idea
# Intermediate files used by Tilt
/hack/lib/tilt/build

View File

@ -1,4 +1,4 @@
exclude: '^generated/' exclude: '^(generated|hack/lib/tilt/tilt_modules)/'
repos: repos:
- repo: git://github.com/pre-commit/pre-commit-hooks - repo: git://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0 rev: v3.2.0

View File

@ -93,12 +93,41 @@ docker build .
### Running Integration Tests ### Running Integration Tests
```bash 1. Install dependencies:
./hack/prepare-for-integration-tests.sh && source /tmp/integration-test-env && go test -v -count 1 ./test/...
```
The `./hack/prepare-for-integration-tests.sh` script will create a local - [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start)
[`kind`](https://kind.sigs.k8s.io/) cluster on which the integration tests will run. - [`tilt`](https://docs.tilt.dev/install.html)
- [`ytt`](https://carvel.dev/#getting-started)
- [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
On macOS, these tools can be installed with [Homebrew](https://brew.sh/):
```bash
brew install kind tilt-dev/tap/tilt k14s/tap/ytt kubectl
```
1. Create a local Kubernetes cluster using `kind`:
```bash
kind create cluster --image kindest/node:v1.18.8
```
1. Install Pinniped and supporting dependencies using `tilt`:
```bash
./hack/tilt-up.sh
```
Tilt will continue running and live-updating the Pinniped deployment whenever the code changes.
1. Run the Pinniped integration tests:
```bash
source ./hack/lib/tilt/integration-test.env && go test -v -count 1 ./test/integration
```
To uninstall the test environment, run `./hack/tilt-down.sh`.
To destroy the local Kubernetes cluster, run `kind delete cluster`.
### Observing Tests on the Continuous Integration Environment ### Observing Tests on the Continuous Integration Environment

View File

@ -5,6 +5,7 @@ package main
import ( import (
"os" "os"
"time"
genericapiserver "k8s.io/apiserver/pkg/server" genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/pkg/version" "k8s.io/client-go/pkg/version"
@ -19,7 +20,12 @@ func main() {
logs.InitLogs() logs.InitLogs()
defer logs.FlushLogs() defer logs.FlushLogs()
klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get()) // Dump out the time since compile (mostly useful for benchmarking our local development cycle latency).
var timeSinceCompile time.Duration
if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil {
timeSinceCompile = time.Since(buildDate).Round(time.Second)
}
klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile)
ctx := genericapiserver.SetupSignalContext() ctx := genericapiserver.SetupSignalContext()

View File

@ -40,11 +40,15 @@ data:
apiService: (@= data.values.app_name + "-api" @) apiService: (@= data.values.app_name + "-api" @)
kubeCertAgent: kubeCertAgent:
namePrefix: (@= data.values.app_name + "-kube-cert-agent-" @) namePrefix: (@= data.values.app_name + "-kube-cert-agent-" @)
(@ if data.values.kube_cert_agent_image: @)
image: (@= data.values.kube_cert_agent_image @)
(@ else: @)
(@ if data.values.image_digest: @) (@ if data.values.image_digest: @)
image: (@= data.values.image_repo + "@" + data.values.image_digest @) image: (@= data.values.image_repo + "@" + data.values.image_digest @)
(@ else: @) (@ else: @)
image: (@= data.values.image_repo + ":" + data.values.image_tag @) image: (@= data.values.image_repo + ":" + data.values.image_tag @)
(@ end @) (@ end @)
(@ end @)
(@ if data.values.image_pull_dockerconfigjson: @) (@ if data.values.image_pull_dockerconfigjson: @)
imagePullSecrets: imagePullSecrets:
- image-pull-secret - image-pull-secret

View File

@ -15,6 +15,11 @@ image_repo: docker.io/getpinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8 image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
image_tag: latest image_tag: latest
#! Optionally specify a different image for the "kube-cert-agent" pod which is scheduled
#! on the control plane. This image needs only to include `sleep` and `cat` binaries.
#! By default, the same image specified for image_repo/image_digest/image_tag will be re-used.
kube_cert_agent_image:
#! Specifies a secret to be used when pulling the above container image. #! Specifies a secret to be used when pulling the above container image.
#! Can be used when the above image_repo is a private registry. #! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]' #! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'

97
hack/lib/tilt/Tiltfile Normal file
View File

@ -0,0 +1,97 @@
load('ext://restart_process', 'docker_build_with_restart')

disable_snapshots()
analytics_settings(False)
update_settings(max_parallel_updates=8)

# Cross-compile static Linux binaries regardless of the host platform.
# NOTE: the original file set CGO_ENABLED twice; the duplicate has been removed.
os.putenv('CGO_ENABLED', '0')
os.putenv('GOOS', 'linux')
os.putenv('GOARCH', 'amd64')
os.putenv('KUBE_GIT_VERSION', 'v0.0.0')

# Compile all of our ./cmd/... binaries.
local_resource(
  'compile',
  'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...',
  deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'],
)

# Build a container image for local-user-authenticator, with live-update enabled.
docker_build_with_restart('image/local-user-auth', '.',
  dockerfile='local-user-authenticator.Dockerfile',
  entrypoint=['/usr/local/bin/local-user-authenticator'],
  live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')],
  only=['./build/local-user-authenticator'],
)

# Render the local-user-authenticator installation manifest using ytt.
k8s_yaml(local([
  'ytt',
  '--file', '../../../deploy-local-user-authenticator',
  '--data-value', 'image_repo=image/local-user-auth',
  '--data-value', 'image_tag=tilt-dev',
]))

# Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab.
k8s_resource(
  workload='local-user-authenticator',
  new_name='local-user-auth',
  objects=[
    'local-user-authenticator:namespace',
    'local-user-authenticator:serviceaccount',
    'local-user-authenticator:role',
    'local-user-authenticator:rolebinding',
  ],
)

# Build a container image for the Pinniped server, with live-update enabled.
docker_build_with_restart('image/pinniped', '.',
  dockerfile='pinniped.Dockerfile',
  entrypoint=['/usr/local/bin/pinniped-server'],
  live_update=[sync('./build/pinniped-server', '/usr/local/bin/pinniped-server')],
  only=['./build/pinniped-server'],
)

# Render the Pinniped server installation manifest using ytt.
k8s_yaml(local([
  'sh', '-c',
  'ytt --file ../../../deploy ' +
  '--data-value namespace=integration ' +
  '--data-value image_repo=image/pinniped ' +
  '--data-value image_tag=tilt-dev ' +
  '--data-value kube_cert_agent_image=debian:10.5-slim ' +
  '--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/Kubernetes master/ {print $NF}\') ' +
  '--data-value-yaml replicas=1'
]))

# Collect all the deployed local-user-authenticator resources under a "deploy/pinniped" resource tab.
k8s_resource(
  workload='pinniped',
  objects=[
    'integration:namespace',
    'credentialissuerconfigs.config.pinniped.dev:customresourcedefinition',
    'webhookidentityproviders.idp.pinniped.dev:customresourcedefinition',
    'pinniped:serviceaccount',
    'pinniped-aggregated-api-server:role',
    'pinniped-kube-system-pod-read:role',
    'pinniped-cluster-info-lister-watcher:role',
    'pinniped-aggregated-api-server:clusterrole',
    'pinniped-create-token-credential-requests:clusterrole',
    'pinniped-aggregated-api-server:rolebinding',
    'pinniped-kube-system-pod-read:rolebinding',
    'pinniped-extension-apiserver-authentication-reader:rolebinding',
    'pinniped-cluster-info-lister-watcher:rolebinding',
    'pinniped-aggregated-api-server:clusterrolebinding',
    'pinniped-create-token-credential-requests:clusterrolebinding',
    'pinniped:clusterrolebinding',
    'pinniped-config:configmap',
    'v1alpha1.login.pinniped.dev:apiservice',
  ],
)

# Collect environment variables needed to run our integration test suite.
local_resource(
  'test-env',
  'TILT_MODE=yes ../../prepare-for-integration-tests.sh',
  resource_deps=['local-user-auth', 'pinniped'],
  deps=['../../prepare-for-integration-tests.sh'],
)

View File

@ -0,0 +1,14 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime-only base image based on Debian slim; no Go toolchain is
# needed here because the binary is compiled outside the container.
FROM debian:10.5-slim

# Copy the binary which was built outside the container.
COPY build/local-user-authenticator /usr/local/bin/local-user-authenticator

# Document the serving port (informational only; EXPOSE does not publish it).
EXPOSE 443

# Set the entrypoint so the container runs the authenticator binary directly.
ENTRYPOINT ["/usr/local/bin/local-user-authenticator"]

View File

@ -0,0 +1,14 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime-only base image based on Debian slim; no Go toolchain is
# needed here because the binary is compiled outside the container.
FROM debian:10.5-slim

# Copy the binary which was built outside the container.
COPY build/pinniped-server /usr/local/bin/pinniped-server

# Document the serving port (informational only; EXPOSE does not publish it).
EXPOSE 443

# Set the entrypoint so the container runs the server binary directly.
ENTRYPOINT ["/usr/local/bin/pinniped-server"]

View File

@ -0,0 +1,44 @@
def docker_build_sub(ref, context, extra_cmds, child_context=None, base_suffix='-tilt_docker_build_sub_base', live_update=[], **kwargs):
    """
    Substitutes in a docker image with extra Dockerfile commands.

    This allows you to easily customize your docker build for your dev environment without changing your prod Dockerfile.

    This works by:
    1. Renaming the original image to, e.g. "myimage-base"
    2. Creating a new image named, e.g. "myimage" that starts with "FROM myimage-base"
    3. Adding whatever extra stuff you want

    Examples:

    ```
    # load the extension
    load("ext://docker_build_sub", "docker_build_sub")

    # ensure you have vim installed when running in dev, so you can
    # shell into the box and look at files
    docker_build_sub('myimage', '.', extra_cmds=["apt-get install vim"])

    # use live_update to sync files from outside your docker context
    docker_build_sub('foo', 'foo', child_context='bar',
        extra_cmds=['ADD . /bar'],
        live_update=[
            sync('foo', '/foo'),
            sync('bar', '/bar'),
        ]
    )
    ```

    This function supports all the normal `docker_build` arguments. See [docker_build API docs](https://docs.tilt.dev/api.html#api.docker_build) for arguments not mentioned here.

    Args:
      context (str): The directory in which to build the parent (original) image. If child_context is not set, also the directory in which to build the new child image.
      extra_cmds (List[str]): Any extra Dockerfile commands you want to run when building the image.
      child_context (str): The directory in which to build the new child image. If unset (None), defaults to the parent image's context.
      base_suffix (str): The suffix to append to the parent (original) image's name so that the new child image can take the original name. This is mostly ignorable, and just here in case the default generates a conflict for you.
    """
    if not child_context:
        child_context = context

    # Honor the base_suffix parameter. The previous implementation hard-coded
    # '-base' here, which made base_suffix a silent no-op and contradicted the
    # docstring's promise that it can be overridden to avoid name conflicts.
    base_ref = '%s%s' % (ref, base_suffix)

    # Build the user's original image under the suffixed (base) name.
    docker_build(base_ref, context, **kwargs)

    # Build the child image under the original name, layering the extra
    # Dockerfile commands on top of the base image.
    df = '\n'.join(['FROM %s' % base_ref] + extra_cmds)
    docker_build(ref, child_context, dockerfile_contents=df, live_update=live_update, **kwargs)

View File

@ -0,0 +1,16 @@
{
"Extensions": [
{
"Name": "restart_process",
"GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
"ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
"TimeFetched": "2020-09-03T23:04:40.167635-05:00"
},
{
"Name": "docker_build_sub",
"GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
"ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
"TimeFetched": "2020-09-04T18:01:24.795509-05:00"
}
]
}

View File

@ -0,0 +1,78 @@
# File that Tilt touches inside the container to signal a process restart.
RESTART_FILE = '/.restart-proc'

# Starlark type name for a restart_container() live_update step, used below to
# reject incompatible step lists.
TYPE_RESTART_CONTAINER_STEP = 'live_update_restart_container_step'

KWARGS_BLACKLIST = [
    # since we'll be passing `dockerfile_contents` when building the
    # child image, remove any kwargs that might conflict
    'dockerfile', 'dockerfile_contents',

    # 'target' isn't relevant to our child build--if we pass this arg,
    # Docker will just fail to find the specified stage and error out
    'target',
]

def docker_build_with_restart(ref, context, entrypoint, live_update,
                              base_suffix='-tilt_docker_build_with_restart_base', restart_file=RESTART_FILE, **kwargs):
    """Wrap a docker_build call and its associated live_update steps so that the last step
    of any live update is to rerun the given entrypoint.

    Args:
      ref: name for this image (e.g. 'myproj/backend' or 'myregistry/myproj/backend'); as the parameter of the same name in docker_build
      context: path to use as the Docker build context; as the parameter of the same name in docker_build
      entrypoint: the command to be (re-)executed when the container starts or when a live_update is run
      live_update: set of steps for updating a running container; as the parameter of the same name in docker_build
      base_suffix: suffix for naming the base image, applied as {ref}{base_suffix}
      restart_file: file that Tilt will update during a live_update to signal the entrypoint to rerun
      **kwargs: will be passed to the underlying `docker_build` call
    """

    # first, validate the given live_update steps
    if len(live_update) == 0:
        fail("`docker_build_with_restart` requires at least one live_update step")
    for step in live_update:
        if type(step) == TYPE_RESTART_CONTAINER_STEP:
            fail("`docker_build_with_restart` is not compatible with live_update step: "+
                 "`restart_container()` (this extension is meant to REPLACE restart_container() )")

    # rename the original image to make it a base image and declare a docker_build for it
    base_ref = '{}{}'.format(ref, base_suffix)
    docker_build(base_ref, context, **kwargs)

    # declare a new docker build that adds a static binary of tilt-restart-wrapper
    # (which makes use of `entr` to watch files and restart processes) to the user's image
    df = '''
FROM tiltdev/restart-helper:2020-07-16 as restart-helper

FROM {}
USER root
RUN ["touch", "{}"]
COPY --from=restart-helper /tilt-restart-wrapper /
COPY --from=restart-helper /entr /
'''.format(base_ref, restart_file)

    # Clean kwargs for building the child image (which builds on user's specified
    # image and copies in Tilt's restart wrapper). In practice, this means removing
    # kwargs that were relevant to building the user's specified image but are NOT
    # relevant to building the child image / may conflict with args we specifically
    # pass for the child image.
    cleaned_kwargs = {k: v for k, v in kwargs.items() if k not in KWARGS_BLACKLIST}

    # Change the entrypoint to use `tilt-restart-wrapper`.
    # `tilt-restart-wrapper` makes use of `entr` (https://github.com/eradman/entr/) to
    # re-execute $entrypoint whenever $restart_file changes
    if type(entrypoint) == type(""):
        # string entrypoints are run via `sh -c` so shell syntax keeps working
        entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file), "sh", "-c", entrypoint]
    elif type(entrypoint) == type([]):
        entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file)] + entrypoint
    else:
        fail("`entrypoint` must be a string or list of strings: got {}".format(type(entrypoint)))

    # last live_update step should always be to modify $restart_file, which
    # triggers the process wrapper to rerun $entrypoint
    # NB: write `date` instead of just `touch`ing because `entr` doesn't respond
    # to timestamp changes, only writes (see https://github.com/eradman/entr/issues/32)
    live_update = live_update + [run('date > {}'.format(restart_file))]

    docker_build(ref, context, entrypoint=entrypoint_with_entr, dockerfile_contents=df,
                 live_update=live_update, **cleaned_kwargs)

View File

@ -9,6 +9,14 @@ set -euo pipefail
# #
# Helper functions # Helper functions
# #
TILT_MODE=${TILT_MODE:-no}
function tilt_mode() {
if [[ "$TILT_MODE" == "yes" ]]; then
return 0
fi
return 1
}
function log_note() { function log_note() {
GREEN='\033[0;32m' GREEN='\033[0;32m'
NC='\033[0m' NC='\033[0m'
@ -94,67 +102,71 @@ if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
exit 1 exit 1
fi fi
# if ! tilt_mode; then
# Setup kind and build the app #
# # Setup kind and build the app
log_note "Checking for running kind clusters..." #
if ! kind get clusters | grep -q -e '^kind$'; then log_note "Checking for running kind clusters..."
log_note "Creating a kind cluster..." if ! kind get clusters | grep -q -e '^kind$'; then
kind create cluster log_note "Creating a kind cluster..."
else kind create cluster
if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then else
log_error "Seems like your kubeconfig is not targeting a local cluster." if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then
log_error "Exiting to avoid accidentally running tests against a real cluster." log_error "Seems like your kubeconfig is not targeting a local cluster."
exit 1 log_error "Exiting to avoid accidentally running tests against a real cluster."
fi exit 1
fi fi
fi
registry="docker.io"
repo="test/build" registry="docker.io"
registry_repo="$registry/$repo" repo="test/build"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy registry_repo="$registry/$repo"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
if [[ "$skip_build" == "yes" ]]; then
most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1) if [[ "$skip_build" == "yes" ]]; then
if [[ -n "$most_recent_tag" ]]; then most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1)
tag="$most_recent_tag" if [[ -n "$most_recent_tag" ]]; then
do_build=no tag="$most_recent_tag"
do_build=no
else
# Oops, there was no previous build. Need to build anyway.
do_build=yes
fi
else else
# Oops, there was no previous build. Need to build anyway.
do_build=yes do_build=yes
fi fi
else
do_build=yes registry_repo_tag="${registry_repo}:${tag}"
if [[ "$do_build" == "yes" ]]; then
# Rebuild the code
log_note "Docker building the app..."
docker build . --tag "$registry_repo_tag"
fi
# Load it into the cluster
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag"
manifest=/tmp/manifest.yaml
#
# Deploy local-user-authenticator
#
pushd deploy-local-user-authenticator >/dev/null
log_note "Deploying the local-user-authenticator app to the cluster..."
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
popd >/dev/null
fi fi
registry_repo_tag="${registry_repo}:${tag}"
if [[ "$do_build" == "yes" ]]; then
# Rebuild the code
log_note "Docker building the app..."
docker build . --tag "$registry_repo_tag"
fi
# Load it into the cluster
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag"
manifest=/tmp/manifest.yaml
#
# Deploy local-user-authenticator
#
pushd deploy-local-user-authenticator >/dev/null
log_note "Deploying the local-user-authenticator app to the cluster..."
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
popd >/dev/null
test_username="test-username" test_username="test-username"
test_groups="test-group-0,test-group-1" test_groups="test-group-0,test-group-1"
@ -180,22 +192,24 @@ webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authe
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')" webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')" discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')"
# if ! tilt_mode; then
# Deploy Pinniped #
# # Deploy Pinniped
pushd deploy >/dev/null #
pushd deploy >/dev/null
log_note "Deploying the Pinniped app to the cluster..." log_note "Deploying the Pinniped app to the cluster..."
ytt --file . \ ytt --file . \
--data-value "app_name=$app_name" \ --data-value "app_name=$app_name" \
--data-value "namespace=$namespace" \ --data-value "namespace=$namespace" \
--data-value "image_repo=$registry_repo" \ --data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \ --data-value "image_tag=$tag" \
--data-value "discovery_url=$discovery_url" >"$manifest" --data-value "discovery_url=$discovery_url" >"$manifest"
kapp deploy --yes --app "$app_name" --diff-changes --file "$manifest" kapp deploy --yes --app "$app_name" --diff-changes --file "$manifest"
popd >/dev/null popd >/dev/null
fi
# #
# Create the environment file # Create the environment file
@ -233,7 +247,10 @@ log_note
log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:' log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
log_note " ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}" log_note " ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
log_note log_note
log_note "You can rerun this script to redeploy local production code changes while you are working."
log_note if ! tilt_mode; then
log_note "To delete the deployments, run 'kapp delete -a local-user-authenticator -y && kapp delete -a pinniped -y'." log_note "You can rerun this script to redeploy local production code changes while you are working."
log_note "When you're finished, use 'kind delete cluster' to tear down the cluster." log_note
log_note "To delete the deployments, run 'kapp delete -a local-user-authenticator -y && kapp delete -a pinniped -y'."
log_note "When you're finished, use 'kind delete cluster' to tear down the cluster."
fi

9
hack/tilt-down.sh Executable file
View File

@ -0,0 +1,9 @@
#!/usr/bin/env bash

# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Tear down the Tilt-managed local development environment.
set -euo pipefail

# Resolve the repository root (one directory above this script) and run from there.
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$repo_root"

exec tilt down -f ./hack/lib/tilt/Tiltfile

9
hack/tilt-up.sh Executable file
View File

@ -0,0 +1,9 @@
#!/usr/bin/env bash

# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Bring up the Tilt-managed local development environment, streaming logs.
set -euo pipefail

# Resolve the repository root (one directory above this script) and run from there.
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$repo_root"

exec tilt up -f ./hack/lib/tilt/Tiltfile --stream