diff --git a/.gitignore b/.gitignore index 2aaba826..3a5c3201 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,6 @@ # goland .idea + +# Intermediate files used by Tilt +/hack/lib/tilt/build diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f4712345..50581ddd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -exclude: '^generated/' +exclude: '^(generated|hack/lib/tilt/tilt_modules)/' repos: - repo: git://github.com/pre-commit/pre-commit-hooks rev: v3.2.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 191520fd..29e4d7ce 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -93,12 +93,41 @@ docker build . ### Running Integration Tests -```bash -./hack/prepare-for-integration-tests.sh && source /tmp/integration-test-env && go test -v -count 1 ./test/... -``` +1. Install dependencies: -The `./hack/prepare-for-integration-tests.sh` script will create a local -[`kind`](https://kind.sigs.k8s.io/) cluster on which the integration tests will run. + - [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start) + - [`tilt`](https://docs.tilt.dev/install.html) + - [`ytt`](https://carvel.dev/#getting-started) + - [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + + On macOS, these tools can be installed with [Homebrew](https://brew.sh/): + + ```bash + brew install kind tilt-dev/tap/tilt k14s/tap/ytt kubectl + ``` + +1. Create a local Kubernetes cluster using `kind`: + + ```bash + kind create cluster --image kindest/node:v1.18.8 + ``` + +1. Install Pinniped and supporting dependencies using `tilt`: + + ```bash + ./hack/tilt-up.sh + ``` + + Tilt will continue running and live-updating the Pinniped deployment whenever the code changes. + +1. Run the Pinniped integration tests: + + ```bash + source ./hack/lib/tilt/integration-test.env && go test -v -count 1 ./test/integration + ``` + +To uninstall the test environment, run `./hack/tilt-down.sh`. 
+To destroy the local Kubernetes cluster, run `kind delete cluster`. ### Observing Tests on the Continuous Integration Environment diff --git a/cmd/pinniped-server/main.go b/cmd/pinniped-server/main.go index 5741a696..42293796 100644 --- a/cmd/pinniped-server/main.go +++ b/cmd/pinniped-server/main.go @@ -5,6 +5,7 @@ package main import ( "os" + "time" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/client-go/pkg/version" @@ -19,7 +20,12 @@ func main() { logs.InitLogs() defer logs.FlushLogs() - klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get()) + // Dump out the time since compile (mostly useful for benchmarking our local development cycle latency). + var timeSinceCompile time.Duration + if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil { + timeSinceCompile = time.Since(buildDate).Round(time.Second) + } + klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile) ctx := genericapiserver.SetupSignalContext() diff --git a/deploy/deployment.yaml b/deploy/deployment.yaml index 2a1d5e9e..b946ca7c 100644 --- a/deploy/deployment.yaml +++ b/deploy/deployment.yaml @@ -40,11 +40,15 @@ data: apiService: (@= data.values.app_name + "-api" @) kubeCertAgent: namePrefix: (@= data.values.app_name + "-kube-cert-agent-" @) + (@ if data.values.kube_cert_agent_image: @) + image: (@= data.values.kube_cert_agent_image @) + (@ else: @) (@ if data.values.image_digest: @) image: (@= data.values.image_repo + "@" + data.values.image_digest @) (@ else: @) image: (@= data.values.image_repo + ":" + data.values.image_tag @) (@ end @) + (@ end @) (@ if data.values.image_pull_dockerconfigjson: @) imagePullSecrets: - image-pull-secret diff --git a/deploy/values.yaml b/deploy/values.yaml index b579a212..0a25f7ee 100644 --- a/deploy/values.yaml +++ b/deploy/values.yaml @@ -15,6 +15,11 @@ image_repo: docker.io/getpinniped/pinniped-server image_digest: #! e.g. 
sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8 image_tag: latest +#! Optionally specify a different image for the "kube-cert-agent" pod which is scheduled +#! on the control plane. This image needs only to include `sleep` and `cat` binaries. +#! By default, the same image specified for image_repo/image_digest/image_tag will be re-used. +kube_cert_agent_image: + #! Specifies a secret to be used when pulling the above container image. #! Can be used when the above image_repo is a private registry. #! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]' diff --git a/hack/lib/tilt/Tiltfile b/hack/lib/tilt/Tiltfile new file mode 100644 index 00000000..0a7372ef --- /dev/null +++ b/hack/lib/tilt/Tiltfile @@ -0,0 +1,97 @@ +load('ext://restart_process', 'docker_build_with_restart') +disable_snapshots() +analytics_settings(False) +update_settings(max_parallel_updates=8) +os.putenv('CGO_ENABLED', '0') +os.putenv('GOOS', 'linux') +os.putenv('GOARCH', 'amd64') +os.putenv('CGO_ENABLED', '0') +os.putenv('KUBE_GIT_VERSION', 'v0.0.0') + +# Compile all of our ./cmd/... binaries. +local_resource( + 'compile', + 'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...', + deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'], +) + +# Build a container image for local-user-authenticator, with live-update enabled. 
+docker_build_with_restart('image/local-user-auth', '.', + dockerfile='local-user-authenticator.Dockerfile', + entrypoint=['/usr/local/bin/local-user-authenticator'], + live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')], + only=['./build/local-user-authenticator'], +) + +# Render the local-user-authenticator installation manifest using ytt. +k8s_yaml(local([ + 'ytt', + '--file', '../../../deploy-local-user-authenticator', + '--data-value', 'image_repo=image/local-user-auth', + '--data-value', 'image_tag=tilt-dev', +])) + +# Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab. +k8s_resource( + workload='local-user-authenticator', + new_name='local-user-auth', + objects=[ + 'local-user-authenticator:namespace', + 'local-user-authenticator:serviceaccount', + 'local-user-authenticator:role', + 'local-user-authenticator:rolebinding', + ], +) + +# Build a container image for the Pinniped server, with live-update enabled. +docker_build_with_restart('image/pinniped', '.', + dockerfile='pinniped.Dockerfile', + entrypoint=['/usr/local/bin/pinniped-server'], + live_update=[sync('./build/pinniped-server', '/usr/local/bin/pinniped-server')], + only=['./build/pinniped-server'], +) + +# Render the Pinniped server installation manifest using ytt. +k8s_yaml(local([ + 'sh', '-c', + 'ytt --file ../../../deploy ' + + '--data-value namespace=integration ' + + '--data-value image_repo=image/pinniped ' + + '--data-value image_tag=tilt-dev ' + + '--data-value kube_cert_agent_image=debian:10.5-slim ' + + '--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/Kubernetes master/ {print $NF}\') ' + + '--data-value-yaml replicas=1' +])) + +# Collect all the deployed Pinniped server resources under a "pinniped" resource tab. 
+k8s_resource( + workload='pinniped', + objects=[ + 'integration:namespace', + 'credentialissuerconfigs.config.pinniped.dev:customresourcedefinition', + 'webhookidentityproviders.idp.pinniped.dev:customresourcedefinition', + 'pinniped:serviceaccount', + 'pinniped-aggregated-api-server:role', + 'pinniped-kube-system-pod-read:role', + 'pinniped-cluster-info-lister-watcher:role', + 'pinniped-aggregated-api-server:clusterrole', + 'pinniped-create-token-credential-requests:clusterrole', + 'pinniped-aggregated-api-server:rolebinding', + 'pinniped-kube-system-pod-read:rolebinding', + 'pinniped-extension-apiserver-authentication-reader:rolebinding', + 'pinniped-cluster-info-lister-watcher:rolebinding', + 'pinniped-aggregated-api-server:clusterrolebinding', + 'pinniped-create-token-credential-requests:clusterrolebinding', + 'pinniped:clusterrolebinding', + 'pinniped-config:configmap', + 'v1alpha1.login.pinniped.dev:apiservice', + ], +) + +# Collect environment variables needed to run our integration test suite. +local_resource( + 'test-env', + 'TILT_MODE=yes ../../prepare-for-integration-tests.sh', + resource_deps=['local-user-auth', 'pinniped'], + deps=['../../prepare-for-integration-tests.sh'], +) diff --git a/hack/lib/tilt/local-user-authenticator.Dockerfile b/hack/lib/tilt/local-user-authenticator.Dockerfile new file mode 100644 index 00000000..1853ccd8 --- /dev/null +++ b/hack/lib/tilt/local-user-authenticator.Dockerfile @@ -0,0 +1,14 @@ +# Copyright 2020 VMware, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# Use a runtime image based on Debian slim +FROM debian:10.5-slim + +# Copy the binary which was built outside the container. 
+COPY build/local-user-authenticator /usr/local/bin/local-user-authenticator + +# Document the port +EXPOSE 443 + +# Set the entrypoint +ENTRYPOINT ["/usr/local/bin/local-user-authenticator"] diff --git a/hack/lib/tilt/pinniped.Dockerfile b/hack/lib/tilt/pinniped.Dockerfile new file mode 100644 index 00000000..8bbc4d13 --- /dev/null +++ b/hack/lib/tilt/pinniped.Dockerfile @@ -0,0 +1,14 @@ +# Copyright 2020 VMware, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# Use a runtime image based on Debian slim +FROM debian:10.5-slim + +# Copy the binary which was built outside the container. +COPY build/pinniped-server /usr/local/bin/pinniped-server + +# Document the port +EXPOSE 443 + +# Set the entrypoint +ENTRYPOINT ["/usr/local/bin/pinniped-server"] diff --git a/hack/lib/tilt/tilt_modules/docker_build_sub/Tiltfile b/hack/lib/tilt/tilt_modules/docker_build_sub/Tiltfile new file mode 100644 index 00000000..ccdd1a30 --- /dev/null +++ b/hack/lib/tilt/tilt_modules/docker_build_sub/Tiltfile @@ -0,0 +1,44 @@ +def docker_build_sub(ref, context, extra_cmds, child_context=None, base_suffix='-tilt_docker_build_sub_base', live_update=[], **kwargs): + """ + Substitutes in a docker image with extra Dockerfile commands. + + This allows you to easily customize your docker build for your dev environment without changing your prod Dockerfile. + + This works by: + 1. Renaming the original image to, e.g. "myimage-base" + 2. Creating a new image named, e.g. "myimage" that starts with "FROM myimage-base" + 3. Adding whatever extra stuff you want + + Examples: + ``` + # load the extension + load("ext://docker_build_sub", "docker_build_sub") + + # ensure you have vim installed when running in dev, so you can + # shell into the box and look at files + docker_build_sub('myimage', '.', extra_cmds=["apt-get install vim"]) + + # use live_update to sync files from outside your docker context + docker_build_sub('foo', 'foo', child_context='bar', + extra_cmds=['ADD . 
/bar'], + live_update=[ + sync('foo', '/foo'), + sync('bar', '/bar'), + ] + ) + ``` + + This function supports all the normal `docker_build` arguments. See [docker_build API docs](https://docs.tilt.dev/api.html#api.docker_build) for arguments not mentioned here.. + + Args: + context (str): The directory in which to build the parent (original) image. If child_context is not set, also the directory in which to build the new child image. + extra_cmds (List[str]): Any extra Dockerfile commands you want to run when building the image. + child_context (str): The directory in which to build the new child image. If unset (None), defaults to the parent image's context. + base_suffix (str): The suffix to append to the parent (original) image's name so that the new child image can take the original name. This is mostly ignorable, and just here in case the default generates a conflict for you. + """ + if not child_context: + child_context = context + base_ref = '%s-base' % ref + docker_build(base_ref, context, **kwargs) + df = '\n'.join(['FROM %s' % base_ref] + extra_cmds) + docker_build(ref, child_context, dockerfile_contents=df, live_update=live_update, **kwargs) diff --git a/hack/lib/tilt/tilt_modules/extensions.json b/hack/lib/tilt/tilt_modules/extensions.json new file mode 100644 index 00000000..0094be30 --- /dev/null +++ b/hack/lib/tilt/tilt_modules/extensions.json @@ -0,0 +1,16 @@ +{ + "Extensions": [ + { + "Name": "restart_process", + "GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf", + "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions", + "TimeFetched": "2020-09-03T23:04:40.167635-05:00" + }, + { + "Name": "docker_build_sub", + "GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf", + "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions", + "TimeFetched": "2020-09-04T18:01:24.795509-05:00" + } + ] +} \ No newline at end of file diff --git a/hack/lib/tilt/tilt_modules/restart_process/Tiltfile 
b/hack/lib/tilt/tilt_modules/restart_process/Tiltfile new file mode 100644 index 00000000..fc43809f --- /dev/null +++ b/hack/lib/tilt/tilt_modules/restart_process/Tiltfile @@ -0,0 +1,78 @@ +RESTART_FILE = '/.restart-proc' +TYPE_RESTART_CONTAINER_STEP = 'live_update_restart_container_step' + +KWARGS_BLACKLIST = [ + # since we'll be passing `dockerfile_contents` when building the + # child image, remove any kwargs that might conflict + 'dockerfile', 'dockerfile_contents', + + # 'target' isn't relevant to our child build--if we pass this arg, + # Docker will just fail to find the specified stage and error out + 'target', +] + +def docker_build_with_restart(ref, context, entrypoint, live_update, + base_suffix='-tilt_docker_build_with_restart_base', restart_file=RESTART_FILE, **kwargs): + """Wrap a docker_build call and its associated live_update steps so that the last step + of any live update is to rerun the given entrypoint. + + + Args: + ref: name for this image (e.g. 'myproj/backend' or 'myregistry/myproj/backend'); as the parameter of the same name in docker_build + context: path to use as the Docker build context; as the parameter of the same name in docker_build + entrypoint: the command to be (re-)executed when the container starts or when a live_update is run + live_update: set of steps for updating a running container; as the parameter of the same name in docker_build + base_suffix: suffix for naming the base image, applied as {ref}{base_suffix} + restart_file: file that Tilt will update during a live_update to signal the entrypoint to rerun + **kwargs: will be passed to the underlying `docker_build` call + """ + + # first, validate the given live_update steps + if len(live_update) == 0: + fail("`docker_build_with_restart` requires at least one live_update step") + for step in live_update: + if type(step) == TYPE_RESTART_CONTAINER_STEP: + fail("`docker_build_with_restart` is not compatible with live_update step: "+ + "`restart_container()` (this extension is 
meant to REPLACE restart_container() )") + + # rename the original image to make it a base image and declare a docker_build for it + base_ref = '{}{}'.format(ref, base_suffix) + docker_build(base_ref, context, **kwargs) + + # declare a new docker build that adds a static binary of tilt-restart-wrapper + # (which makes use of `entr` to watch files and restart processes) to the user's image + df = ''' + FROM tiltdev/restart-helper:2020-07-16 as restart-helper + + FROM {} + USER root + RUN ["touch", "{}"] + COPY --from=restart-helper /tilt-restart-wrapper / + COPY --from=restart-helper /entr / + '''.format(base_ref, restart_file) + + # Clean kwargs for building the child image (which builds on user's specified + # image and copies in Tilt's restart wrapper). In practice, this means removing + # kwargs that were relevant to building the user's specified image but are NOT + # relevant to building the child image / may conflict with args we specifically + # pass for the child image. + cleaned_kwargs = {k: v for k, v in kwargs.items() if k not in KWARGS_BLACKLIST} + + # Change the entrypoint to use `tilt-restart-wrapper`. 
+ # `tilt-restart-wrapper` makes use of `entr` (https://github.com/eradman/entr/) to + # re-execute $entrypoint whenever $restart_file changes + if type(entrypoint) == type(""): + entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file), "sh", "-c", entrypoint] + elif type(entrypoint) == type([]): + entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file)] + entrypoint + else: + fail("`entrypoint` must be a string or list of strings: got {}".format(type(entrypoint))) + + # last live_update step should always be to modify $restart_file, which + # triggers the process wrapper to rerun $entrypoint + # NB: write `date` instead of just `touch`ing because `entr` doesn't respond + # to timestamp changes, only writes (see https://github.com/eradman/entr/issues/32) + live_update = live_update + [run('date > {}'.format(restart_file))] + + docker_build(ref, context, entrypoint=entrypoint_with_entr, dockerfile_contents=df, + live_update=live_update, **cleaned_kwargs) diff --git a/hack/prepare-for-integration-tests.sh b/hack/prepare-for-integration-tests.sh index 8340f2e4..e4f3bd0f 100755 --- a/hack/prepare-for-integration-tests.sh +++ b/hack/prepare-for-integration-tests.sh @@ -9,6 +9,14 @@ set -euo pipefail # # Helper functions # +TILT_MODE=${TILT_MODE:-no} +function tilt_mode() { + if [[ "$TILT_MODE" == "yes" ]]; then + return 0 + fi + return 1 +} + function log_note() { GREEN='\033[0;32m' NC='\033[0m' @@ -94,67 +102,71 @@ if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then exit 1 fi -# -# Setup kind and build the app -# -log_note "Checking for running kind clusters..." -if ! kind get clusters | grep -q -e '^kind$'; then - log_note "Creating a kind cluster..." - kind create cluster -else - if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then - log_error "Seems like your kubeconfig is not targeting a local cluster." 
- log_error "Exiting to avoid accidentally running tests against a real cluster." - exit 1 - fi -fi - -registry="docker.io" -repo="test/build" -registry_repo="$registry/$repo" -tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy - -if [[ "$skip_build" == "yes" ]]; then - most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1) - if [[ -n "$most_recent_tag" ]]; then - tag="$most_recent_tag" - do_build=no +if ! tilt_mode; then + # + # Setup kind and build the app + # + log_note "Checking for running kind clusters..." + if ! kind get clusters | grep -q -e '^kind$'; then + log_note "Creating a kind cluster..." + kind create cluster + else + if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then + log_error "Seems like your kubeconfig is not targeting a local cluster." + log_error "Exiting to avoid accidentally running tests against a real cluster." + exit 1 + fi + fi + + registry="docker.io" + repo="test/build" + registry_repo="$registry/$repo" + tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy + + if [[ "$skip_build" == "yes" ]]; then + most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1) + if [[ -n "$most_recent_tag" ]]; then + tag="$most_recent_tag" + do_build=no + else + # Oops, there was no previous build. Need to build anyway. + do_build=yes + fi else - # Oops, there was no previous build. Need to build anyway. do_build=yes fi -else - do_build=yes + + registry_repo_tag="${registry_repo}:${tag}" + + if [[ "$do_build" == "yes" ]]; then + # Rebuild the code + log_note "Docker building the app..." + docker build . --tag "$registry_repo_tag" + fi + + # Load it into the cluster + log_note "Loading the app's container image into the kind cluster..." 
+ kind load docker-image "$registry_repo_tag" + + manifest=/tmp/manifest.yaml + + # + # Deploy local-user-authenticator + # + pushd deploy-local-user-authenticator >/dev/null + + log_note "Deploying the local-user-authenticator app to the cluster..." + ytt --file . \ + --data-value "image_repo=$registry_repo" \ + --data-value "image_tag=$tag" >"$manifest" + + kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema. + kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest" + + popd >/dev/null + fi -registry_repo_tag="${registry_repo}:${tag}" - -if [[ "$do_build" == "yes" ]]; then - # Rebuild the code - log_note "Docker building the app..." - docker build . --tag "$registry_repo_tag" -fi - -# Load it into the cluster -log_note "Loading the app's container image into the kind cluster..." -kind load docker-image "$registry_repo_tag" - -manifest=/tmp/manifest.yaml - -# -# Deploy local-user-authenticator -# -pushd deploy-local-user-authenticator >/dev/null - -log_note "Deploying the local-user-authenticator app to the cluster..." -ytt --file . \ - --data-value "image_repo=$registry_repo" \ - --data-value "image_tag=$tag" >"$manifest" - -kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema. -kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest" - -popd >/dev/null test_username="test-username" test_groups="test-group-0,test-group-1" @@ -180,22 +192,24 @@ webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authe webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')" discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')" -# -# Deploy Pinniped -# -pushd deploy >/dev/null +if ! tilt_mode; then + # + # Deploy Pinniped + # + pushd deploy >/dev/null -log_note "Deploying the Pinniped app to the cluster..." -ytt --file . 
\ - --data-value "app_name=$app_name" \ - --data-value "namespace=$namespace" \ - --data-value "image_repo=$registry_repo" \ - --data-value "image_tag=$tag" \ - --data-value "discovery_url=$discovery_url" >"$manifest" + log_note "Deploying the Pinniped app to the cluster..." + ytt --file . \ + --data-value "app_name=$app_name" \ + --data-value "namespace=$namespace" \ + --data-value "image_repo=$registry_repo" \ + --data-value "image_tag=$tag" \ + --data-value "discovery_url=$discovery_url" >"$manifest" -kapp deploy --yes --app "$app_name" --diff-changes --file "$manifest" + kapp deploy --yes --app "$app_name" --diff-changes --file "$manifest" -popd >/dev/null + popd >/dev/null +fi # # Create the environment file @@ -233,7 +247,10 @@ log_note log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:' log_note " ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}" log_note -log_note "You can rerun this script to redeploy local production code changes while you are working." -log_note -log_note "To delete the deployments, run 'kapp delete -a local-user-authenticator -y && kapp delete -a pinniped -y'." -log_note "When you're finished, use 'kind delete cluster' to tear down the cluster." + +if ! tilt_mode; then + log_note "You can rerun this script to redeploy local production code changes while you are working." + log_note + log_note "To delete the deployments, run 'kapp delete -a local-user-authenticator -y && kapp delete -a pinniped -y'." + log_note "When you're finished, use 'kind delete cluster' to tear down the cluster." +fi diff --git a/hack/tilt-down.sh b/hack/tilt-down.sh new file mode 100755 index 00000000..95992122 --- /dev/null +++ b/hack/tilt-down.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Copyright 2020 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail +ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." 
&& pwd )" +cd "${ROOT}" +exec tilt down -f ./hack/lib/tilt/Tiltfile diff --git a/hack/tilt-up.sh b/hack/tilt-up.sh new file mode 100755 index 00000000..acbf5a3f --- /dev/null +++ b/hack/tilt-up.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Copyright 2020 the Pinniped contributors. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail +ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )" +cd "${ROOT}" +exec tilt up -f ./hack/lib/tilt/Tiltfile --stream