Switch away from hack/prepare-for-integration-tests.sh to Tilt-based local workflow.
Signed-off-by: Matt Moyer <moyerm@vmware.com>
parent 6063674623
commit 24c654120c
4	.gitignore (vendored)

@@ -16,3 +16,7 @@
 
 # goland
 .idea
+
+# Intermediate files used by Tilt
+/hack/lib/tilt/build
+/hack/lib/tilt/integration-test.env
.pre-commit-config.yaml

@@ -1,4 +1,4 @@
-exclude: '^generated/'
+exclude: '^(generated|hack/lib/tilt/tilt_modules)/'
 repos:
 -   repo: git://github.com/pre-commit/pre-commit-hooks
     rev: v3.2.0
@@ -5,6 +5,7 @@ package main
 
 import (
 	"os"
+	"time"
 
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/client-go/pkg/version"
@@ -19,7 +20,12 @@ func main() {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
-	klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get())
+	// Dump out the time since compile (mostly useful for benchmarking our local development cycle latency).
+	var timeSinceCompile time.Duration
+	if buildDate, err := time.Parse(time.RFC3339, version.Get().BuildDate); err == nil {
+		timeSinceCompile = time.Since(buildDate).Round(time.Second)
+	}
+	klog.Infof("Running %s at %#v (%s since build)", rest.DefaultKubernetesUserAgent(), version.Get(), timeSinceCompile)
 
 	ctx := genericapiserver.SetupSignalContext()
@@ -93,12 +93,41 @@ docker build .
 
 ### Running Integration Tests
 
+1. Install dependencies:
+
+   - [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start)
+   - [`tilt`](https://docs.tilt.dev/install.html)
+   - [`ytt`](https://carvel.dev/#getting-started)
+   - [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
+   On macOS, these tools can be installed with [Homebrew](https://brew.sh/):
+
 ```bash
-./hack/prepare-for-integration-tests.sh && source /tmp/integration-test-env && go test -v -count 1 ./test/...
+brew install kind tilt-dev/tap/tilt k14s/tap/ytt kubectl
 ```
 
-The `./hack/prepare-for-integration-tests.sh` script will create a local
-[`kind`](https://kind.sigs.k8s.io/) cluster on which the integration tests will run.
+1. Create a local Kubernetes cluster using `kind`:
+
+```bash
+kind create cluster --image kindest/node:v1.18.8
+```
+
+1. Install Pinniped and supporting dependencies using `tilt`:
+
+```bash
+tilt up -f ./hack/lib/tilt/Tiltfile --stream
+```
+
+   Tilt will continue running and live-updating the Pinniped deployment whenever the code changes.
+
+1. Run the Pinniped integration tests:
+
+```bash
+source ./hack/lib/tilt/integration-test.env && go test -v -count 1 ./test/integration
+```
+
+To uninstall the test environment, run `tilt down -f ./hack/lib/tilt/Tiltfile`.
+To destroy the local Kubernetes cluster, run `kind delete cluster`.
 
 ### Observing Tests on the Continuous Integration Environment
 
95	hack/lib/tilt/Tiltfile (new file)

@@ -0,0 +1,95 @@
load('ext://restart_process', 'docker_build_with_restart')

disable_snapshots()
analytics_settings(False)
update_settings(max_parallel_updates=8)

os.putenv('CGO_ENABLED', '0')
os.putenv('GOOS', 'linux')
os.putenv('GOARCH', 'amd64')

# Compile all of our ./cmd/... binaries.
local_resource(
  'compile',
  'cd ../../../ && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...',
  deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'],
)

# Build a container image for local-user-authenticator, with live-update enabled.
docker_build_with_restart('image/local-user-auth', '.',
  dockerfile='local-user-authenticator.Dockerfile',
  entrypoint=['/usr/local/bin/local-user-authenticator'],
  live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')],
  only=['./build/local-user-authenticator'],
)

# Render the local-user-authenticator installation manifest using ytt.
k8s_yaml(local([
  'ytt',
  '--file', '../../../deploy-local-user-authenticator',
  '--data-value', 'image_repo=image/local-user-auth',
  '--data-value', 'image_tag=tilt-dev',
]))

# Collect all the deployed local-user-authenticator resources under a "deploy/local-user-auth" resource tab.
k8s_resource(
  workload='local-user-authenticator',
  new_name='deploy/local-user-auth',
  objects=[
    'local-user-authenticator:namespace',
    'local-user-authenticator:serviceaccount',
    'local-user-authenticator:role',
    'local-user-authenticator:rolebinding',
  ],
)

# Build a container image for the Pinniped server, with live-update enabled.
docker_build_with_restart('image/pinniped', '.',
  dockerfile='pinniped.Dockerfile',
  entrypoint=['/usr/local/bin/pinniped-server'],
  live_update=[sync('./build/pinniped-server', '/usr/local/bin/pinniped-server')],
  only=['./build/pinniped-server'],
)

# Render the Pinniped server installation manifest using ytt.
k8s_yaml(local([
  'sh', '-c',
  'ytt --file ../../../deploy ' +
  '--data-value namespace=integration ' +
  '--data-value image_repo=image/pinniped ' +
  '--data-value image_tag=tilt-dev ' +
  '--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/Kubernetes master/ {print $NF}\') ' +
  '--data-value-yaml replicas=1'
]))

# Collect all the deployed Pinniped resources under a "deploy/pinniped" resource tab.
k8s_resource(
  workload='pinniped',
  new_name='deploy/pinniped',
  objects=[
    'integration:namespace',
    'credentialissuerconfigs.config.pinniped.dev:customresourcedefinition',
    'webhookidentityproviders.idp.pinniped.dev:customresourcedefinition',
    'pinniped:serviceaccount',
    'pinniped-aggregated-api-server:role',
    'pinniped-kube-system-pod-exec:role',
    'pinniped-cluster-info-lister-watcher:role',
    'pinniped-aggregated-api-server:clusterrole',
    'pinniped-create-token-credential-requests:clusterrole',
    'pinniped-aggregated-api-server:rolebinding',
    'pinniped-kube-system-pod-exec:rolebinding',
    'pinniped-extension-apiserver-authentication-reader:rolebinding',
    'pinniped-cluster-info-lister-watcher:rolebinding',
    'pinniped-aggregated-api-server:clusterrolebinding',
    'pinniped-create-token-credential-requests:clusterrolebinding',
    'pinniped:clusterrolebinding',
    'pinniped-config:configmap',
    'v1alpha1.login.pinniped.dev:apiservice',
  ],
)

# Collect environment variables needed to run our integration test suite.
local_resource(
  'create-test-environment',
  './ensure-integration-test-env.sh',
  resource_deps=['deploy/local-user-auth', 'deploy/pinniped'],
)
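The core pattern in this Tiltfile: Go compilation happens on the host via a `local_resource`, and each image build only copies a prebuilt binary, so a `live_update` can sync that single file into the running container instead of rebuilding the image. A distilled sketch of the loop (the `example` names are illustrative, not part of this commit):

```python
load('ext://restart_process', 'docker_build_with_restart')

# 1. Recompile on the host whenever the Go sources change.
local_resource(
    'compile-example',                            # illustrative resource name
    'go build -o ./build/example ./cmd/example',  # hypothetical package path
    deps=['./cmd'],
)

# 2. The image only contains the prebuilt binary; on change, Tilt syncs the
#    new binary into the running container and re-executes the entrypoint.
docker_build_with_restart('image/example', '.',
    dockerfile='example.Dockerfile',
    entrypoint=['/usr/local/bin/example'],
    live_update=[sync('./build/example', '/usr/local/bin/example')],
    only=['./build/example'],
)
```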
101	hack/lib/tilt/ensure-integration-test-env.sh (new executable file)

@@ -0,0 +1,101 @@
#!/usr/bin/env bash

set -euo pipefail

#
# Helper functions
#
function log_note() {
  GREEN='\033[0;32m'
  NC='\033[0m'
  if [[ ${COLORTERM:-} =~ ^(truecolor|24bit)$ ]]; then
    echo -e "${GREEN}$*${NC}"
  else
    echo "$*"
  fi
}

function log_error() {
  RED='\033[0;31m'
  NC='\033[0m'
  if [[ ${COLORTERM:-} =~ ^(truecolor|24bit)$ ]]; then
    echo -e "🙁${RED} Error: $* ${NC}"
  else
    echo ":( Error: $*"
  fi
}

function check_dependency() {
  if ! command -v "$1" >/dev/null; then
    log_error "Missing dependency..."
    log_error "$2"
    exit 1
  fi
}

#
# Check for dependencies
#
check_dependency kubectl "Please install kubectl, e.g. 'brew install kubectl' on macOS."
check_dependency htpasswd "Please install htpasswd. It should be pre-installed on macOS, and is usually found in the 'apache2-utils' package on Linux."

# Require kubectl >= 1.18.x
if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
  echo "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)"
  exit 1
fi

pinniped_path="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
cd "$pinniped_path" || exit 1

app_name="pinniped"
namespace="integration"
webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate"
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"

test_username="test-username"
test_groups="test-group-0,test-group-1"
test_password="$(openssl rand -hex 16)"

log_note "Creating test user '$test_username'..."
kubectl create secret generic "$test_username" \
  --namespace local-user-authenticator \
  --from-literal=groups="$test_groups" \
  --from-literal=passwordHash="$(htpasswd -nbBC 10 x "$test_password" | sed -e "s/^x://")" \
  --dry-run=client \
  --output yaml |
  kubectl apply -f -

#
# Create the environment file
#
test_env_file="$pinniped_path/hack/lib/tilt/integration-test.env"

cat <<EOF > "$test_env_file"
# The following env vars should be set before running 'go test -v -count 1 ./test/...'
export PINNIPED_NAMESPACE=${namespace}
export PINNIPED_APP_NAME=${app_name}
export PINNIPED_TEST_USER_USERNAME=${test_username}
export PINNIPED_TEST_USER_GROUPS=${test_groups}
export PINNIPED_TEST_USER_TOKEN=${test_username}:${test_password}
export PINNIPED_TEST_WEBHOOK_ENDPOINT=${webhook_url}
export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=${webhook_ca_bundle}

read -r -d '' PINNIPED_CLUSTER_CAPABILITY_YAML << PINNIPED_CLUSTER_CAPABILITY_YAML_EOF || true
$(cat "$pinniped_path/test/cluster_capabilities/kind.yaml")
PINNIPED_CLUSTER_CAPABILITY_YAML_EOF

export PINNIPED_CLUSTER_CAPABILITY_YAML
EOF

goland_vars=$(grep -v '^#' "$test_env_file" | grep -E '^export .+=' | sed 's/export //g' | tr '\n' ';')

log_note
log_note "🚀 Ready to run integration tests! For example..."
log_note "    cd $pinniped_path"
log_note "    source ./hack/lib/tilt/integration-test.env && go test -v -count 1 ./test/integration"
log_note
log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
log_note "    ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=$pinniped_path/test/cluster_capabilities/kind.yaml"
log_note
14	hack/lib/tilt/local-user-authenticator.Dockerfile (new file)

@@ -0,0 +1,14 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime image based on Debian slim
FROM debian:10.5-slim

# Copy the binary which was built outside the container.
COPY build/local-user-authenticator /usr/local/bin/local-user-authenticator

# Document the port
EXPOSE 443

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/local-user-authenticator"]
14	hack/lib/tilt/pinniped.Dockerfile (new file)

@@ -0,0 +1,14 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

# Use a runtime image based on Debian slim
FROM debian:10.5-slim

# Copy the binary which was built outside the container.
COPY build/pinniped-server /usr/local/bin/pinniped-server

# Document the port
EXPOSE 443

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/pinniped-server"]
44	hack/lib/tilt/tilt_modules/docker_build_sub/Tiltfile (new file)

@@ -0,0 +1,44 @@
def docker_build_sub(ref, context, extra_cmds, child_context=None, base_suffix='-tilt_docker_build_sub_base', live_update=[], **kwargs):
    """
    Substitutes in a docker image with extra Dockerfile commands.

    This allows you to easily customize your docker build for your dev environment without changing your prod Dockerfile.

    This works by:
    1. Renaming the original image to, e.g., "myimage-base"
    2. Creating a new image named, e.g., "myimage" that starts with "FROM myimage-base"
    3. Adding whatever extra stuff you want

    Examples:
    ```
    # load the extension
    load("ext://docker_build_sub", "docker_build_sub")

    # ensure you have vim installed when running in dev, so you can
    # shell into the box and look at files
    docker_build_sub('myimage', '.', extra_cmds=["RUN apt-get install vim"])

    # use live_update to sync files from outside your docker context
    docker_build_sub('foo', 'foo', child_context='bar',
        extra_cmds=['ADD . /bar'],
        live_update=[
            sync('foo', '/foo'),
            sync('bar', '/bar'),
        ]
    )
    ```

    This function supports all the normal `docker_build` arguments. See the [docker_build API docs](https://docs.tilt.dev/api.html#api.docker_build) for arguments not mentioned here.

    Args:
      context (str): The directory in which to build the parent (original) image. If child_context is not set, this is also the directory in which to build the new child image.
      extra_cmds (List[str]): Any extra Dockerfile commands you want to run when building the image.
      child_context (str): The directory in which to build the new child image. If unset (None), defaults to the parent image's context.
      base_suffix (str): The suffix to append to the parent (original) image's name so that the new child image can take the original name. This is mostly ignorable, and is just here in case the default generates a conflict for you.
    """
    if not child_context:
        child_context = context
    base_ref = '%s%s' % (ref, base_suffix)
    docker_build(base_ref, context, **kwargs)
    df = '\n'.join(['FROM %s' % base_ref] + extra_cmds)
    docker_build(ref, child_context, dockerfile_contents=df, live_update=live_update, **kwargs)
16	hack/lib/tilt/tilt_modules/extensions.json (new file)

@@ -0,0 +1,16 @@
{
    "Extensions": [
        {
            "Name": "restart_process",
            "GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
            "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
            "TimeFetched": "2020-09-03T23:04:40.167635-05:00"
        },
        {
            "Name": "docker_build_sub",
            "GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
            "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
            "TimeFetched": "2020-09-04T18:01:24.795509-05:00"
        }
    ]
}
78	hack/lib/tilt/tilt_modules/restart_process/Tiltfile (new file)

@@ -0,0 +1,78 @@
RESTART_FILE = '/.restart-proc'
TYPE_RESTART_CONTAINER_STEP = 'live_update_restart_container_step'

KWARGS_BLACKLIST = [
    # since we'll be passing `dockerfile_contents` when building the
    # child image, remove any kwargs that might conflict
    'dockerfile', 'dockerfile_contents',

    # 'target' isn't relevant to our child build--if we pass this arg,
    # Docker will just fail to find the specified stage and error out
    'target',
]

def docker_build_with_restart(ref, context, entrypoint, live_update,
                              base_suffix='-tilt_docker_build_with_restart_base', restart_file=RESTART_FILE, **kwargs):
    """Wrap a docker_build call and its associated live_update steps so that the last step
    of any live update is to rerun the given entrypoint.

    Args:
      ref: name for this image (e.g. 'myproj/backend' or 'myregistry/myproj/backend'); as the parameter of the same name in docker_build
      context: path to use as the Docker build context; as the parameter of the same name in docker_build
      entrypoint: the command to be (re-)executed when the container starts or when a live_update is run
      live_update: set of steps for updating a running container; as the parameter of the same name in docker_build
      base_suffix: suffix for naming the base image, applied as {ref}{base_suffix}
      restart_file: file that Tilt will update during a live_update to signal the entrypoint to rerun
      **kwargs: will be passed to the underlying `docker_build` call
    """

    # first, validate the given live_update steps
    if len(live_update) == 0:
        fail("`docker_build_with_restart` requires at least one live_update step")
    for step in live_update:
        if type(step) == TYPE_RESTART_CONTAINER_STEP:
            fail("`docker_build_with_restart` is not compatible with live_update step: "+
                 "`restart_container()` (this extension is meant to REPLACE restart_container() )")

    # rename the original image to make it a base image and declare a docker_build for it
    base_ref = '{}{}'.format(ref, base_suffix)
    docker_build(base_ref, context, **kwargs)

    # declare a new docker build that adds a static binary of tilt-restart-wrapper
    # (which makes use of `entr` to watch files and restart processes) to the user's image
    df = '''
FROM tiltdev/restart-helper:2020-07-16 as restart-helper

FROM {}
USER root
RUN ["touch", "{}"]
COPY --from=restart-helper /tilt-restart-wrapper /
COPY --from=restart-helper /entr /
'''.format(base_ref, restart_file)

    # Clean kwargs for building the child image (which builds on the user's specified
    # image and copies in Tilt's restart wrapper). In practice, this means removing
    # kwargs that were relevant to building the user's specified image but are NOT
    # relevant to building the child image / may conflict with args we specifically
    # pass for the child image.
    cleaned_kwargs = {k: v for k, v in kwargs.items() if k not in KWARGS_BLACKLIST}

    # Change the entrypoint to use `tilt-restart-wrapper`.
    # `tilt-restart-wrapper` makes use of `entr` (https://github.com/eradman/entr/) to
    # re-execute $entrypoint whenever $restart_file changes
    if type(entrypoint) == type(""):
        entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file), "sh", "-c", entrypoint]
    elif type(entrypoint) == type([]):
        entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file)] + entrypoint
    else:
        fail("`entrypoint` must be a string or list of strings: got {}".format(type(entrypoint)))

    # last live_update step should always be to modify $restart_file, which
    # triggers the process wrapper to rerun $entrypoint
    # NB: write `date` instead of just `touch`ing because `entr` doesn't respond
    # to timestamp changes, only writes (see https://github.com/eradman/entr/issues/32)
    live_update = live_update + [run('date > {}'.format(restart_file))]

    docker_build(ref, context, entrypoint=entrypoint_with_entr, dockerfile_contents=df,
                 live_update=live_update, **cleaned_kwargs)
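Read against the call in the main Tiltfile above, the expansion this extension performs for the Pinniped image works out to roughly the following sketch (derived from the implementation above; the generated child Dockerfile contents are abbreviated):

```python
# The main Tiltfile calls:
docker_build_with_restart('image/pinniped', '.',
    dockerfile='pinniped.Dockerfile',
    entrypoint=['/usr/local/bin/pinniped-server'],
    live_update=[sync('./build/pinniped-server', '/usr/local/bin/pinniped-server')],
    only=['./build/pinniped-server'],
)

# ...which, per the code above, is roughly equivalent to:
docker_build('image/pinniped-tilt_docker_build_with_restart_base', '.',
    dockerfile='pinniped.Dockerfile',
    only=['./build/pinniped-server'],
)
docker_build('image/pinniped', '.',
    # Generated child Dockerfile: FROM the base image, touch /.restart-proc,
    # then COPY in /tilt-restart-wrapper and /entr from the helper image.
    dockerfile_contents='FROM image/pinniped-tilt_docker_build_with_restart_base\n...',
    entrypoint=['/tilt-restart-wrapper', '--watch_file=/.restart-proc', '/usr/local/bin/pinniped-server'],
    live_update=[
        sync('./build/pinniped-server', '/usr/local/bin/pinniped-server'),
        run('date > /.restart-proc'),  # final step: poke the watch file so entr re-execs the server
    ],
    only=['./build/pinniped-server'],  # 'dockerfile' is stripped by KWARGS_BLACKLIST; 'only' passes through
)
```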
239	hack/prepare-for-integration-tests.sh (deleted file)

@@ -1,239 +0,0 @@
#!/usr/bin/env bash

# This script can be used to prepare a kind cluster and deploy the app.
# You can call this script again to redeploy the app.
# It will also output instructions on how to run the integration tests.

set -euo pipefail

#
# Helper functions
#
function log_note() {
  GREEN='\033[0;32m'
  NC='\033[0m'
  if [[ $COLORTERM =~ ^(truecolor|24bit)$ ]]; then
    echo -e "${GREEN}$*${NC}"
  else
    echo "$*"
  fi
}

function log_error() {
  RED='\033[0;31m'
  NC='\033[0m'
  if [[ $COLORTERM =~ ^(truecolor|24bit)$ ]]; then
    echo -e "🙁${RED} Error: $* ${NC}"
  else
    echo ":( Error: $*"
  fi
}

function check_dependency() {
  if ! command -v "$1" >/dev/null; then
    log_error "Missing dependency..."
    log_error "$2"
    exit 1
  fi
}

#
# Handle argument parsing and help message
#
help=no
skip_build=no

while (("$#")); do
  case "$1" in
  -h | --help)
    help=yes
    shift
    ;;
  -s | --skip-build)
    skip_build=yes
    shift
    ;;
  -*)
    log_error "Unsupported flag $1" >&2
    exit 1
    ;;
  *)
    log_error "Unsupported positional arg $1" >&2
    exit 1
    ;;
  esac
done

if [[ "$help" == "yes" ]]; then
  me="$(basename "${BASH_SOURCE[0]}")"
  log_note "Usage:"
  log_note "   $me [flags]"
  log_note
  log_note "Flags:"
  log_note "   -h, --help: print this usage"
  log_note "   -s, --skip-build: reuse the most recently built image of the app instead of building"
  exit 1
fi

pinniped_path="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$pinniped_path" || exit 1

#
# Check for dependencies
#
check_dependency docker "Please install docker. See https://docs.docker.com/get-docker"
check_dependency kind "Please install kind. e.g. 'brew install kind' for MacOS"
check_dependency ytt "Please install ytt. e.g. 'brew tap k14s/tap && brew install ytt' for MacOS"
check_dependency kapp "Please install kapp. e.g. 'brew tap k14s/tap && brew install kapp' for MacOS"
check_dependency kubectl "Please install kubectl. e.g. 'brew install kubectl' for MacOS"
check_dependency htpasswd "Please install htpasswd. Should be pre-installed on MacOS. Usually found in 'apache2-utils' package for linux."

# Require kubectl >= 1.18.x
if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
  echo "kubectl >= 1.18.x is required, you have $(kubectl version --client=true --short | cut -d ':' -f2)"
  exit 1
fi

#
# Setup kind and build the app
#
log_note "Checking for running kind clusters..."
if ! kind get clusters | grep -q -e '^kind$'; then
  log_note "Creating a kind cluster..."
  kind create cluster
else
  if ! kubectl cluster-info | grep master | grep -q 127.0.0.1; then
    log_error "Seems like your kubeconfig is not targeting a local cluster."
    log_error "Exiting to avoid accidentally running tests against a real cluster."
    exit 1
  fi
fi

registry="docker.io"
repo="test/build"
registry_repo="$registry/$repo"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy

if [[ "$skip_build" == "yes" ]]; then
  most_recent_tag=$(docker images "$repo" --format "{{.Tag}}" | head -1)
  if [[ -n "$most_recent_tag" ]]; then
    tag="$most_recent_tag"
    do_build=no
  else
    # Oops, there was no previous build. Need to build anyway.
    do_build=yes
  fi
else
  do_build=yes
fi

registry_repo_tag="${registry_repo}:${tag}"

if [[ "$do_build" == "yes" ]]; then
  # Rebuild the code
  log_note "Docker building the app..."
  docker build . --tag "$registry_repo_tag"
fi

# Load it into the cluster
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag"

manifest=/tmp/manifest.yaml

#
# Deploy local-user-authenticator
#
pushd deploy-local-user-authenticator >/dev/null

log_note "Deploying the local-user-authenticator app to the cluster..."
ytt --file . \
  --data-value "image_repo=$registry_repo" \
  --data-value "image_tag=$tag" >"$manifest"

kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"

popd >/dev/null

test_username="test-username"
test_groups="test-group-0,test-group-1"
set +o pipefail
test_password="$(cat /dev/urandom | env LC_CTYPE=C tr -dc 'a-z0-9' | fold -w 32 | head -n 1)"
set -o pipefail
if [[ ${#test_password} -ne 32 ]]; then
  log_error "Could not create test user's random password"
  exit 1
fi
log_note "Creating test user '$test_username'..."
kubectl create secret generic "$test_username" \
  --namespace local-user-authenticator \
  --from-literal=groups="$test_groups" \
  --from-literal=passwordHash="$(htpasswd -nbBC 10 x "$test_password" | sed -e "s/^x://")" \
  --dry-run=client \
  --output yaml |
  kubectl apply -f -

app_name="pinniped"
namespace="integration"
webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate"
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')"

#
# Deploy Pinniped
#
pushd deploy >/dev/null

log_note "Deploying the Pinniped app to the cluster..."
ytt --file . \
  --data-value "app_name=$app_name" \
  --data-value "namespace=$namespace" \
  --data-value "image_repo=$registry_repo" \
  --data-value "image_tag=$tag" \
  --data-value "discovery_url=$discovery_url" >"$manifest"

kapp deploy --yes --app "$app_name" --diff-changes --file "$manifest"

popd >/dev/null

#
# Create the environment file
#
kind_capabilities_file="$pinniped_path/test/cluster_capabilities/kind.yaml"
pinniped_cluster_capability_file_content=$(cat "$kind_capabilities_file")

cat <<EOF >/tmp/integration-test-env
# The following env vars should be set before running 'go test -v -count 1 ./test/...'
export PINNIPED_NAMESPACE=${namespace}
export PINNIPED_APP_NAME=${app_name}
export PINNIPED_TEST_USER_USERNAME=${test_username}
export PINNIPED_TEST_USER_GROUPS=${test_groups}
export PINNIPED_TEST_USER_TOKEN=${test_username}:${test_password}
export PINNIPED_TEST_WEBHOOK_ENDPOINT=${webhook_url}
export PINNIPED_TEST_WEBHOOK_CA_BUNDLE=${webhook_ca_bundle}

read -r -d '' PINNIPED_CLUSTER_CAPABILITY_YAML << PINNIPED_CLUSTER_CAPABILITY_YAML_EOF || true
${pinniped_cluster_capability_file_content}
PINNIPED_CLUSTER_CAPABILITY_YAML_EOF

export PINNIPED_CLUSTER_CAPABILITY_YAML
EOF

#
# Print instructions for next steps
#
goland_vars=$(grep -v '^#' /tmp/integration-test-env | grep -E '^export .+=' | sed 's/export //g' | tr '\n' ';')

log_note
log_note "🚀 Ready to run integration tests! For example..."
log_note "    cd $pinniped_path"
log_note '    source /tmp/integration-test-env && go test -v -count 1 ./test/...'
log_note
log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
log_note "    ${goland_vars}PINNIPED_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
log_note
log_note "You can rerun this script to redeploy local production code changes while you are working."
log_note
log_note "To delete the deployments, run 'kapp delete -a local-user-authenticator -y && kapp delete -a pinniped -y'."
log_note "When you're finished, use 'kind delete cluster' to tear down the cluster."