Rename dex namespace, add new ytt value to deploy/tools, and remove Tilt

- Rename the test/deploy/dex directory to test/deploy/tools
- Rename the dex namespace to tools
- Add a new ytt value called `pinny_ldap_password` for the tools
  ytt templates
- This new value is not used on main at this time. We intend to use
  it in the forthcoming ldap branch. We're defining it on main so
  that the CI scripts can use it across all branches and PRs.
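
For illustration, rendering the tools templates with the new value will look
roughly like this (the real invocations live in the CI scripts and in
hack/prepare-for-integration-tests.sh below; the manifest path here is
arbitrary):

    ldap_test_password="$(openssl rand -hex 16)"
    ytt --file test/deploy/tools \
      --data-value "pinny_ldap_password=$ldap_test_password" \
      >/tmp/tools-manifest.yaml  # other data values omitted for brevity
    kapp deploy --yes --app tools --diff-changes --file /tmp/tools-manifest.yaml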

Signed-off-by: Ryan Richard <richardry@vmware.com>
Andrew Keesler authored on 2021-04-05 15:01:17 -07:00; committed by Ryan Richard
parent 9cd2b6e855
commit c53507809d
20 changed files with 181 additions and 591 deletions


@@ -5,9 +5,8 @@
./deploy
./Dockerfile
./generated/1.1*
./hack/lib/tilt/
./internal/mocks
./LICENSE
./site/
./test
**/*_test.go
**/*_test.go

.gitignore

@@ -17,8 +17,5 @@
# GoLand
.idea
# Intermediate files used by Tilt
/hack/lib/tilt/build
# MacOS Desktop Services Store
.DS_Store


@@ -1,6 +1,6 @@
# This is a configuration for https://pre-commit.com/.
# On macOS, try `brew install pre-commit` and then run `pre-commit install`.
exclude: '^(site|generated|hack/lib/tilt/tilt_modules)/'
exclude: '^(site|generated)/'
repos:
- repo: git://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0


@@ -1,203 +0,0 @@
load('ext://restart_process', 'docker_build_with_restart')
disable_snapshots()
analytics_settings(False)
update_settings(max_parallel_updates=8)
os.putenv('CGO_ENABLED', '0')
os.putenv('GOOS', 'linux')
os.putenv('GOARCH', 'amd64')
os.putenv('CGO_ENABLED', '0')
os.putenv('KUBE_GIT_VERSION', 'v0.0.0')
#####################################################################################################
# Compile all of our ./cmd/... binaries.
#
local_resource(
'compile',
'cd ../../../ && mkdir -p ./hack/lib/tilt/build && go build -v -ldflags "$(hack/get-ldflags.sh)" -o ./hack/lib/tilt/build ./cmd/...',
deps=['../../../cmd', '../../../internal', '../../../pkg', '../../../generated'],
)
#####################################################################################################
# Test IDP (Dex + cert generation + squid proxy)
#
# Render the IDP installation manifest using ytt.
k8s_yaml(local(['ytt',
'--file', '../../../test/deploy/dex',
'--data-value-yaml', 'supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]',
]))
# Tell tilt to watch all of those files for changes.
watch_file('../../../test/deploy/dex')
k8s_resource(objects=['dex:namespace'], new_name='dex-ns')
k8s_resource(workload='cert-issuer', resource_deps=['dex-ns'], objects=[
'cert-issuer:serviceaccount',
'cert-issuer:role',
'cert-issuer:rolebinding',
])
k8s_resource(workload='proxy', resource_deps=['dex-ns'])
k8s_resource(workload='dex', resource_deps=['dex-ns', 'cert-issuer'], objects=[
'dex-config:configmap',
])
#####################################################################################################
# Local-user-authenticator app
#
# Build a container image for local-user-authenticator, with live-update enabled.
docker_build_with_restart('image/local-user-auth', '.',
dockerfile='local-user-authenticator.Dockerfile',
entrypoint=['/usr/local/bin/local-user-authenticator'],
live_update=[sync('./build/local-user-authenticator', '/usr/local/bin/local-user-authenticator')],
only=['./build/local-user-authenticator'],
)
# Render the local-user-authenticator installation manifest using ytt.
k8s_yaml(local([
'ytt',
'--file', '../../../deploy/local-user-authenticator',
'--data-value', 'image_repo=image/local-user-auth',
'--data-value', 'image_tag=tilt-dev',
'--data-value-yaml', 'run_as_user=0',
'--data-value-yaml', 'run_as_group=0',
]))
# Tell tilt to watch all of those files for changes.
watch_file('../../../deploy/local-user-authenticator')
# Collect all the deployed local-user-authenticator resources under a "local-user-auth" resource tab.
k8s_resource(
workload='local-user-authenticator', # this is the deployment name
new_name='local-user-auth', # this is the name that will appear in the tilt UI
objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'local-user-authenticator:namespace',
'local-user-authenticator:serviceaccount',
'local-user-authenticator:role',
'local-user-authenticator:rolebinding',
],
)
#####################################################################################################
# Supervisor app
#
# Build a container image for supervisor, with live-update enabled.
docker_build_with_restart('image/supervisor', '.',
dockerfile='supervisor.Dockerfile',
entrypoint=['/usr/local/bin/pinniped-supervisor'],
live_update=[sync('./build/pinniped-supervisor', '/usr/local/bin/pinniped-supervisor')],
only=['./build/pinniped-supervisor'],
)
# Render the supervisor installation manifest using ytt.
#
# 31234 and 31243 are the same port numbers hardcoded in the port forwarding of our kind configuration.
# Don't think that you can just change this!
k8s_yaml(local([
'ytt',
'--file', '../../../deploy/supervisor',
'--data-value', 'app_name=pinniped-supervisor',
'--data-value', 'namespace=supervisor',
'--data-value', 'image_repo=image/supervisor',
'--data-value', 'image_tag=tilt-dev',
'--data-value', 'log_level=debug',
'--data-value-yaml', 'replicas=1',
'--data-value-yaml', 'service_http_nodeport_port=80',
'--data-value-yaml', 'service_http_nodeport_nodeport=31234',
'--data-value-yaml', 'service_https_nodeport_port=443',
'--data-value-yaml', 'service_https_nodeport_nodeport=31243',
'--data-value-yaml', 'service_https_clusterip_port=443',
'--data-value-yaml', 'custom_labels={mySupervisorCustomLabelName: mySupervisorCustomLabelValue}',
'--data-value-yaml', 'run_as_user=0',
'--data-value-yaml', 'run_as_group=0',
]))
# Tell tilt to watch all of those files for changes.
watch_file('../../../deploy/supervisor')
# Collect all the deployed supervisor resources under a "supervisor" resource tab.
k8s_resource(
workload='pinniped-supervisor', # this is the deployment name
new_name='supervisor', # this is the name that will appear in the tilt UI
objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'federationdomains.config.supervisor.pinniped.dev:customresourcedefinition',
'oidcidentityproviders.idp.supervisor.pinniped.dev:customresourcedefinition',
'pinniped-supervisor-static-config:configmap',
'supervisor:namespace',
'pinniped-supervisor:role',
'pinniped-supervisor:rolebinding',
'pinniped-supervisor:serviceaccount',
],
)
# Build a container image for the Concierge server, with live-update enabled.
docker_build_with_restart('image/concierge', '.',
dockerfile='concierge.Dockerfile',
entrypoint=['/usr/local/bin/pinniped-concierge'],
live_update=[sync('./build/pinniped-concierge', '/usr/local/bin/pinniped-concierge')],
only=['./build/pinniped-concierge'],
)
#####################################################################################################
# Concierge app
#
# Render the Concierge server installation manifest using ytt.
k8s_yaml(local([
'sh', '-c',
'ytt --file ../../../deploy/concierge ' +
'--data-value app_name=pinniped-concierge ' +
'--data-value namespace=concierge ' +
'--data-value image_repo=image/concierge ' +
'--data-value image_tag=tilt-dev ' +
'--data-value kube_cert_agent_image=debian:10.8-slim ' +
'--data-value discovery_url=$(TERM=dumb kubectl cluster-info | awk \'/master|control plane/ {print $NF}\') ' +
'--data-value log_level=debug ' +
'--data-value-yaml replicas=1 ' +
'--data-value-yaml "custom_labels={myConciergeCustomLabelName: myConciergeCustomLabelValue}" ' +
'--data-value-yaml run_as_user=0 ' +
'--data-value-yaml run_as_group=0',
]))
# Tell tilt to watch all of those files for changes.
watch_file('../../../deploy/concierge')
# Collect all the deployed Concierge resources under a "concierge" resource tab.
k8s_resource(
workload='pinniped-concierge', # this is the deployment name
new_name='concierge', # this is the name that will appear in the tilt UI
objects=[
# these are the objects that would otherwise appear in the "uncategorized" tab in the tilt UI
'concierge:namespace',
'pinniped-concierge-aggregated-api-server:clusterrole',
'pinniped-concierge-aggregated-api-server:clusterrolebinding',
'pinniped-concierge-aggregated-api-server:role',
'pinniped-concierge-aggregated-api-server:rolebinding',
'pinniped-concierge-cluster-info-lister-watcher:role',
'pinniped-concierge-cluster-info-lister-watcher:rolebinding',
'pinniped-concierge-config:configmap',
'pinniped-concierge-create-token-credential-requests:clusterrole',
'pinniped-concierge-create-token-credential-requests:clusterrolebinding',
'pinniped-concierge-extension-apiserver-authentication-reader:rolebinding',
'pinniped-concierge-kube-system-pod-read:role',
'pinniped-concierge-kube-system-pod-read:rolebinding',
'pinniped-concierge:clusterrolebinding',
'pinniped-concierge:serviceaccount',
'credentialissuers.config.concierge.pinniped.dev:customresourcedefinition',
'webhookauthenticators.authentication.concierge.pinniped.dev:customresourcedefinition',
'v1alpha1.login.concierge.pinniped.dev:apiservice',
],
)
#####################################################################################################
# Finish setting up cluster and creating integration test env file
#
# Collect environment variables needed to run our integration test suite.
local_resource(
'test-env',
'TILT_MODE=yes ../../prepare-for-integration-tests.sh',
resource_deps=['local-user-auth', 'concierge', 'supervisor', 'dex', 'proxy'],
deps=['../../prepare-for-integration-tests.sh'],
)


@@ -1,19 +0,0 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
# Use a runtime image based on Debian slim
FROM debian:10.8-slim
# Copy the binary which was built outside the container.
COPY build/pinniped-concierge /usr/local/bin/pinniped-concierge
# Document the port
EXPOSE 8443
# Run as non-root for security posture
# Commented out because it breaks the live-reload feature of Tilt. See https://github.com/tilt-dev/tilt/issues/2300
# Be aware that this creates a significant difference between running with Tilt and running otherwise.
#USER 1001:1001
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/pinniped-concierge"]


@@ -1,19 +0,0 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
# Use a runtime image based on Debian slim
FROM debian:10.8-slim
# Copy the binary which was built outside the container.
COPY build/local-user-authenticator /usr/local/bin/local-user-authenticator
# Document the port
EXPOSE 8443
# Run as non-root for security posture
# Commented out because it breaks the live-reload feature of Tilt. See https://github.com/tilt-dev/tilt/issues/2300
# Be aware that this creates a significant difference between running with Tilt and running otherwise.
#USER 1001:1001
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/local-user-authenticator"]


@@ -1,21 +0,0 @@
# Copyright 2020 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
# Use a runtime image based on Debian slim
FROM debian:10.8-slim
RUN apt-get update && apt-get install -y ca-certificates && rm -rf /var/lib/apt/lists/*
# Copy the binary which was built outside the container.
COPY build/pinniped-supervisor /usr/local/bin/pinniped-supervisor
# Document the port
EXPOSE 8080 8443
# Run as non-root for security posture
# Commented out because it breaks the live-reload feature of Tilt. See https://github.com/tilt-dev/tilt/issues/2300
# Be aware that this creates a significant difference between running with Tilt and running otherwise.
#USER 1001:1001
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/pinniped-supervisor"]


@@ -1,44 +0,0 @@
def docker_build_sub(ref, context, extra_cmds, child_context=None, base_suffix='-tilt_docker_build_sub_base', live_update=[], **kwargs):
"""
Substitutes in a docker image with extra Dockerfile commands.
This allows you to easily customize your docker build for your dev environment without changing your prod Dockerfile.
This works by:
1. Renaming the original image to, e.g. "myimage-base"
2. Creating a new image named, e.g. "myimage" that starts with "FROM myimage-base"
3. Adding whatever extra stuff you want
Examples:
```
# load the extension
load("ext://docker_build_sub", "docker_build_sub")
# ensure you have vim installed when running in dev, so you can
# shell into the box and look at files
docker_build_sub('myimage', '.', extra_cmds=["RUN apt-get install vim"])
# use live_update to sync files from outside your docker context
docker_build_sub('foo', 'foo', child_context='bar',
extra_cmds=['ADD . /bar'],
live_update=[
sync('foo', '/foo'),
sync('bar', '/bar'),
]
)
```
This function supports all the normal `docker_build` arguments. See [docker_build API docs](https://docs.tilt.dev/api.html#api.docker_build) for arguments not mentioned here.
Args:
context (str): The directory in which to build the parent (original) image. If child_context is not set, also the directory in which to build the new child image.
extra_cmds (List[str]): Any extra Dockerfile commands you want to run when building the image.
child_context (str): The directory in which to build the new child image. If unset (None), defaults to the parent image's context.
base_suffix (str): The suffix to append to the parent (original) image's name so that the new child image can take the original name. This is mostly ignorable, and just here in case the default generates a conflict for you.
"""
if not child_context:
child_context = context
base_ref = '%s-base' % ref
docker_build(base_ref, context, **kwargs)
df = '\n'.join(['FROM %s' % base_ref] + extra_cmds)
docker_build(ref, child_context, dockerfile_contents=df, live_update=live_update, **kwargs)


@@ -1,16 +0,0 @@
{
"Extensions": [
{
"Name": "restart_process",
"GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
"ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
"TimeFetched": "2020-09-03T23:04:40.167635-05:00"
},
{
"Name": "docker_build_sub",
"GitCommitHash": "b8df6f5f3368ced855da56e002027a3bd1a61bdf",
"ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
"TimeFetched": "2020-09-04T18:01:24.795509-05:00"
}
]
}


@@ -1,78 +0,0 @@
RESTART_FILE = '/.restart-proc'
TYPE_RESTART_CONTAINER_STEP = 'live_update_restart_container_step'
KWARGS_BLACKLIST = [
# since we'll be passing `dockerfile_contents` when building the
# child image, remove any kwargs that might conflict
'dockerfile', 'dockerfile_contents',
# 'target' isn't relevant to our child build--if we pass this arg,
# Docker will just fail to find the specified stage and error out
'target',
]
def docker_build_with_restart(ref, context, entrypoint, live_update,
base_suffix='-tilt_docker_build_with_restart_base', restart_file=RESTART_FILE, **kwargs):
"""Wrap a docker_build call and its associated live_update steps so that the last step
of any live update is to rerun the given entrypoint.
Args:
ref: name for this image (e.g. 'myproj/backend' or 'myregistry/myproj/backend'); as the parameter of the same name in docker_build
context: path to use as the Docker build context; as the parameter of the same name in docker_build
entrypoint: the command to be (re-)executed when the container starts or when a live_update is run
live_update: set of steps for updating a running container; as the parameter of the same name in docker_build
base_suffix: suffix for naming the base image, applied as {ref}{base_suffix}
restart_file: file that Tilt will update during a live_update to signal the entrypoint to rerun
**kwargs: will be passed to the underlying `docker_build` call
"""
# first, validate the given live_update steps
if len(live_update) == 0:
fail("`docker_build_with_restart` requires at least one live_update step")
for step in live_update:
if type(step) == TYPE_RESTART_CONTAINER_STEP:
fail("`docker_build_with_restart` is not compatible with live_update step: "+
"`restart_container()` (this extension is meant to REPLACE restart_container() )")
# rename the original image to make it a base image and declare a docker_build for it
base_ref = '{}{}'.format(ref, base_suffix)
docker_build(base_ref, context, **kwargs)
# declare a new docker build that adds a static binary of tilt-restart-wrapper
# (which makes use of `entr` to watch files and restart processes) to the user's image
df = '''
FROM tiltdev/restart-helper:2020-07-16 as restart-helper
FROM {}
USER root
RUN ["touch", "{}"]
COPY --from=restart-helper /tilt-restart-wrapper /
COPY --from=restart-helper /entr /
'''.format(base_ref, restart_file)
# Clean kwargs for building the child image (which builds on user's specified
# image and copies in Tilt's restart wrapper). In practice, this means removing
# kwargs that were relevant to building the user's specified image but are NOT
# relevant to building the child image / may conflict with args we specifically
# pass for the child image.
cleaned_kwargs = {k: v for k, v in kwargs.items() if k not in KWARGS_BLACKLIST}
# Change the entrypoint to use `tilt-restart-wrapper`.
# `tilt-restart-wrapper` makes use of `entr` (https://github.com/eradman/entr/) to
# re-execute $entrypoint whenever $restart_file changes
if type(entrypoint) == type(""):
entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file), "sh", "-c", entrypoint]
elif type(entrypoint) == type([]):
entrypoint_with_entr = ["/tilt-restart-wrapper", "--watch_file={}".format(restart_file)] + entrypoint
else:
fail("`entrypoint` must be a string or list of strings: got {}".format(type(entrypoint)))
# last live_update step should always be to modify $restart_file, which
# triggers the process wrapper to rerun $entrypoint
# NB: write `date` instead of just `touch`ing because `entr` doesn't respond
# to timestamp changes, only writes (see https://github.com/eradman/entr/issues/32)
live_update = live_update + [run('date > {}'.format(restart_file))]
docker_build(ref, context, entrypoint=entrypoint_with_entr, dockerfile_contents=df,
live_update=live_update, **cleaned_kwargs)
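
The mechanism above can be sketched with plain entr, which is what
tilt-restart-wrapper wraps (a rough illustration, not the literal command Tilt
runs):

    # entr -r restarts the long-lived child process every time the watched
    # file is written:
    echo /.restart-proc | entr -r /usr/local/bin/pinniped-supervisor

    # ...so the final live_update step forces a restart just by writing that
    # file; `date >` rather than `touch` because entr reacts to writes, not
    # timestamp changes:
    date > /.restart-proc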


@@ -14,14 +14,6 @@ set -euo pipefail
#
# Helper functions
#
TILT_MODE=${TILT_MODE:-no}
function tilt_mode() {
if [[ "$TILT_MODE" == "yes" ]]; then
return 0
fi
return 1
}
function log_note() {
GREEN='\033[0;32m'
NC='\033[0m'
@@ -143,94 +135,93 @@ if [ "$(kubectl version --client=true --short | cut -d '.' -f 2)" -lt 18 ]; then
exit 1
fi
if ! tilt_mode; then
if [[ "$clean_kind" == "yes" ]]; then
log_note "Deleting running kind cluster to prepare from a clean slate..."
./hack/kind-down.sh
if [[ "$clean_kind" == "yes" ]]; then
log_note "Deleting running kind cluster to prepare from a clean slate..."
./hack/kind-down.sh
fi
#
# Setup kind and build the app
#
log_note "Checking for running kind cluster..."
if ! kind get clusters | grep -q -e '^pinniped$'; then
log_note "Creating a kind cluster..."
# Our kind config exposes node port 31234 as 127.0.0.1:12345, 31243 as 127.0.0.1:12344, and 31235 as 127.0.0.1:12346
./hack/kind-up.sh
else
if ! kubectl cluster-info | grep -E '(master|control plane)' | grep -q 127.0.0.1; then
log_error "Seems like your kubeconfig is not targeting a local cluster."
log_error "Exiting to avoid accidentally running tests against a real cluster."
exit 1
fi
fi
#
# Setup kind and build the app
#
log_note "Checking for running kind cluster..."
if ! kind get clusters | grep -q -e '^pinniped$'; then
log_note "Creating a kind cluster..."
# Our kind config exposes node port 31234 as 127.0.0.1:12345, 31243 as 127.0.0.1:12344, and 31235 as 127.0.0.1:12346
./hack/kind-up.sh
else
if ! kubectl cluster-info | grep -E '(master|control plane)' | grep -q 127.0.0.1; then
log_error "Seems like your kubeconfig is not targeting a local cluster."
log_error "Exiting to avoid accidentally running tests against a real cluster."
exit 1
fi
fi
registry="pinniped.local"
repo="test/build"
registry_repo="$registry/$repo"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
if [[ "$skip_build" == "yes" ]]; then
most_recent_tag=$(docker images "$registry/$repo" --format "{{.Tag}}" | head -1)
if [[ -n "$most_recent_tag" ]]; then
tag="$most_recent_tag"
do_build=no
else
# Oops, there was no previous build. Need to build anyway.
do_build=yes
fi
registry="pinniped.local"
repo="test/build"
registry_repo="$registry/$repo"
tag=$(uuidgen) # always a new tag to force K8s to reload the image on redeploy
if [[ "$skip_build" == "yes" ]]; then
most_recent_tag=$(docker images "$registry/$repo" --format "{{.Tag}}" | head -1)
if [[ -n "$most_recent_tag" ]]; then
tag="$most_recent_tag"
do_build=no
else
# Oops, there was no previous build. Need to build anyway.
do_build=yes
fi
registry_repo_tag="${registry_repo}:${tag}"
if [[ "$do_build" == "yes" ]]; then
# Rebuild the code
log_note "Docker building the app..."
docker build . --tag "$registry_repo_tag"
fi
# Load it into the cluster
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag" --name pinniped
manifest=/tmp/manifest.yaml
#
# Deploy local-user-authenticator
#
pushd deploy/local-user-authenticator >/dev/null
log_note "Deploying the local-user-authenticator app to the cluster..."
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
popd >/dev/null
#
# Deploy dex
#
dex_test_password="$(openssl rand -hex 16)"
pushd test/deploy/dex >/dev/null
log_note "Deploying Dex to the cluster..."
ytt --file . \
--data-value-yaml "supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]" \
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
>"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app dex --diff-changes --file "$manifest"
popd >/dev/null
else
do_build=yes
fi
registry_repo_tag="${registry_repo}:${tag}"
if [[ "$do_build" == "yes" ]]; then
# Rebuild the code
log_note "Docker building the app..."
docker build . --tag "$registry_repo_tag"
fi
# Load it into the cluster
log_note "Loading the app's container image into the kind cluster..."
kind load docker-image "$registry_repo_tag" --name pinniped
manifest=/tmp/manifest.yaml
#
# Deploy local-user-authenticator
#
pushd deploy/local-user-authenticator >/dev/null
log_note "Deploying the local-user-authenticator app to the cluster..."
ytt --file . \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" >"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app local-user-authenticator --diff-changes --file "$manifest"
popd >/dev/null
#
# Deploy Tools
#
dex_test_password="$(openssl rand -hex 16)"
ldap_test_password="$(openssl rand -hex 16)"
pushd test/deploy/tools >/dev/null
log_note "Deploying Tools to the cluster..."
ytt --file . \
--data-value-yaml "supervisor_redirect_uris=[https://pinniped-supervisor-clusterip.supervisor.svc.cluster.local/some/path/callback]" \
--data-value "pinny_ldap_password=$ldap_test_password" \
--data-value "pinny_bcrypt_passwd_hash=$(htpasswd -nbBC 10 x "$dex_test_password" | sed -e "s/^x://")" \
>"$manifest"
kubectl apply --dry-run=client -f "$manifest" # Validate manifest schema.
kapp deploy --yes --app tools --diff-changes --file "$manifest"
popd >/dev/null
test_username="test-username"
test_groups="test-group-0,test-group-1"
test_password="$(openssl rand -hex 16)"
@@ -250,29 +241,27 @@ supervisor_app_name="pinniped-supervisor"
supervisor_namespace="supervisor"
supervisor_custom_labels="{mySupervisorCustomLabelName: mySupervisorCustomLabelValue}"
if ! tilt_mode; then
pushd deploy/supervisor >/dev/null
pushd deploy/supervisor >/dev/null
log_note "Deploying the Pinniped Supervisor app to the cluster..."
ytt --file . \
--data-value "app_name=$supervisor_app_name" \
--data-value "namespace=$supervisor_namespace" \
--data-value "api_group_suffix=$api_group_suffix" \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \
--data-value "log_level=debug" \
--data-value-yaml "custom_labels=$supervisor_custom_labels" \
--data-value-yaml 'service_http_nodeport_port=80' \
--data-value-yaml 'service_http_nodeport_nodeport=31234' \
--data-value-yaml 'service_https_nodeport_port=443' \
--data-value-yaml 'service_https_nodeport_nodeport=31243' \
--data-value-yaml 'service_https_clusterip_port=443' \
>"$manifest"
log_note "Deploying the Pinniped Supervisor app to the cluster..."
ytt --file . \
--data-value "app_name=$supervisor_app_name" \
--data-value "namespace=$supervisor_namespace" \
--data-value "api_group_suffix=$api_group_suffix" \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \
--data-value "log_level=debug" \
--data-value-yaml "custom_labels=$supervisor_custom_labels" \
--data-value-yaml 'service_http_nodeport_port=80' \
--data-value-yaml 'service_http_nodeport_nodeport=31234' \
--data-value-yaml 'service_https_nodeport_port=443' \
--data-value-yaml 'service_https_nodeport_nodeport=31243' \
--data-value-yaml 'service_https_clusterip_port=443' \
>"$manifest"
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
kapp deploy --yes --app "$supervisor_app_name" --diff-changes --file "$manifest"
popd >/dev/null
fi
popd >/dev/null
#
# Deploy the Pinniped Concierge
@@ -284,29 +273,27 @@ webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-cer
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/master|control plane/ {print $NF}')"
concierge_custom_labels="{myConciergeCustomLabelName: myConciergeCustomLabelValue}"
if ! tilt_mode; then
pushd deploy/concierge >/dev/null
pushd deploy/concierge >/dev/null
log_note "Deploying the Pinniped Concierge app to the cluster..."
ytt --file . \
--data-value "app_name=$concierge_app_name" \
--data-value "namespace=$concierge_namespace" \
--data-value "api_group_suffix=$api_group_suffix" \
--data-value "log_level=debug" \
--data-value-yaml "custom_labels=$concierge_custom_labels" \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \
--data-value "discovery_url=$discovery_url" >"$manifest"
log_note "Deploying the Pinniped Concierge app to the cluster..."
ytt --file . \
--data-value "app_name=$concierge_app_name" \
--data-value "namespace=$concierge_namespace" \
--data-value "api_group_suffix=$api_group_suffix" \
--data-value "log_level=debug" \
--data-value-yaml "custom_labels=$concierge_custom_labels" \
--data-value "image_repo=$registry_repo" \
--data-value "image_tag=$tag" \
--data-value "discovery_url=$discovery_url" >"$manifest"
kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest"
kapp deploy --yes --app "$concierge_app_name" --diff-changes --file "$manifest"
popd >/dev/null
fi
popd >/dev/null
#
# Download the test CA bundle that was generated in the Dex pod.
#
test_ca_bundle_pem="$(kubectl get secrets -n dex certs -o go-template='{{index .data "ca.pem" | base64decode}}')"
test_ca_bundle_pem="$(kubectl get secrets -n tools certs -o go-template='{{index .data "ca.pem" | base64decode}}')"
#
# Create the environment file
@@ -330,13 +317,29 @@ export PINNIPED_TEST_SUPERVISOR_CUSTOM_LABELS='${supervisor_custom_labels}'
export PINNIPED_TEST_SUPERVISOR_HTTP_ADDRESS="127.0.0.1:12345"
export PINNIPED_TEST_SUPERVISOR_HTTPS_ADDRESS="localhost:12344"
export PINNIPED_TEST_PROXY=http://127.0.0.1:12346
export PINNIPED_TEST_CLI_OIDC_ISSUER=https://dex.dex.svc.cluster.local/dex
export PINNIPED_TEST_LDAP_LDAP_URL=ldap://ldap.tools.svc.cluster.local
export PINNIPED_TEST_LDAP_LDAPS_URL=ldaps://ldap.tools.svc.cluster.local
export PINNIPED_TEST_LDAP_LDAPS_CA_BUNDLE="${test_ca_bundle_pem}"
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_USERNAME="cn=admin,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_BIND_ACCOUNT_PASSWORD=password
export PINNIPED_TEST_LDAP_USERS_SEARCH_BASE="ou=users,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_GROUPS_SEARCH_BASE="ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_USER_DN="cn=pinny,ou=users,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_USER_CN="pinny"
export PINNIPED_TEST_LDAP_USER_PASSWORD=${ldap_test_password}
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_NAME="mail"
export PINNIPED_TEST_LDAP_USER_EMAIL_ATTRIBUTE_VALUE="pinny.ldap@example.com"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_DN="cn=ball-game-players,ou=beach-groups,ou=groups,dc=pinniped,dc=dev;cn=seals,ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_DN="cn=pinnipeds,ou=groups,dc=pinniped,dc=dev;cn=mammals,ou=groups,dc=pinniped,dc=dev"
export PINNIPED_TEST_LDAP_EXPECTED_DIRECT_GROUPS_CN="ball-game-players;seals"
export PINNIPED_TEST_LDAP_EXPECTED_INDIRECT_GROUPS_CN="pinnipeds;mammals"
export PINNIPED_TEST_CLI_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
export PINNIPED_TEST_CLI_OIDC_ISSUER_CA_BUNDLE="${test_ca_bundle_pem}"
export PINNIPED_TEST_CLI_OIDC_CLIENT_ID=pinniped-cli
export PINNIPED_TEST_CLI_OIDC_CALLBACK_URL=http://127.0.0.1:48095/callback
export PINNIPED_TEST_CLI_OIDC_USERNAME=pinny@example.com
export PINNIPED_TEST_CLI_OIDC_PASSWORD=${dex_test_password}
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.dex.svc.cluster.local/dex
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER=https://dex.tools.svc.cluster.local/dex
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ISSUER_CA_BUNDLE="${test_ca_bundle_pem}"
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_ADDITIONAL_SCOPES=email
export PINNIPED_TEST_SUPERVISOR_UPSTREAM_OIDC_USERNAME_CLAIM=email
@@ -369,11 +372,8 @@ log_note
log_note 'Want to run integration tests in GoLand? Copy/paste this "Environment" value for GoLand run configurations:'
log_note " ${goland_vars}PINNIPED_TEST_CLUSTER_CAPABILITY_FILE=${kind_capabilities_file}"
log_note
if ! tilt_mode; then
log_note "You can rerun this script to redeploy local production code changes while you are working."
log_note
log_note "To delete the deployments, run:"
log_note " kapp delete -a local-user-authenticator -y && kapp delete -a $concierge_app_name -y && kapp delete -a $supervisor_app_name -y"
log_note "When you're finished, use './hack/kind-down.sh' to tear down the cluster."
fi
log_note "You can rerun this script to redeploy local production code changes while you are working."
log_note
log_note "To delete the deployments, run:"
log_note " kapp delete -a local-user-authenticator -y && kapp delete -a $concierge_app_name -y && kapp delete -a $supervisor_app_name -y"
log_note "When you're finished, use './hack/kind-down.sh' to tear down the cluster."


@@ -1,9 +0,0 @@
#!/usr/bin/env bash
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd "${ROOT}"
exec tilt down -f ./hack/lib/tilt/Tiltfile


@@ -1,11 +0,0 @@
#!/usr/bin/env bash
# Copyright 2020 the Pinniped contributors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd "${ROOT}"
exec tilt up -f ./hack/lib/tilt/Tiltfile "$@"


@@ -1,11 +1,11 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cert-issuer
namespace: dex
namespace: tools
labels:
app: cert-issuer
---
@@ -13,7 +13,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-issuer
namespace: dex
namespace: tools
labels:
app: cert-issuer
rules:
@@ -25,13 +25,13 @@ kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cert-issuer
namespace: dex
namespace: tools
labels:
app: cert-issuer
subjects:
- kind: ServiceAccount
name: cert-issuer
namespace: dex
namespace: tools
roleRef:
kind: Role
name: cert-issuer
@@ -41,7 +41,7 @@ apiVersion: batch/v1
kind: Job
metadata:
name: cert-issuer
namespace: dex
namespace: tools
labels:
app: cert-issuer
spec:
@@ -71,11 +71,21 @@ spec:
-ca ca.pem -ca-key ca-key.pem \
-config /tmp/cfssl-default.json \
-profile www \
-cn "dex.dex.svc.cluster.local" \
-hostname "dex.dex.svc.cluster.local" \
-cn "dex.tools.svc.cluster.local" \
-hostname "dex.tools.svc.cluster.local" \
/tmp/csr.json \
| cfssljson -bare dex
echo "generating LDAP server certificate..."
cfssl gencert \
-ca ca.pem -ca-key ca-key.pem \
-config /tmp/cfssl-default.json \
-profile www \
-cn "ldap.tools.svc.cluster.local" \
-hostname "ldap.tools.svc.cluster.local" \
/tmp/csr.json \
| cfssljson -bare ldap
chmod -R 777 /var/certs
echo "generated certificates:"
@@ -90,12 +100,12 @@ spec:
args:
- -c
- |
kubectl get secrets -n dex certs -o jsonpath='created: {.metadata.creationTimestamp}' || \
kubectl create secret generic certs --from-file=/var/certs
kubectl get secrets -n tools certs -o jsonpath='created: {.metadata.creationTimestamp}' || \
kubectl create secret generic -n tools certs --from-file=/var/certs
volumeMounts:
- name: certs
mountPath: /var/certs
volumes:
- name: certs
emptyDir: {}
restartPolicy: Never
restartPolicy: Never
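
A quick hypothetical check that this Job ran and published its output is to
list the keys of the generated secret; given the cfssljson -bare invocations
above, they should include ca.pem plus the dex and ldap serving-certificate
files (dex.pem, dex-key.pem, ldap.pem, ldap-key.pem, and the .csr files):

    kubectl get secret -n tools certs \
      -o go-template='{{range $k, $v := .data}}{{$k}}{{"\n"}}{{end}}'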


@@ -6,7 +6,7 @@
#@ load("@ytt:yaml", "yaml")
#@ def dexConfig():
issuer: https://dex.dex.svc.cluster.local/dex
issuer: https://dex.tools.svc.cluster.local/dex
storage:
type: sqlite3
config:
@@ -36,19 +36,12 @@ staticPasswords:
userID: "061d23d1-fe1e-4777-9ae9-59cd12abeaaa"
#@ end
---
apiVersion: v1
kind: Namespace
metadata:
name: dex
labels:
name: dex
---
apiVersion: v1
kind: ConfigMap
metadata:
name: dex-config
namespace: dex
namespace: tools
labels:
app: dex
data:
@@ -58,7 +51,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: dex
namespace: dex
namespace: tools
labels:
app: dex
spec:
@@ -102,7 +95,7 @@ apiVersion: v1
kind: Service
metadata:
name: dex
namespace: dex
namespace: tools
labels:
app: dex
spec:
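
Because an OIDC issuer must exactly match the URL advertised in its discovery
document, moving Dex into the tools namespace changes the issuer for every
client, which is why the test environment variables elsewhere in this commit
are updated too. A hypothetical in-cluster check, assuming ca.pem has been
extracted from the certs secret as in the script above:

    curl --cacert ca.pem \
      https://dex.tools.svc.cluster.local/dex/.well-known/openid-configuration
    # the "issuer" field should read https://dex.tools.svc.cluster.local/dex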


@@ -0,0 +1,8 @@
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
---
apiVersion: v1
kind: Namespace
metadata:
name: tools


@@ -1,4 +1,4 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0
#@ load("@ytt:data", "data")
@@ -7,7 +7,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: proxy
namespace: dex
namespace: tools
labels:
app: proxy
spec:
@@ -62,7 +62,7 @@ apiVersion: v1
kind: Service
metadata:
name: proxy
namespace: dex
namespace: tools
labels:
app: proxy
spec:
@@ -71,4 +71,4 @@ spec:
app: proxy
ports:
- port: 3128
nodePort: #@ data.values.ports.node
nodePort: #@ data.values.ports.node
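
This squid proxy is what lets the tests (and a developer) reach in-cluster
service names from the host; combined with the kind port mapping noted in the
script above (PINNIPED_TEST_PROXY=http://127.0.0.1:12346), a hypothetical
host-side check looks like:

    https_proxy=http://127.0.0.1:12346 curl -sk \
      https://dex.tools.svc.cluster.local/dex/.well-known/openid-configuration
    # -k skips TLS verification for brevity; use --cacert ca.pem to verify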


@@ -22,3 +22,6 @@ supervisor_redirect_uris: []
#! The bcrypt-hashed password of the pinny test user account.
pinny_bcrypt_passwd_hash:
#! The plaintext password of the LDAP test account user.
pinny_ldap_password:


@@ -357,7 +357,7 @@ func TestImpersonationProxy(t *testing.T) { //nolint:gocyclo // yeah, it's compl
t.Log("curlStdErr: " + curlStdErr.String())
t.Log("stdout: " + curlStdOut.String())
}
t.Log("time: ", time.Now())
t.Log("Running curl through the kubectl port-forward port for 70 seconds. Elapsed time:", time.Now().Sub(startTime))
time.Sleep(1 * time.Second)
}


@@ -1,4 +1,4 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package browsertest provides integration test helpers for our browser-based tests.
@@ -126,8 +126,8 @@ func LoginToUpstream(t *testing.T, page *agouti.Page, upstream library.TestOIDCU
},
{
Name: "Dex",
IssuerPattern: regexp.MustCompile(`\Ahttps://dex\.dex\.svc\.cluster\.local/dex.*\z`),
LoginPagePattern: regexp.MustCompile(`\Ahttps://dex\.dex\.svc\.cluster\.local/dex/auth/local.+\z`),
IssuerPattern: regexp.MustCompile(`\Ahttps://dex\.tools\.svc\.cluster\.local/dex.*\z`),
LoginPagePattern: regexp.MustCompile(`\Ahttps://dex\.tools\.svc\.cluster\.local/dex/auth/local.+\z`),
UsernameSelector: "input#login",
PasswordSelector: "input#password",
LoginButtonSelector: "button#submit-login",