#! Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@data/values
---

app_name: pinniped-concierge

#! Creates a new namespace statically in yaml with the given name and installs the app into that namespace.
namespace: pinniped-concierge
#! If specified, assumes that a namespace of the given name already exists and installs the app into that namespace.
#! If both `namespace` and `into_namespace` are specified, then only `into_namespace` is used.
into_namespace: #! e.g. my-preexisting-namespace

#! All resources created statically by yaml at install-time and all resources created dynamically
#! by controllers at runtime will be labelled with `app: $app_name` and also with the labels
#! specified here. The value of `custom_labels` must be a map of string keys to string values.
#! The app can be uninstalled either by:
#! 1. Deleting the static install-time yaml resources including the static namespace, which will cascade and also delete
#!    resources that were dynamically created by controllers at runtime
#! 2. Or, deleting all resources by label, which does not assume that there was a static install-time yaml namespace.
custom_labels: {} #! e.g. {myCustomLabelName: myCustomLabelValue, otherCustomLabelName: otherCustomLabelValue}

#! Specify how many replicas of the Pinniped server to run.
replicas: 2

#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
image_repo: projects.registry.vmware.com/pinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
image_tag: latest

#! Optionally specify a different image for the "kube-cert-agent" pod which is scheduled
#! on the control plane. This image needs only to include `sleep` and `cat` binaries.
#! By default, the same image specified for image_repo/image_digest/image_tag will be re-used.
kube_cert_agent_image:

#! Specifies a secret to be used when pulling the above `image_repo` container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}

#! Pinniped will try to guess the right K8s API URL for sharing that information with potential clients.
#! This setting allows the guess to be overridden.
#! Optional.
discovery_url: #! e.g., https://example.com

#! Specify the duration and renewal interval for the API serving certificate.
#! The defaults are set to expire the cert about every 30 days, and to rotate it
#! about every 25 days.
api_serving_certificate_duration_seconds: 2592000
api_serving_certificate_renew_before_seconds: 2160000

#! Specify the verbosity of logging: info ("nice to know" information), debug (developer
#! information), trace (timing information), all (kitchen sink).
log_level: #! By default, when this value is left unset, only warnings and errors are printed. There is no way to suppress warning and error logs.

run_as_user: 65532 #! run_as_user specifies the user ID that will own the process, see the Dockerfile for the reasoning behind this choice
run_as_group: 65532 #! run_as_group specifies the group ID that will own the process, see the Dockerfile for the reasoning behind this choice

#! Specify the API group suffix for all Pinniped API groups. By default, this is set to
#! pinniped.dev, so Pinniped API groups will look like foo.pinniped.dev,
#! authentication.concierge.pinniped.dev, etc. As an example, if this is set to tuna.io, then
#! Pinniped API groups will look like foo.tuna.io, authentication.concierge.tuna.io, etc.
api_group_suffix: pinniped.dev

#! Customize CredentialIssuer.spec.impersonationProxy to change how the concierge
#! handles impersonation.
impersonation_proxy_spec:
  #! options are "auto", "disabled" or "enabled".
  #! If auto, the impersonation proxy will run only if the cluster signing key is not available
  #! and the other strategy does not work.
  #! If disabled, the impersonation proxy will never run, which could mean that the concierge
  #! doesn't work at all.
  #! If enabled, the impersonation proxy will always run regardless of other strategies available.
  mode: auto
  #! The endpoint which the client should use to connect to the impersonation proxy.
  #! If left unset, the client will default to connecting based on the ClusterIP or LoadBalancer
  #! endpoint.
  external_endpoint:
  service:
    #! Options are "LoadBalancer", "ClusterIP" and "None".
    #! LoadBalancer automatically provisions a Service of type LoadBalancer pointing at
    #! the impersonation proxy. Some cloud providers will allocate
    #! a public IP address by default even on private clusters.
    #! ClusterIP automatically provisions a Service of type ClusterIP pointing at the
    #! impersonation proxy.
    #! None does not provision either and assumes that you have set the external_endpoint
    #! and set up your own ingress to connect to the impersonation proxy.
    type: LoadBalancer
    #! The annotations that should be set on the ClusterIP or LoadBalancer Service.
    annotations: {service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "4000"}
    #! When mode LoadBalancer is set, this will set the LoadBalancer Service's Spec.LoadBalancerIP.
    load_balancer_ip:

#! Set the standard golang HTTPS_PROXY and NO_PROXY environment variables on the Concierge containers.
#! These will be used when the Concierge makes backend-to-backend calls to authenticators using HTTPS,
#! e.g. when the Concierge fetches discovery documents, JWKS keys, and POSTs to token webhooks.
#! The Concierge never makes insecure HTTP calls, so there is no reason to set HTTP_PROXY.
#! Optional.
https_proxy: #! e.g. http://proxy.example.com
no_proxy: "$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost,.svc,.cluster.local" #! do not proxy Kubernetes endpoints