Implement very rough skeleton of the start of a supervisor server
- This is just a stab at a starting place, because it felt easier to put something down on paper than to keep staring at a blank page
parent 7eed7ba19a
commit 76bd462cf8
Dockerfile
@@ -21,6 +21,7 @@ COPY hack ./hack
# Build the executable binary (CGO_ENABLED=0 means static linking)
RUN mkdir out \
  && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(hack/get-ldflags.sh)" -o out ./cmd/pinniped-server/... \
  && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(hack/get-ldflags.sh)" -o out ./cmd/pinniped-supervisor/... \
  && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out ./cmd/local-user-authenticator/...

# Use a runtime image based on Debian slim
@@ -28,6 +29,7 @@ FROM debian:10.5-slim

# Copy the binaries from the build-env stage
COPY --from=build-env /work/out/pinniped-server /usr/local/bin/pinniped-server
COPY --from=build-env /work/out/pinniped-supervisor /usr/local/bin/pinniped-supervisor
COPY --from=build-env /work/out/local-user-authenticator /usr/local/bin/local-user-authenticator

# Document the port
cmd/pinniped-supervisor/main.go (new file, 186 lines)
@@ -0,0 +1,186 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/signal"
	"time"

	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/version"
	"k8s.io/client-go/rest"
	restclient "k8s.io/client-go/rest"
	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"
	"sigs.k8s.io/yaml"

	"go.pinniped.dev/internal/controller/supervisorconfig"
	"go.pinniped.dev/internal/controllerlib"
	"go.pinniped.dev/internal/downward"
)

const (
	singletonWorker       = 1
	defaultResyncInterval = 3 * time.Minute
)

type helloWorld struct{}

func (w *helloWorld) start(ctx context.Context, l net.Listener) error {
	server := http.Server{
		Handler: w,
	}

	errCh := make(chan error)
	go func() {
		errCh <- server.Serve(l)
	}()

	go func() {
		select {
		case err := <-errCh:
			klog.InfoS("server exited", "err", err)
		case <-ctx.Done():
			klog.InfoS("server context cancelled", "err", ctx.Err())
			if err := server.Shutdown(context.Background()); err != nil {
				klog.InfoS("server shutdown failed", "err", err)
			}
		}
	}()

	return nil
}

func (w *helloWorld) ServeHTTP(rsp http.ResponseWriter, req *http.Request) {
	// TODO this is just a placeholder to allow manually testing that this is reachable; we don't want a hello world endpoint
	defer req.Body.Close()
	_, _ = fmt.Fprintf(rsp, "Hello, world!")
}

func waitForSignal() os.Signal {
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt)
	return <-signalCh
}

func startControllers(
	ctx context.Context,
	kubeClient kubernetes.Interface,
	kubeInformers kubeinformers.SharedInformerFactory,
	serverInstallationNamespace string,
	staticConfig StaticConfig,
) {
	// Create controller manager.
	controllerManager := controllerlib.
		NewManager().
		WithController(
			supervisorconfig.NewDynamicConfigWatcherController(
				serverInstallationNamespace,
				staticConfig.NamesConfig.DynamicConfigMap,
				kubeClient,
				kubeInformers.Core().V1().ConfigMaps(),
				controllerlib.WithInformer,
			),
			singletonWorker,
		)

	kubeInformers.Start(ctx.Done())

	go controllerManager.Start(ctx)
}

func newK8sClient() (kubernetes.Interface, error) {
	kubeConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
	}

	// Connect to the core Kubernetes API.
	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("could not load in-cluster configuration: %w", err)
	}

	return kubeClient, nil
}

func run(serverInstallationNamespace string, staticConfig StaticConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	kubeClient, err := newK8sClient()
	if err != nil {
		return fmt.Errorf("cannot create k8s client: %w", err)
	}

	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(
		kubeClient,
		defaultResyncInterval,
		kubeinformers.WithNamespace(serverInstallationNamespace),
	)

	startControllers(ctx, kubeClient, kubeInformers, serverInstallationNamespace, staticConfig)

	//nolint: gosec // Intentionally binding to all network interfaces.
	l, err := net.Listen("tcp", ":80")
	if err != nil {
		return fmt.Errorf("cannot create listener: %w", err)
	}
	defer l.Close()

	helloHandler := &helloWorld{}
	err = helloHandler.start(ctx, l)
	if err != nil {
		return fmt.Errorf("cannot start webhook: %w", err)
	}
	klog.InfoS("supervisor is ready", "address", l.Addr().String())

	gotSignal := waitForSignal()
	klog.InfoS("supervisor exiting", "signal", gotSignal)

	return nil
}

type StaticConfig struct {
	NamesConfig NamesConfigSpec `json:"names"`
}

type NamesConfigSpec struct {
	DynamicConfigMap string `json:"dynamicConfigMap"`
}

func main() {
	logs.InitLogs()
	defer logs.FlushLogs()

	klog.Infof("Running %s at %#v", rest.DefaultKubernetesUserAgent(), version.Get())
	klog.Infof("Command-line arguments were: %s %s %s", os.Args[0], os.Args[1], os.Args[2])

	// Discover in which namespace we are installed.
	podInfo, err := downward.Load(os.Args[1])
	if err != nil {
		klog.Fatal(fmt.Errorf("could not read pod metadata: %w", err))
	}

	// Read static config.
	data, err := ioutil.ReadFile(os.Args[2])
	if err != nil {
		klog.Fatal(fmt.Errorf("read file: %w", err))
	}
	var staticConfig StaticConfig
	if err := yaml.Unmarshal(data, &staticConfig); err != nil {
		klog.Fatal(fmt.Errorf("decode yaml: %w", err))
	}

	if err := run(podInfo.Namespace, staticConfig); err != nil {
		klog.Fatal(err)
	}
}
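The supervisor reads two positional arguments in `main` above, `os.Args[1]` (a downward API podinfo path) and `os.Args[2]` (a static config file path). A minimal sketch of the invocation, using the same paths the Deployment manifest below mounts (illustrative only, not part of this commit):

```bash
# Sketch: invoke the supervisor binary with its two positional arguments.
# /etc/podinfo is the downward API volume and /etc/config/pinniped.yaml is the
# static ConfigMap mount, matching the Deployment's args below.
/usr/local/bin/pinniped-supervisor /etc/podinfo /etc/config/pinniped.yaml
```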
deploy-supervisor/README.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# Deploying the Pinniped Supervisor

## What is the Pinniped Supervisor?

The Pinniped Supervisor app is a component of the Pinniped OIDC and Cluster Federation solutions.
It can be deployed when those features are needed.

## Installing the Latest Version with Default Options

```bash
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/latest/download/install-supervisor.yaml
```

## Installing an Older Version with Default Options

Choose your preferred [release](https://github.com/vmware-tanzu/pinniped/releases) version number
and use it to replace the version number in the URL below.

```bash
# Replace v0.3.0 with your preferred version in the URL below
kubectl apply -f https://github.com/vmware-tanzu/pinniped/releases/download/v0.3.0/install-supervisor.yaml
```

## Installing with Custom Options

Creating your own deployment YAML file requires `ytt` from [Carvel](https://carvel.dev/) to template the YAML files
in the [deploy-supervisor](../deploy-supervisor) directory.
Either [install `ytt`](https://get-ytt.io/) or use the [container image from Dockerhub](https://hub.docker.com/r/k14s/image/tags).

1. `git clone` this repo and `git checkout` the release version tag of the release that you would like to deploy.
2. The configuration options are in [deploy-supervisor/values.yaml](values.yaml).
   Fill in the values in that file, or override those values using additional `ytt` command-line options in
   the command below. Use the release version tag as the `image_tag` value.
3. In a terminal, `cd` to this `deploy-supervisor` directory.
4. To generate the final YAML files, run `ytt --file .`
5. Deploy the generated YAML using your preferred deployment tool, such as `kubectl` or [`kapp`](https://get-kapp.io/).
   For example: `ytt --file . | kapp deploy --yes --app pinniped-supervisor --diff-changes --file -`
   (a consolidated sketch of these steps appears just after this list).
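A consolidated sketch of steps 1 through 5 above. The version tag `v0.3.0` and the image values are illustrative placeholders; substitute your own.

```bash
# Sketch of the custom-install workflow described above.
git clone https://github.com/vmware-tanzu/pinniped.git
cd pinniped
git checkout v0.3.0   # use your preferred release tag
cd deploy-supervisor

# Render the templates, overriding values.yaml entries on the command line,
# then deploy the generated YAML with kapp.
ytt --file . \
  --data-value "image_repo=docker.io/getpinniped/pinniped-server" \
  --data-value "image_tag=v0.3.0" \
  | kapp deploy --yes --app pinniped-supervisor --diff-changes --file -
```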

## Configuring After Installing

TODO: Provide some instructions here.
deploy-supervisor/deployment.yaml (new file, 146 lines)
@@ -0,0 +1,146 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")

---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ data.values.namespace
  labels:
    name: #@ data.values.namespace
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ data.values.app_name + "-static-config"
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
data:
  #@yaml/text-templated-strings
  pinniped.yaml: |
    names:
      dynamicConfigMap: (@= data.values.app_name + "-dynamic-config" @)
---
#@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
apiVersion: v1
kind: Secret
metadata:
  name: image-pull-secret
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: #@ data.values.image_pull_dockerconfigjson
#@ end
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  replicas: #@ data.values.replicas
  selector:
    matchLabels:
      app: #@ data.values.app_name
  template:
    metadata:
      labels:
        app: #@ data.values.app_name
    spec:
      serviceAccountName: #@ data.values.app_name
      #@ if data.values.image_pull_dockerconfigjson and data.values.image_pull_dockerconfigjson != "":
      imagePullSecrets:
        - name: image-pull-secret
      #@ end
      containers:
        - name: pinniped-supervisor
          #@ if data.values.image_digest:
          image: #@ data.values.image_repo + "@" + data.values.image_digest
          #@ else:
          image: #@ data.values.image_repo + ":" + data.values.image_tag
          #@ end
          imagePullPolicy: IfNotPresent
          command: #! override the default entrypoint
            - /usr/local/bin/pinniped-supervisor
          args:
            - /etc/podinfo #! TODO proper flag parsing instead of positional
            - /etc/config/pinniped.yaml #! TODO proper flag parsing instead of positional
          resources:
            requests:
              memory: "128Mi"
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
            - name: podinfo
              mountPath: /etc/podinfo
          #! livenessProbe:
          #!   httpGet:
          #!     path: /healthz
          #!     port: 443
          #!     scheme: HTTPS
          #!   initialDelaySeconds: 2
          #!   timeoutSeconds: 15
          #!   periodSeconds: 10
          #!   failureThreshold: 5
          #! readinessProbe:
          #!   httpGet:
          #!     path: /healthz
          #!     port: 443
          #!     scheme: HTTPS
          #!   initialDelaySeconds: 2
          #!   timeoutSeconds: 3
          #!   periodSeconds: 10
          #!   failureThreshold: 3
      volumes:
        - name: config-volume
          configMap:
            name: #@ data.values.app_name + "-static-config"
        - name: podinfo
          downwardAPI:
            items:
              - path: "labels"
                fieldRef:
                  fieldPath: metadata.labels
              - path: "namespace"
                fieldRef:
                  fieldPath: metadata.namespace
      #! This will help make sure our multiple pods run on different nodes, making
      #! our deployment "more" "HA".
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 50
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app: #@ data.values.app_name
                topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
spec:
  type: ClusterIP
  selector:
    app: #@ data.values.app_name
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
deploy-supervisor/rbac.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@ load("@ytt:data", "data")

#! Give permission to various objects within the app's own namespace
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
rules:
  - apiGroups: [""]
    resources: [configmaps]
    verbs: [get, list, watch]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: #@ data.values.app_name
  namespace: #@ data.values.namespace
  labels:
    app: #@ data.values.app_name
subjects:
  - kind: ServiceAccount
    name: #@ data.values.app_name
    namespace: #@ data.values.namespace
roleRef:
  kind: Role
  name: #@ data.values.app_name
  apiGroup: rbac.authorization.k8s.io
deploy-supervisor/values.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
#! Copyright 2020 the Pinniped contributors. All Rights Reserved.
#! SPDX-License-Identifier: Apache-2.0

#@data/values
---

app_name: pinniped-supervisor
namespace: pinniped-supervisor

#! Specify how many replicas of the Pinniped server to run.
replicas: 2

#! Specify either an image_digest or an image_tag. If both are given, only image_digest will be used.
image_repo: docker.io/getpinniped/pinniped-server
image_digest: #! e.g. sha256:f3c4fdfd3ef865d4b97a1fd295d94acc3f0c654c46b6f27ffad5cf80216903c8
image_tag: latest

#! Specifies a secret to be used when pulling the above container image.
#! Can be used when the above image_repo is a private registry.
#! Typically the value would be the output of: kubectl create secret docker-registry x --docker-server=https://example.io --docker-username="USERNAME" --docker-password="PASSWORD" --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]'
#! Optional.
image_pull_dockerconfigjson: #! e.g. {"auths":{"https://registry.example.com":{"username":"USERNAME","password":"PASSWORD","auth":"BASE64_ENCODED_USERNAME_COLON_PASSWORD"}}}
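As a sketch of how the optional `image_pull_dockerconfigjson` value above can be produced and then passed to `ytt` (the registry URL and credentials are placeholders, not values from this commit):

```bash
# Generate the base64-encoded .dockerconfigjson payload for a private registry,
# following the command suggested in the values.yaml comment above.
secret_value="$(kubectl create secret docker-registry x \
  --docker-server=https://registry.example.com \
  --docker-username="USERNAME" \
  --docker-password="PASSWORD" \
  --dry-run=client -o json | jq -r '.data[".dockerconfigjson"]')"

# Pass it as an override when rendering the supervisor manifests.
ytt --file . --data-value "image_pull_dockerconfigjson=$secret_value"
```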
@@ -174,15 +174,29 @@ kubectl create secret generic "$test_username" \
  --output yaml |
  kubectl apply -f -

#
# Deploy the Pinniped Supervisor
#
pushd deploy-supervisor >/dev/null

log_note "Deploying the Pinniped Supervisor app to the cluster..."
ytt --file . \
  --data-value "image_repo=$registry_repo" \
  --data-value "image_tag=$tag" >"$manifest"

kapp deploy --yes --app "pinniped-supervisor" --diff-changes --file "$manifest"

popd >/dev/null

#
# Deploy Pinniped
#
app_name="pinniped"
namespace="integration"
webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate"
webhook_ca_bundle="$(kubectl get secret local-user-authenticator-tls-serving-certificate --namespace local-user-authenticator -o 'jsonpath={.data.caCertificate}')"
discovery_url="$(TERM=dumb kubectl cluster-info | awk '/Kubernetes master/ {print $NF}')"

#
# Deploy Pinniped
#
pushd deploy >/dev/null

log_note "Deploying the Pinniped app to the cluster..."
@@ -0,0 +1,52 @@
// Copyright 2020 the Pinniped contributors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package supervisorconfig

import (
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	pinnipedcontroller "go.pinniped.dev/internal/controller"
	"go.pinniped.dev/internal/controllerlib"
)

type dynamicConfigWatcherController struct {
	k8sClient         kubernetes.Interface
	configMapInformer corev1informers.ConfigMapInformer
}

func NewDynamicConfigWatcherController(
	serverInstallationNamespace string,
	configMapName string,
	k8sClient kubernetes.Interface,
	configMapInformer corev1informers.ConfigMapInformer,
	withInformer pinnipedcontroller.WithInformerOptionFunc,
) controllerlib.Controller {
	return controllerlib.New(
		controllerlib.Config{
			Name: "DynamicConfigWatcherController",
			Syncer: &dynamicConfigWatcherController{
				k8sClient:         k8sClient,
				configMapInformer: configMapInformer,
			},
		},
		withInformer(
			configMapInformer,
			pinnipedcontroller.NameAndNamespaceExactMatchFilterFactory(configMapName, serverInstallationNamespace),
			controllerlib.InformerOption{},
		),
	)
}

// Sync implements controllerlib.Syncer.
func (c *dynamicConfigWatcherController) Sync(ctx controllerlib.Context) error {
	// TODO Watch the configmap to find the issuer name, ingress url, etc.
	// TODO Update some kind of in-memory representation of the configuration so the discovery endpoint can use it.
	// TODO The discovery endpoint would return an error until all missing configuration options are filled in.

	klog.InfoS("DynamicConfigWatcherController sync finished")

	return nil
}