Use a DaemonSet instead of a Deployment to deploy our app
- For high availability reasons, we would like our app to scale linearly with the size of the control plane. Using a DaemonSet allows us to run one Pod on each node-role.kubernetes.io/master node.
- The hope is that the Service we create will load balance across these Pods appropriately.
parent e0f0eca512
commit 4cb0fd3949
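The commit message above claims that the DaemonSet gives us one Pod per node-role.kubernetes.io/master node, so availability scales with the control plane. Purely as an illustration (not part of this commit), a minimal client-go sketch that compares the number of master nodes against the DaemonSet's scheduled and available Pods might look like the following; the namespace and DaemonSet name ("placeholder-name") and the kubeconfig location are assumptions standing in for data.values.namespace and data.values.app_name.

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at the default location (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Count the nodes targeted by the DaemonSet's nodeSelector (label key must exist).
	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: "node-role.kubernetes.io/master",
	})
	if err != nil {
		panic(err)
	}

	// Assumption: the app is installed as DaemonSet "placeholder-name" in namespace "placeholder-name".
	ds, err := client.AppsV1().DaemonSets("placeholder-name").Get(ctx, "placeholder-name", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// If the commit's intent holds, the scheduled Pod count should track the master node count.
	fmt.Printf("master nodes: %d, pods scheduled: %d, pods available: %d\n",
		len(nodes.Items), ds.Status.DesiredNumberScheduled, ds.Status.NumberAvailable)
}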
@@ -30,18 +30,16 @@ data:
     url: (@= data.values.webhook_url @)
     caBundle: (@= data.values.webhook_ca_bundle @)
 ---
-#! TODO set up healthy, ready, etc. probes correctly for our deployment
-#! TODO set the priority-critical-urgent on our deployment to ask kube to never let it die
-#! TODO set resource minimums (e.g. 512MB RAM) on the deployment to make sure we get scheduled onto a reasonable node
+#! TODO set up healthy, ready, etc. probes correctly?
+#! TODO set resource minimums (e.g. 512MB RAM) to make sure we get scheduled onto a reasonable node?
 apiVersion: apps/v1
-kind: Deployment
+kind: DaemonSet
 metadata:
-  name: #@ data.values.app_name + "-deployment"
+  name: #@ data.values.app_name
   namespace: #@ data.values.namespace
   labels:
     app: #@ data.values.app_name
 spec:
-  replicas: 1 #! TODO more than one replica for high availability, and share the same serving certificate among them (maybe using client-go leader election)
   selector:
     matchLabels:
       app: #@ data.values.app_name
@@ -92,16 +90,16 @@ spec:
         hostPath:
           path: /etc/kubernetes/pki
           type: DirectoryOrCreate
-      #! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
-      #! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
-      #! priorityClassName: system-cluster-critical
-      nodeSelector:
+      nodeSelector: #! Create Pods on all nodes which match this node selector, and not on any other nodes.
         node-role.kubernetes.io/master: ""
       tolerations:
       - key: CriticalAddonsOnly
         operator: Exists
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
+      - key: node-role.kubernetes.io/master #! Allow running on master nodes.
+        effect: NoSchedule
+      #! "system-cluster-critical" cannot be used outside the kube-system namespace until Kubernetes >= 1.17,
+      #! so we skip setting this for now (see https://github.com/kubernetes/kubernetes/issues/60596).
+      #!priorityClassName: system-cluster-critical
 ---
 apiVersion: v1
 kind: Service
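To make the scheduling change above easier to read, here is the same nodeSelector/tolerations intent expressed with the Kubernetes Go API types. This is only a sketch of what the ytt YAML maps to, not code added by this commit.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	podSpec := corev1.PodSpec{
		// Only schedule onto nodes carrying the master role label; the DaemonSet then
		// runs exactly one Pod on each such node.
		NodeSelector: map[string]string{"node-role.kubernetes.io/master": ""},
		Tolerations: []corev1.Toleration{
			// Tolerate the CriticalAddonsOnly taint used by some installers.
			{Key: "CriticalAddonsOnly", Operator: corev1.TolerationOpExists},
			// Tolerate the NoSchedule taint that master/control-plane nodes carry.
			{Key: "node-role.kubernetes.io/master", Effect: corev1.TaintEffectNoSchedule},
		},
	}
	fmt.Printf("%+v\n", podSpec)
}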
@@ -11,27 +11,23 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/suzerain-io/placeholder-name/test/library"
 )
 
-func TestGetDeployment(t *testing.T) {
+func TestAppAvailability(t *testing.T) {
 	library.SkipUnlessIntegration(t)
 	namespaceName := library.Getenv(t, "PLACEHOLDER_NAME_NAMESPACE")
-	deploymentName := library.Getenv(t, "PLACEHOLDER_NAME_DEPLOYMENT")
+	daemonSetName := library.Getenv(t, "PLACEHOLDER_NAME_APP_NAME")
 
 	client := library.NewClientset(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
-	appDeployment, err := client.AppsV1().Deployments(namespaceName).Get(ctx, deploymentName, metav1.GetOptions{})
+	daemonSet, err := client.AppsV1().DaemonSets(namespaceName).Get(ctx, daemonSetName, metav1.GetOptions{})
 	require.NoError(t, err)
 
-	cond := library.GetDeploymentCondition(appDeployment.Status, appsv1.DeploymentAvailable)
-	require.NotNil(t, cond)
-	require.Equalf(t, corev1.ConditionTrue, cond.Status, "app should be available: %s", library.Sdump(appDeployment))
+	require.GreaterOrEqual(t, daemonSet.Status.NumberAvailable, int32(1))
 }
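The rewritten test asserts on a single snapshot of Status.NumberAvailable, which can be flaky right after the app is (re)deployed. A sketch of a polling variant, reusing the same library helpers and imports as the test above but with a hypothetical test name, could use testify's require.Eventually; this is not part of the commit.

func TestAppEventuallyAvailable(t *testing.T) {
	library.SkipUnlessIntegration(t)
	namespaceName := library.Getenv(t, "PLACEHOLDER_NAME_NAMESPACE")
	daemonSetName := library.Getenv(t, "PLACEHOLDER_NAME_APP_NAME")
	client := library.NewClientset(t)

	// Poll until the DaemonSet reports at least one available Pod, instead of
	// failing on a single racy snapshot.
	require.Eventually(t, func() bool {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		ds, err := client.AppsV1().DaemonSets(namespaceName).Get(ctx, daemonSetName, metav1.GetOptions{})
		return err == nil && ds.Status.NumberAvailable >= 1
	}, 2*time.Minute, 5*time.Second, "expected at least one available DaemonSet Pod")
}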
@@ -1,20 +0,0 @@
-/*
-Copyright 2020 VMware, Inc.
-SPDX-License-Identifier: Apache-2.0
-*/
-
-package library
-
-import appsv1 "k8s.io/api/apps/v1"
-
-// GetDeploymentCondition returns the condition with the provided type.
-// Copied from k8s.io/kubectl/pkg/util/deployment/deployment.go to prevent us from vendoring the world.
-func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
-	for i := range status.Conditions {
-		c := status.Conditions[i]
-		if c.Type == condType {
-			return &c
-		}
-	}
-	return nil
-}