More tests for the PublisherController
- Also, don't repeat `spec.Parallel()` because, according to the docs for the spec package, "options are inherited by subgroups and subspecs"
- Two tests are left pending to be filled in on the next commit
parent a5f7de429d
commit e0cac97084
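For context on the first bullet: options passed to spec.Run are applied to every nested group, so a single spec.Parallel() at the top level is enough. A minimal sketch of the pattern this commit moves to (illustrative test names, not part of this commit):

package example_test

import (
	"testing"

	"github.com/sclevine/spec"
	"github.com/sclevine/spec/report"
)

func TestExample(t *testing.T) {
	// spec.Parallel() passed once to spec.Run is inherited by all
	// subgroups and subspecs, so it is not repeated on each when().
	spec.Run(t, "example", func(t *testing.T, when spec.G, it spec.S) {
		when("some group", func() {
			it("still runs in parallel", func() {
				// test body
			})
		})
	}, spec.Parallel(), spec.Report(report.Terminal{}))
}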
@@ -64,7 +64,7 @@ func TestRun(t *testing.T) {
 				err := run(envGetter, tokenExchanger, buffer, 30*time.Second)
 				r.EqualError(err, "failed to login: environment variable not set: PLACEHOLDER_NAME_K8S_API_ENDPOINT")
 			})
-		}, spec.Parallel())
+		})

 		when("the token exchange fails", func() {
 			it.Before(func() {
@@ -77,7 +77,7 @@ func TestRun(t *testing.T) {
 				err := run(envGetter, tokenExchanger, buffer, 30*time.Second)
 				r.EqualError(err, "failed to login: some error")
 			})
-		}, spec.Parallel())
+		})

 		when("the JSON encoder fails", func() {
 			it.Before(func() {
@@ -92,7 +92,7 @@ func TestRun(t *testing.T) {
 				err := run(envGetter, tokenExchanger, &library.ErrorWriter{ReturnError: fmt.Errorf("some IO error")}, 30*time.Second)
 				r.EqualError(err, "failed to marshal response to stdout: some IO error")
 			})
-		}, spec.Parallel())
+		})

 		when("the token exchange times out", func() {
 			it.Before(func() {
@@ -112,7 +112,7 @@ func TestRun(t *testing.T) {
 				err := run(envGetter, tokenExchanger, buffer, 1*time.Millisecond)
 				r.EqualError(err, "failed to login: context deadline exceeded")
 			})
-		}, spec.Parallel())
+		})

 		when("the token exchange succeeds", func() {
 			var actualToken, actualCaBundle, actualAPIEndpoint string
@@ -146,6 +146,6 @@ func TestRun(t *testing.T) {
 			}`
 				r.JSONEq(expected, buffer.String())
 			})
-		}, spec.Parallel())
-	}, spec.Report(report.Terminal{}))
+		})
+	}, spec.Parallel(), spec.Report(report.Terminal{}))
 }

@@ -45,8 +45,14 @@ func NewPublisherController(namespace string, kubeClient kubernetes.Interface, p
 }

 func (c *publisherController) Sync(ctx controller.Context) error {
-	configMap, _ := c.kubeClient.CoreV1().ConfigMaps(clusterInfoNamespace).Get(ctx.Context, clusterInfoName, metav1.GetOptions{})
-	kubeConfig := configMap.Data[clusterInfoConfigMapKey] // TODO also handle when the key is not found
+	configMap, err := c.kubeClient.CoreV1().ConfigMaps(clusterInfoNamespace).Get(ctx.Context, clusterInfoName, metav1.GetOptions{})
+	if err != nil {
+		return nil // TODO should this return an error? and should it log?
+	}
+	kubeConfig, kubeConfigPresent := configMap.Data[clusterInfoConfigMapKey]
+	if !kubeConfigPresent {
+		return nil // TODO should this return an error? and should it log?
+	}

 	config, _ := clientcmd.Load([]byte(kubeConfig))
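The key-presence check added above relies on Go's comma-ok form of a map index expression. A standalone sketch of the idiom (hypothetical data, not from this commit):

package main

import "fmt"

func main() {
	data := map[string]string{"kubeconfig": "apiVersion: v1"}

	// The comma-ok form reports whether the key was present,
	// distinguishing a missing key from an empty string value.
	kubeConfig, kubeConfigPresent := data["kubeconfig"]
	if !kubeConfigPresent {
		fmt.Println("kubeconfig key not found")
		return
	}
	fmt.Println(kubeConfig)
}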
@@ -25,12 +25,6 @@ import (
 	placeholderfake "github.com/suzerain-io/placeholder-name-client-go/pkg/generated/clientset/versioned/fake"
 )

-// TODO: add test for ConfigMap does NOT exist
-// TODO: add test for when LoginDiscoveryConfig already exists
-// There should be 4 tests of the above cross product
-
-// TODO test when the expected key in the configmap does not exist
-
 func TestRun(t *testing.T) {
 	spec.Run(t, "publisher", func(t *testing.T, when spec.G, it spec.S) {
 		const installedInNamespace = "some-namespace"
@@ -79,16 +73,15 @@ func TestRun(t *testing.T) {
 			}
 		})

-		when("when there is a cluster-info ConfigMap in the kube-public namespace", func() {
+		when("there is a cluster-info ConfigMap in the kube-public namespace", func() {
 			const caData = "c29tZS1jZXJ0aWZpY2F0ZS1hdXRob3JpdHktZGF0YQo=" // "some-certificate-authority-data" base64 encoded
 			const kubeServerURL = "https://some-server"
+			var clusterInfoConfigMap *corev1.ConfigMap

+			when("the ConfigMap has the expected `kubeconfig` top-level data key", func() {
 				it.Before(func() {
-					clusterInfo := &corev1.ConfigMap{
-						ObjectMeta: metav1.ObjectMeta{
-							Name:      "cluster-info",
-							Namespace: "kube-public",
-						},
+					clusterInfoConfigMap = &corev1.ConfigMap{
+						ObjectMeta: metav1.ObjectMeta{Name: "cluster-info", Namespace: "kube-public"},
 						// Note that go fmt puts tabs in our file, which we must remove from our configmap yaml below.
 						Data: map[string]string{
 							"kubeconfig": strings.ReplaceAll(`
@@ -102,15 +95,15 @@ func TestRun(t *testing.T) {
 							"uninteresting-key": "uninteresting-value",
 						},
 					}
-					err := kubeClient.Tracker().Add(clusterInfo)
+					err := kubeClient.Tracker().Add(clusterInfoConfigMap)
 					r.NoError(err)
 				})

-			when("when the LoginDiscoveryConfig does not already exist", func() {
+				when("the LoginDiscoveryConfig does not already exist", func() {
 					it("creates a LoginDiscoveryConfig", func() {
 						defer timeoutContextCancel()

-					err := controller.TestSync(t, subject, *controllerContext) // Call the controller's sync method once
+						err := controller.TestSync(t, subject, *controllerContext)
 						r.NoError(err)

 						expectedLoginDiscoveryConfigGVR, expectedLoginDiscoveryConfig := expectedLoginDiscoveryConfig(installedInNamespace, kubeServerURL, caData)
@@ -123,7 +116,70 @@ func TestRun(t *testing.T) {
 						r.Equal(expectedLoginDiscoveryConfig, actualCreatedObject)
 						r.Equal(expectedActions, placeholderClient.Actions())
 					})
-			}, spec.Parallel())
-		}, spec.Parallel())
-	}, spec.Report(report.Terminal{}))
+				})
+
+				when("the LoginDiscoveryConfig already exists", func() {
+					when("the LoginDiscoveryConfig is already up to date according to the data in the ConfigMap", func() {
+						it.Before(func() {
+							// TODO add a fake LoginDiscoveryConfig to the placeholderClient whose data matches the ConfigMap's data
+						})
+
+						it.Pend("does not update the LoginDiscoveryConfig to avoid unnecessary etcd writes", func() {
+							err := controller.TestSync(t, subject, *controllerContext)
+							r.NoError(err)
+							r.Empty(placeholderClient.Actions())
+						})
+					})
+
+					when("the LoginDiscoveryConfig is stale compared to the data in the ConfigMap", func() {
+						it.Before(func() {
+							// TODO add a fake LoginDiscoveryConfig to the placeholderClient whose data does not match the ConfigMap's data
+						})
+
+						it.Pend("updates the existing LoginDiscoveryConfig", func() {
+							// TODO
+						})
+					})
+				})
+			})
+
+			when("the ConfigMap is missing the expected `kubeconfig` top-level data key", func() {
+				it.Before(func() {
+					clusterInfoConfigMap = &corev1.ConfigMap{
+						ObjectMeta: metav1.ObjectMeta{Name: "cluster-info", Namespace: "kube-public"},
+						Data: map[string]string{
+							"these are not the droids you're looking for": "uninteresting-value",
+						},
+					}
+					err := kubeClient.Tracker().Add(clusterInfoConfigMap)
+					r.NoError(err)
+				})
+
+				it("keeps waiting for it to exist", func() {
+					err := controller.TestSync(t, subject, *controllerContext)
+					r.NoError(err)
+					r.Empty(placeholderClient.Actions())
+				})
+			})
+		})
+
+		when("there is not a cluster-info ConfigMap in the kube-public namespace", func() {
+			it.Before(func() {
+				unrelatedConfigMap := &corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "oops this is not the cluster-info ConfigMap",
+						Namespace: "kube-public",
+					},
+				}
+				err := kubeClient.Tracker().Add(unrelatedConfigMap)
+				r.NoError(err)
+			})
+
+			it("keeps waiting for one", func() {
+				err := controller.TestSync(t, subject, *controllerContext)
+				r.NoError(err)
+				r.Empty(placeholderClient.Actions())
+			})
+		})
+	}, spec.Parallel(), spec.Report(report.Terminal{}))
 }
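The two pending tests above use it.Pend, which registers a spec without running its body, so the suite compiles and reports the test as pending until it is filled in. A minimal sketch (illustrative names, not part of this commit):

package example_test

import (
	"testing"

	"github.com/sclevine/spec"
)

func TestPending(t *testing.T) {
	spec.Run(t, "pending", func(t *testing.T, when spec.G, it spec.S) {
		// it.Pend keeps the spec in the suite but skips it at run time.
		it.Pend("will be filled in on the next commit", func() {
			// TODO
		})
	})
}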