specify the container name when fetching keys from the kube-cert-agent pod

Avoid errors seen when the cluster has been configured to automatically inject additional sidecar containers into every pod. Once the agent pod has more than one container, the Kubernetes API server rejects an exec request that does not name a specific container, so the controller must say which container it means.
parent 0e7ef1637d
commit 4b4cc93ae7
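For context: when a pod has more than one container, the Kubernetes API server rejects an exec request that leaves the container name empty (with an error along the lines of "a container name must be specified for pod ..., choose one of: [...]"), and sidecar injection is exactly what pushes the agent pod past one container. A minimal sketch of an exec request that sets PodExecOptions.Container explicitly; the namespace and pod name here are illustrative, not taken from this commit:

package example

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
)

// buildExecRequest builds a pod exec request that names its target container,
// so the request keeps working even after a sidecar is injected into the pod.
func buildExecRequest(client kubernetes.Interface) *rest.Request {
    return client.CoreV1().
        RESTClient().
        Post().
        Namespace("concierge").      // illustrative
        Resource("pods").
        Name("kube-cert-agent-pod"). // illustrative
        SubResource("exec").
        VersionedParams(&corev1.PodExecOptions{
            Stdout:    true,
            Container: "sleeper", // explicit target; empty would fail on a multi-container pod
            Command:   []string{"pinniped-concierge-kube-cert-agent", "print"},
        }, scheme.ParameterCodec)
}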
@@ -69,6 +69,8 @@ const (
     ClusterInfoNamespace    = "kube-public"
     clusterInfoName         = "cluster-info"
     clusterInfoConfigMapKey = "kubeconfig"
+
+    agentPodContainerName = "sleeper"
 )

 // AgentConfig is the configuration for the kube-cert-agent controller.
@@ -348,7 +350,7 @@ func (c *agentController) loadSigningKey(ctx context.Context, agentPod *corev1.Pod
     }

     // Exec into the agent pod and cat out the certificate and the key.
-    outputJSON, err := c.executor.Exec(ctx, agentPod.Namespace, agentPod.Name, "pinniped-concierge-kube-cert-agent", "print")
+    outputJSON, err := c.executor.Exec(ctx, agentPod.Namespace, agentPod.Name, agentPodContainerName, "pinniped-concierge-kube-cert-agent", "print")
     if err != nil {
         return fmt.Errorf("could not exec into agent pod %s/%s: %w", agentPod.Namespace, agentPod.Name, err)
     }
@@ -532,7 +534,7 @@ func (c *agentController) newAgentDeployment(controllerManagerPod *corev1.Pod) *
                 ImagePullSecrets: imagePullSecrets,
                 Containers: []corev1.Container{
                     {
-                        Name:            "sleeper",
+                        Name:            agentPodContainerName,
                         Image:           c.cfg.ContainerImage,
                         ImagePullPolicy: corev1.PullIfNotPresent,
                         Command:         []string{"pinniped-concierge-kube-cert-agent", "sleep"},
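Pulling the literal "sleeper" into agentPodContainerName means the Deployment spec and the exec call can no longer drift apart. A guard test along these lines could pin the invariant down (hypothetical test; the agentController construction shown is assumed, not taken from this commit):

func TestAgentDeploymentUsesExecContainerName(t *testing.T) {
    c := &agentController{cfg: AgentConfig{ContainerImage: "test-image"}} // assumed minimal setup
    deployment := c.newAgentDeployment(&corev1.Pod{})

    // The container the controller execs into must be the one it deploys.
    require.Equal(t, agentPodContainerName, deployment.Spec.Template.Spec.Containers[0].Name)
}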
@@ -229,7 +229,7 @@ func TestAgentController(t *testing.T) {
     }

     mockExecSucceeds := func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-        executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+        executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sleeper", "pinniped-concierge-kube-cert-agent", "print").
             Return(`{"tls.crt": "dGVzdC1jZXJ0", "tls.key": "dGVzdC1rZXk="}`, nil) // "test-cert" / "test-key"
         dynamicCert.SetCertKeyContent([]byte("test-cert"), []byte("test-key")).
             Return(nil)
@@ -740,7 +740,7 @@ func TestAgentController(t *testing.T) {
             validClusterInfoConfigMap,
         },
         mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sleeper", "pinniped-concierge-kube-cert-agent", "print").
                 Return("", fmt.Errorf("some exec error")).
                 AnyTimes()
         },
@@ -769,7 +769,7 @@ func TestAgentController(t *testing.T) {
             validClusterInfoConfigMap,
         },
         mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sleeper", "pinniped-concierge-kube-cert-agent", "print").
                 Return("bogus-data", nil).
                 AnyTimes()
         },
@@ -798,7 +798,7 @@ func TestAgentController(t *testing.T) {
             validClusterInfoConfigMap,
         },
         mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sleeper", "pinniped-concierge-kube-cert-agent", "print").
                 Return(`{"tls.crt": "invalid"}`, nil).
                 AnyTimes()
         },
@@ -827,7 +827,7 @@ func TestAgentController(t *testing.T) {
             validClusterInfoConfigMap,
         },
         mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sleeper", "pinniped-concierge-kube-cert-agent", "print").
                 Return(`{"tls.crt": "dGVzdAo=", "tls.key": "invalid"}`, nil).
                 AnyTimes()
         },
@@ -856,7 +856,7 @@ func TestAgentController(t *testing.T) {
             validClusterInfoConfigMap,
         },
         mocks: func(t *testing.T, executor *mocks.MockPodCommandExecutorMockRecorder, dynamicCert *mocks.MockDynamicCertPrivateMockRecorder, execCache *cache.Expiring) {
-            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "pinniped-concierge-kube-cert-agent", "print").
+            executor.Exec(gomock.Any(), "concierge", "pinniped-concierge-kube-cert-agent-xyz-1234", "sleeper", "pinniped-concierge-kube-cert-agent", "print").
                 Return(`{"tls.crt": "dGVzdC1jZXJ0", "tls.key": "dGVzdC1rZXk="}`, nil). // "test-cert" / "test-key"
                 AnyTimes()
             dynamicCert.SetCertKeyContent([]byte("test-cert"), []byte("test-key")).
@@ -39,10 +39,10 @@ func (m *MockPodCommandExecutor) EXPECT() *MockPodCommandExecutorMockRecorder {
 }

 // Exec mocks base method.
-func (m *MockPodCommandExecutor) Exec(arg0 context.Context, arg1, arg2 string, arg3 ...string) (string, error) {
+func (m *MockPodCommandExecutor) Exec(arg0 context.Context, arg1, arg2, arg3 string, arg4 ...string) (string, error) {
     m.ctrl.T.Helper()
-    varargs := []interface{}{arg0, arg1, arg2}
-    for _, a := range arg3 {
+    varargs := []interface{}{arg0, arg1, arg2, arg3}
+    for _, a := range arg4 {
         varargs = append(varargs, a)
     }
     ret := m.ctrl.Call(m, "Exec", varargs...)
@@ -52,8 +52,8 @@ func (m *MockPodCommandExecutor) Exec(arg0 context.Context, arg1, arg2 string, arg3 ...string) (string, error) {
 }

 // Exec indicates an expected call of Exec.
-func (mr *MockPodCommandExecutorMockRecorder) Exec(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
+func (mr *MockPodCommandExecutorMockRecorder) Exec(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call {
     mr.mock.ctrl.T.Helper()
-    varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
+    varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...)
     return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockPodCommandExecutor)(nil).Exec), varargs...)
 }
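The two hunks above touch generated code; after an interface change, files like these are normally regenerated with mockgen rather than edited by hand. A sketch of the kind of directive involved, where the file and package names are illustrative and not taken from this repo:

// Illustrative go:generate directive (source mode); real paths may differ.
//go:generate mockgen -destination=mocks/pod_command_executor_mock.go -package=mocks -source=pod_command_executor.go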
@@ -16,7 +16,7 @@ import (

 // PodCommandExecutor can exec a command in a pod located via namespace and name.
 type PodCommandExecutor interface {
-    Exec(ctx context.Context, podNamespace string, podName string, commandAndArgs ...string) (stdoutResult string, err error)
+    Exec(ctx context.Context, podNamespace string, podName string, containerName string, commandAndArgs ...string) (stdoutResult string, err error)
 }

 type kubeClientPodCommandExecutor struct {
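Widening the interface breaks every implementation, including test doubles, which is why the generated mock above changed as well. For comparison, a hand-rolled fake satisfying the new signature could look like this (hypothetical helper, not part of the commit):

package example

import "context"

// fakePodCommandExecutor is a hypothetical test double for PodCommandExecutor.
type fakePodCommandExecutor struct {
    output string
    err    error
    calls  [][]string // one entry per call: namespace, name, container, command...
}

func (f *fakePodCommandExecutor) Exec(ctx context.Context, podNamespace string, podName string, containerName string, commandAndArgs ...string) (string, error) {
    f.calls = append(f.calls, append([]string{podNamespace, podName, containerName}, commandAndArgs...))
    return f.output, f.err
}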
@@ -32,7 +32,7 @@ func NewPodCommandExecutor(kubeConfig *restclient.Config, kubeClient kubernetes.Interface
     return &kubeClientPodCommandExecutor{kubeConfig: kubeConfig, kubeClient: kubeClient}
 }

-func (s *kubeClientPodCommandExecutor) Exec(ctx context.Context, podNamespace string, podName string, commandAndArgs ...string) (string, error) {
+func (s *kubeClientPodCommandExecutor) Exec(ctx context.Context, podNamespace string, podName string, containerName string, commandAndArgs ...string) (string, error) {
     request := s.kubeClient.
         CoreV1().
         RESTClient().
@@ -42,11 +42,12 @@ func (s *kubeClientPodCommandExecutor) Exec(ctx context.Context, podNamespace string, podName string, commandAndArgs ...string) (string, error) {
         Name(podName).
         SubResource("exec").
         VersionedParams(&v1.PodExecOptions{
-            Stdin:   false,
-            Stdout:  true,
-            Stderr:  false,
-            TTY:     false,
-            Command: commandAndArgs,
+            Stdin:     false,
+            Stdout:    true,
+            Stderr:    false,
+            TTY:       false,
+            Container: containerName,
+            Command:   commandAndArgs,
         }, scheme.ParameterCodec)

     executor, err := remotecommand.NewSPDYExecutor(s.kubeConfig, "POST", request.URL())
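The hunk ends at the SPDY executor, so the rest of the method is not shown. For completeness, a sketch of how such an executor is typically driven to capture stdout, in standard client-go style (assumed continuation, not the commit's exact code; older client-go versions expose Stream instead of StreamWithContext):

package example

import (
    "bytes"
    "context"

    "k8s.io/client-go/tools/remotecommand"
)

// runExec streams the command's stdout into a buffer and returns it as a
// string, matching the (stdoutResult string, err error) shape of Exec.
func runExec(ctx context.Context, executor remotecommand.Executor) (string, error) {
    var stdout bytes.Buffer
    if err := executor.StreamWithContext(ctx, remotecommand.StreamOptions{
        Stdout: &stdout, // Stdin, Stderr, and TTY stay off, mirroring PodExecOptions above
    }); err != nil {
        return "", err
    }
    return stdout.String(), nil
}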
@@ -37,7 +37,7 @@ func TestSecureTLS(t *testing.T) {
     // build this exactly like our production code does
     podCommandExecutor := NewPodCommandExecutor(client.JSONConfig, client.Kubernetes)

-    got, err := podCommandExecutor.Exec(context.Background(), "podNamespace", "podName", "command", "arg1", "arg2")
+    got, err := podCommandExecutor.Exec(context.Background(), "podNamespace", "podName", "containerName", "command", "arg1", "arg2")
     require.Equal(t, &errors.StatusError{}, err)
     require.Empty(t, got)
@@ -55,8 +55,9 @@ func (mr *MockConnMockRecorder) Bind(arg0, arg1 interface{}) *gomock.Call {
 // Close mocks base method.
 func (m *MockConn) Close() error {
     m.ctrl.T.Helper()
-    m.ctrl.Call(m, "Close")
-    return nil
+    ret := m.ctrl.Call(m, "Close")
+    ret0, _ := ret[0].(error)
+    return ret0
 }

 // Close indicates an expected call of Close.
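This last hunk fixes the regenerated MockConn so Close propagates the stubbed return value instead of always returning nil. A sketch of the test behavior this enables (hypothetical usage; the mocks package and constructor name are assumed, following gomock's usual conventions):

func TestConnCloseError(t *testing.T) {
    ctrl := gomock.NewController(t)
    mockConn := mocks.NewMockConn(ctrl) // NewMockConn is the conventional generated constructor

    mockConn.EXPECT().Close().Return(errors.New("close failed"))

    // Before this change the mock discarded the stubbed value and returned nil;
    // now the error reaches the code under test.
    require.EqualError(t, mockConn.Close(), "close failed")
}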