diff --git a/.drone.yml b/.drone.yml index 87b2cc8..b4b91b8 100644 --- a/.drone.yml +++ b/.drone.yml @@ -13,12 +13,42 @@ volumes: steps: - name: Debugging information image: bv11-cr01.bessems.eu/library/packer-extended + pull: always commands: - ansible --version - ovftool --version - packer --version - yamllint --version + +- name: Linting + depends_on: + - Debugging information + image: bv11-cr01.bessems.eu/library/packer-extended + pull: always + commands: + - | + yamllint -d "{extends: relaxed, rules: {line-length: disable}}" \ + ansible \ + packer/preseed/UbuntuServer22.04/user-data \ + scripts + +- name: Install Ansible Galaxy collections + depends_on: + - Linting + image: bv11-cr01.bessems.eu/library/packer-extended + pull: always + commands: + - | + ansible-galaxy collection install \ + -r ansible/requirements.yml \ + -p ./ansible/collections + volumes: + - name: scratch + path: /scratch + - name: Kubernetes Bootstrap Appliance + depends_on: + - Install Ansible Galaxy collections image: bv11-cr01.bessems.eu/library/packer-extended pull: always commands: @@ -27,20 +57,13 @@ steps: packer/preseed/UbuntuServer22.04/user-data - | export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml) - - | - yamllint -d "{extends: relaxed, rules: {line-length: disable}}" \ - ansible \ - packer/preseed/UbuntuServer22.04/user-data \ - scripts - - | - ansible-galaxy install \ - -r ansible/requirements.yml - | packer init -upgrade \ ./packer - | packer validate \ - -var vm_name=$DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \ + -only=vsphere-iso.bootstrap \ + -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \ -var docker_username=$${DOCKER_USERNAME} \ -var docker_password=$${DOCKER_PASSWORD} \ -var repo_username=$${REPO_USERNAME} \ @@ -52,7 +75,8 @@ steps: - | packer build \ -on-error=cleanup -timestamp-ui \ - -var vm_name=$DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \ + -only=vsphere-iso.bootstrap \ + -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \ -var docker_username=$${DOCKER_USERNAME} \ -var docker_password=$${DOCKER_PASSWORD} \ -var repo_username=$${REPO_USERNAME} \ @@ -80,7 +104,70 @@ steps: path: /output - name: scratch path: /scratch + +- name: Kubernetes Upgrade Appliance + depends_on: + - Install Ansible Galaxy collections + image: bv11-cr01.bessems.eu/library/packer-extended + pull: always + commands: + - | + sed -i -e "s/<>/$${SSH_PASSWORD}/g" \ + packer/preseed/UbuntuServer22.04/user-data + - | + export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml) + - | + packer init -upgrade \ + ./packer + - | + packer validate \ + -only=vsphere-iso.upgrade \ + -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \ + -var docker_username=$${DOCKER_USERNAME} \ + -var docker_password=$${DOCKER_PASSWORD} \ + -var repo_username=$${REPO_USERNAME} \ + -var repo_password=$${REPO_PASSWORD} \ + -var ssh_password=$${SSH_PASSWORD} \ + -var vsphere_password=$${VSPHERE_PASSWORD} \ + -var k8s_version=$K8S_VERSION \ + ./packer + - | + packer build \ + -on-error=cleanup -timestamp-ui \ + -only=vsphere-iso.upgrade \ + -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \ + -var docker_username=$${DOCKER_USERNAME} \ + -var docker_password=$${DOCKER_PASSWORD} \ + -var repo_username=$${REPO_USERNAME} \ + -var repo_password=$${REPO_PASSWORD} \ + -var ssh_password=$${SSH_PASSWORD} \ + -var
vsphere_password=$${VSPHERE_PASSWORD} \ + -var k8s_version=$K8S_VERSION \ + ./packer + environment: + DOCKER_USERNAME: + from_secret: docker_username + DOCKER_PASSWORD: + from_secret: docker_password + # PACKER_LOG: 1 + REPO_USERNAME: + from_secret: repo_username + REPO_PASSWORD: + from_secret: repo_password + SSH_PASSWORD: + from_secret: ssh_password + VSPHERE_PASSWORD: + from_secret: vsphere_password + volumes: + - name: output + path: /output + - name: scratch + path: /scratch + - name: Remove temporary resources + depends_on: + - Kubernetes Bootstrap Appliance + - Kubernetes Upgrade Appliance image: bv11-cr01.bessems.eu/library/packer-extended commands: - | diff --git a/ansible/playbook.yml b/ansible/playbook.yml index d0763e8..4939995 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -3,6 +3,7 @@ gather_facts: false vars_files: - metacluster.yml + - workloadcluster.yml become: true roles: - os diff --git a/ansible/roles/assets/tasks/containerimages.yml b/ansible/roles/assets/tasks/containerimages.yml index 74ef9ed..664fce9 100644 --- a/ansible/roles/assets/tasks/containerimages.yml +++ b/ansible/roles/assets/tasks/containerimages.yml @@ -14,13 +14,22 @@ loop_control: label: "{{ item.dest | basename }}" -- name: Parse helm charts for container images +- name: Parse metacluster helm charts for container images ansible.builtin.shell: cmd: "{{ item.value.helm.parse_logic }}" chdir: /opt/metacluster/helm-charts/{{ item.key }} - register: chartimages + register: chartimages_metacluster when: item.value.helm is defined - loop: "{{ lookup('ansible.builtin.dict', components) }}" + loop: "{{ query('ansible.builtin.dict', components) }}" + loop_control: + label: "{{ item.key }}" + +- name: Parse workloadcluster helm charts for container images + ansible.builtin.shell: + cmd: "{{ item.value.parse_logic }}" + chdir: /opt/workloadcluster/helm-charts/{{ item.value.namespace }}/{{ item.key }} + register: chartimages_workloadcluster + loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}" loop_control: label: "{{ item.key }}" @@ -29,7 +38,7 @@ containerimages_{{ item.source }}: "{{ item.results }}" loop: - source: charts - results: "{{ chartimages | json_query('results[*].stdout_lines') | select() | flatten | list }}" + results: "{{ (chartimages_metacluster | json_query('results[*].stdout_lines')) + (chartimages_workloadcluster | json_query('results[*].stdout_lines')) | select() | flatten | list }}" - source: kubeadm results: "{{ kubeadmimages.stdout_lines }}" - source: manifests diff --git a/ansible/roles/assets/tasks/git.yml b/ansible/roles/assets/tasks/git.yml deleted file mode 100644 index f4d2f8f..0000000 --- a/ansible/roles/assets/tasks/git.yml +++ /dev/null @@ -1,5 +0,0 @@ -- name: Clone git repository - ansible.builtin.git: - repo: "{{ platform.gitops.repository.uri }}" - version: "{{ platform.gitops.repository.revision }}" - dest: /opt/metacluster/git-repositories/gitops diff --git a/ansible/roles/assets/tasks/helm.yml b/ansible/roles/assets/tasks/helm.yml index f03d073..8e82710 100644 --- a/ansible/roles/assets/tasks/helm.yml +++ b/ansible/roles/assets/tasks/helm.yml @@ -3,17 +3,29 @@ name: "{{ item.name }}" repo_url: "{{ item.url }}" state: present - loop: "{{ platform.helm_repositories }}" + loop: "{{ platform.helm_repositories + downstream.helm_repositories }}" -- name: Fetch helm charts +- name: Fetch helm charts for metacluster ansible.builtin.command: cmd: helm fetch {{ item.value.helm.chart }} --untar --version {{ item.value.helm.version }} chdir: 
/opt/metacluster/helm-charts when: item.value.helm is defined - register: helmcharts - loop: "{{ lookup('ansible.builtin.dict', components) }}" + register: helmcharts_metacluster + loop: "{{ query('ansible.builtin.dict', components) }}" loop_control: label: "{{ item.key }}" retries: 5 delay: 5 - until: helmcharts is not failed + until: helmcharts_metacluster is not failed + +- name: Fetch helm charts for workloadcluster + ansible.builtin.command: + cmd: helm fetch {{ item.value.chart }} --untardir ./{{ item.value.namespace }} --untar --version {{ item.value.version }} + chdir: /opt/workloadcluster/helm-charts + register: helmcharts_workloadcluster + loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}" + loop_control: + label: "{{ item.key }}" + retries: 5 + delay: 5 + until: helmcharts_workloadcluster is not failed diff --git a/ansible/roles/assets/tasks/k3s.yml b/ansible/roles/assets/tasks/k3s.yml index 5da958f..7b4c2c1 100644 --- a/ansible/roles/assets/tasks/k3s.yml +++ b/ansible/roles/assets/tasks/k3s.yml @@ -21,7 +21,7 @@ - name: Download K3s install script ansible.builtin.get_url: - url: https://get.k3s.io + url: https://raw.githubusercontent.com/k3s-io/k3s/{{ platform.k3s.version | urlencode }}/install.sh dest: /opt/metacluster/k3s/install.sh owner: root group: root diff --git a/ansible/roles/assets/tasks/main.yml b/ansible/roles/assets/tasks/main.yml index d565b5c..ad8bc81 100644 --- a/ansible/roles/assets/tasks/main.yml +++ b/ansible/roles/assets/tasks/main.yml @@ -12,10 +12,13 @@ - /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }} - /opt/metacluster/cluster-api/ipam-in-cluster/{{ components.clusterapi.management.version.ipam_incluster }} - /opt/metacluster/container-images - - /opt/metacluster/git-repositories/gitops + - /opt/metacluster/git-repositories - /opt/metacluster/helm-charts - /opt/metacluster/k3s - /opt/metacluster/kube-vip + - /opt/workloadcluster/git-repositories/gitops/charts + - /opt/workloadcluster/git-repositories/gitops/values + - /opt/workloadcluster/helm-charts - /opt/workloadcluster/node-templates - /var/lib/rancher/k3s/agent/images - /var/lib/rancher/k3s/server/manifests diff --git a/ansible/roles/assets/tasks/manifests.yml b/ansible/roles/assets/tasks/manifests.yml index adf2094..52068fb 100644 --- a/ansible/roles/assets/tasks/manifests.yml +++ b/ansible/roles/assets/tasks/manifests.yml @@ -2,9 +2,9 @@ - name: Aggregate chart_values into dict ansible.builtin.set_fact: - chart_values: "{{ chart_values | default({}) | combine({ (item.key | regex_replace('[^A-Za-z0-9]', '')): { 'chart_values': (item.value.helm.chart_values | from_yaml) } }) }}" + metacluster_chartvalues: "{{ metacluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.helm.chart_values | from_yaml) } }) }}" when: item.value.helm.chart_values is defined - loop: "{{ lookup('ansible.builtin.dict', components) }}" + loop: "{{ query('ansible.builtin.dict', components) }}" loop_control: label: "{{ item.key }}" @@ -14,12 +14,29 @@ content: >- {{ { 'components': ( - chart_values | + metacluster_chartvalues | combine({ 'clusterapi': components.clusterapi }) | combine({ 'kubevip' : components.kubevip }) ) } | to_nice_yaml(indent=2, width=4096) }} + - name: Aggregate chart_values into dict + ansible.builtin.set_fact: + workloadcluster_chartvalues: "{{ workloadcluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.chart_values | default('') | from_yaml) } }) }}" 
+ # when: item.value.chart_values is defined + loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}" + loop_control: + label: "{{ item.key }}" + + - name: Write dict to vars_file + ansible.builtin.copy: + dest: /opt/firstboot/ansible/vars/workloadcluster.yml + content: >- + {{ + { 'downstream_components': ( workloadcluster_chartvalues ) + } | to_nice_yaml(indent=2, width=4096) + }} + - name: Download ClusterAPI manifests ansible.builtin.get_url: url: "{{ item.url }}" @@ -65,6 +82,12 @@ delay: 5 until: clusterapi_manifests is not failed +- name: Update cluster-template with image tags + ansible.builtin.replace: + dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/cluster-template.yaml + regexp: ':\${CPI_IMAGE_K8S_VERSION}' + replace: ":{{ components.clusterapi.management.version.cpi_vsphere }}" + - name: Download kube-vip RBAC manifest ansible.builtin.get_url: url: https://kube-vip.io/manifests/rbac.yaml @@ -81,6 +104,6 @@ # owner: root # group: root # mode: 0600 -# loop: "{{ lookup('ansible.builtin.dict', components) | map(attribute='value.manifests') | list | select('defined') | flatten }}" +# loop: "{{ query('ansible.builtin.dict', components) | map(attribute='value.manifests') | list | select('defined') | flatten }}" # loop_control: # label: "{{ item.type ~ '/' ~ item.name }}" diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/playbook.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/playbook.yml index 629a28e..02f94aa 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/playbook.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/playbook.yml @@ -5,6 +5,7 @@ vars_files: - defaults.yml - metacluster.yml + - workloadcluster.yml # become: true roles: - vapp diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/filter_plugins/netaddr.py b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/filter_plugins/netaddr.py deleted file mode 100644 index cc788ef..0000000 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/filter_plugins/netaddr.py +++ /dev/null @@ -1,14 +0,0 @@ -import netaddr - -def netaddr_iter_iprange(ip_start, ip_end): - return [str(ip) for ip in netaddr.iter_iprange(ip_start, ip_end)] - -class FilterModule(object): - ''' Ansible filter. Interface to netaddr methods. 
- https://pypi.org/project/netaddr/ - ''' - - def filters(self): - return { - 'netaddr_iter_iprange': netaddr_iter_iprange - } \ No newline at end of file diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/git.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/git.yml index 10fab3b..3da8e60 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/git.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/git.yml @@ -8,7 +8,7 @@ create_namespace: true wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.gitea.chart_values }}" + values: "{{ components['gitea'].chart_values }}" - name: Ensure gitea API availability ansible.builtin.uri: @@ -109,16 +109,28 @@ loop: - organization: mc body: - name: GitOps.Config - auto_init: true - default_branch: main - description: GitOps manifests - - organization: wl + name: GitOps.ClusterAPI + # auto_init: true + # default_branch: main + description: ClusterAPI manifests + - organization: mc body: - name: Template.GitOps.Config + name: GitOps.Config # auto_init: true # default_branch: main description: GitOps manifests + - organization: wl + body: + name: GitOps.Config + # auto_init: true + # default_branch: main + description: GitOps manifests + - organization: wl + body: + name: GitOps.HelmCharts + # auto_init: true + # default_branch: main + description: Helm charts loop_control: label: "{{ item.organization ~ '/' ~ item.body.name }}" diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/gitops.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/gitops.yml index 865b300..8157a00 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/gitops.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/gitops.yml @@ -8,7 +8,7 @@ create_namespace: true wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.argocd.chart_values }}" + values: "{{ components['argo-cd'].chart_values }}" - name: Ensure argo-cd API availability ansible.builtin.uri: @@ -39,24 +39,29 @@ mode: 0600 vars: _template: - name: argocd-gitrepo-metacluster + name: gitrepo-mc-gitopsconfig namespace: argo-cd - uid: "{{ lookup('ansible.builtin.password', '/dev/null length=5 chars=ascii_lowercase,digits seed=inventory_hostname') }}" - privatekey: "{{ lookup('ansible.builtin.file', '~/.ssh/git_rsa_id') | indent(4, true) }}" + url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git notify: - Apply manifests - name: Create applicationset ansible.builtin.template: src: applicationset.j2 - dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml + dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml owner: root group: root mode: 0600 vars: _template: - name: argocd-applicationset-metacluster - namespace: argo-cd + application: + name: applicationset-metacluster + namespace: argo-cd + cluster: + url: https://kubernetes.default.svc + repository: + url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git + revision: main notify: - Apply manifests diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/init.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/init.yml index f01c1fe..ddbcd7e 100644 --- 
a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/init.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/init.yml @@ -78,6 +78,6 @@ src: registries.j2 vars: _template: - data: "{{ source_registries }}" + registries: "{{ source_registries }}" hv: fqdn: "{{ vapp['metacluster.fqdn'] }}" diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/registry.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/registry.yml index 6589b12..fe4a99d 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/registry.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/registry.yml @@ -8,7 +8,7 @@ create_namespace: true wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.harbor.chart_values }}" + values: "{{ components['harbor'].chart_values }}" - name: Ensure harbor API availability ansible.builtin.uri: diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/storage.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/storage.yml index 059e0a7..17c8f15 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/storage.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/metacluster/tasks/storage.yml @@ -7,7 +7,7 @@ create_namespace: true wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.longhorn.chart_values }}" + values: "{{ components['longhorn'].chart_values }}" - name: Ensure longhorn API availability ansible.builtin.uri: diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/clusterapi.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/clusterapi.yml index 4281c54..575fe15 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/clusterapi.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/clusterapi.yml @@ -47,41 +47,27 @@ resourcepool: "{{ vcenter_info.resourcepool }}" folder: "{{ vcenter_info.folder }}" cluster: - nodetemplate: "{{ (components.clusterapi.workload.node_template.url | basename | split('.'))[:-1] | join('.') }}" + nodetemplate: "{{ nodetemplate_inventorypath }}" publickey: "{{ vapp['guestinfo.rootsshkey'] }}" version: "{{ components.clusterapi.workload.version.k8s }}" vip: "{{ vapp['workloadcluster.vip'] }}" -- name: WORKAROUND - Update image references to use local registry - ansible.builtin.replace: - dest: "{{ item }}" - regexp: '([ ]+image:[ "]+)(?!({{ _template.pattern }}|"{{ _template.pattern }}))' - replace: '\1{{ _template.pattern }}' - vars: - fileglobs: - - "{{ query('ansible.builtin.fileglob', '/opt/metacluster/cluster-api/cni-calico/' ~ components.clusterapi.workload.version.calico ~ '/*.yaml') }}" - - "{{ query('ansible.builtin.fileglob', '/opt/metacluster/cluster-api/infrastructure-vsphere/' ~ components.clusterapi.management.version.infrastructure_vsphere ~ '/*.yaml') }}" - _template: - pattern: registry.{{ vapp['metacluster.fqdn'] }}/library/ - loop: "{{ fileglobs[0:] | flatten | select }}" - loop_control: - label: "{{ item | basename }}" - when: - - item is not search("components.yaml|metadata.yaml") - -- name: Generate kustomization template +- name: Generate cluster-template kustomization manifest ansible.builtin.template: src: 
kustomization.cluster-template.j2 dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/kustomization.yaml vars: _template: - additionaldisk: "{{ vapp['workloadcluster.additionaldisk'] }}" network: fqdn: "{{ vapp['metacluster.fqdn'] }}" dnsserver: "{{ vapp['guestinfo.dnsserver'] }}" + nodesize: + cpu: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].cpu }}" + memory: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].memory }}" rootca: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}" runcmds: - update-ca-certificates + registries: "{{ source_registries }}" - name: Store custom cluster-template ansible.builtin.copy: @@ -121,7 +107,8 @@ clustersize: >- {{ { 'controlplane': vapp['deployment.type'] | regex_findall('^cp(\d)+') | first, - 'workers': vapp['deployment.type'] | regex_findall('w(\d)+$') | first + 'worker': vapp['deployment.type'] | regex_findall('w(\d)+') | first, + 'workerstorage': vapp['deployment.type'] | regex_findall('ws(\d)+$') | first } }} - name: Generate workload cluster manifest @@ -130,41 +117,51 @@ clusterctl generate cluster \ {{ vapp['workloadcluster.name'] | lower }} \ --control-plane-machine-count {{ clustersize.controlplane }} \ - --worker-machine-count {{ clustersize.workers }} \ + --worker-machine-count {{ clustersize.worker }} \ --from ./custom-cluster-template.yaml \ --config ./clusterctl.yaml \ --kubeconfig {{ kubeconfig.path }} chdir: /opt/metacluster/cluster-api register: clusterctl_newcluster -- name: Initialize tempfile +- name: Initialize tempfolder ansible.builtin.tempfile: - state: file + state: directory register: capi_clustermanifest - name: Save workload cluster manifest ansible.builtin.copy: - dest: "{{ capi_clustermanifest.path }}" + dest: "{{ capi_clustermanifest.path }}/new-cluster.yaml" content: "{{ clusterctl_newcluster.stdout }}" - name: Split manifest into separate files ansible.builtin.shell: cmd: >- kubectl slice \ - -f {{ capi_clustermanifest.path }} \ - -o /opt/metacluster/cluster-api/new-cluster + -f {{ capi_clustermanifest.path }}/new-cluster.yaml \ + -o {{ capi_clustermanifest.path }}/manifests -- name: Cleanup tempfile - ansible.builtin.file: - path: "{{ capi_clustermanifest.path }}" - state: absent - when: capi_clustermanifest.path is defined +- name: Generate nodepool kustomization manifest + ansible.builtin.template: + src: kustomization.nodepool.j2 + dest: "{{ capi_clustermanifest.path }}/kustomization.yaml" + vars: + _template: + cluster: + name: "{{ vapp['workloadcluster.name'] }}" + nodepool: + size: "{{ clustersize.workerstorage }}" + additionaldisk: "{{ vapp['workloadcluster.additionaldisk'] }}" + +- name: Store nodepool manifest + ansible.builtin.copy: + dest: "{{ capi_clustermanifest.path }}/manifests/nodepool-worker-storage.yaml" + content: "{{ lookup('kubernetes.core.kustomize', dir=capi_clustermanifest.path) }}" - name: Create in-cluster IpPool - kubernetes.core.k8s: - template: ippool.j2 - state: present - kubeconfig: "{{ kubeconfig.path }}" + ansible.builtin.template: + src: ippool.j2 + dest: "{{ capi_clustermanifest.path }}/manifests/inclusterippool-{{ _template.cluster.name }}.yml" vars: _template: cluster: @@ -176,6 +173,40 @@ prefix: "{{ vapp['guestinfo.prefixlength'] }}" gateway: "{{ vapp['guestinfo.gateway'] }}" +- name: Initialize/Push git repository + ansible.builtin.shell: + cmd: | + git init + git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}" + git config 
--global user.name "administrator" + git checkout -b main + git add ./manifests + git commit -m "Upload manifests" + git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git + git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git --all + chdir: "{{ capi_clustermanifest.path }}" + +- name: Cleanup tempfolder + ansible.builtin.file: + path: "{{ capi_clustermanifest.path }}" + state: absent + when: capi_clustermanifest.path is defined + +- name: Configure Cluster API repository + ansible.builtin.template: + src: gitrepo.j2 + dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml + owner: root + group: root + mode: 0600 + vars: + _template: + name: gitrepo-mc-gitopsclusterapi + namespace: argo-cd + url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git + notify: + - Apply manifests + - name: WORKAROUND - Wait for ingress ACME requests to complete ansible.builtin.shell: cmd: >- @@ -187,13 +218,30 @@ retries: "{{ playbook.retries }}" delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}" -- name: Apply workload cluster manifest - kubernetes.core.k8s: - definition: >- - {{ clusterctl_newcluster.stdout }} - wait: true - kubeconfig: "{{ kubeconfig.path }}" -# TODO: move to git repo +- name: Create application + ansible.builtin.template: + src: application.j2 + dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml + owner: root + group: root + mode: 0600 + vars: + _template: + application: + name: application-clusterapi-workloadcluster + namespace: argo-cd + cluster: + name: https://kubernetes.default.svc + namespace: default + repository: + url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git + path: manifests + revision: main + notify: + - Apply manifests + +- name: Trigger handlers + ansible.builtin.meta: flush_handlers - name: Wait for cluster to be available ansible.builtin.shell: diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/gitops.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/gitops.yml index 1112f4f..e426700 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/gitops.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/gitops.yml @@ -1,37 +1,105 @@ -- block: +- name: Aggregate helm charts from filesystem + ansible.builtin.find: + path: /opt/workloadcluster/helm-charts + file_type: directory + recurse: false + register: helm_charts - - name: Generate service account in workload cluster - kubernetes.core.k8s: - template: serviceaccount.j2 - state: present +- name: Create hard-links to populate new git-repository + ansible.builtin.shell: + cmd: >- + cp -lr {{ item.path }}/ /opt/workloadcluster/git-repositories/gitops/charts + loop: "{{ helm_charts.files }}" + loop_control: + label: "{{ item.path | basename }}" - - name: Retrieve service account bearer token - kubernetes.core.k8s_info: - kind: Secret - name: "{{ _template.account.name }}-secret" - namespace: "{{ _template.account.namespace }}" - register: workloadcluster_bearertoken +- name: Create subfolders + ansible.builtin.file: + path: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }} + state: directory + loop: "{{ query('ansible.builtin.dict', downstream_components) }}" + loop_control: + label: "{{ item.key }}" - - 
name: Register workload cluster in argo-cd - kubernetes.core.k8s: - template: cluster.j2 - state: present - kubeconfig: "{{ kubeconfig.path }}" - vars: - _template: - cluster: - name: "{{ vapp['workloadcluster.name'] | lower }}" - secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }} - url: https://{{ vapp['workloadcluster.vip'] }}:6443 - token: "{{ workloadcluster_bearertoken.resources | json_query('[].data.token') }}" +- name: Write chart values to file + ansible.builtin.copy: + dest: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}/values.yaml + content: "{{ item.value.chart_values | default('# Empty') | to_nice_yaml(indent=2, width=4096) }}" + loop: "{{ query('ansible.builtin.dict', downstream_components) }}" + loop_control: + label: "{{ item.key }}" +- name: Initialize/Push git repository + ansible.builtin.shell: + cmd: | + git init + git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}" + git config --global user.name "administrator" + git checkout -b main + git add . + git commit -m "Upload charts" + git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git + git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git --all + chdir: /opt/workloadcluster/git-repositories/gitops + +- name: Retrieve workload-cluster kubeconfig + kubernetes.core.k8s_info: + kind: Secret + name: "{{ vapp['workloadcluster.name'] }}-kubeconfig" + namespace: default + kubeconfig: "{{ kubeconfig.path }}" + register: secret_workloadcluster_kubeconfig + +- name: Register workload-cluster in argo-cd + kubernetes.core.k8s: + template: cluster.j2 + state: present + kubeconfig: "{{ kubeconfig.path }}" vars: _template: - account: - name: argocd-sa - namespace: default - clusterrolebinding: - name: argocd-crb - module_defaults: - group/k8s: - kubeconfig: "{{ capi_kubeconfig.path }}" + cluster: + name: "{{ vapp['workloadcluster.name'] | lower }}" + secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }} + url: https://{{ vapp['workloadcluster.vip'] }}:6443 + kubeconfig: + ca: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).clusters[0].cluster['certificate-authority-data'] }}" + certificate: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-certificate-data'] }}" + key: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-key-data'] }}" + +- name: Configure workload-cluster GitOps repository + ansible.builtin.template: + src: gitrepo.j2 + dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml + owner: root + group: root + mode: 0600 + vars: + _template: + name: gitrepo-wl-gitopsconfig + namespace: argo-cd + url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git + notify: + - Apply manifests + +- name: Create applicationset + ansible.builtin.template: + src: applicationset.j2 + dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml + owner: root + group: root + mode: 0600 + vars: + _template: + application: + name: applicationset-workloadcluster + namespace: argo-cd + cluster: + url: https://{{ vapp['workloadcluster.vip'] }}:6443 + repository: + url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git + revision: main + notify: + - Apply manifests + +- name: Trigger handlers + ansible.builtin.meta: flush_handlers diff 
--git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/nodetemplates.yml b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/nodetemplates.yml index fa0545c..783ef2c 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/nodetemplates.yml +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/roles/workloadcluster/tasks/nodetemplates.yml @@ -1,77 +1,68 @@ - block: - - name: Check for existing templates on hypervisor + - name: Check for existing template on hypervisor community.vmware.vmware_guest_info: - name: "{{ (item | basename | split('.'))[:-1] | join('.') }}" + name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}" register: existing_ova - loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}" ignore_errors: yes - - name: Parse OVA files for network mappings - ansible.builtin.shell: - cmd: govc import.spec -json {{ item }} - environment: - GOVC_INSECURE: '1' - GOVC_URL: "{{ vapp['hv.fqdn'] }}" - GOVC_USERNAME: "{{ vapp['hv.username'] }}" - GOVC_PASSWORD: "{{ vapp['hv.password'] }}" - register: ova_spec - when: existing_ova.results[index] is failed - loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}" - loop_control: - index_var: index + - name: Store inventory path of existing template + ansible.builtin.set_fact: + nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}" + when: existing_ova is not failed - - name: Deploy OVA templates on hypervisor - community.vmware.vmware_deploy_ovf: - cluster: "{{ vcenter_info.cluster }}" - datastore: "{{ vcenter_info.datastore }}" - name: "{{ (item | basename | split('.'))[:-1] | join('.') }}" - networks: "{u'{{ ova_spec.results[index].stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}" - allow_duplicates: no - power_on: false - ovf: "{{ item }}" - register: ova_deploy - when: existing_ova.results[index] is failed - loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}" - loop_control: - index_var: index + - block: - - name: Add additional placeholder disk - community.vmware.vmware_guest_disk: - name: "{{ item.instance.hw_name }}" - disk: - - size: 1Gb - scsi_controller: 1 - scsi_type: paravirtual - unit_number: 0 - when: ova_deploy.results[index] is not skipped - loop: "{{ ova_deploy.results }}" - loop_control: - index_var: index - label: "{{ item.item }}" + - name: Parse OVA file for network mappings + ansible.builtin.shell: + cmd: govc import.spec -json {{ filename }} + environment: + GOVC_INSECURE: '1' + GOVC_URL: "{{ vapp['hv.fqdn'] }}" + GOVC_USERNAME: "{{ vapp['hv.username'] }}" + GOVC_PASSWORD: "{{ vapp['hv.password'] }}" + register: ova_spec - # Disabled to allow disks to be resized; at the cost of cloning speed - # - name: Create snapshot on deployed VM's - # community.vmware.vmware_guest_snapshot: - # name: "{{ item.instance.hw_name }}" - # state: present - # snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base" - # when: ova_deploy.results[index] is not skipped - # loop: "{{ ova_deploy.results }}" - # loop_control: - # index_var: index - # label: "{{ item.item }}" + - name: Deploy OVA template on hypervisor + community.vmware.vmware_deploy_ovf: + cluster: "{{ vcenter_info.cluster }}" + datastore: "{{ vcenter_info.datastore }}" + name: "{{ (filename | basename | 
split('.'))[:-1] | join('.') }}" + networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}" + allow_duplicates: no + power_on: false + ovf: "{{ filename }}" + register: ova_deploy - - name: Mark deployed VM's as templates - community.vmware.vmware_guest: - name: "{{ item.instance.hw_name }}" - is_template: yes - when: ova_deploy.results[index] is not skipped - loop: "{{ ova_deploy.results }}" - loop_control: - index_var: index - label: "{{ item.item }}" + - name: Add additional placeholder disk + community.vmware.vmware_guest_disk: + name: "{{ ova_deploy.instance.hw_name }}" + disk: + - size: 1Mb + scsi_controller: 1 + scsi_type: paravirtual + unit_number: 0 + # Disabled to allow disks to be resized; at the cost of cloning speed + # - name: Create snapshot on deployed VM + # community.vmware.vmware_guest_snapshot: + # name: "{{ ova_deploy.instance.hw_name }}" + # state: present + # snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base" + + - name: Mark deployed VM as templates + community.vmware.vmware_guest: + name: "{{ ova_deploy.instance.hw_name }}" + is_template: yes + + - name: Store inventory path of deployed template + ansible.builtin.set_fact: + nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}" + + when: existing_ova is failed + + vars: + filename: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | first }}" module_defaults: group/vmware: hostname: "{{ vapp['hv.fqdn'] }}" diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/application.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/application.j2 new file mode 100644 index 0000000..828b56a --- /dev/null +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/application.j2 @@ -0,0 +1,16 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: {{ _template.application.name }} + namespace: {{ _template.application.namespace }} +spec: + destination: + namespace: {{ _template.cluster.namespace }} + server: {{ _template.cluster.name }} + project: default + source: + repoURL: {{ _template.repository.url }} + path: {{ _template.repository.path }} + targetRevision: {{ _template.repository.revision }} + syncPolicy: + automated: {} diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/applicationset.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/applicationset.j2 index 63303a8..580752f 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/applicationset.j2 +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/applicationset.j2 @@ -1,28 +1,33 @@ apiVersion: argoproj.io/v1alpha1 kind: ApplicationSet metadata: - name: {{ _template.name }} - namespace: {{ _template.namespace }} + name: {{ _template.application.name }} + namespace: {{ _template.application.namespace }} spec: generators: - git: - repoURL: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git - revision: HEAD + repoURL: {{ _template.repository.url }} + revision: {{ _template.repository.revision }} directories: - - path: metacluster-applicationset/* + - path: charts/*/* template: metadata: - name: {% raw %}'{{ path.basename }}'{% endraw +%} + name: application-{% raw %}{{ path.basename }}{% endraw +%} spec: project: default syncPolicy: automated: prune: true selfHeal: true - source: - repoURL: 
ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git - targetRevision: HEAD + syncOptions: + - CreateNamespace=true + sources: + - repoURL: {{ _template.repository.url }} + targetRevision: {{ _template.repository.revision }} path: {% raw %}'{{ path }}'{% endraw +%} + helm: + valueFiles: + - /values/{% raw %}{{ path.basename }}{% endraw %}/values.yaml destination: - server: https://kubernetes.default.svc - namespace: default + server: {{ _template.cluster.url }} + namespace: {% raw %}'{{ path[1] }}'{% endraw +%} diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/cluster.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/cluster.j2 index bbaec5c..0d932a8 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/cluster.j2 +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/cluster.j2 @@ -11,8 +11,10 @@ stringData: server: {{ _template.cluster.url }} config: | { - "bearerToken": "{{ _template.cluster.token }}", "tlsClientConfig": { - "insecure": true + "insecure": false, + "caData": "{{ _template.kubeconfig.ca }}", + "certData": "{{ _template.kubeconfig.certificate }}", + "keyData": "{{ _template.kubeconfig.key }}" } } diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/gitrepo.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/gitrepo.j2 index c5351f9..643e1e7 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/gitrepo.j2 +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/gitrepo.j2 @@ -1,13 +1,9 @@ apiVersion: v1 kind: Secret metadata: - name: {{ _template.name }}-{{ _template.uid }} + name: {{ _template.name }} namespace: {{ _template.namespace }} labels: argocd.argoproj.io/secret-type: repository stringData: - url: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git - name: {{ _template.name }} - insecure: 'true' - sshPrivateKey: | -{{ _template.privatekey }} + url: {{ _template.url }} diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.cluster-template.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.cluster-template.j2 index 03b8206..f273815 100644 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.cluster-template.j2 +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.cluster-template.j2 @@ -4,6 +4,34 @@ resources: - cluster-template.yaml patchesStrategicMerge: + - |- + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' + stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + insecure-flag = true + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + type: Opaque - |- apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane @@ -25,18 +53,6 @@ patchesStrategicMerge: spec: clusterConfiguration: imageRepository: registry.{{ _template.network.fqdn }}/kubeadm - diskSetup: - filesystems: - - device: /dev/sdb1 - filesystem: ext4 - label: blockstorage - partitions: - - device: /dev/sdb - layout: true - tableType: gpt - mounts: - - - 
LABEL=blockstorage - - /mnt/blockstorage - |- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate @@ -47,6 +63,21 @@ patchesStrategicMerge: template: spec: files: + - content: | + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + append: true + path: /etc/containerd/config.toml +{% for registry in _template.registries %} + - content: | + server = "https://{{ registry }}" + + [host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"] + capabilities = ["pull", "resolve"] + override_path = true + owner: root:root + path: /etc/containerd/certs.d/{{ registry }}/hosts.toml +{% endfor %} - content: | network: {config: disabled} owner: root:root @@ -83,8 +114,6 @@ patchesStrategicMerge: spec: template: spec: - additionalDisksGiB: - - {{ _template.additionaldisk }} network: devices: - dhcp4: false @@ -103,6 +132,27 @@ patchesJson6902: kind: KubeadmControlPlane name: .* patch: |- + - op: add + path: /spec/kubeadmConfigSpec/files/- + value: + content: | + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + append: true + path: /etc/containerd/config.toml +{% for registry in _template.registries %} + - op: add + path: /spec/kubeadmConfigSpec/files/- + value: + content: | + server = "https://{{ registry }}" + + [host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"] + capabilities = ["pull", "resolve"] + override_path = true + owner: root:root + path: /etc/containerd/certs.d/{{ registry }}/hosts.toml +{% endfor %} - op: add path: /spec/kubeadmConfigSpec/files/- value: @@ -139,3 +189,68 @@ patchesJson6902: path: /spec/kubeadmConfigSpec/preKubeadmCommands/- value: {{ cmd }} {% endfor %} + + - target: + group: infrastructure.cluster.x-k8s.io + version: v1beta1 + kind: VSphereMachineTemplate + name: \${CLUSTER_NAME} + patch: |- + - op: replace + path: /metadata/name + value: ${CLUSTER_NAME}-master + - target: + group: controlplane.cluster.x-k8s.io + version: v1beta1 + kind: KubeadmControlPlane + name: \${CLUSTER_NAME} + patch: |- + - op: replace + path: /metadata/name + value: ${CLUSTER_NAME}-master + - op: replace + path: /spec/machineTemplate/infrastructureRef/name + value: ${CLUSTER_NAME}-master + - target: + group: cluster.x-k8s.io + version: v1beta1 + kind: Cluster + name: \${CLUSTER_NAME} + patch: |- + - op: replace + path: /spec/controlPlaneRef/name + value: ${CLUSTER_NAME}-master + + - target: + group: infrastructure.cluster.x-k8s.io + version: v1beta1 + kind: VSphereMachineTemplate + name: \${CLUSTER_NAME}-worker + patch: |- + - op: replace + path: /spec/template/spec/numCPUs + value: {{ _template.nodesize.cpu }} + - op: replace + path: /spec/template/spec/memoryMiB + value: {{ _template.nodesize.memory }} + - target: + group: cluster.x-k8s.io + version: v1beta1 + kind: MachineDeployment + name: \${CLUSTER_NAME}-md-0 + patch: |- + - op: replace + path: /metadata/name + value: ${CLUSTER_NAME}-worker + - op: replace + path: /spec/template/spec/bootstrap/configRef/name + value: ${CLUSTER_NAME}-worker + - target: + group: bootstrap.cluster.x-k8s.io + version: v1beta1 + kind: KubeadmConfigTemplate + name: \${CLUSTER_NAME}-md-0 + patch: |- + - op: replace + path: /metadata/name + value: ${CLUSTER_NAME}-worker diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.nodepool.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.nodepool.j2 new file mode 100644 index 0000000..eff0edc --- 
/dev/null +++ b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/kustomization.nodepool.j2 @@ -0,0 +1,84 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- manifests/kubeadmconfigtemplate-{{ _template.cluster.name }}-worker.yaml +- manifests/machinedeployment-{{ _template.cluster.name }}-worker.yaml +- manifests/vspheremachinetemplate-{{ _template.cluster.name }}-worker.yaml + +patchesStrategicMerge: + - |- + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + metadata: + name: {{ _template.cluster.name }}-worker + namespace: default + spec: + template: + spec: + diskSetup: + filesystems: + - device: /dev/sdb1 + filesystem: ext4 + label: blockstorage + partitions: + - device: /dev/sdb + layout: true + tableType: gpt + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + node-labels: "node.longhorn.io/create-default-disk=true" + mounts: + - - LABEL=blockstorage + - /mnt/blockstorage + - |- + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + metadata: + name: {{ _template.cluster.name }}-worker + namespace: default + spec: + template: + spec: + additionalDisksGiB: + - {{ _template.nodepool.additionaldisk }} + +patchesJson6902: + - target: + group: bootstrap.cluster.x-k8s.io + version: v1beta1 + kind: KubeadmConfigTemplate + name: {{ _template.cluster.name }}-worker + patch: |- + - op: replace + path: /metadata/name + value: {{ _template.cluster.name }}-worker-storage + + - target: + group: cluster.x-k8s.io + version: v1beta1 + kind: MachineDeployment + name: {{ _template.cluster.name }}-worker + patch: |- + - op: replace + path: /metadata/name + value: {{ _template.cluster.name }}-worker-storage + - op: replace + path: /spec/template/spec/bootstrap/configRef/name + value: {{ _template.cluster.name }}-worker-storage + - op: replace + path: /spec/template/spec/infrastructureRef/name + value: {{ _template.cluster.name }}-worker-storage + - op: replace + path: /spec/replicas + value: {{ _template.nodepool.size }} + + - target: + group: infrastructure.cluster.x-k8s.io + version: v1beta1 + kind: VSphereMachineTemplate + name: {{ _template.cluster.name }}-worker + patch: |- + - op: replace + path: /metadata/name + value: {{ _template.cluster.name }}-worker-storage diff --git a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/serviceaccount.j2 b/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/serviceaccount.j2 deleted file mode 100644 index cec2c90..0000000 --- a/ansible/roles/firstboot/files/ansible_payload/bootstrap/templates/serviceaccount.j2 +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ _template.account.name }} - namespace: {{ _template.account.namespace }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ _template.account.name }}-secret - namespace: {{ _template.account.namespace }} - annotations: - kubernetes.io/service-account.name: {{ _template.account.name }} -type: kubernetes.io/service-account-token ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ _template.clusterrolebinding.name }} -subjects: -- kind: ServiceAccount - name: {{ _template.account.name }} - namespace: {{ _template.account.namespace }} -roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io diff --git a/ansible/roles/firstboot/files/ansible_payload/common/roles/disks/tasks/main.yml 
b/ansible/roles/firstboot/files/ansible_payload/common/roles/disks/tasks/main.yml index c74bcec..62b80ef 100644 --- a/ansible/roles/firstboot/files/ansible_payload/common/roles/disks/tasks/main.yml +++ b/ansible/roles/firstboot/files/ansible_payload/common/roles/disks/tasks/main.yml @@ -28,6 +28,10 @@ ansible.builtin.set_fact: storage_benchmark: "{{ [storage_benchmark, (end_time | int - start_time | int)] | max }}" +- name: Log benchmark actual duration + ansible.builtin.debug: + msg: "Benchmark actual duration: {{ (end_time | int - start_time | int) }} second(s)" + - name: Mount dynamic disk ansible.posix.mount: path: /mnt/blockstorage diff --git a/ansible/roles/firstboot/files/ansible_payload/common/templates/registries.j2 b/ansible/roles/firstboot/files/ansible_payload/common/templates/registries.j2 index 45cde78..8345db3 100644 --- a/ansible/roles/firstboot/files/ansible_payload/common/templates/registries.j2 +++ b/ansible/roles/firstboot/files/ansible_payload/common/templates/registries.j2 @@ -1,8 +1,8 @@ mirrors: -{% for entry in _template.data %} - {{ entry }}: +{% for registry in _template.registries %} + {{ registry }}: endpoint: - https://registry.{{ _template.hv.fqdn }} rewrite: - "(.*)": "library/{{ entry }}/$1" + "(.*)": "library/{{ registry }}/$1" {% endfor %} diff --git a/ansible/roles/firstboot/files/ansible_payload/common/templates/tty.j2 b/ansible/roles/firstboot/files/ansible_payload/common/templates/tty.j2 index 8eb0e83..882376d 100644 --- a/ansible/roles/firstboot/files/ansible_payload/common/templates/tty.j2 +++ b/ansible/roles/firstboot/files/ansible_payload/common/templates/tty.j2 @@ -33,7 +33,7 @@ while /bin/true; do echo -e "${PRST}" > /dev/tty1 echo -e "\n\n\t${DFLT}To manage this appliance, please connect to one of the following:${LCLR}\n" > /dev/tty1 - for c in "${!COMPONENTS[@]}"; do + for c in $( echo "${!COMPONENTS[@]}" | tr ' ' $'\n' | sort); do STATUS=$(curl -kLs "${COMPONENTS[${c}]}" -o /dev/null -w '%{http_code}') if [[ "${STATUS}" -eq "200" ]]; then diff --git a/ansible/roles/firstboot/files/ansible_payload/common/vars/defaults.yml b/ansible/roles/firstboot/files/ansible_payload/common/vars/defaults.yml index c3b1df9..3d2b32c 100644 --- a/ansible/roles/firstboot/files/ansible_payload/common/vars/defaults.yml +++ b/ansible/roles/firstboot/files/ansible_payload/common/vars/defaults.yml @@ -8,3 +8,16 @@ playbook: # This default value is updated during the playbook, based on an I/O intensive operation storage_benchmark: 30 + +config: + clusterapi: + size_matrix: + small: + cpu: 2 + memory: 6144 + medium: + cpu: 4 + memory: 8192 + large: + cpu: 8 + memory: 16384 diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/decommission/tasks/storage.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/decommission/tasks/storage.yml index ab3d9c9..a675b83 100644 --- a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/decommission/tasks/storage.yml +++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/decommission/tasks/storage.yml @@ -22,6 +22,6 @@ spec: numberOfReplicas: {{ (lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int) - 1 }} kubeconfig: "{{ kubeconfig.path }}" - loop: "{{ lookup('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}" + loop: "{{ query('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}" 
loop_control: label: "{{ item.metadata.name }}" diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/git.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/git.yml index 9970549..2454039 100644 --- a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/git.yml +++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/git.yml @@ -7,7 +7,7 @@ release_namespace: gitea wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.gitea.chart_values }}" + values: "{{ components['gitea'].chart_values }}" - name: Ensure gitea API availability ansible.builtin.uri: diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/gitops.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/gitops.yml index ada716e..79e4ea7 100644 --- a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/gitops.yml +++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/gitops.yml @@ -7,7 +7,7 @@ release_namespace: argo-cd wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.argocd.chart_values }}" + values: "{{ components['argo-cd'].chart_values }}" - name: Ensure argo-cd API availability ansible.builtin.uri: diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/registry.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/registry.yml index e433a87..9005a2b 100644 --- a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/registry.yml +++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/registry.yml @@ -7,7 +7,7 @@ release_namespace: harbor wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.harbor.chart_values }}" + values: "{{ components['harbor'].chart_values }}" - name: Ensure harbor API availability ansible.builtin.uri: diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/storage.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/storage.yml index 93861e4..282d8a4 100644 --- a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/storage.yml +++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/metacluster/tasks/storage.yml @@ -11,7 +11,7 @@ spec: numberOfReplicas: {{ lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int }} kubeconfig: "{{ kubeconfig.path }}" - loop: "{{ lookup('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}" + loop: "{{ query('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}" loop_control: label: "{{ item.metadata.name }}" @@ -34,7 +34,7 @@ release_namespace: longhorn-system wait: false kubeconfig: "{{ kubeconfig.path }}" - values: "{{ components.longhorn.chart_values }}" + values: "{{ components['longhorn'].chart_values }}" - name: Ensure longhorn API availability ansible.builtin.uri: diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/nodetemplates.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/nodetemplates.yml index 2060474..e867b67 100644 --- 
a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/nodetemplates.yml +++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/nodetemplates.yml @@ -1,81 +1,68 @@ - block: - - name: Check for existing templates on hypervisor + - name: Check for existing template on hypervisor community.vmware.vmware_guest_info: - name: "{{ (item | basename | split('.'))[:-1] | join('.') }}" + name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}" register: existing_ova - loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}" ignore_errors: yes - - name: Parse OVA files for network mappings - ansible.builtin.shell: - cmd: govc import.spec -json {{ item }} - environment: - GOVC_INSECURE: '1' - GOVC_URL: "{{ vapp['hv.fqdn'] }}" - GOVC_USERNAME: "{{ vapp['hv.username'] }}" - GOVC_PASSWORD: "{{ vapp['hv.password'] }}" - register: ova_spec - when: existing_ova.results[index] is failed - loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}" - loop_control: - index_var: index + - name: Store inventory path of existing template + ansible.builtin.set_fact: + nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}" + when: existing_ova is not failed - - name: Deploy OVA templates on hypervisor - community.vmware.vmware_deploy_ovf: - cluster: "{{ vcenter_info.cluster }}" - datastore: "{{ vcenter_info.datastore }}" - folder: "{{ vcenter_info.folder }}" - name: "{{ (item | basename | split('.'))[:-1] | join('.') }}" - networks: "{u'{{ ova_spec.results[index].stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}" - allow_duplicates: no - power_on: false - ovf: "{{ item }}" - register: ova_deploy - when: existing_ova.results[index] is failed - loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}" - loop_control: - index_var: index + - block: - - name: Add vApp properties on deployed VM's - ansible.builtin.shell: - cmd: >- - npp-prepper \ - --server "{{ vapp['hv.fqdn'] }}" \ - --username "{{ vapp['hv.username'] }}" \ - --password "{{ vapp['hv.password'] }}" \ - vm \ - --datacenter "{{ vcenter_info.datacenter }}" \ - --portgroup "{{ vcenter_info.network }}" \ - --name "{{ item.instance.hw_name }}" - when: existing_ova.results[index] is failed - loop: "{{ ova_deploy.results }}" - loop_control: - index_var: index - label: "{{ item.item }}" + - name: Parse OVA file for network mappings + ansible.builtin.shell: + cmd: govc import.spec -json {{ filename }} + environment: + GOVC_INSECURE: '1' + GOVC_URL: "{{ vapp['hv.fqdn'] }}" + GOVC_USERNAME: "{{ vapp['hv.username'] }}" + GOVC_PASSWORD: "{{ vapp['hv.password'] }}" + register: ova_spec - - name: Create snapshot on deployed VM's - community.vmware.vmware_guest_snapshot: - folder: "{{ vcenter_info.folder }}" - name: "{{ item.instance.hw_name }}" - state: present - snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base" - when: ova_deploy.results[index] is not skipped - loop: "{{ ova_deploy.results }}" - loop_control: - index_var: index - label: "{{ item.item }}" + - name: Deploy OVA template on hypervisor + community.vmware.vmware_deploy_ovf: + cluster: "{{ vcenter_info.cluster }}" + datastore: "{{ vcenter_info.datastore }}" + name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}" + networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') 
+        networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
+        allow_duplicates: no
+        power_on: false
+        ovf: "{{ filename }}"
+      register: ova_deploy

-  - name: Mark deployed VM's as templates
-    community.vmware.vmware_guest:
-      name: "{{ item.instance.hw_name }}"
-      is_template: yes
-    when: ova_deploy.results[index] is not skipped
-    loop: "{{ ova_deploy.results }}"
-    loop_control:
-      index_var: index
-      label: "{{ item.item }}"
+    - name: Add additional placeholder disk
+      community.vmware.vmware_guest_disk:
+        name: "{{ ova_deploy.instance.hw_name }}"
+        disk:
+        - size: 1Gb
+          scsi_controller: 1
+          scsi_type: paravirtual
+          unit_number: 0
+
+    # Disabled to allow disks to be resized; at the cost of cloning speed
+    # - name: Create snapshot on deployed VM
+    #   community.vmware.vmware_guest_snapshot:
+    #     name: "{{ ova_deploy.instance.hw_name }}"
+    #     state: present
+    #     snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
+
+    - name: Mark deployed VM as template
+      community.vmware.vmware_guest:
+        name: "{{ ova_deploy.instance.hw_name }}"
+        is_template: yes
+
+    - name: Store inventory path of deployed template
+      ansible.builtin.set_fact:
+        nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
+
+    when: existing_ova is failed
+
+  vars:
+    filename: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/node-templates/*.ova') | first }}"

   module_defaults:
     group/vmware:
       hostname: "{{ vapp['hv.fqdn'] }}"
@@ -83,3 +70,4 @@
       username: "{{ vapp['hv.username'] }}"
       password: "{{ vapp['hv.password'] }}"
       datacenter: "{{ vcenter_info.datacenter }}"
+      folder: "{{ vcenter_info.folder }}"
diff --git a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/registry.yml b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/registry.yml
index 60b1b2b..dcd0285 100644
--- a/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/registry.yml
+++ b/ansible/roles/firstboot/files/ansible_payload/upgrade/roles/workloadcluster/tasks/registry.yml
@@ -1,9 +1,5 @@
 - block:

-  - name: Lookup kubeadm container images
-    ansible.builtin.set_fact:
-      kubeadm_images: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"
-
   - name: Copy kubeadm container images to dedicated project
     ansible.builtin.uri:
       url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects/kubeadm/repositories/{{ ( item | regex_findall('([^:/]+)') )[-2] }}/artifacts?from=library/{{ item | replace('/', '%2F') | replace(':', '%3A') }}
@@ -12,7 +8,7 @@
         Authorization: "Basic {{ ('admin:' ~ vapp['metacluster.password']) | b64encode }}"
       body:
         from: "{{ item }}"
-    loop: "{{ kubeadm_images }}"
+    loop: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"

   module_defaults:
     ansible.builtin.uri:
diff --git a/ansible/vars/metacluster.yml b/ansible/vars/metacluster.yml
index 4af907d..2230376 100644
--- a/ansible/vars/metacluster.yml
+++ b/ansible/vars/metacluster.yml
@@ -1,13 +1,7 @@
 platform:
   k3s:
-    version: v1.25.7+k3s1
-
-  gitops:
-    repository:
-      uri: https://code.spamasaurus.com/djpbessems/GitOps.MetaCluster.git
-      # revision: v0.1.0
-      revision: HEAD
+    version: v1.25.9+k3s1

   packaged_components:
   - name: traefik
@@ -39,8 +33,12 @@ platform:
   helm_repositories:
   - name: argo
     url: https://argoproj.github.io/argo-helm
-  - name: dex
-    url: https://charts.dexidp.io
+  - name: authentik
+    url: https://charts.goauthentik.io
+  # - name: codecentric
+  #   url: https://codecentric.github.io/helm-charts
+  # - name: dex
+  #   url: https://charts.dexidp.io
   - name: gitea-charts
     url: https://dl.gitea.io/charts/
   - name: harbor
@@ -58,7 +56,7 @@ components:

   argo-cd:
     helm:
-      version: 5.24.0 # (= ArgoCD v2.6.3)
+      version: 5.27.4 # (= ArgoCD v2.6.7)
       chart: argo/argo-cd
       parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
       chart_values: !unsafe |
@@ -73,6 +71,32 @@ components:
         hosts:
         - gitops.{{ vapp['metacluster.fqdn'] }}

+  authentik:
+    helm:
+      version: 2023.3.1
+      chart: authentik/authentik
+      parse_logic: helm template . --set postgresql.enabled=true,redis.enabled=true | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
+      chart_values: !unsafe |
+        authentik:
+          avatars: none
+          secret_key: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
+          postgresql:
+            password: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
+        env:
+          AUTHENTIK_BOOTSTRAP_PASSWORD: "{{ vapp['metacluster.password'] }}"
+        ingress:
+          enabled: true
+          hosts:
+          - host: auth.{{ vapp['metacluster.fqdn'] }}
+            paths:
+            - path: "/"
+              pathType: Prefix
+        postgresql:
+          enabled: true
+          postgresqlPassword: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
+        redis:
+          enabled: true
+
   cert-manager:
     helm:
       version: 1.11.0
@@ -85,65 +109,67 @@ components:
     management:
       version:
         # Must match the version referenced at `dependencies.static_binaries[.filename==clusterctl].url`
-        base: v1.3.5
+        base: v1.4.0

        # Must match the version referenced at `components.cert-manager.helm.version`
         cert_manager: v1.11.0
-        infrastructure_vsphere: v1.5.3
+        infrastructure_vsphere: v1.6.0
         ipam_incluster: v0.1.0-alpha.2
+        # Refer to `https://console.cloud.google.com/gcr/images/cloud-provider-vsphere/GLOBAL/cpi/release/manager` for available tags
+        cpi_vsphere: v1.25.2

     workload:
       version:
         calico: v3.25.0
-        k8s: v1.25.8
+        k8s: v1.25.9
       node_template:
-        url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2004-kube-v1.25.8.ova
+        url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2004-kube-v1.25.9.ova
-  dex:
-    helm:
-      version: 0.13.0 # (= Dex 2.35.3)
-      chart: dex/dex
-      parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
-      chart_values: !unsafe |
-        config:
-          connectors:
-          - type: ldap
-            id: ldap
-            name: "LDAP"
-            config:
-              host: "{{ vapp['ldap.fqdn'] }}:636"
-              insecureNoSSL: false
-              insecureSkipVerify: true
-              bindDN: "{{ vapp['ldap.dn'] }}"
-              bindPW: "{{ vapp['ldap.password'] }}"
+  # dex:
+  #   helm:
+  #     version: 0.13.0 # (= Dex 2.35.3)
+  #     chart: dex/dex
+  #     parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
+  #     chart_values: !unsafe |
+  #       config:
+  #         connectors:
+  #         - type: ldap
+  #           id: ldap
+  #           name: "LDAP"
+  #           config:
+  #             host: "{{ vapp['ldap.fqdn'] }}:636"
+  #             insecureNoSSL: false
+  #             insecureSkipVerify: true
+  #             bindDN: "{{ vapp['ldap.dn'] }}"
+  #             bindPW: "{{ vapp['ldap.password'] }}"

-              usernamePrompt: "Username"
-              userSearch:
-                baseDN: OU=Administrators,OU=Useraccounts,DC=bessems,DC=eu
-                filter: "(objectClass=person)"
-                username: userPrincipalName
-                idAttr: DN
-                emailAttr: userPrincipalName
-                nameAttr: cn
+  #           usernamePrompt: "Username"
+  #           userSearch:
+  #             baseDN: OU=Administrators,OU=Useraccounts,DC=bessems,DC=eu
+  #             filter: "(objectClass=person)"
+  #             username: userPrincipalName
+  #             idAttr: DN
+  #             emailAttr: userPrincipalName
+  #             nameAttr: cn

-              groupSearch:
-                baseDN: OU=Roles,OU=Groups,DC=bessems,DC=eu
-                filter: "(objectClass=group)"
-                userMatchers:
-                - userAttr: DN
-                  groupAttr: member
-                  nameAttr: cn
-          enablePasswordDB: true
-          issuer: https://oidc.{{ vapp['metacluster.fqdn'] }}
-          storage:
-            type: kubernetes
-            config:
-              inCluster: true
-        ingress:
-          enabled: true
-          hosts:
-          - host: oidc.{{ vapp['metacluster.fqdn'] }}
-            paths:
-            - path: /
-              pathType: Prefix
+  #           groupSearch:
+  #             baseDN: OU=Roles,OU=Groups,DC=bessems,DC=eu
+  #             filter: "(objectClass=group)"
+  #             userMatchers:
+  #             - userAttr: DN
+  #               groupAttr: member
+  #               nameAttr: cn
+  #         enablePasswordDB: true
+  #         issuer: https://oidc.{{ vapp['metacluster.fqdn'] }}
+  #         storage:
+  #           type: kubernetes
+  #           config:
+  #             inCluster: true
+  #       ingress:
+  #         enabled: true
+  #         hosts:
+  #         - host: oidc.{{ vapp['metacluster.fqdn'] }}
+  #           paths:
+  #           - path: /
+  #             pathType: Prefix

   gitea:
     helm:
@@ -199,6 +225,38 @@ components:
         registry:
           size: 25Gi

+  # keycloakx:
+  #   helm:
+  #     version: 2.1.1 # (= Keycloak 20.0.3)
+  #     chart: codecentric/keycloakx
+  #     parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
+  #     chart_values: !unsafe |
+  #       command:
+  #       - "/opt/keycloak/bin/kc.sh"
+  #       - "start"
+  #       - "--http-enabled=true"
+  #       - "--http-port=8080"
+  #       - "--hostname-strict=false"
+  #       - "--hostname-strict-https=false"
+  #       extraEnv: |
+  #         - name: KEYCLOAK_ADMIN
+  #           value: admin
+  #         - name: KEYCLOAK_ADMIN_PASSWORD
+  #           value: {{ vapp['metacluster.password'] }}
+  #         - name: KC_PROXY
+  #           value: "passthrough"
+  #         - name: JAVA_OPTS_APPEND
+  #           value: >-
+  #             -Djgroups.dns.query={% raw %}{{ include "keycloak.fullname" . }}{% endraw %}-headless
+  #       ingress:
+  #         enabled: true
+  #         rules:
+  #         - host: keycloak.{{ vapp['metacluster.fqdn'] }}
+  #           paths:
+  #           - path: /
+  #             pathType: Prefix
+  #         tls: []
+
   kube-prometheus-stack:
     helm:
       version: 45.2.0
@@ -216,7 +274,7 @@ components:

   longhorn:
     helm:
-      version: 1.4.0
+      version: 1.4.1
       chart: longhorn/longhorn
       parse_logic: cat values.yaml | yq eval '.. | select(has("repository")) | .repository + ":" + .tag'
       chart_values: !unsafe |
@@ -276,7 +334,7 @@ dependencies:
   static_binaries:
   - filename: clusterctl
-    url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.5/clusterctl-linux-amd64
+    url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-linux-amd64
   - filename: govc
     url: https://github.com/vmware/govmomi/releases/download/v0.29.0/govc_Linux_x86_64.tar.gz
     archive: compressed
@@ -288,7 +346,7 @@ dependencies:
     url: https://github.com/patrickdappollonio/kubectl-slice/releases/download/v1.2.5/kubectl-slice_linux_x86_64.tar.gz
     archive: compressed
   - filename: skopeo
-    url: https://code.spamasaurus.com/api/packages/djpbessems/generic/skopeo/v1.11.1/skopeo_linux_amd64
+    url: https://code.spamasaurus.com/api/packages/djpbessems/generic/skopeo/v1.12.0/skopeo_linux_amd64
   - filename: step
     url: https://dl.step.sm/gh-release/cli/gh-release-header/v0.23.0/step_linux_0.23.0_amd64.tar.gz
     archive: compressed
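Several of the chart_values blocks added to ansible/vars/metacluster.yml above (for example authentik's secret_key and PostgreSQL password) rely on the seeded ansible.builtin.password lookup. A minimal sketch of that idiom, using a hard-coded seed in place of vapp['guestinfo.hostname']; the task below is illustrative only:

- hosts: localhost
  gather_facts: false
  tasks:
  - name: Derive a reproducible secret from a seed
    # Writing to /dev/null means the generated value is never persisted to disk;
    # supplying a seed makes the lookup deterministic, so repeated runs and
    # repeated template evaluations yield the same secret for the same appliance.
    ansible.builtin.debug:
      msg: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=appliance01') }}"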
diff --git a/ansible/vars/workloadcluster.yml b/ansible/vars/workloadcluster.yml
new file mode 100644
index 0000000..c4b15d6
--- /dev/null
+++ b/ansible/vars/workloadcluster.yml
@@ -0,0 +1,27 @@
+downstream:
+
+  helm_repositories:
+  - name: longhorn
+    url: https://charts.longhorn.io
+  - name: sealed-secrets
+    url: https://bitnami-labs.github.io/sealed-secrets
+
+  helm_charts:
+
+    longhorn:
+      version: 1.4.1
+      chart: longhorn/longhorn
+      namespace: longhorn-system
+      parse_logic: cat values.yaml | yq eval '.. | select(has("repository")) | .repository + ":" + .tag'
+      chart_values: !unsafe |
+        defaultSettings:
+          createDefaultDiskLabeledNodes: true
+          defaultDataPath: /mnt/blockstorage
+
+    sealed-secrets:
+      version: 2.8.1 # (= Sealed Secrets v0.20.2)
+      chart: sealed-secrets/sealed-secrets
+      namespace: sealed-secrets
+      parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
+      # chart_values: !unsafe |
+      #   # Empty
diff --git a/packer/build.pkr.hcl b/packer/build.pkr.hcl
index fa6e9d9..2a1b241 100644
--- a/packer/build.pkr.hcl
+++ b/packer/build.pkr.hcl
@@ -6,22 +6,12 @@ packer {
 build {
   source "vsphere-iso.ubuntu" {
     name    = "bootstrap"
-    vm_name = "ova.bootstrap-${var.vm_name}"
-
-    export {
-      images           = false
-      output_directory = "/scratch/airgapped-k8s/bootstrap"
-    }
+    vm_name = "bld_${var.vm_name}_bootstrap"
   }

   source "vsphere-iso.ubuntu" {
     name    = "upgrade"
-    vm_name = "ova.upgrade-${var.vm_name}"
-
-    export {
-      images           = false
-      output_directory = "/scratch/airgapped-k8s/upgrade"
-    }
+    vm_name = "bld_${var.vm_name}_upgrade"
   }

   provisioner "ansible" {
@@ -34,6 +24,8 @@ build {
       "PYTHONUNBUFFERED=1"
     ]
     use_proxy        = "false"
+    collections_path = "ansible/collections"
+
     extra_arguments  = [
       "--extra-vars", "appliancetype=${source.name}",
       "--extra-vars", "ansible_ssh_pass=${var.ssh_password}",
@@ -48,11 +40,11 @@ build {
     inline = [
       "pwsh -command \"& scripts/Update-OvfConfiguration.ps1 \\",
       " -ApplianceType '${source.name}' \\",
-      " -OVFFile '/scratch/airgapped-k8s/${source.name}/ova.${source.name}-${var.vm_name}.ovf' \"",
+      " -OVFFile '/scratch/bld_${var.vm_name}_${source.name}.ovf' \"",
       "pwsh -file scripts/Update-Manifest.ps1 \\",
-      " -ManifestFileName '/scratch/airgapped-k8s/${source.name}/ova.${source.name}-${var.vm_name}.mf'",
+      " -ManifestFileName '/scratch/bld_${var.vm_name}_${source.name}.mf'",
       "ovftool --acceptAllEulas --allowExtraConfig --overwrite \\",
-      " '/scratch/airgapped-k8s/${source.name}/ova.${source.name}-${var.vm_name}.ovf' \\",
+      " '/scratch/bld_${var.vm_name}_${source.name}.ovf' \\",
      " /output/airgapped-k8s-${var.k8s_version}.${source.name}.ova"
     ]
   }
diff --git a/packer/source.pkr.hcl b/packer/source.pkr.hcl
index 3774c76..02b2f3f 100644
--- a/packer/source.pkr.hcl
+++ b/packer/source.pkr.hcl
@@ -53,4 +53,9 @@ source "vsphere-iso" "ubuntu" {
   shutdown_timeout = "5m"

   remove_cdrom     = true
+
+  export {
+    images           = false
+    output_directory = "/scratch"
+  }
 }
diff --git a/scripts/Update-OvfConfiguration.bootstrap.yml b/scripts/Update-OvfConfiguration.bootstrap.yml
index caa0dac..8089d71 100644
--- a/scripts/Update-OvfConfiguration.bootstrap.yml
+++ b/scripts/Update-OvfConfiguration.bootstrap.yml
@@ -1,12 +1,12 @@
 DeploymentConfigurations:
-- Id: cp1w1
+- Id: cp1w1ws0
   Label: 'Workload-cluster: 1 control-plane node/1 worker node'
   Description: 1 control-plane node/1 worker node

-- Id: cp1w2
-  Label: 'Workload-cluster: 1 control-plane node/2 worker nodes'
-  Description: 1 control-plane node/2 worker nodes
+- Id: cp1w1ws1
+  Label: 'Workload-cluster: 1 control-plane node/1 worker node/1 worker-storage node'
+  Description: 1 control-plane node/1 worker node/1 worker-storage node

 - Id: core
   Label: No workload-cluster
@@ -28,8 +28,8 @@ PropertyCategories:
   - Key: deployment.type
     Type: string
     Value:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
     - core
     UserConfigurable: false
@@ -128,61 +128,61 @@ PropertyCategories:
     Description: ''
     DefaultValue: 'workload-{{ hostname.suffix }}'
     Configurations:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true

   - Key: workloadcluster.vip
     Type: ip
     Label: Workload-cluster virtual IP address*
     Description: Workload-cluster control plane endpoint virtual IP address
-    DefaultValue: ''
+    DefaultValue: '0.0.0.0'
     Configurations:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true

   - Key: ippool.startip
     Type: ip
     Label: Workload-cluster IP-pool start IP address*
     Description: All nodes for the workload-cluster will be provisioned within this IP pool
-    DefaultValue: ''
+    DefaultValue: '0.0.0.0'
     Configurations:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true

   - Key: ippool.endip
     Type: ip
     Label: Workload-cluster IP-pool end IP address*
     Description: All nodes for the workload-cluster will be provisioned within this IP pool
-    DefaultValue: ''
+    DefaultValue: '0.0.0.0'
     Configurations:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true

   - Key: workloadcluster.nodesize
-    Type: string["small", "medium"]
+    Type: string["small", "medium", "large"]
     Label: Workload-cluster node size*
     Description: |
-      All worker-nodes for the workload-cluster will be provisioned with number of cpu-cores and memory as specified:
+      All worker and worker-storage nodes for the workload-cluster will be provisioned with number of cpu-cores and memory as specified:
       - SMALL: 2 vCPU/6GB RAM
       - MEDIUM: 4 vCPU/8GB RAM
+      - LARGE: 8 vCPU/16GB RAM
     DefaultValue: 'small'
     Configurations:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true

   - Key: workloadcluster.additionaldisk
     Type: int(0..120)
     Label: Workload-cluster block storage disk size*
-    Description: 'All worker-nodes for the workload-cluster will be provisioned with an additional disk of the specified size'
-    DefaultValue: '20'
+    Description: 'All worker-storage nodes for the workload-cluster will be provisioned with an additional disk of the specified size'
+    DefaultValue: '42'
     Configurations:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws1
     UserConfigurable: true

 - Name: 4) Common
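The workloadcluster.nodesize property above only enumerates the size labels; the CPU and memory figures from its description (SMALL: 2 vCPU/6GB, MEDIUM: 4 vCPU/8GB, LARGE: 8 vCPU/16GB) still have to be translated into machine specifications elsewhere. A hypothetical mapping in Ansible vars, purely illustrative (the variable and field names below are assumptions, not taken from this repository):

nodesize_presets:
  small:
    numcpus: 2      # SMALL: 2 vCPU/6GB RAM
    memorymb: 6144
  medium:
    numcpus: 4      # MEDIUM: 4 vCPU/8GB RAM
    memorymb: 8192
  large:
    numcpus: 8      # LARGE: 8 vCPU/16GB RAM
    memorymb: 16384

A template could then resolve the selected size with an expression such as {{ nodesize_presets[vapp['workloadcluster.nodesize']].numcpus }}, assuming the chosen value is surfaced through the vApp properties in the same way as the other keys above.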