Compare commits

75 Commits

bae9696023 ... K8s_1.25.9
| SHA1 |
|---|
| cce39a5bb7 |
| 823cc467fa |
| 9cb89bf055 |
| 358cbe39ea |
| 0fee2df2a6 |
| e4e58e4789 |
| 75158a8a5b |
| c83d541a0d |
| a46610f828 |
| fe5147bd2e |
| 6d168f0517 |
| 68445ee13f |
| 31b21c9b7a |
| e03cd20d65 |
| fd1c306061 |
| ca8044b4ab |
| 3c98e16e74 |
| 1860d8e2dd |
| 16fdd66328 |
| d73320da32 |
| 572b7df74c |
| ee08fd47b5 |
| 75277e285a |
| debe80a2a1 |
| 2534cea4a0 |
| 05c3a09ab3 |
| 2f91c0f7c3 |
| c385baf630 |
| 5c18869d60 |
| 1941e02d94 |
| 610495e424 |
| 4e6a0549b5 |
| db090ac564 |
| 2b56677e9a |
| 641ee2d9a7 |
| 979ac38794 |
| 86a0b684e2 |
| 56a33134a0 |
| 915660f618 |
| d0c4251e06 |
| 9ff0e09625 |
| 8e76617794 |
| 5a82c9e122 |
| cde92b4514 |
| 6942c33ae8 |
| 7ac4cc0914 |
| c054c76b60 |
| 25230fdda2 |
| 89cf69adc7 |
| 3f9fc4b7aa |
| 570047df3b |
| d187f60091 |
| 933615adeb |
| 1c60214f5a |
| 414b72bcb8 |
| 29396de154 |
| 5effe00c19 |
| 767be3b8f5 |
| eb2f491f72 |
| cd5fa89a0d |
| d7e8685225 |
| 5113dd5b6c |
| 89fd23f66a |
| fa0b72a903 |
| ec6f712427 |
| 1c19708855 |
| 942c13dde7 |
| 439223c56e |
| b644dc1a04 |
| 2de2259c76 |
| 214a3d189a |
| df91de5516 |
| 68f0524bda |
| ff555ce0de |
| ad0c511651 |
.drone.yml
@@ -13,89 +13,208 @@ volumes:
steps:
- name: Debugging information
  image: bv11-cr01.bessems.eu/library/packer-extended
  pull: always
  commands:
  - ansible --version
  - ovftool --version
  - packer --version
  - yamllint --version
- name: Kubernetes Bootstrap Appliance

- name: Linting
  depends_on:
  - Debugging information
  image: bv11-cr01.bessems.eu/library/packer-extended
  pull: always
  commands:
  - |
    sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
      packer/preseed/UbuntuServer22.04/user-data
  - |
    export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
  - |
    yamllint -d "{extends: relaxed, rules: {line-length: disable}}" \
      ansible \
      packer/preseed/UbuntuServer22.04/user-data \
      scripts
  - |
    ansible-galaxy install \
      -r ansible/requirements.yml
  - |
    packer init -upgrade \
      ./packer
  - |
    packer validate \
      -var vm_name=$DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
      -var docker_username=$${DOCKER_USERNAME} \
      -var docker_password=$${DOCKER_PASSWORD} \
      -var repo_username=$${REPO_USERNAME} \
      -var repo_password=$${REPO_PASSWORD} \
      -var ssh_password=$${SSH_PASSWORD} \
      -var vsphere_password=$${VSPHERE_PASSWORD} \
      -var k8s_version=$K8S_VERSION \
      ./packer
  - |
    packer build \
      -on-error=cleanup -timestamp-ui \
      -var vm_name=$DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
      -var docker_username=$${DOCKER_USERNAME} \
      -var docker_password=$${DOCKER_PASSWORD} \
      -var repo_username=$${REPO_USERNAME} \
      -var repo_password=$${REPO_PASSWORD} \
      -var ssh_password=$${SSH_PASSWORD} \
      -var vsphere_password=$${VSPHERE_PASSWORD} \
      -var k8s_version=$K8S_VERSION \
      ./packer
  environment:
    DOCKER_USERNAME:
      from_secret: docker_username
    DOCKER_PASSWORD:
      from_secret: docker_password
    # PACKER_LOG: 1
    REPO_USERNAME:
      from_secret: repo_username
    REPO_PASSWORD:
      from_secret: repo_password
    SSH_PASSWORD:
      from_secret: ssh_password
    VSPHERE_PASSWORD:
      from_secret: vsphere_password
  volumes:
  - name: output
    path: /output
  - name: scratch
    path: /scratch
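The Linting step above derives `K8S_VERSION` by querying `ansible/vars/metacluster.yml` with `yq`. A minimal sketch of the fragment that query assumes (nesting inferred from the `.components.clusterapi.workload.version.k8s` path and from other references in this diff; the values shown are placeholders):

```yaml
# ansible/vars/metacluster.yml (fragment; structure inferred, values illustrative)
components:
  clusterapi:
    workload:
      version:
        k8s: v1.25.9      # read by: yq '.components.clusterapi.workload.version.k8s'
        calico: v3.25.0   # hypothetical; referenced later as components.clusterapi.workload.version.calico
    management:
      version:
        infrastructure_vsphere: v1.5.3   # hypothetical placeholder
        ipam_incluster: v0.1.0           # hypothetical placeholder
        cpi_vsphere: v1.25.0             # hypothetical placeholder
```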
- name: Remove temporary resources
  image: bv11-cr01.bessems.eu/library/packer-extended

- name: Semantic Release (Dry-run)
  depends_on:
  - Linting
  image: bv11-cr01.bessems.eu/proxy/library/node:20-slim
  pull: always
  commands:
  - |
    pwsh -file scripts/Remove-Resources.ps1 \
      -VMName $DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
      -VSphereFQDN 'bv11-vc.bessems.lan' \
      -VSphereUsername 'administrator@vsphere.local' \
      -VSpherePassword $${VSPHERE_PASSWORD}
    apt-get update
  - |
    apt-get install -y --no-install-recommends \
      git-core \
      ca-certificates
  - |
    npm install \
      semantic-release \
      @semantic-release/commit-analyzer \
      @semantic-release/release-notes-generator \
      @semantic-release/exec
  - |
    export GIT_CREDENTIALS=$${GIT_USERNAME}:$${GIT_APIKEY}
  - |
    npx semantic-release \
      --branches ${DRONE_BRANCH} \
      --plugins @semantic-release/commit-analyzer,@semantic-release/release-notes-generator,@semantic-release/exec \
      --dry-run
  environment:
    VSPHERE_PASSWORD:
      from_secret: vsphere_password
  volumes:
  - name: scratch
    path: /scratch
  when:
    status:
    - success
    - failure
    GIT_APIKEY:
      from_secret: git_apikey
    GIT_USERNAME: djpbessems
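The Semantic Release (Dry-run) step passes its branch and plugin list as CLI flags; the same configuration could instead live in a `.releaserc.yml` in the repository root. A hedged sketch of that equivalent (no such file is part of this diff):

```yaml
# .releaserc.yml (hypothetical equivalent of the npx semantic-release flags above)
branches:
  - main            # the pipeline substitutes ${DRONE_BRANCH} at runtime
plugins:
  - "@semantic-release/commit-analyzer"
  - "@semantic-release/release-notes-generator"
  - "@semantic-release/exec"
```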
# Add random change for testing semantic release commit

# - name: Install Ansible Galaxy collections
#   depends_on:
#   - Semantic Release (Dry-run)
#   image: bv11-cr01.bessems.eu/library/packer-extended
#   pull: always
#   commands:
#   - |
#     ansible-galaxy collection install \
#       -r ansible/requirements.yml \
#       -p ./ansible/collections
#   volumes:
#   - name: scratch
#     path: /scratch

# - name: Kubernetes Bootstrap Appliance
#   depends_on:
#   - Install Ansible Galaxy collections
#   image: bv11-cr01.bessems.eu/library/packer-extended
#   pull: always
#   commands:
#   - |
#     sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
#       packer/preseed/UbuntuServer22.04/user-data
#   - |
#     export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
#   - |
#     packer init -upgrade \
#       ./packer
#   - |
#     packer validate \
#       -only=vsphere-iso.bootstrap \
#       -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
#       -var docker_username=$${DOCKER_USERNAME} \
#       -var docker_password=$${DOCKER_PASSWORD} \
#       -var repo_username=$${REPO_USERNAME} \
#       -var repo_password=$${REPO_PASSWORD} \
#       -var ssh_password=$${SSH_PASSWORD} \
#       -var vsphere_password=$${VSPHERE_PASSWORD} \
#       -var k8s_version=$K8S_VERSION \
#       ./packer
#   - |
#     packer build \
#       -on-error=cleanup -timestamp-ui \
#       -only=vsphere-iso.bootstrap \
#       -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
#       -var docker_username=$${DOCKER_USERNAME} \
#       -var docker_password=$${DOCKER_PASSWORD} \
#       -var repo_username=$${REPO_USERNAME} \
#       -var repo_password=$${REPO_PASSWORD} \
#       -var ssh_password=$${SSH_PASSWORD} \
#       -var vsphere_password=$${VSPHERE_PASSWORD} \
#       -var k8s_version=$K8S_VERSION \
#       ./packer
#   environment:
#     DOCKER_USERNAME:
#       from_secret: docker_username
#     DOCKER_PASSWORD:
#       from_secret: docker_password
#     # PACKER_LOG: 1
#     REPO_USERNAME:
#       from_secret: repo_username
#     REPO_PASSWORD:
#       from_secret: repo_password
#     SSH_PASSWORD:
#       from_secret: ssh_password
#     VSPHERE_PASSWORD:
#       from_secret: vsphere_password
#   volumes:
#   - name: output
#     path: /output
#   - name: scratch
#     path: /scratch

# - name: Kubernetes Upgrade Appliance
#   depends_on:
#   - Install Ansible Galaxy collections
#   image: bv11-cr01.bessems.eu/library/packer-extended
#   pull: always
#   commands:
#   - |
#     sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
#       packer/preseed/UbuntuServer22.04/user-data
#   - |
#     export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
#   - |
#     packer init -upgrade \
#       ./packer
#   - |
#     packer validate \
#       -only=vsphere-iso.upgrade \
#       -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
#       -var docker_username=$${DOCKER_USERNAME} \
#       -var docker_password=$${DOCKER_PASSWORD} \
#       -var repo_username=$${REPO_USERNAME} \
#       -var repo_password=$${REPO_PASSWORD} \
#       -var ssh_password=$${SSH_PASSWORD} \
#       -var vsphere_password=$${VSPHERE_PASSWORD} \
#       -var k8s_version=$K8S_VERSION \
#       ./packer
#   - |
#     packer build \
#       -on-error=cleanup -timestamp-ui \
#       -only=vsphere-iso.upgrade \
#       -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
#       -var docker_username=$${DOCKER_USERNAME} \
#       -var docker_password=$${DOCKER_PASSWORD} \
#       -var repo_username=$${REPO_USERNAME} \
#       -var repo_password=$${REPO_PASSWORD} \
#       -var ssh_password=$${SSH_PASSWORD} \
#       -var vsphere_password=$${VSPHERE_PASSWORD} \
#       -var k8s_version=$K8S_VERSION \
#       ./packer
#   environment:
#     DOCKER_USERNAME:
#       from_secret: docker_username
#     DOCKER_PASSWORD:
#       from_secret: docker_password
#     # PACKER_LOG: 1
#     REPO_USERNAME:
#       from_secret: repo_username
#     REPO_PASSWORD:
#       from_secret: repo_password
#     SSH_PASSWORD:
#       from_secret: ssh_password
#     VSPHERE_PASSWORD:
#       from_secret: vsphere_password
#   volumes:
#   - name: output
#     path: /output
#   - name: scratch
#     path: /scratch

# - name: Remove temporary resources
#   depends_on:
#   - Kubernetes Bootstrap Appliance
#   - Kubernetes Upgrade Appliance
#   image: bv11-cr01.bessems.eu/library/packer-extended
#   commands:
#   - |
#     pwsh -file scripts/Remove-Resources.ps1 \
#       -VMName $DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
#       -VSphereFQDN 'bv11-vc.bessems.lan' \
#       -VSphereUsername 'administrator@vsphere.local' \
#       -VSpherePassword $${VSPHERE_PASSWORD}
#   environment:
#     VSPHERE_PASSWORD:
#       from_secret: vsphere_password
#   volumes:
#   - name: scratch
#     path: /scratch
#   when:
#     status:
#     - success
#     - failure

@@ -3,6 +3,7 @@
  gather_facts: false
  vars_files:
    - metacluster.yml
    - workloadcluster.yml
  become: true
  roles:
    - os

@@ -14,13 +14,22 @@
  loop_control:
    label: "{{ item.dest | basename }}"

- name: Parse helm charts for container images
- name: Parse metacluster helm charts for container images
  ansible.builtin.shell:
    cmd: "{{ item.value.helm.parse_logic }}"
    chdir: /opt/metacluster/helm-charts/{{ item.key }}
  register: chartimages
  register: chartimages_metacluster
  when: item.value.helm is defined
  loop: "{{ lookup('ansible.builtin.dict', components) }}"
  loop: "{{ query('ansible.builtin.dict', components) }}"
  loop_control:
    label: "{{ item.key }}"

- name: Parse workloadcluster helm charts for container images
  ansible.builtin.shell:
    cmd: "{{ item.value.parse_logic }}"
    chdir: /opt/workloadcluster/helm-charts/{{ item.value.namespace }}/{{ item.key }}
  register: chartimages_workloadcluster
  loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
  loop_control:
    label: "{{ item.key }}"

@@ -29,7 +38,7 @@
    containerimages_{{ item.source }}: "{{ item.results }}"
  loop:
    - source: charts
      results: "{{ chartimages | json_query('results[*].stdout_lines') | select() | flatten | list }}"
      results: "{{ (chartimages_metacluster | json_query('results[*].stdout_lines')) + (chartimages_workloadcluster | json_query('results[*].stdout_lines')) | select() | flatten | list }}"
    - source: kubeadm
      results: "{{ kubeadmimages.stdout_lines }}"
    - source: manifests

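The parse tasks above run each chart's `parse_logic` command inside its chart directory and later flatten the registered `stdout_lines` into a single image list. The commands themselves live in the vars files, not in this diff; a hypothetical example of what one such `parse_logic` value could look like:

```yaml
# Hypothetical metacluster component entry; only the keys these tasks consume are shown
components:
  gitea:
    helm:
      chart: gitea-charts/gitea   # placeholder
      version: 7.0.2              # placeholder
      # prints one container image reference per line, collected into chartimages_metacluster
      parse_logic: helm template . | yq e '.. | .image? | select(.)' | sort -u
```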
@@ -1,5 +0,0 @@
- name: Clone git repository
  ansible.builtin.git:
    repo: "{{ platform.gitops.repository.uri }}"
    version: "{{ platform.gitops.repository.revision }}"
    dest: /opt/metacluster/git-repositories/gitops
@@ -3,17 +3,29 @@
    name: "{{ item.name }}"
    repo_url: "{{ item.url }}"
    state: present
  loop: "{{ platform.helm_repositories }}"
  loop: "{{ platform.helm_repositories + downstream.helm_repositories }}"

- name: Fetch helm charts
- name: Fetch helm charts for metacluster
  ansible.builtin.command:
    cmd: helm fetch {{ item.value.helm.chart }} --untar --version {{ item.value.helm.version }}
    chdir: /opt/metacluster/helm-charts
  when: item.value.helm is defined
  register: helmcharts
  loop: "{{ lookup('ansible.builtin.dict', components) }}"
  register: helmcharts_metacluster
  loop: "{{ query('ansible.builtin.dict', components) }}"
  loop_control:
    label: "{{ item.key }}"
  retries: 5
  delay: 5
  until: helmcharts is not failed
  until: helmcharts_metacluster is not failed

- name: Fetch helm charts for workloadcluster
  ansible.builtin.command:
    cmd: helm fetch {{ item.value.chart }} --untardir ./{{ item.value.namespace }} --untar --version {{ item.value.version }}
    chdir: /opt/workloadcluster/helm-charts
  register: helmcharts_workloadcluster
  loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
  loop_control:
    label: "{{ item.key }}"
  retries: 5
  delay: 5
  until: helmcharts_workloadcluster is not failed

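The workloadcluster variant above loops over `downstream.helm_charts` and expects each entry to carry `namespace`, `chart`, `version`, a `parse_logic` command and optional `chart_values`. A sketch of the assumed shape (keys inferred from the loop bodies in this diff; chart names and values are placeholders):

```yaml
# ansible/vars/workloadcluster.yml (fragment; structure inferred, entries hypothetical)
downstream:
  helm_repositories:
    - name: sealed-secrets
      url: https://bitnami-labs.github.io/sealed-secrets
  helm_charts:
    sealed-secrets:                          # item.key
      namespace: sealed-secrets              # item.value.namespace
      chart: sealed-secrets/sealed-secrets   # item.value.chart
      version: 2.8.1                         # item.value.version
      parse_logic: helm template . | yq e '.. | .image? | select(.)' | sort -u
      chart_values: |-
        fullnameOverride: sealed-secrets-controller
```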
@@ -21,7 +21,7 @@

- name: Download K3s install script
  ansible.builtin.get_url:
    url: https://get.k3s.io
    url: https://raw.githubusercontent.com/k3s-io/k3s/{{ platform.k3s.version | urlencode }}/install.sh
    dest: /opt/metacluster/k3s/install.sh
    owner: root
    group: root

@@ -12,10 +12,13 @@
    - /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}
    - /opt/metacluster/cluster-api/ipam-in-cluster/{{ components.clusterapi.management.version.ipam_incluster }}
    - /opt/metacluster/container-images
    - /opt/metacluster/git-repositories/gitops
    - /opt/metacluster/git-repositories
    - /opt/metacluster/helm-charts
    - /opt/metacluster/k3s
    - /opt/metacluster/kube-vip
    - /opt/workloadcluster/git-repositories/gitops/charts
    - /opt/workloadcluster/git-repositories/gitops/values
    - /opt/workloadcluster/helm-charts
    - /opt/workloadcluster/node-templates
    - /var/lib/rancher/k3s/agent/images
    - /var/lib/rancher/k3s/server/manifests

@@ -2,9 +2,9 @@

    - name: Aggregate chart_values into dict
      ansible.builtin.set_fact:
        chart_values: "{{ chart_values | default({}) | combine({ (item.key | regex_replace('[^A-Za-z0-9]', '')): { 'chart_values': (item.value.helm.chart_values | from_yaml) } }) }}"
        metacluster_chartvalues: "{{ metacluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.helm.chart_values | from_yaml) } }) }}"
      when: item.value.helm.chart_values is defined
      loop: "{{ lookup('ansible.builtin.dict', components) }}"
      loop: "{{ query('ansible.builtin.dict', components) }}"
      loop_control:
        label: "{{ item.key }}"

@@ -14,12 +14,29 @@
        content: >-
          {{
            { 'components': (
              chart_values |
              metacluster_chartvalues |
              combine({ 'clusterapi': components.clusterapi }) |
              combine({ 'kubevip'   : components.kubevip }) )
            } | to_nice_yaml(indent=2, width=4096)
          }}

    - name: Aggregate chart_values into dict
      ansible.builtin.set_fact:
        workloadcluster_chartvalues: "{{ workloadcluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.chart_values | default('') | from_yaml) } }) }}"
      # when: item.value.chart_values is defined
      loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
      loop_control:
        label: "{{ item.key }}"

    - name: Write dict to vars_file
      ansible.builtin.copy:
        dest: /opt/firstboot/ansible/vars/workloadcluster.yml
        content: >-
          {{
            { 'downstream_components': ( workloadcluster_chartvalues )
            } | to_nice_yaml(indent=2, width=4096)
          }}

- name: Download ClusterAPI manifests
  ansible.builtin.get_url:
    url: "{{ item.url }}"
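The "Write dict to vars_file" task above serializes the aggregated `workloadcluster_chartvalues` dict with `to_nice_yaml`; the firstboot playbook later loads the result through `vars_files`. Roughly, the generated `/opt/firstboot/ansible/vars/workloadcluster.yml` would look like this (chart keys and values are placeholders):

```yaml
# Illustrative output; actual keys depend on downstream.helm_charts
downstream_components:
  sealed-secrets:
    chart_values:
      fullnameOverride: sealed-secrets-controller
  some-other-chart:
    chart_values: null   # entries without chart_values serialize as null ('' | from_yaml)
```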
@@ -65,6 +82,12 @@
  delay: 5
  until: clusterapi_manifests is not failed

- name: Update cluster-template with image tags
  ansible.builtin.replace:
    dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/cluster-template.yaml
    regexp: ':\${CPI_IMAGE_K8S_VERSION}'
    replace: ":{{ components.clusterapi.management.version.cpi_vsphere }}"

- name: Download kube-vip RBAC manifest
  ansible.builtin.get_url:
    url: https://kube-vip.io/manifests/rbac.yaml
@@ -81,6 +104,6 @@
#     owner: root
#     group: root
#     mode: 0600
#   loop: "{{ lookup('ansible.builtin.dict', components) | map(attribute='value.manifests') | list | select('defined') | flatten }}"
#   loop: "{{ query('ansible.builtin.dict', components) | map(attribute='value.manifests') | list | select('defined') | flatten }}"
#   loop_control:
#     label: "{{ item.type ~ '/' ~ item.name }}"

@@ -5,6 +5,7 @@
  vars_files:
    - defaults.yml
    - metacluster.yml
    - workloadcluster.yml
  # become: true
  roles:
    - vapp

@@ -1,14 +0,0 @@
import netaddr

def netaddr_iter_iprange(ip_start, ip_end):
    return [str(ip) for ip in netaddr.iter_iprange(ip_start, ip_end)]

class FilterModule(object):
        ''' Ansible filter. Interface to netaddr methods.
            https://pypi.org/project/netaddr/
        '''

        def filters(self):
            return {
                'netaddr_iter_iprange': netaddr_iter_iprange
            }
@@ -8,7 +8,7 @@
        create_namespace: true
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.gitea.chart_values }}"
        values: "{{ components['gitea'].chart_values }}"

    - name: Ensure gitea API availability
      ansible.builtin.uri:
@@ -109,16 +109,28 @@
          loop:
            - organization: mc
              body:
                name: GitOps.Config
                auto_init: true
                default_branch: main
                description: GitOps manifests
            - organization: wl
                name: GitOps.ClusterAPI
                # auto_init: true
                # default_branch: main
                description: ClusterAPI manifests
            - organization: mc
              body:
                name: Template.GitOps.Config
                name: GitOps.Config
                # auto_init: true
                # default_branch: main
                description: GitOps manifests
            - organization: wl
              body:
                name: GitOps.Config
                # auto_init: true
                # default_branch: main
                description: GitOps manifests
            - organization: wl
              body:
                name: GitOps.HelmCharts
                # auto_init: true
                # default_branch: main
                description: Helm charts
          loop_control:
            label: "{{ item.organization ~ '/' ~ item.body.name }}"

@@ -8,7 +8,7 @@
        create_namespace: true
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.argocd.chart_values }}"
        values: "{{ components['argo-cd'].chart_values }}"

    - name: Ensure argo-cd API availability
      ansible.builtin.uri:
@@ -39,24 +39,29 @@
        mode: 0600
      vars:
        _template:
          name: argocd-gitrepo-metacluster
          name: gitrepo-mc-gitopsconfig
          namespace: argo-cd
          uid: "{{ lookup('ansible.builtin.password', '/dev/null length=5 chars=ascii_lowercase,digits seed=inventory_hostname') }}"
          privatekey: "{{ lookup('ansible.builtin.file', '~/.ssh/git_rsa_id') | indent(4, true) }}"
          url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
      notify:
        - Apply manifests

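The `gitrepo.j2` template itself is not part of this diff; given the `_template` values above (name, namespace, url, privatekey) and Argo CD's declarative repository format, it presumably renders a Secret along these lines (a sketch under that assumption, not the repository's actual template):

```yaml
# Hypothetical rendering of gitrepo.j2 for _template.name = gitrepo-mc-gitopsconfig
apiVersion: v1
kind: Secret
metadata:
  name: gitrepo-mc-gitopsconfig
  namespace: argo-cd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  url: https://git.<metacluster.fqdn>/mc/GitOps.Config.git   # from _template.url
  # sshPrivateKey: <_template.privatekey>                    # only relevant when an SSH url is used
```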
    - name: Create applicationset
      ansible.builtin.template:
        src: applicationset.j2
        dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
        dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
        owner: root
        group: root
        mode: 0600
      vars:
        _template:
          name: argocd-applicationset-metacluster
          namespace: argo-cd
          application:
            name: applicationset-metacluster
            namespace: argo-cd
          cluster:
            url: https://kubernetes.default.svc
          repository:
            url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
            revision: main
      notify:
        - Apply manifests

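`applicationset.j2` is likewise not included here; from the `_template.application`, `_template.cluster` and `_template.repository` values it presumably renders an Argo CD ApplicationSet with a git generator, roughly as sketched below (the directory layout of GitOps.Config is an assumption):

```yaml
# Hypothetical rendering of applicationset.j2 for applicationset-metacluster
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: applicationset-metacluster
  namespace: argo-cd
spec:
  generators:
    - git:
        repoURL: https://git.<metacluster.fqdn>/mc/GitOps.Config.git   # _template.repository.url
        revision: main                                                 # _template.repository.revision
        directories:
          - path: 'charts/*'                                           # assumed repository layout
  template:
    metadata:
      name: '{{path.basename}}'
    spec:
      project: default
      source:
        repoURL: https://git.<metacluster.fqdn>/mc/GitOps.Config.git
        targetRevision: main
        path: '{{path}}'
      destination:
        server: https://kubernetes.default.svc                         # _template.cluster.url
        namespace: '{{path.basename}}'
```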
@@ -78,6 +78,6 @@
    src: registries.j2
  vars:
    _template:
      data: "{{ source_registries }}"
      registries: "{{ source_registries }}"
      hv:
        fqdn: "{{ vapp['metacluster.fqdn'] }}"

@@ -8,7 +8,7 @@
        create_namespace: true
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.harbor.chart_values }}"
        values: "{{ components['harbor'].chart_values }}"

    - name: Ensure harbor API availability
      ansible.builtin.uri:

@@ -7,7 +7,7 @@
        create_namespace: true
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.longhorn.chart_values }}"
        values: "{{ components['longhorn'].chart_values }}"

    - name: Ensure longhorn API availability
      ansible.builtin.uri:

@@ -47,29 +47,12 @@
        resourcepool: "{{ vcenter_info.resourcepool }}"
        folder: "{{ vcenter_info.folder }}"
      cluster:
        nodetemplate: "{{ (components.clusterapi.workload.node_template.url | basename | split('.'))[:-1] | join('.') }}"
        nodetemplate: "{{ nodetemplate_inventorypath }}"
        publickey: "{{ vapp['guestinfo.rootsshkey'] }}"
        version: "{{ components.clusterapi.workload.version.k8s }}"
        vip: "{{ vapp['workloadcluster.vip'] }}"

- name: WORKAROUND - Update image references to use local registry
  ansible.builtin.replace:
    dest: "{{ item }}"
    regexp: '([ ]+image:[ "]+)(?!({{ _template.pattern }}|"{{ _template.pattern }}))'
    replace: '\1{{ _template.pattern }}'
  vars:
    fileglobs:
      - "{{ query('ansible.builtin.fileglob', '/opt/metacluster/cluster-api/cni-calico/' ~ components.clusterapi.workload.version.calico ~ '/*.yaml') }}"
      - "{{ query('ansible.builtin.fileglob', '/opt/metacluster/cluster-api/infrastructure-vsphere/' ~ components.clusterapi.management.version.infrastructure_vsphere ~ '/*.yaml') }}"
    _template:
      pattern: registry.{{ vapp['metacluster.fqdn'] }}/library/
  loop: "{{ fileglobs[0:] | flatten | select }}"
  loop_control:
    label: "{{ item | basename }}"
  when:
    - item is not search("components.yaml|metadata.yaml")

- name: Generate kustomization template
- name: Generate cluster-template kustomization manifest
  ansible.builtin.template:
    src: kustomization.cluster-template.j2
    dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/kustomization.yaml
@@ -78,13 +61,13 @@
      network:
        fqdn: "{{ vapp['metacluster.fqdn'] }}"
        dnsserver: "{{ vapp['guestinfo.dnsserver'] }}"
      nodesize:
        cpu: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].cpu }}"
        memory: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].memory }}"
      rootca: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
      # script:
      #   # Base64 encoded; to avoid variable substitution when clusterctl parses the cluster-template.yml
      #   encoded: IyEvYmluL2Jhc2gKdm10b29sc2QgLS1jbWQgJ2luZm8tZ2V0IGd1ZXN0aW5mby5vdmZFbnYnID4gL3RtcC9vdmZlbnYKCklQQWRkcmVzcz0kKHNlZCAtbiAncy8uKlByb3BlcnR5IG9lOmtleT0iZ3Vlc3RpbmZvLmludGVyZmFjZS4wLmlwLjAuYWRkcmVzcyIgb2U6dmFsdWU9IlwoW14iXSpcKS4qL1wxL3AnIC90bXAvb3ZmZW52KQpTdWJuZXRNYXNrPSQoc2VkIC1uICdzLy4qUHJvcGVydHkgb2U6a2V5PSJndWVzdGluZm8uaW50ZXJmYWNlLjAuaXAuMC5uZXRtYXNrIiBvZTp2YWx1ZT0iXChbXiJdKlwpLiovXDEvcCcgL3RtcC9vdmZlbnYpCkdhdGV3YXk9JChzZWQgLW4gJ3MvLipQcm9wZXJ0eSBvZTprZXk9Imd1ZXN0aW5mby5pbnRlcmZhY2UuMC5yb3V0ZS4wLmdhdGV3YXkiIG9lOnZhbHVlPSJcKFteIl0qXCkuKi9cMS9wJyAvdG1wL292ZmVudikKRE5TPSQoc2VkIC1uICdzLy4qUHJvcGVydHkgb2U6a2V5PSJndWVzdGluZm8uZG5zLnNlcnZlcnMiIG9lOnZhbHVlPSJcKFteIl0qXCkuKi9cMS9wJyAvdG1wL292ZmVudikKTUFDQWRkcmVzcz0kKHNlZCAtbiAncy8uKnZlOkFkYXB0ZXIgdmU6bWFjPSJcKFteIl0qXCkuKi9cMS9wJyAvdG1wL292ZmVudikKCm1hc2syY2lkcigpIHsKICBjPTAKICB4PTAkKCBwcmludGYgJyVvJyAkezEvLy4vIH0gKQoKICB3aGlsZSBbICR4IC1ndCAwIF07IGRvCiAgICBsZXQgYys9JCgoeCUyKSkgJ3g+Pj0xJwogIGRvbmUKCiAgZWNobyAkYwp9CgpQcmVmaXg9JChtYXNrMmNpZHIgJFN1Ym5ldE1hc2spCgpjYXQgPiAvZXRjL25ldHBsYW4vMDEtbmV0Y2ZnLnlhbWwgPDxFT0YKbmV0d29yazoKICB2ZXJzaW9uOiAyCiAgcmVuZGVyZXI6IG5ldHdvcmtkCiAgZXRoZXJuZXRzOgogICAgaWQwOgogICAgICBzZXQtbmFtZTogZXRoMAogICAgICBtYXRjaDoKICAgICAgICBtYWNhZGRyZXNzOiAkTUFDQWRkcmVzcwogICAgICBhZGRyZXNzZXM6CiAgICAgICAgLSAkSVBBZGRyZXNzLyRQcmVmaXgKICAgICAgZ2F0ZXdheTQ6ICRHYXRld2F5CiAgICAgIG5hbWVzZXJ2ZXJzOgogICAgICAgIGFkZHJlc3NlcyA6IFskRE5TXQpFT0YKcm0gL2V0Yy9uZXRwbGFuLzUwKi55YW1sIC1mCgpzdWRvIG5ldHBsYW4gYXBwbHk=
      runcmds:
        - update-ca-certificates
        # - bash /root/network.sh
      registries: "{{ source_registries }}"

- name: Store custom cluster-template
  ansible.builtin.copy:
@@ -102,7 +85,7 @@
        --kubeconfig {{ kubeconfig.path }}
    chdir: /opt/metacluster/cluster-api

- name: Ensure CAPI/CAPV controller availability
- name: Ensure controller availability
  kubernetes.core.k8s_info:
    kind: Deployment
    name: "{{ item.name }}"
@@ -110,6 +93,8 @@
    wait: true
    kubeconfig: "{{ kubeconfig.path }}"
  loop:
    - name: caip-in-cluster-controller-manager
      namespace: caip-in-cluster-system
    - name: capi-controller-manager
      namespace: capi-system
    - name: capv-controller-manager
@@ -122,7 +107,8 @@
    clustersize: >-
      {{ {
        'controlplane': vapp['deployment.type'] | regex_findall('^cp(\d)+') | first,
        'workers': vapp['deployment.type'] | regex_findall('w(\d)+$') | first
        'worker': vapp['deployment.type'] | regex_findall('w(\d)+') | first,
        'workerstorage': vapp['deployment.type'] | regex_findall('ws(\d)+$') | first
      } }}

- name: Generate workload cluster manifest
@@ -131,41 +117,51 @@
      clusterctl generate cluster \
        {{ vapp['workloadcluster.name'] | lower }} \
        --control-plane-machine-count {{ clustersize.controlplane }} \
        --worker-machine-count {{ clustersize.workers }} \
        --worker-machine-count {{ clustersize.worker }} \
        --from ./custom-cluster-template.yaml \
        --config ./clusterctl.yaml \
        --kubeconfig {{ kubeconfig.path }}
    chdir: /opt/metacluster/cluster-api
  register: clusterctl_newcluster

- name: Initialize tempfile
- name: Initialize tempfolder
  ansible.builtin.tempfile:
    state: file
    state: directory
  register: capi_clustermanifest

- name: Save workload cluster manifest
  ansible.builtin.copy:
    dest: "{{ capi_clustermanifest.path }}"
    dest: "{{ capi_clustermanifest.path }}/new-cluster.yaml"
    content: "{{ clusterctl_newcluster.stdout }}"

- name: Split manifest into separate files
  ansible.builtin.shell:
    cmd: >-
      kubectl slice \
        -f {{ capi_clustermanifest.path }} \
        -o /opt/metacluster/cluster-api/new-cluster
        -f {{ capi_clustermanifest.path }}/new-cluster.yaml \
        -o {{ capi_clustermanifest.path }}/manifests

- name: Cleanup tempfile
  ansible.builtin.file:
    path: "{{ capi_clustermanifest.path }}"
    state: absent
  when: capi_clustermanifest.path is defined
- name: Generate nodepool kustomization manifest
  ansible.builtin.template:
    src: kustomization.nodepool.j2
    dest: "{{ capi_clustermanifest.path }}/kustomization.yaml"
  vars:
    _template:
      cluster:
        name: "{{ vapp['workloadcluster.name'] }}"
      nodepool:
        size: "{{ clustersize.workerstorage }}"
        additionaldisk: "{{ vapp['workloadcluster.additionaldisk'] }}"

- name: Store nodepool manifest
  ansible.builtin.copy:
    dest: "{{ capi_clustermanifest.path }}/manifests/nodepool-worker-storage.yaml"
    content: "{{ lookup('kubernetes.core.kustomize', dir=capi_clustermanifest.path) }}"

- name: Create in-cluster IpPool
  kubernetes.core.k8s:
    template: ippool.j2
    state: present
    kubeconfig: "{{ kubeconfig.path }}"
  ansible.builtin.template:
    src: ippool.j2
    dest: "{{ capi_clustermanifest.path }}/manifests/inclusterippool-{{ _template.cluster.name }}.yml"
  vars:
    _template:
      cluster:
@@ -177,6 +173,40 @@
          prefix: "{{ vapp['guestinfo.prefixlength'] }}"
          gateway: "{{ vapp['guestinfo.gateway'] }}"

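`ippool.j2` is not shown in this diff either; with the `_template` network values (addresses, prefix, gateway) and the `ipam-in-cluster` provider staged earlier, it presumably renders an `InClusterIPPool` roughly like this sketch (API group/version assumed from the upstream cluster-api-ipam-provider-in-cluster project):

```yaml
# Hypothetical rendering of ippool.j2
apiVersion: ipam.cluster.x-k8s.io/v1alpha2
kind: InClusterIPPool
metadata:
  name: inclusterippool-<workloadcluster.name>
spec:
  addresses:
    - 192.0.2.10-192.0.2.50   # placeholder range; built from the vApp ippool properties
  prefix: 24                  # {{ vapp['guestinfo.prefixlength'] }}
  gateway: 192.0.2.1          # {{ vapp['guestinfo.gateway'] }}
```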
- name: Initialize/Push git repository
  ansible.builtin.shell:
    cmd: |
      git init
      git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}"
      git config --global user.name "administrator"
      git checkout -b main
      git add ./manifests
      git commit -m "Upload manifests"
      git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
      git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git --all
    chdir: "{{ capi_clustermanifest.path }}"

- name: Cleanup tempfolder
  ansible.builtin.file:
    path: "{{ capi_clustermanifest.path }}"
    state: absent
  when: capi_clustermanifest.path is defined

- name: Configure Cluster API repository
  ansible.builtin.template:
    src: gitrepo.j2
    dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
    owner: root
    group: root
    mode: 0600
  vars:
    _template:
      name: gitrepo-mc-gitopsclusterapi
      namespace: argo-cd
      url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
  notify:
    - Apply manifests

- name: WORKAROUND - Wait for ingress ACME requests to complete
  ansible.builtin.shell:
    cmd: >-
@@ -188,13 +218,30 @@
  retries: "{{ playbook.retries }}"
  delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"

- name: Apply workload cluster manifest
  kubernetes.core.k8s:
    definition: >-
      {{ clusterctl_newcluster.stdout }}
    wait: true
    kubeconfig: "{{ kubeconfig.path }}"
# TODO: move to git repo
- name: Create application
  ansible.builtin.template:
    src: application.j2
    dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
    owner: root
    group: root
    mode: 0600
  vars:
    _template:
      application:
        name: application-clusterapi-workloadcluster
        namespace: argo-cd
      cluster:
        name: https://kubernetes.default.svc
        namespace: default
      repository:
        url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
        path: manifests
        revision: main
  notify:
    - Apply manifests

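`application.j2` (used by the new "Create application" task above) is also not part of this diff; from the `_template` values it presumably renders an Argo CD Application that points the ClusterAPI manifests repository at the metacluster itself, roughly as follows (a sketch, not the actual template):

```yaml
# Hypothetical rendering of application.j2 for application-clusterapi-workloadcluster
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: application-clusterapi-workloadcluster
  namespace: argo-cd
spec:
  project: default
  source:
    repoURL: https://git.<metacluster.fqdn>/mc/GitOps.ClusterAPI.git   # _template.repository.url
    targetRevision: main                                               # _template.repository.revision
    path: manifests                                                    # _template.repository.path
  destination:
    server: https://kubernetes.default.svc                             # _template.cluster.name
    namespace: default                                                 # _template.cluster.namespace
```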
- name: Trigger handlers
  ansible.builtin.meta: flush_handlers

- name: Wait for cluster to be available
  ansible.builtin.shell:

@@ -1,37 +1,105 @@
- block:
- name: Aggregate helm charts from filesystem
  ansible.builtin.find:
    path: /opt/workloadcluster/helm-charts
    file_type: directory
    recurse: false
  register: helm_charts

    - name: Generate service account in workload cluster
      kubernetes.core.k8s:
        template: serviceaccount.j2
        state: present
- name: Create hard-links to populate new git-repository
  ansible.builtin.shell:
    cmd: >-
      cp -lr {{ item.path }}/ /opt/workloadcluster/git-repositories/gitops/charts
  loop: "{{ helm_charts.files }}"
  loop_control:
    label: "{{ item.path | basename }}"

    - name: Retrieve service account bearer token
      kubernetes.core.k8s_info:
        kind: Secret
        name: "{{ _template.account.name }}-secret"
        namespace: "{{ _template.account.namespace }}"
      register: workloadcluster_bearertoken
- name: Create subfolders
  ansible.builtin.file:
    path: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}
    state: directory
  loop: "{{ query('ansible.builtin.dict', downstream_components) }}"
  loop_control:
    label: "{{ item.key }}"

    - name: Register workload cluster in argo-cd
      kubernetes.core.k8s:
        template: cluster.j2
        state: present
        kubeconfig: "{{ kubeconfig.path }}"
      vars:
        _template:
          cluster:
            name: "{{ vapp['workloadcluster.name'] | lower }}"
            secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }}
            url: https://{{ vapp['workloadcluster.vip'] }}:6443
            token: "{{ workloadcluster_bearertoken.resources | json_query('[].data.token') }}"
- name: Write chart values to file
  ansible.builtin.copy:
    dest: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}/values.yaml
    content: "{{ item.value.chart_values | default('# Empty') | to_nice_yaml(indent=2, width=4096) }}"
  loop: "{{ query('ansible.builtin.dict', downstream_components) }}"
  loop_control:
    label: "{{ item.key }}"

- name: Initialize/Push git repository
  ansible.builtin.shell:
    cmd: |
      git init
      git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}"
      git config --global user.name "administrator"
      git checkout -b main
      git add .
      git commit -m "Upload charts"
      git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
      git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git --all
    chdir: /opt/workloadcluster/git-repositories/gitops

- name: Retrieve workload-cluster kubeconfig
  kubernetes.core.k8s_info:
    kind: Secret
    name: "{{ vapp['workloadcluster.name'] }}-kubeconfig"
    namespace: default
    kubeconfig: "{{ kubeconfig.path }}"
  register: secret_workloadcluster_kubeconfig

- name: Register workload-cluster in argo-cd
  kubernetes.core.k8s:
    template: cluster.j2
    state: present
    kubeconfig: "{{ kubeconfig.path }}"
  vars:
    _template:
      account:
        name: argocd-sa
        namespace: default
      clusterrolebinding:
        name: argocd-crb
  module_defaults:
    group/k8s:
      kubeconfig: "{{ capi_kubeconfig.path }}"
      cluster:
        name: "{{ vapp['workloadcluster.name'] | lower }}"
        secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }}
        url: https://{{ vapp['workloadcluster.vip'] }}:6443
      kubeconfig:
        ca: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).clusters[0].cluster['certificate-authority-data'] }}"
        certificate: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-certificate-data'] }}"
        key: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-key-data'] }}"

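The reworked registration task feeds `cluster.j2` the CA, client certificate and key pulled from the CAPI-generated kubeconfig secret; in Argo CD's declarative format that typically ends up as a cluster Secret of roughly this shape (a sketch under that assumption; the actual template may also create the ServiceAccount and ClusterRoleBinding named above):

```yaml
# Hypothetical cluster-registration secret rendered by cluster.j2
apiVersion: v1
kind: Secret
metadata:
  name: argocd-cluster-<workloadcluster.name>   # _template.cluster.secret
  namespace: argo-cd
  labels:
    argocd.argoproj.io/secret-type: cluster
stringData:
  name: <workloadcluster.name>                  # _template.cluster.name
  server: https://<workloadcluster.vip>:6443    # _template.cluster.url
  config: |
    {
      "tlsClientConfig": {
        "caData": "<_template.kubeconfig.ca>",
        "certData": "<_template.kubeconfig.certificate>",
        "keyData": "<_template.kubeconfig.key>"
      }
    }
```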
- name: Configure workload-cluster GitOps repository
  ansible.builtin.template:
    src: gitrepo.j2
    dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
    owner: root
    group: root
    mode: 0600
  vars:
    _template:
      name: gitrepo-wl-gitopsconfig
      namespace: argo-cd
      url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
  notify:
    - Apply manifests

- name: Create applicationset
  ansible.builtin.template:
    src: applicationset.j2
    dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
    owner: root
    group: root
    mode: 0600
  vars:
    _template:
      application:
        name: applicationset-workloadcluster
        namespace: argo-cd
      cluster:
        url: https://{{ vapp['workloadcluster.vip'] }}:6443
      repository:
        url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
        revision: main
  notify:
    - Apply manifests

- name: Trigger handlers
  ansible.builtin.meta: flush_handlers

@@ -55,21 +55,3 @@
  loop: "{{ govc_inventory.results }}"
  loop_control:
    label: "{{ item.item.attribute }}"

# - name: Configure network protocol profile on hypervisor
#   ansible.builtin.shell:
#     cmd: >-
#       npp-prepper \
#         --server "{{ vapp['hv.fqdn'] }}" \
#         --username "{{ vapp['hv.username'] }}" \
#         --password "{{ vapp['hv.password'] }}" \
#         dc \
#         --name "{{ vcenter_info.datacenter }}" \
#         --portgroup "{{ vcenter_info.network }}" \
#         --startaddress {{ vapp['ippool.startip'] }} \
#         --endaddress {{ vapp['ippool.endip'] }} \
#         --netmask {{ (vapp['guestinfo.ipaddress'] ~ '/' ~ vapp['guestinfo.prefixlength']) | ansible.utils.ipaddr('netmask') }} \
#         {{ vapp['guestinfo.dnsserver'] | split(',') | map('trim') | map('regex_replace', '^', '--dnsserver ') | join(' ') }} \
#         --dnsdomain {{ vapp['metacluster.fqdn'] }} \
#         --gateway {{ vapp['guestinfo.gateway'] }} \
#         --force

@@ -1,5 +1,11 @@
- import_tasks: hypervisor.yml
- import_tasks: registry.yml
- import_tasks: nodetemplates.yml
- import_tasks: clusterapi.yml
- import_tasks: gitops.yml

- block:

    - import_tasks: clusterapi.yml
    - import_tasks: gitops.yml

  when:
    - vapp['deployment.type'] != 'core'

@@ -1,81 +1,68 @@
- block:

    - name: Check for existing templates on hypervisor
    - name: Check for existing template on hypervisor
      community.vmware.vmware_guest_info:
        name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
        name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
      register: existing_ova
      loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
      ignore_errors: yes

    - name: Parse OVA files for network mappings
      ansible.builtin.shell:
        cmd: govc import.spec -json {{ item }}
      environment:
        GOVC_INSECURE: '1'
        GOVC_URL: "{{ vapp['hv.fqdn'] }}"
        GOVC_USERNAME: "{{ vapp['hv.username'] }}"
        GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
      register: ova_spec
      when: existing_ova.results[index] is failed
      loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
      loop_control:
        index_var: index
    - name: Store inventory path of existing template
      ansible.builtin.set_fact:
        nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
      when: existing_ova is not failed

    - name: Deploy OVA templates on hypervisor
      community.vmware.vmware_deploy_ovf:
        cluster: "{{ vcenter_info.cluster }}"
        datastore: "{{ vcenter_info.datastore }}"
        folder: "{{ vcenter_info.folder }}"
        name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
        networks: "{u'{{ ova_spec.results[index].stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
        allow_duplicates: no
        power_on: false
        ovf: "{{ item }}"
      register: ova_deploy
      when: existing_ova.results[index] is failed
      loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
      loop_control:
        index_var: index
    - block:

    - name: Add vApp properties on deployed VM's
      ansible.builtin.shell:
        cmd: >-
          npp-prepper \
            --server "{{ vapp['hv.fqdn'] }}" \
            --username "{{ vapp['hv.username'] }}" \
            --password "{{ vapp['hv.password'] }}" \
            vm \
            --datacenter "{{ vcenter_info.datacenter }}" \
            --portgroup "{{ vcenter_info.network }}" \
            --name "{{ item.instance.hw_name }}"
      when: existing_ova.results[index] is failed
      loop: "{{ ova_deploy.results }}"
      loop_control:
        index_var: index
        label: "{{ item.item }}"
        - name: Parse OVA file for network mappings
          ansible.builtin.shell:
            cmd: govc import.spec -json {{ filename }}
          environment:
            GOVC_INSECURE: '1'
            GOVC_URL: "{{ vapp['hv.fqdn'] }}"
            GOVC_USERNAME: "{{ vapp['hv.username'] }}"
            GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
          register: ova_spec

    - name: Create snapshot on deployed VM's
      community.vmware.vmware_guest_snapshot:
        folder: "{{ vcenter_info.folder }}"
        name: "{{ item.instance.hw_name }}"
        state: present
        snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
      when: ova_deploy.results[index] is not skipped
      loop: "{{ ova_deploy.results }}"
      loop_control:
        index_var: index
        label: "{{ item.item }}"
        - name: Deploy OVA template on hypervisor
          community.vmware.vmware_deploy_ovf:
            cluster: "{{ vcenter_info.cluster }}"
            datastore: "{{ vcenter_info.datastore }}"
            name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
            networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
            allow_duplicates: no
            power_on: false
            ovf: "{{ filename }}"
          register: ova_deploy

    - name: Mark deployed VM's as templates
      community.vmware.vmware_guest:
        name: "{{ item.instance.hw_name }}"
        is_template: yes
      when: ova_deploy.results[index] is not skipped
      loop: "{{ ova_deploy.results }}"
      loop_control:
        index_var: index
        label: "{{ item.item }}"
        - name: Add additional placeholder disk
          community.vmware.vmware_guest_disk:
            name: "{{ ova_deploy.instance.hw_name }}"
            disk:
              - size: 1Mb
                scsi_controller: 1
                scsi_type: paravirtual
                unit_number: 0

        # Disabled to allow disks to be resized; at the cost of cloning speed
        # - name: Create snapshot on deployed VM
        #   community.vmware.vmware_guest_snapshot:
        #     name: "{{ ova_deploy.instance.hw_name }}"
        #     state: present
        #     snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"

        - name: Mark deployed VM as templates
          community.vmware.vmware_guest:
            name: "{{ ova_deploy.instance.hw_name }}"
            is_template: yes

        - name: Store inventory path of deployed template
          ansible.builtin.set_fact:
            nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"

      when: existing_ova is failed

  vars:
    filename: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | first }}"
  module_defaults:
    group/vmware:
      hostname: "{{ vapp['hv.fqdn'] }}"
@@ -83,3 +70,4 @@
 | 
			
		||||
      username: "{{ vapp['hv.username'] }}"
 | 
			
		||||
      password: "{{ vapp['hv.password'] }}"
 | 
			
		||||
      datacenter: "{{ vcenter_info.datacenter }}"
 | 
			
		||||
      folder: "{{ vcenter_info.folder }}"
 | 
			
		||||
 
 | 
			
		||||
@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: {{ _template.application.name }}
  namespace: {{ _template.application.namespace }}
spec:
  destination:
    namespace: {{ _template.cluster.namespace }}
    server: {{ _template.cluster.name }}
  project: default
  source:
    repoURL: {{ _template.repository.url }}
    path: {{ _template.repository.path }}
    targetRevision: {{ _template.repository.revision }}
  syncPolicy:
    automated: {}

@@ -1,28 +1,33 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: {{ _template.name }}
  namespace: {{ _template.namespace }}
  name: {{ _template.application.name }}
  namespace: {{ _template.application.namespace }}
spec:
  generators:
  - git:
      repoURL: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git
      revision: HEAD
      repoURL: {{ _template.repository.url }}
      revision: {{ _template.repository.revision }}
      directories:
      - path: metacluster-applicationset/*
      - path: charts/*/*
  template:
    metadata:
      name: {% raw %}'{{ path.basename }}'{% endraw +%}
      name: application-{% raw %}{{ path.basename }}{% endraw +%}
    spec:
      project: default
      syncPolicy:
        automated:
          prune: true
          selfHeal: true
      source:
        repoURL: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git
        targetRevision: HEAD
        syncOptions:
        - CreateNamespace=true
      sources:
      - repoURL: {{ _template.repository.url }}
        targetRevision: {{ _template.repository.revision }}
        path: {% raw %}'{{ path }}'{% endraw +%}
        helm:
          valueFiles:
          - /values/{% raw %}{{ path.basename }}{% endraw %}/values.yaml
      destination:
        server: https://kubernetes.default.svc
        namespace: default
        server: {{ _template.cluster.url }}
        namespace: {% raw %}'{{ path[1] }}'{% endraw +%}

@@ -11,8 +11,10 @@ stringData:
  server: {{ _template.cluster.url }}
  config: |
    {
      "bearerToken": "{{ _template.cluster.token }}",
      "tlsClientConfig": {
        "insecure": true
        "insecure": false,
        "caData": "{{ _template.kubeconfig.ca }}",
        "certData": "{{ _template.kubeconfig.certificate }}",
        "keyData": "{{ _template.kubeconfig.key }}"
      }
    }

@@ -1,13 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ _template.name }}-{{ _template.uid }}
  name: {{ _template.name }}
  namespace: {{ _template.namespace }}
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  url: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git
  name: {{ _template.name }}
  insecure: 'true'
  sshPrivateKey: |
{{ _template.privatekey }}
  url: {{ _template.url }}

@@ -4,6 +4,34 @@ resources:
- cluster-template.yaml

patchesStrategicMerge:
  - |-
    apiVersion: v1
    kind: Secret
    metadata:
      name: csi-vsphere-config
      namespace: '${NAMESPACE}'
    stringData:
      data: |
        apiVersion: v1
        kind: Secret
        metadata:
          name: csi-vsphere-config
          namespace: kube-system
        stringData:
          csi-vsphere.conf: |+
            [Global]
            insecure-flag = true
            thumbprint = "${VSPHERE_TLS_THUMBPRINT}"
            cluster-id = "${NAMESPACE}/${CLUSTER_NAME}"

            [VirtualCenter "${VSPHERE_SERVER}"]
            user = "${VSPHERE_USERNAME}"
            password = "${VSPHERE_PASSWORD}"
            datacenters = "${VSPHERE_DATACENTER}"

            [Network]
            public-network = "${VSPHERE_NETWORK}"
        type: Opaque
  - |-
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
@@ -13,7 +41,7 @@ patchesStrategicMerge:
    spec:
      kubeadmConfigSpec:
        clusterConfiguration:
          imageRepository: registry.{{ _template.fqdn }}/kubeadm
          imageRepository: registry.{{ _template.network.fqdn }}/kubeadm
  - |-
    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
    kind: KubeadmConfigTemplate
@@ -24,7 +52,7 @@ patchesStrategicMerge:
      template:
        spec:
          clusterConfiguration:
            imageRepository: registry.{{ _template.fqdn }}/kubeadm
            imageRepository: registry.{{ _template.network.fqdn }}/kubeadm
  - |-
    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
    kind: KubeadmConfigTemplate
@@ -35,6 +63,21 @@ patchesStrategicMerge:
      template:
        spec:
          files:
          - content: |
                [plugins."io.containerd.grpc.v1.cri".registry]
                  config_path = "/etc/containerd/certs.d"
            append: true
            path: /etc/containerd/config.toml
{% for registry in _template.registries %}
          - content: |
              server = "https://{{ registry }}"

              [host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"]
                capabilities = ["pull", "resolve"]
                override_path = true
            owner: root:root
            path: /etc/containerd/certs.d/{{ registry }}/hosts.toml
{% endfor %}
          - content: |
              network: {config: disabled}
            owner: root:root
@@ -59,6 +102,8 @@ patchesStrategicMerge:
              - apiGroup: ipam.cluster.x-k8s.io
                kind: InClusterIPPool
                name: inclusterippool-${CLUSTER_NAME}
              nameservers:
              - {{ _template.network.dnsserver }}
              networkName: '${VSPHERE_NETWORK}'
  - |-
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
@@ -87,6 +132,27 @@ patchesJson6902:
      kind: KubeadmControlPlane
      name: .*
    patch: |-
      - op: add
        path: /spec/kubeadmConfigSpec/files/-
        value:
          content: |
              [plugins."io.containerd.grpc.v1.cri".registry]
                config_path = "/etc/containerd/certs.d"
          append: true
          path: /etc/containerd/config.toml
{% for registry in _template.registries %}
      - op: add
        path: /spec/kubeadmConfigSpec/files/-
        value:
          content: |
            server = "https://{{ registry }}"

            [host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"]
              capabilities = ["pull", "resolve"]
              override_path = true
          owner: root:root
          path: /etc/containerd/certs.d/{{ registry }}/hosts.toml
{% endfor %}
      - op: add
        path: /spec/kubeadmConfigSpec/files/-
        value:
@@ -123,3 +189,68 @@ patchesJson6902:
        path: /spec/kubeadmConfigSpec/preKubeadmCommands/-
        value: {{ cmd }}
{% endfor %}

  - target:
      group: infrastructure.cluster.x-k8s.io
      version: v1beta1
      kind: VSphereMachineTemplate
      name: \${CLUSTER_NAME}
    patch: |-
      - op: replace
        path: /metadata/name
        value: ${CLUSTER_NAME}-master
  - target:
      group: controlplane.cluster.x-k8s.io
      version: v1beta1
      kind: KubeadmControlPlane
      name: \${CLUSTER_NAME}
    patch: |-
      - op: replace
        path: /metadata/name
        value: ${CLUSTER_NAME}-master
      - op: replace
        path: /spec/machineTemplate/infrastructureRef/name
        value: ${CLUSTER_NAME}-master
  - target:
      group: cluster.x-k8s.io
      version: v1beta1
      kind: Cluster
      name: \${CLUSTER_NAME}
    patch: |-
      - op: replace
        path: /spec/controlPlaneRef/name
        value: ${CLUSTER_NAME}-master

  - target:
      group: infrastructure.cluster.x-k8s.io
      version: v1beta1
      kind: VSphereMachineTemplate
      name: \${CLUSTER_NAME}-worker
    patch: |-
      - op: replace
        path: /spec/template/spec/numCPUs
        value: {{ _template.nodesize.cpu }}
      - op: replace
        path: /spec/template/spec/memoryMiB
        value: {{ _template.nodesize.memory }}
  - target:
      group: cluster.x-k8s.io
      version: v1beta1
      kind: MachineDeployment
      name: \${CLUSTER_NAME}-md-0
    patch: |-
      - op: replace
        path: /metadata/name
        value: ${CLUSTER_NAME}-worker
      - op: replace
        path: /spec/template/spec/bootstrap/configRef/name
        value: ${CLUSTER_NAME}-worker
  - target:
      group: bootstrap.cluster.x-k8s.io
      version: v1beta1
      kind: KubeadmConfigTemplate
      name: \${CLUSTER_NAME}-md-0
    patch: |-
      - op: replace
        path: /metadata/name
        value: ${CLUSTER_NAME}-worker

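[Editor's note, not part of the diff: a minimal sketch of the per-registry containerd hosts.toml that the Jinja2 loop above might render, assuming a hypothetical registry `docker.io` in `_template.registries` and `meta.local` as `_template.network.fqdn`:

    server = "https://docker.io"

    [host."https://registry.meta.local/v2/library/docker.io"]
      capabilities = ["pull", "resolve"]
      override_path = true

With `override_path = true`, containerd uses the given path as-is, so pulls are redirected into the Harbor proxy-cache project path rather than the default upstream registry layout.]
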
@@ -0,0 +1,84 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- manifests/kubeadmconfigtemplate-{{ _template.cluster.name }}-worker.yaml
- manifests/machinedeployment-{{ _template.cluster.name }}-worker.yaml
- manifests/vspheremachinetemplate-{{ _template.cluster.name }}-worker.yaml

patchesStrategicMerge:
  - |-
    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
    kind: KubeadmConfigTemplate
    metadata:
      name: {{ _template.cluster.name }}-worker
      namespace: default
    spec:
      template:
        spec:
          diskSetup:
            filesystems:
            - device: /dev/sdb1
              filesystem: ext4
              label: blockstorage
            partitions:
            - device: /dev/sdb
              layout: true
              tableType: gpt
          joinConfiguration:
            nodeRegistration:
              kubeletExtraArgs:
                node-labels: "node.longhorn.io/create-default-disk=true"
          mounts:
          - - LABEL=blockstorage
            - /mnt/blockstorage
  - |-
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: VSphereMachineTemplate
    metadata:
      name: {{ _template.cluster.name }}-worker
      namespace: default
    spec:
      template:
        spec:
          additionalDisksGiB:
          - {{ _template.nodepool.additionaldisk }}

patchesJson6902:
  - target:
      group: bootstrap.cluster.x-k8s.io
      version: v1beta1
      kind: KubeadmConfigTemplate
      name: {{ _template.cluster.name }}-worker
    patch: |-
      - op: replace
        path: /metadata/name
        value: {{ _template.cluster.name }}-worker-storage

  - target:
      group: cluster.x-k8s.io
      version: v1beta1
      kind: MachineDeployment
      name: {{ _template.cluster.name }}-worker
    patch: |-
      - op: replace
        path: /metadata/name
        value: {{ _template.cluster.name }}-worker-storage
      - op: replace
        path: /spec/template/spec/bootstrap/configRef/name
        value: {{ _template.cluster.name }}-worker-storage
      - op: replace
        path: /spec/template/spec/infrastructureRef/name
        value: {{ _template.cluster.name }}-worker-storage
      - op: replace
        path: /spec/replicas
        value: {{ _template.nodepool.size }}

  - target:
      group: infrastructure.cluster.x-k8s.io
      version: v1beta1
      kind: VSphereMachineTemplate
      name: {{ _template.cluster.name }}-worker
    patch: |-
      - op: replace
        path: /metadata/name
        value: {{ _template.cluster.name }}-worker-storage

@@ -1,27 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ _template.account.name }}
  namespace: {{ _template.account.namespace }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ _template.account.name }}-secret
  namespace: {{ _template.account.namespace }}
  annotations:
    kubernetes.io/service-account.name: {{ _template.account.name }}
type: kubernetes.io/service-account-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ _template.clusterrolebinding.name }}
subjects:
- kind: ServiceAccount
  name: {{ _template.account.name }}
  namespace: {{ _template.account.namespace }}
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

@@ -28,6 +28,10 @@
  ansible.builtin.set_fact:
    storage_benchmark: "{{ [storage_benchmark, (end_time | int - start_time | int)] | max }}"

- name: Log benchmark actual duration
  ansible.builtin.debug:
    msg: "Benchmark actual duration: {{ (end_time | int - start_time | int) }} second(s)"

- name: Mount dynamic disk
  ansible.posix.mount:
    path: /mnt/blockstorage

@@ -19,6 +19,25 @@
        executable: /opt/firstboot/tty.sh
        workingdir: /tmp/
      metacluster:
        components:
          - name: ArgoCD
            url: https://gitops.${FQDN}
            healthcheck: https://gitops.${FQDN}
          - name: Gitea
            url: https://git.${FQDN}
            healthcheck: https://git.${FQDN}
          - name: Harbor
            url: https://registry.${FQDN}
            healthcheck: https://registry.${FQDN}
          - name: Longhorn
            url: https://storage.${FQDN}
            healthcheck: https://storage.${FQDN}
          - name: StepCA
            url: ''
            healthcheck: https://ca.${FQDN}/health
          - name: Traefik
            url: https://ingress.${FQDN}
            healthcheck: https://ingress.${FQDN}
        fqdn: "{{ vapp['metacluster.fqdn'] }}"
        vip: "{{ vapp['metacluster.vip'] }}"
  loop:

@@ -1,8 +1,8 @@
mirrors:
{% for entry in _template.data %}
  {{ entry }}:
{% for registry in _template.registries %}
  {{ registry }}:
    endpoint:
      - https://registry.{{ _template.hv.fqdn }}
    rewrite:
      "(.*)": "library/{{ entry }}/$1"
      "(.*)": "library/{{ registry }}/$1"
{% endfor %}

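[Editor's note, not part of the diff: a minimal sketch of what the k3s registries template above might render to for a single hypothetical registry `docker.io`, assuming `hv.local` as the hypervisor FQDN:

    mirrors:
      docker.io:
        endpoint:
          - https://registry.hv.local
        rewrite:
          "(.*)": "library/docker.io/$1"

The rewrite rule prefixes every image path with the Harbor proxy-cache project for that upstream registry.]
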
@@ -12,11 +12,14 @@ DFLT='\033[0m'        # Reset colour
LCLR='\033[K'         # Clear to end of line
PRST='\033[0;0H'      # Reset cursor position

# COMPONENTS=('ca' 'ingress' 'storage' 'registry' 'git' 'gitops')
COMPONENTS=('storage' 'registry' 'git' 'gitops')
FQDN='{{ _template.metacluster.fqdn }}'
IPADDRESS='{{ _template.metacluster.vip }}'

declare -A COMPONENTS
{% for component in _template.metacluster.components %}
COMPONENTS["{{ component.name }}\t({{ component.url }})"]="{{ component.healthcheck }}"
{% endfor %}

I=0

while /bin/true; do
@@ -30,13 +33,13 @@ while /bin/true; do
  echo -e "${PRST}" > /dev/tty1
  echo -e "\n\n\t${DFLT}To manage this appliance, please connect to one of the following:${LCLR}\n" > /dev/tty1

  for c in "${COMPONENTS[@]}"; do
    STATUS=$(curl -ks "https://${c}.${FQDN}" -o /dev/null -w '%{http_code}')
  for c in $( echo "${!COMPONENTS[@]}" | tr ' ' $'\n' | sort); do
    STATUS=$(curl -kLs "${COMPONENTS[${c}]}" -o /dev/null -w '%{http_code}')

    if [[ "${STATUS}" -eq "200" ]]; then
      echo -e "\t [${BGRN}+${DFLT}] ${BBLU}https://${c}.${FQDN}${DFLT}${LCLR}" > /dev/tty1
      echo -e "\t [${BGRN}+${DFLT}] ${BBLU}${c}${DFLT}${LCLR}" > /dev/tty1
    else
      echo -e "\t [${BRED}-${DFLT}] ${BBLU}https://${c}.${FQDN}${DFLT}${LCLR}" > /dev/tty1
      echo -e "\t [${BRED}-${DFLT}] ${BBLU}${c}${DFLT}${LCLR}" > /dev/tty1
    fi
  done

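[Editor's note, not part of the diff: each iteration of the Jinja2 loop above emits one associative-array entry; for a hypothetical component named ArgoCD with URL and healthcheck `https://gitops.meta.local`, the rendered line would look roughly like:

    COMPONENTS["ArgoCD\t(https://gitops.meta.local)"]="https://gitops.meta.local"

so the tty banner can sort by display label while probing the healthcheck URL stored as the value.]
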
@@ -1,5 +1,5 @@
playbook:
  retries: 5
  retries: 10
  delay:
    # These values are multiplied with the value of `storage_benchmark`
    long: 2
@@ -8,3 +8,16 @@ playbook:

# This default value is updated during the playbook, based on an I/O intensive operation
storage_benchmark: 30

config:
  clusterapi:
    size_matrix:
      small:
        cpu: 2
        memory: 6144
      medium:
        cpu: 4
        memory: 8192
      large:
        cpu: 8
        memory: 16384

@@ -22,6 +22,6 @@
      spec:
        numberOfReplicas: {{ (lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int) - 1 }}
    kubeconfig: "{{ kubeconfig.path }}"
  loop: "{{ lookup('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
  loop: "{{ query('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
  loop_control:
    label: "{{ item.metadata.name }}"

@@ -7,7 +7,7 @@
        release_namespace: gitea
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.gitea.chart_values }}"
        values: "{{ components['gitea'].chart_values }}"

    - name: Ensure gitea API availability
      ansible.builtin.uri:

@@ -7,7 +7,7 @@
        release_namespace: argo-cd
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.argocd.chart_values }}"
        values: "{{ components['argo-cd'].chart_values }}"

    - name: Ensure argo-cd API availability
      ansible.builtin.uri:

@@ -7,7 +7,7 @@
        release_namespace: harbor
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.harbor.chart_values }}"
        values: "{{ components['harbor'].chart_values }}"

    - name: Ensure harbor API availability
      ansible.builtin.uri:

@@ -11,7 +11,7 @@
          spec:
            numberOfReplicas: {{ lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int }}
        kubeconfig: "{{ kubeconfig.path }}"
      loop: "{{ lookup('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
      loop: "{{ query('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
      loop_control:
        label: "{{ item.metadata.name }}"

@@ -34,7 +34,7 @@
        release_namespace: longhorn-system
        wait: false
        kubeconfig: "{{ kubeconfig.path }}"
        values: "{{ components.longhorn.chart_values }}"
        values: "{{ components['longhorn'].chart_values }}"

    - name: Ensure longhorn API availability
      ansible.builtin.uri:

@@ -1,81 +1,68 @@
- block:

    - name: Check for existing templates on hypervisor
    - name: Check for existing template on hypervisor
      community.vmware.vmware_guest_info:
        name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
        name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
      register: existing_ova
      loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
      ignore_errors: yes

    - name: Parse OVA files for network mappings
      ansible.builtin.shell:
        cmd: govc import.spec -json {{ item }}
      environment:
        GOVC_INSECURE: '1'
        GOVC_URL: "{{ vapp['hv.fqdn'] }}"
        GOVC_USERNAME: "{{ vapp['hv.username'] }}"
        GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
      register: ova_spec
      when: existing_ova.results[index] is failed
      loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
      loop_control:
        index_var: index
    - name: Store inventory path of existing template
      ansible.builtin.set_fact:
        nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
      when: existing_ova is not failed

    - name: Deploy OVA templates on hypervisor
      community.vmware.vmware_deploy_ovf:
        cluster: "{{ vcenter_info.cluster }}"
        datastore: "{{ vcenter_info.datastore }}"
        folder: "{{ vcenter_info.folder }}"
        name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
        networks: "{u'{{ ova_spec.results[index].stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
        allow_duplicates: no
        power_on: false
        ovf: "{{ item }}"
      register: ova_deploy
      when: existing_ova.results[index] is failed
      loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
      loop_control:
        index_var: index
    - block:

    - name: Add vApp properties on deployed VM's
      ansible.builtin.shell:
        cmd: >-
          npp-prepper \
            --server "{{ vapp['hv.fqdn'] }}" \
            --username "{{ vapp['hv.username'] }}" \
            --password "{{ vapp['hv.password'] }}" \
            vm \
            --datacenter "{{ vcenter_info.datacenter }}" \
            --portgroup "{{ vcenter_info.network }}" \
            --name "{{ item.instance.hw_name }}"
      when: existing_ova.results[index] is failed
      loop: "{{ ova_deploy.results }}"
      loop_control:
        index_var: index
        label: "{{ item.item }}"
        - name: Parse OVA file for network mappings
          ansible.builtin.shell:
            cmd: govc import.spec -json {{ filename }}
          environment:
            GOVC_INSECURE: '1'
            GOVC_URL: "{{ vapp['hv.fqdn'] }}"
            GOVC_USERNAME: "{{ vapp['hv.username'] }}"
            GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
          register: ova_spec

    - name: Create snapshot on deployed VM's
      community.vmware.vmware_guest_snapshot:
        folder: "{{ vcenter_info.folder }}"
        name: "{{ item.instance.hw_name }}"
        state: present
        snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
      when: ova_deploy.results[index] is not skipped
      loop: "{{ ova_deploy.results }}"
      loop_control:
        index_var: index
        label: "{{ item.item }}"
        - name: Deploy OVA template on hypervisor
          community.vmware.vmware_deploy_ovf:
            cluster: "{{ vcenter_info.cluster }}"
            datastore: "{{ vcenter_info.datastore }}"
            name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
            networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
            allow_duplicates: no
            power_on: false
            ovf: "{{ filename }}"
          register: ova_deploy

    - name: Mark deployed VM's as templates
      community.vmware.vmware_guest:
        name: "{{ item.instance.hw_name }}"
        is_template: yes
      when: ova_deploy.results[index] is not skipped
      loop: "{{ ova_deploy.results }}"
      loop_control:
        index_var: index
        label: "{{ item.item }}"
        - name: Add additional placeholder disk
          community.vmware.vmware_guest_disk:
            name: "{{ ova_deploy.instance.hw_name }}"
            disk:
              - size: 1Gb
                scsi_controller: 1
                scsi_type: paravirtual
                unit_number: 0

        # Disabled to allow disks to be resized; at the cost of cloning speed
        # - name: Create snapshot on deployed VM
        #   community.vmware.vmware_guest_snapshot:
        #     name: "{{ ova_deploy.instance.hw_name }}"
        #     state: present
        #     snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"

        - name: Mark deployed VM as templates
          community.vmware.vmware_guest:
            name: "{{ ova_deploy.instance.hw_name }}"
            is_template: yes

        - name: Store inventory path of deployed template
          ansible.builtin.set_fact:
            nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"

      when: existing_ova is failed

  vars:
    filename: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/node-templates/*.ova') | first }}"
  module_defaults:
    group/vmware:
      hostname: "{{ vapp['hv.fqdn'] }}"
@@ -83,3 +70,4 @@
      username: "{{ vapp['hv.username'] }}"
      password: "{{ vapp['hv.password'] }}"
      datacenter: "{{ vcenter_info.datacenter }}"
      folder: "{{ vcenter_info.folder }}"

@@ -1,9 +1,5 @@
- block:

  - name: Lookup kubeadm container images
    ansible.builtin.set_fact:
      kubeadm_images: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"

  - name: Copy kubeadm container images to dedicated project
    ansible.builtin.uri:
      url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects/kubeadm/repositories/{{ ( item | regex_findall('([^:/]+)') )[-2] }}/artifacts?from=library/{{ item | replace('/', '%2F') | replace(':', '%3A') }}
@@ -12,7 +8,7 @@
        Authorization: "Basic {{ ('admin:' ~ vapp['metacluster.password']) | b64encode }}"
      body:
        from: "{{ item }}"
    loop: "{{ kubeadm_images }}"
    loop: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"

  module_defaults:
    ansible.builtin.uri:

@@ -1,13 +1,7 @@
platform:

  k3s:
    version: v1.26.1+k3s1

  gitops:
    repository:
      uri: https://code.spamasaurus.com/djpbessems/GitOps.MetaCluster.git
      # revision: v0.1.0
      revision: HEAD
    version: v1.25.9+k3s1

  packaged_components:
    - name: traefik
@@ -39,8 +33,12 @@ platform:
  helm_repositories:
    - name: argo
      url: https://argoproj.github.io/argo-helm
    - name: dex
      url: https://charts.dexidp.io
    - name: authentik
      url: https://charts.goauthentik.io
    # - name: codecentric
    #   url: https://codecentric.github.io/helm-charts
    # - name: dex
    #   url: https://charts.dexidp.io
    - name: gitea-charts
      url: https://dl.gitea.io/charts/
    - name: harbor
@@ -58,7 +56,7 @@ components:

  argo-cd:
    helm:
      version: 5.24.0  # (= ArgoCD v2.6.3)
      version: 5.27.4  # (= ArgoCD v2.6.7)
      chart: argo/argo-cd
      parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
      chart_values: !unsafe |
@@ -73,6 +71,32 @@ components:
            hosts:
              - gitops.{{ vapp['metacluster.fqdn'] }}

  authentik:
    helm:
      version: 2023.3.1
      chart: authentik/authentik
      parse_logic: helm template . --set postgresql.enabled=true,redis.enabled=true | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
      chart_values: !unsafe |
        authentik:
          avatars: none
          secret_key: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
          postgresql:
            password: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
        env:
          AUTHENTIK_BOOTSTRAP_PASSWORD: "{{ vapp['metacluster.password'] }}"
        ingress:
          enabled: true
          hosts:
            - host: auth.{{ vapp['metacluster.fqdn'] }}
              paths:
                - path: "/"
                  pathType: Prefix
        postgresql:
          enabled: true
          postgresqlPassword: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
        redis:
          enabled: true

  cert-manager:
    helm:
      version: 1.11.0
@@ -85,67 +109,67 @@ components:
    management:
      version:
        # Must match the version referenced at `dependencies.static_binaries[.filename==clusterctl].url`
        base: v1.3.5
        base: v1.4.0
        # Must match the version referenced at `components.cert-manager.helm.version`
        cert_manager: v1.11.0
        infrastructure_vsphere: v1.5.3
        infrastructure_vsphere: v1.6.0
        ipam_incluster: v0.1.0-alpha.2
        # Refer to `https://console.cloud.google.com/gcr/images/cloud-provider-vsphere/GLOBAL/cpi/release/manager` for available tags
        cpi_vsphere: v1.25.2
    workload:
      version:
        calico: v3.25.0
        # k8s: v1.25.5
        k8s: v1.26.2
        k8s: v1.25.9
      node_template:
        # url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2004-kube-v1.23.15.ova
        url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2004-kube-v1.26.2.ova
        url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2204-kube-v1.25.9.ova

  dex:
    helm:
      version: 0.13.0 # (= Dex 2.35.3)
      chart: dex/dex
      parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
      chart_values: !unsafe |
        config:
          connectors:
            - type: ldap
              id: ldap
              name: "LDAP"
              config:
                host: "{{ vapp['ldap.fqdn'] }}:636"
                insecureNoSSL: false
                insecureSkipVerify: true
                bindDN: "{{ vapp['ldap.dn'] }}"
                bindPW: "{{ vapp['ldap.password'] }}"
  # dex:
  #   helm:
  #     version: 0.13.0 # (= Dex 2.35.3)
  #     chart: dex/dex
  #     parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
  #     chart_values: !unsafe |
  #       config:
  #         connectors:
  #           - type: ldap
  #             id: ldap
  #             name: "LDAP"
  #             config:
  #               host: "{{ vapp['ldap.fqdn'] }}:636"
  #               insecureNoSSL: false
  #               insecureSkipVerify: true
  #               bindDN: "{{ vapp['ldap.dn'] }}"
  #               bindPW: "{{ vapp['ldap.password'] }}"

                usernamePrompt: "Username"
                userSearch:
                  baseDN: OU=Administrators,OU=Useraccounts,DC=bessems,DC=eu
                  filter: "(objectClass=person)"
                  username: userPrincipalName
                  idAttr: DN
                  emailAttr: userPrincipalName
                  nameAttr: cn
  #               usernamePrompt: "Username"
  #               userSearch:
  #                 baseDN: OU=Administrators,OU=Useraccounts,DC=bessems,DC=eu
  #                 filter: "(objectClass=person)"
  #                 username: userPrincipalName
  #                 idAttr: DN
  #                 emailAttr: userPrincipalName
  #                 nameAttr: cn

                groupSearch:
                  baseDN: OU=Roles,OU=Groups,DC=bessems,DC=eu
                  filter: "(objectClass=group)"
                  userMatchers:
                  - userAttr: DN
                    groupAttr: member
                  nameAttr: cn
          enablePasswordDB: true
          issuer: https://oidc.{{ vapp['metacluster.fqdn'] }}
          storage:
            type: kubernetes
            config:
              inCluster: true
        ingress:
          enabled: true
          hosts:
            - host: oidc.{{ vapp['metacluster.fqdn'] }}
              paths:
                - path: /
                  pathType: Prefix
  #               groupSearch:
  #                 baseDN: OU=Roles,OU=Groups,DC=bessems,DC=eu
  #                 filter: "(objectClass=group)"
  #                 userMatchers:
  #                 - userAttr: DN
  #                   groupAttr: member
  #                 nameAttr: cn
  #         enablePasswordDB: true
  #         issuer: https://oidc.{{ vapp['metacluster.fqdn'] }}
  #         storage:
  #           type: kubernetes
  #           config:
  #             inCluster: true
  #       ingress:
  #         enabled: true
  #         hosts:
  #           - host: oidc.{{ vapp['metacluster.fqdn'] }}
  #             paths:
  #               - path: /
  #                 pathType: Prefix

  gitea:
    helm:
@@ -201,6 +225,38 @@ components:
            registry:
              size: 25Gi

  # keycloakx:
  #   helm:
  #     version: 2.1.1  # (= Keycloak 20.0.3)
  #     chart: codecentric/keycloakx
  #     parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
  #     chart_values: !unsafe |
  #       command:
  #         - "/opt/keycloak/bin/kc.sh"
  #         - "start"
  #         - "--http-enabled=true"
  #         - "--http-port=8080"
  #         - "--hostname-strict=false"
  #         - "--hostname-strict-https=false"
  #       extraEnv: |
  #         - name: KEYCLOAK_ADMIN
  #           value: admin
  #         - name: KEYCLOAK_ADMIN_PASSWORD
  #           value: {{ vapp['metacluster.password'] }}
  #         - name: KC_PROXY
  #           value: "passthrough"
  #         - name: JAVA_OPTS_APPEND
  #           value: >-
  #             -Djgroups.dns.query={% raw %}{{ include "keycloak.fullname" . }}{% endraw %}-headless
  #       ingress:
  #         enabled: true
  #         rules:
  #           - host: keycloak.{{ vapp['metacluster.fqdn'] }}
  #             paths:
  #               - path: /
  #                 pathType: Prefix
  #         tls: []

  kube-prometheus-stack:
    helm:
      version: 45.2.0
@@ -218,7 +274,7 @@ components:

  longhorn:
    helm:
      version: 1.4.0
      version: 1.4.1
      chart: longhorn/longhorn
      parse_logic: cat values.yaml | yq eval '.. | select(has("repository")) | .repository + ":" + .tag'
      chart_values: !unsafe |
@@ -239,14 +295,6 @@ components:
      parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sed '/:/!s/$/:latest/' | sort -u
      chart_values: !unsafe |
        ca:
          bootstrap:
            postInitHook: |
              echo '{{ vapp["metacluster.password"] }}' > ~/pwfile
              step ca provisioner add acme \
                --type ACME \
                --password-file=~/pwfile \
                --force-cn
              rm ~/pwfile
          dns: ca.{{ vapp['metacluster.fqdn'] }},step-certificates.step-ca.svc.cluster.local,127.0.0.1
          password: "{{ vapp['metacluster.password'] }}"
          provisioner:
@@ -286,7 +334,7 @@ dependencies:

  static_binaries:
    - filename: clusterctl
      url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.5/clusterctl-linux-amd64
      url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-linux-amd64
    - filename: govc
      url: https://github.com/vmware/govmomi/releases/download/v0.29.0/govc_Linux_x86_64.tar.gz
      archive: compressed
@@ -297,10 +345,8 @@ dependencies:
    - filename: kubectl-slice
      url: https://github.com/patrickdappollonio/kubectl-slice/releases/download/v1.2.5/kubectl-slice_linux_x86_64.tar.gz
      archive: compressed
    # - filename: npp-prepper
    #   url: https://code.spamasaurus.com/api/packages/djpbessems/generic/npp-prepper/v0.5.1/npp-prepper
    - filename: skopeo
      url: https://code.spamasaurus.com/api/packages/djpbessems/generic/skopeo/v1.11.1/skopeo_linux_amd64
      url: https://code.spamasaurus.com/api/packages/djpbessems/generic/skopeo/v1.12.0/skopeo_linux_amd64
    - filename: step
      url: https://dl.step.sm/gh-release/cli/gh-release-header/v0.23.0/step_linux_0.23.0_amd64.tar.gz
      archive: compressed

27  ansible/vars/workloadcluster.yml  Normal file
@@ -0,0 +1,27 @@
downstream:

  helm_repositories:
    - name: longhorn
      url: https://charts.longhorn.io
    - name: sealed-secrets
      url: https://bitnami-labs.github.io/sealed-secrets

  helm_charts:

    longhorn:
      version: 1.4.1
      chart: longhorn/longhorn
      namespace: longhorn-system
      parse_logic: cat values.yaml | yq eval '.. | select(has("repository")) | .repository + ":" + .tag'
      chart_values: !unsafe |
        defaultSettings:
          createDefaultDiskLabeledNodes: true
          defaultDataPath: /mnt/blockstorage

    sealed-secrets:
      version: 2.8.1  # (= Sealed Secrets v0.20.2)
      chart: sealed-secrets/sealed-secrets
      namespace: sealed-secrets
      parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
      # chart_values: !unsafe |
      #   # Empty

@@ -6,22 +6,12 @@ packer {
 | 
			
		||||
build {
 | 
			
		||||
  source "vsphere-iso.ubuntu" {
 | 
			
		||||
    name = "bootstrap"
 | 
			
		||||
    vm_name = "ova.bootstrap-${var.vm_name}"
 | 
			
		||||
 | 
			
		||||
    export {
 | 
			
		||||
      images                = false
 | 
			
		||||
      output_directory      = "/scratch/airgapped-k8s/bootstrap"
 | 
			
		||||
    }
 | 
			
		||||
    vm_name = "bld_${var.vm_name}_bootstrap"
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  source "vsphere-iso.ubuntu" {
 | 
			
		||||
    name = "upgrade"
 | 
			
		||||
    vm_name = "ova.upgrade-${var.vm_name}"
 | 
			
		||||
 | 
			
		||||
    export {
 | 
			
		||||
      images                = false
 | 
			
		||||
      output_directory      = "/scratch/airgapped-k8s/upgrade"
 | 
			
		||||
    }
 | 
			
		||||
    vm_name = "bld_${var.vm_name}_upgrade"
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  provisioner "ansible" {
 | 
			
		||||
@@ -34,6 +24,8 @@ build {
 | 
			
		||||
      "PYTHONUNBUFFERED=1"
 | 
			
		||||
    ]
 | 
			
		||||
    use_proxy        = "false"
 | 
			
		||||
    collections_path = "ansible/collections"
 | 
			
		||||
 | 
			
		||||
    extra_arguments  = [
 | 
			
		||||
      "--extra-vars", "appliancetype=${source.name}",
 | 
			
		||||
      "--extra-vars", "ansible_ssh_pass=${var.ssh_password}",
 | 
			
		||||
@@ -48,11 +40,11 @@ build {
     inline = [
       "pwsh -command \"& scripts/Update-OvfConfiguration.ps1 \\",
       " -ApplianceType '${source.name}' \\",
-      " -OVFFile '/scratch/airgapped-k8s/${source.name}/ova.${source.name}-${var.vm_name}.ovf' \"",
+      " -OVFFile '/scratch/bld_${var.vm_name}_${source.name}.ovf' \"",
       "pwsh -file scripts/Update-Manifest.ps1 \\",
-      " -ManifestFileName '/scratch/airgapped-k8s/${source.name}/ova.${source.name}-${var.vm_name}.mf'",
+      " -ManifestFileName '/scratch/bld_${var.vm_name}_${source.name}.mf'",
       "ovftool --acceptAllEulas --allowExtraConfig --overwrite \\",
-      " '/scratch/airgapped-k8s/${source.name}/ova.${source.name}-${var.vm_name}.ovf' \\",
+      " '/scratch/bld_${var.vm_name}_${source.name}.ovf' \\",
       " /output/airgapped-k8s-${var.k8s_version}.${source.name}.ova"
     ]
   }
 
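
Note: for readability, this is roughly what the inline block above expands to for the bootstrap appliance once Packer substitutes ${source.name}, ${var.vm_name} and ${var.k8s_version}; the concrete values are hypothetical placeholders:

# Illustrative expansion only; build number, commit hash and K8s version are made up.
pwsh -command "& scripts/Update-OvfConfiguration.ps1 \
  -ApplianceType 'bootstrap' \
  -OVFFile '/scratch/bld_42-abcdef1234_bootstrap.ovf'"
pwsh -file scripts/Update-Manifest.ps1 \
  -ManifestFileName '/scratch/bld_42-abcdef1234_bootstrap.mf'
ovftool --acceptAllEulas --allowExtraConfig --overwrite \
  '/scratch/bld_42-abcdef1234_bootstrap.ovf' \
  /output/airgapped-k8s-1.25.9.bootstrap.ova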
@@ -53,4 +53,9 @@ source "vsphere-iso" "ubuntu" {
   shutdown_timeout        = "5m"
 
   remove_cdrom            = true
+
+  export {
+    images                = false
+    output_directory      = "/scratch"
+  }
 }
 
@@ -1,14 +1,16 @@
 DeploymentConfigurations:
 
-- Id: cp1w1
+- Id: cp1w1ws0
   Label: 'Workload-cluster: 1 control-plane node/1 worker node'
   Description: 1 control-plane node/1 worker node
 
-- Id: cp1w2
-  Label: 'Workload-cluster: 1 control-plane node/2 worker nodes'
-  Description: |
-    1 control-plane node
-    2 worker nodes
+- Id: cp1w1ws1
+  Label: 'Workload-cluster: 1 control-plane node/1 worker node/1 worker-storage node'
+  Description: 1 control-plane node/1 worker node/1 worker-storage node
 
+- Id: core
+  Label: No workload-cluster
+  Description: Only the metacluster is deployed (useful for recovery scenario's)
+
 DynamicDisks:
 
@@ -26,8 +28,9 @@ PropertyCategories:
   - Key: deployment.type
     Type: string
     Value:
-    - cp1w1
-    - cp1w2
+    - cp1w1ws0
+    - cp1w1ws1
+    - core
     UserConfigurable: false
 
 - Name: 1) Meta-cluster
@@ -124,50 +127,62 @@ PropertyCategories:
     Label: Workload-cluster name*
     Description: ''
     DefaultValue: 'workload-{{ hostname.suffix }}'
-    Configurations: '*'
+    Configurations:
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true
 
   - Key: workloadcluster.vip
     Type: ip
     Label: Workload-cluster virtual IP address*
     Description: Workload-cluster control plane endpoint virtual IP address
-    DefaultValue: ''
-    Configurations: '*'
+    DefaultValue: '0.0.0.0'
+    Configurations:
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true
 
   - Key: ippool.startip
     Type: ip
     Label: Workload-cluster IP-pool start IP address*
     Description: All nodes for the workload-cluster will be provisioned within this IP pool
-    DefaultValue: ''
-    Configurations: '*'
+    DefaultValue: '0.0.0.0'
+    Configurations:
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true
 
   - Key: ippool.endip
     Type: ip
     Label: Workload-cluster IP-pool end IP address*
    Description: All nodes for the workload-cluster will be provisioned within this IP pool
-    DefaultValue: ''
-    Configurations: '*'
+    DefaultValue: '0.0.0.0'
+    Configurations:
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true
 
   - Key: workloadcluster.nodesize
-    Type: string["small", "medium"]
+    Type: string["small", "medium", "large"]
     Label: Workload-cluster node size*
     Description: |
-      All worker-nodes for the workload-cluster will be provisioned with number of cpu-cores and memory as specified:
+      All worker and worker-storage nodes for the workload-cluster will be provisioned with number of cpu-cores and memory as specified:
       - SMALL: 2 vCPU/6GB RAM
       - MEDIUM: 4 vCPU/8GB RAM
+      - LARGE: 8 vCPU/16GB RAM
     DefaultValue: 'small'
-    Configurations: '*'
+    Configurations:
+    - cp1w1ws0
+    - cp1w1ws1
     UserConfigurable: true
 
   - Key: workloadcluster.additionaldisk
     Type: int(0..120)
     Label: Workload-cluster block storage disk size*
-    Description: 'All worker-nodes for the workload-cluster will be provisioned with an additional disk of the specified size'
-    DefaultValue: '20'
-    Configurations: '*'
+    Description: 'All worker-storage nodes for the workload-cluster will be provisioned with an additional disk of the specified size'
+    DefaultValue: '42'
+    Configurations:
+    - cp1w1ws1
     UserConfigurable: true
 
 - Name: 4) Common
 
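
Note: cp1w1ws0, cp1w1ws1 and core are OVF deployment options, and the workload-cluster properties above are now only offered for the configurations that actually deploy a workload cluster. A hedged sketch of consuming them at deploy time with ovftool; all target names, addresses, versions and paths below are placeholders, not taken from this repository:

# Deploy the generated OVA with the 1 control-plane/1 worker/1 worker-storage layout.
ovftool --acceptAllEulas --powerOn \
  --deploymentOption=cp1w1ws1 \
  --name=airgapped-k8s \
  --datastore=Datastore01 \
  --prop:workloadcluster.name=workload-test \
  --prop:workloadcluster.vip=192.168.10.50 \
  --prop:ippool.startip=192.168.10.51 \
  --prop:ippool.endip=192.168.10.80 \
  --prop:workloadcluster.nodesize=medium \
  --prop:workloadcluster.additionaldisk=42 \
  airgapped-k8s-1.25.9.bootstrap.ova \
  'vi://administrator@vsphere.local@vcenter.example.com/Datacenter/host/Cluster01'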