17 Commits

Author SHA1 Message Date
5740faeb9d feat: Add cli binary
All checks were successful
Container & Helm chart / Linting (push) Successful in 7s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 1m9s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 34m49s
2024-06-15 19:48:17 +10:00
e057f313ea chore: Ensure api availability 2024-06-15 19:47:44 +10:00
ac38731dcf chore: Configure argo workflows permissions
All checks were successful
Container & Helm chart / Linting (push) Successful in 1m35s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 2m15s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 32m49s
2024-06-14 12:32:06 +10:00
9cbb84a0f3 chore: Remove redundant node template injection task
All checks were successful
Container & Helm chart / Linting (push) Successful in 6s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 1m12s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 31m55s
2024-06-12 22:10:38 +10:00
066ec9a967 chore: Remove redundant kustomize patch 2024-06-12 22:10:16 +10:00
dda14af238 chore: Refactor jq keys according to govc output
All checks were successful
Container & Helm chart / Linting (push) Successful in 6s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 58s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 38m38s
2024-06-12 17:01:12 +10:00
2db1c4d623 chore: Add deployment playbook
All checks were successful
Container & Helm chart / Linting (push) Successful in 5s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 49s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 40m27s
2024-06-12 12:28:58 +10:00
1451e8f105 chore: Create target namespaces proactively 2024-06-12 12:27:17 +10:00
baf809159b chore: Fix incorrect variable reference
All checks were successful
Container & Helm chart / Linting (push) Successful in 6s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 58s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 41m3s
2024-06-12 10:30:01 +10:00
066a21b1d2 chore: Align controller details with latest chart versions
All checks were successful
Container & Helm chart / Linting (push) Successful in 5s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 1m16s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 37m13s
2024-06-11 21:58:14 +10:00
46fe962e77 chore: Duplicate certificate provisioner w/ custom claims 2024-06-11 21:57:38 +10:00
74070f266c feat: Include new component argo workflows 2024-06-11 21:57:00 +10:00
20f28f7d8a chore: Correctly inject chart values
All checks were successful
Container & Helm chart / Linting (push) Successful in 5s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 51s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 35m41s
2024-06-11 12:00:45 +10:00
2802b49d02 chore: Fix incorrect task module 2024-06-11 12:00:08 +10:00
594e62cf71 feat: Remove node-template hypervisor upload logic (treat as prerequisite instead)
Some checks failed
Container & Helm chart / Linting (push) Successful in 6s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 50s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Failing after 6m31s
2024-06-11 11:25:35 +10:00
544f98a8fb chore: Add Traefik persistent volume permissions workaround
All checks were successful
Container & Helm chart / Linting (push) Successful in 6s
Container & Helm chart / Semantic Release (Dry-run) (push) Successful in 1m4s
Container & Helm chart / Kubernetes Bootstrap Appliance (push) Successful in 36m45s
2024-06-10 22:19:29 +10:00
562e0b8167 build: Cleanup virtual machine after builds 2024-06-10 15:59:19 +10:00
25 changed files with 348 additions and 500 deletions

View File

@ -1,226 +0,0 @@
kind: pipeline
type: kubernetes
name: 'Packer Build'
volumes:
- name: output
claim:
name: flexvolsmb-drone-output
- name: scratch
claim:
name: flexvolsmb-drone-scratch
trigger:
event:
exclude:
- tag
steps:
- name: Debugging information
image: bv11-cr01.bessems.eu/library/packer-extended
pull: always
commands:
- ansible --version
- ovftool --version
- packer --version
- yamllint --version
- name: Linting
image: bv11-cr01.bessems.eu/library/packer-extended
pull: always
commands:
- |
yamllint -d "{extends: relaxed, rules: {line-length: disable}}" \
ansible \
packer/preseed/UbuntuServer22.04/user-data \
scripts
- name: Semantic Release (Dry-run)
image: bv11-cr01.bessems.eu/proxy/library/node:20-slim
pull: always
commands:
- |
apt-get update
- |
apt-get install -y --no-install-recommends \
curl \
git-core \
jq \
ca-certificates
- |
curl -L https://api.github.com/repos/mikefarah/yq/releases/latest | \
jq -r '.assets[] | select(.name | endswith("yq_linux_amd64")) | .browser_download_url' | \
xargs -I {} curl -L -o /bin/yq {} && \
chmod +x /bin/yq
- |
npm install \
semantic-release \
@semantic-release/commit-analyzer \
@semantic-release/exec \
- |
export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
export GIT_CREDENTIALS=$${GIT_USERNAME}:$${GIT_APIKEY}
- |
npx semantic-release \
--package @semantic-release/exec \
--package semantic-release \
--branches ${DRONE_BRANCH} \
--tag-format "K8s_$${K8S_VERSION}-v\$${version}" \
--dry-run \
--plugins @semantic-release/commit-analyzer,@semantic-release/exec \
--analyzeCommits @semantic-release/commit-analyzer \
--verifyRelease @semantic-release/exec \
--verifyReleaseCmd 'echo "$${nextRelease.version}" > .version'
environment:
GIT_APIKEY:
from_secret: git_apikey
GIT_USERNAME: djpbessems
- name: Install Ansible Galaxy collections
image: bv11-cr01.bessems.eu/library/packer-extended
pull: always
commands:
- |
ansible-galaxy collection install \
-r ansible/requirements.yml \
-p ./ansible/collections
- name: Kubernetes Bootstrap Appliance
image: bv11-cr01.bessems.eu/library/packer-extended
pull: always
commands:
- |
sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
packer/preseed/UbuntuServer22.04/user-data
- |
export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
export APPLIANCE_VERSION=$(cat .version)
- |
packer init -upgrade \
./packer
- |
packer validate \
-only=vsphere-iso.bootstrap \
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
-var docker_username=$${DOCKER_USERNAME} \
-var docker_password=$${DOCKER_PASSWORD} \
-var repo_username=$${REPO_USERNAME} \
-var repo_password=$${REPO_PASSWORD} \
-var ssh_password=$${SSH_PASSWORD} \
-var vsphere_password=$${VSPHERE_PASSWORD} \
-var k8s_version=$K8S_VERSION \
-var appliance_version=$APPLIANCE_VERSION \
./packer
- |
packer build \
-on-error=cleanup -timestamp-ui \
-only=vsphere-iso.bootstrap \
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
-var docker_username=$${DOCKER_USERNAME} \
-var docker_password=$${DOCKER_PASSWORD} \
-var repo_username=$${REPO_USERNAME} \
-var repo_password=$${REPO_PASSWORD} \
-var ssh_password=$${SSH_PASSWORD} \
-var vsphere_password=$${VSPHERE_PASSWORD} \
-var k8s_version=$K8S_VERSION \
-var appliance_version=$APPLIANCE_VERSION \
./packer
environment:
DOCKER_USERNAME:
from_secret: docker_username
DOCKER_PASSWORD:
from_secret: docker_password
# PACKER_LOG: 1
REPO_USERNAME:
from_secret: repo_username
REPO_PASSWORD:
from_secret: repo_password
SSH_PASSWORD:
from_secret: ssh_password
VSPHERE_PASSWORD:
from_secret: vsphere_password
volumes:
- name: output
path: /output
- name: scratch
path: /scratch
- name: Kubernetes Upgrade Appliance
image: bv11-cr01.bessems.eu/library/packer-extended
pull: always
commands:
- |
sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
packer/preseed/UbuntuServer22.04/user-data
- |
export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
export APPLIANCE_VERSION=$(cat .version)
- |
packer init -upgrade \
./packer
- |
packer validate \
-only=vsphere-iso.upgrade \
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
-var docker_username=$${DOCKER_USERNAME} \
-var docker_password=$${DOCKER_PASSWORD} \
-var repo_username=$${REPO_USERNAME} \
-var repo_password=$${REPO_PASSWORD} \
-var ssh_password=$${SSH_PASSWORD} \
-var vsphere_password=$${VSPHERE_PASSWORD} \
-var k8s_version=$K8S_VERSION \
-var appliance_version=$APPLIANCE_VERSION \
./packer
- |
packer build \
-on-error=cleanup -timestamp-ui \
-only=vsphere-iso.upgrade \
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
-var docker_username=$${DOCKER_USERNAME} \
-var docker_password=$${DOCKER_PASSWORD} \
-var repo_username=$${REPO_USERNAME} \
-var repo_password=$${REPO_PASSWORD} \
-var ssh_password=$${SSH_PASSWORD} \
-var vsphere_password=$${VSPHERE_PASSWORD} \
-var k8s_version=$K8S_VERSION \
-var appliance_version=$APPLIANCE_VERSION \
./packer
environment:
DOCKER_USERNAME:
from_secret: docker_username
DOCKER_PASSWORD:
from_secret: docker_password
# PACKER_LOG: 1
REPO_USERNAME:
from_secret: repo_username
REPO_PASSWORD:
from_secret: repo_password
SSH_PASSWORD:
from_secret: ssh_password
VSPHERE_PASSWORD:
from_secret: vsphere_password
volumes:
- name: output
path: /output
- name: scratch
path: /scratch
- name: Remove temporary resources
image: bv11-cr01.bessems.eu/library/packer-extended
commands:
- |
pwsh -file scripts/Remove-Resources.ps1 \
-VMName $DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
-VSphereFQDN 'bv11-vc.bessems.lan' \
-VSphereUsername 'administrator@vsphere.local' \
-VSpherePassword $${VSPHERE_PASSWORD}
environment:
VSPHERE_PASSWORD:
from_secret: vsphere_password
volumes:
- name: scratch
path: /scratch
when:
status:
- success
- failure

View File

@ -83,12 +83,9 @@ jobs:
echo "BUILD_COMMIT=$(echo ${{ gitea.sha }} | cut -c 1-10)" >> $GITHUB_ENV
echo "BUILD_SUFFIX=$(openssl rand -hex 3)" >> $GITHUB_ENV
- name: Run `packer validate`
- name: Validate packer template files
id: validate
run: |
# BUILD_COMMIT=$(echo "${{ gitea.sha }}" | cut -c 1-10)
# BUILD_SUFFIX=$(openssl rand -hex 3)
packer validate \
-only=vsphere-iso.bootstrap \
-var vm_name=${{ gitea.run_number }}-${BUILD_COMMIT}-${BUILD_SUFFIX} \
@ -101,12 +98,10 @@ jobs:
-var k8s_version=${{ steps.get_k8sversion.outputs.result }} \
-var appliance_version=${{ needs.semrel_dryrun.outputs.version }} \
./packer
- name: Run `packer build`
- name: Build packer template
run: |
# BUILD_COMMIT=$(echo "${{ gitea.sha }}" | cut -c 1-10)
# BUILD_SUFFIX=$(openssl rand -hex 3)
packer build \
-on-error=cleanup -timestamp-ui \
-only=vsphere-iso.bootstrap \
-var vm_name=${{ gitea.run_number }}-${BUILD_COMMIT}-${BUILD_SUFFIX} \
-var docker_username=${{ secrets.DOCKER_USERNAME }} \
@ -121,7 +116,6 @@ jobs:
# env:
# PACKER_LOG: 1
# semrel:
# name: Semantic Release
# runs-on: dind-rootless
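The `vm_name` handed to Packer is assembled from the workflow run number, a 10-character commit prefix and a short random suffix; a rough shell illustration (the run number, SHA variable and generated suffix below are examples only):

BUILD_COMMIT=$(echo "$COMMIT_SHA" | cut -c 1-10)   # e.g. 5740faeb9d
BUILD_SUFFIX=$(openssl rand -hex 3)                # e.g. 1a2b3c
# with run number 42, Packer then receives: vm_name=42-5740faeb9d-1a2b3c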

4
.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
**/hv.vcenter.yaml
**/ova.bootstrap.yaml
**/pb.secrets.yaml
**/pwdfile

View File

@ -29,4 +29,3 @@
- import_tasks: manifests.yml
- import_tasks: kubeadm.yml
- import_tasks: containerimages.yml
- import_tasks: nodetemplates.yml

View File

@ -1,4 +0,0 @@
- name: Download node-template image
ansible.builtin.uri:
url: "{{ components.clusterapi.workload.node_template.url }}"
dest: /opt/workloadcluster/node-templates/{{ components.clusterapi.workload.node_template.url | basename}}

View File

@ -1,10 +1,40 @@
- block:
- name: Inject password into values file
ansible.builtin.copy:
dest: "{{ stepconfig.path }}"
content: "{{ lookup('ansible.builtin.file', stepconfig.path) | regex_replace('(ca_password|provisioner_password):[ ]?\n', '\\1: ' ~ (vapp['metacluster.password'] | b64encode) ~ '\n') }}"
no_log: true
- name: Import generated values file into dictionary and combine with custom values
ansible.builtin.set_fact:
values_initial: |
{{
lookup('ansible.builtin.file', stepconfig.path) | from_yaml |
combine( components['step-certificates'].chart_values | from_yaml, recursive=True, list_merge='append')
}}
- name: Duplicate default provisioner with modified claims
ansible.builtin.set_fact:
values_new: |
{{
values_initial |
combine({'inject':{'config':{'files':{'ca.json':{'authority': {'provisioners': [
values_initial.inject.config.files['ca.json'].authority.provisioners[0] | combine({'name':'long-lived', 'claims':{'maxTLSCertDuration':'87660h'}})
]}}}}}}, list_merge='append_rp', recursive=true)
}}
# We're facing several bugs and niche cases that result in incorrect output, even though each behaviour is by design:
# - Ansible's `to_yaml` filter sees `\n` escape sequences in PEM certificate strings and (correctly) converts them to actual newlines, without any way to prevent this.
#   So we cannot rely on Ansible to (re)create the helm chart values file.
# - Python's yaml interpreter treats a string value of `y` as shorthand for `yes`/`true`, even when that string is a key name.
#   So we cannot use a straightforward yaml document as input for the Ansible helm module (which is written in Python).
#
# Let's walk through the workaround steps:
# - First we convert the dictionary to a json object (through Ansible), so that yq can read it.
# - Second we convert the json object in its entirety to yaml (through yq), so that yq can actually manipulate it.
# - Finally, we take one specific subkey's contents (a list of dictionaries) and iterate over each entry (with `map`):
#   - Convert the dictionary to json with `tojson`
#   - Remove newlines (and spaces) with `sub`
#   - Remove the outer quotes (') with `sed`
- name: Save updated values file
ansible.builtin.shell:
cmd: |
echo '{{ values_new | to_nice_json }}' | yq -p json -o yaml | yq e '.inject.config.files["ca.json"].authority.provisioners |= map(tojson | sub("[\n ]";""))' | sed -e "s/- '/- /;s/'$//" > {{ stepconfig.path }}
- name: Install step-ca chart
kubernetes.core.helm:
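A rough sketch of what the shell pipeline above is meant to produce, trimmed to a single provisioner taken from the preceding task (the result shown is the intended shape, not a captured run):

echo '{"inject":{"config":{"files":{"ca.json":{"authority":{"provisioners":[{"name":"long-lived","claims":{"maxTLSCertDuration":"87660h"}}]}}}}}}' \
  | yq -p json -o yaml \
  | yq e '.inject.config.files["ca.json"].authority.provisioners |= map(tojson | sub("[\n ]";""))' \
  | sed -e "s/- '/- /;s/'$//"
# intended result: each provisioner ends up as a single-line raw JSON scalar, e.g.
#   provisioners:
#     - {"name":"long-lived","claims":{"maxTLSCertDuration":"87660h"}}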

View File

@ -6,7 +6,11 @@
initContainers:
- name: volume-permissions
image: busybox:1
command: ["sh", "-c", "touch /data/acme.json && chmod -Rv 600 /data/* && chown 65532:65532 /data/acme.json"]
command: ["sh", "-c", "touch /data/acme.json; chown 65532 /data/acme.json; chmod -v 600 /data/acme.json"]
securityContext:
runAsNonRoot: false
runAsGroup: 0
runAsUser: 0
volumeMounts:
- name: data
mountPath: /data

View File

@ -1,6 +1,7 @@
- import_tasks: init.yml
- import_tasks: k3s.yml
- import_tasks: assets.yml
- import_tasks: workflow.yml
- import_tasks: virtualip.yml
- import_tasks: metadata.yml
- import_tasks: storage.yml

View File

@ -0,0 +1,54 @@
- block:
- name: Create target namespace(s)
kubernetes.core.k8s:
name: "{{ item }}"
kind: Namespace
state: present
kubeconfig: "{{ kubeconfig.path }}"
loop:
# - argo-workflows
- firstboot
- name: Create ClusterRoleBinding for default serviceaccount
kubernetes.core.k8s:
state: present
kubeconfig: "{{ kubeconfig.path }}"
definition: |
kind: ClusterRoleBinding
metadata:
name: argo-workflows-firstboot-clusteradmin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: default
namespace: firstboot
- name: Install argo-workflows chart
kubernetes.core.helm:
name: argo-workflows
chart_ref: /opt/metacluster/helm-charts/argo-workflows
release_namespace: argo-workflows
create_namespace: true
wait: false
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components['argo-workflows'].chart_values }}"
- name: Ensure argo workflows API availability
ansible.builtin.uri:
url: https://workflow.{{ vapp['metacluster.fqdn'] }}/api/v1/version
method: GET
register: api_readycheck
until:
- api_readycheck.json.version is defined
retries: "{{ playbook.retries }}"
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201]
body_format: json
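The readiness task above simply polls the Argo Workflows version endpoint until it reports a version; roughly equivalent to (FQDN illustrative, matching the example deployment values):

curl -sk https://workflow.k8s.lab/api/v1/version | jq -r '.version'
# prints e.g. v3.5.7 once the server answers; until then the task retries with the configured delay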

View File

@ -127,8 +127,8 @@
wait: true
kubeconfig: "{{ kubeconfig.path }}"
loop:
- name: caip-in-cluster-controller-manager
namespace: caip-in-cluster-system
- name: capi-ipam-in-cluster-controller-manager
namespace: capi-ipam-in-cluster-system
- name: capi-controller-manager
namespace: capi-system
- name: capv-controller-manager
@ -172,7 +172,7 @@
- name: Generate nodepool kustomization manifest
ansible.builtin.template:
src: kustomization.nodepool.j2
src: kustomization.longhorn-storage.j2
dest: "{{ capi_clustermanifest.path }}/kustomization.yaml"
vars:
_template:

View File

@ -1,57 +0,0 @@
- name: Gather hypervisor details
ansible.builtin.shell:
cmd: govc ls -L {{ item.moref }} | awk -F/ '{print ${{ item.part }}}'
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: govc_inventory
loop:
- attribute: cluster
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
part: (NF-1)
- attribute: datacenter
moref: VirtualMachine:{{ moref_id }}
part: 2
- attribute: datastore
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
part: NF
- attribute: folder
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
part: 0
# - attribute: host
# moref: >-
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
# part: NF
- attribute: network
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
part: NF
- attribute: resourcepool
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
part: 0
loop_control:
label: "{{ item.attribute }}"
- name: Retrieve hypervisor TLS thumbprint
ansible.builtin.shell:
cmd: openssl s_client -connect {{ vapp['hv.fqdn'] }}:443 < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin | awk -F'=' '{print $2}'
register: tls_thumbprint
- name: Store hypervisor details in dictionary
ansible.builtin.set_fact:
vcenter_info: "{{ vcenter_info | default({}) | combine({ item.item.attribute : item.stdout }) }}"
loop: "{{ govc_inventory.results }}"
loop_control:
label: "{{ item.item.attribute }}"

View File

@ -1,73 +0,0 @@
- block:
- name: Check for existing template on hypervisor
community.vmware.vmware_guest_info:
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
register: existing_ova
ignore_errors: yes
- name: Store inventory path of existing template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
when: existing_ova is not failed
- block:
- name: Parse OVA file for network mappings
ansible.builtin.shell:
cmd: govc import.spec -json {{ filename }}
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: ova_spec
- name: Deploy OVA template on hypervisor
community.vmware.vmware_deploy_ovf:
cluster: "{{ vcenter_info.cluster }}"
datastore: "{{ vcenter_info.datastore }}"
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
allow_duplicates: no
power_on: false
ovf: "{{ filename }}"
register: ova_deploy
- name: Add additional placeholder disk
community.vmware.vmware_guest_disk:
name: "{{ ova_deploy.instance.hw_name }}"
disk:
- size: 1Mb
scsi_controller: 1
scsi_type: paravirtual
unit_number: 0
# Disabled to allow disks to be resized; at the cost of cloning speed
# - name: Create snapshot on deployed VM
# community.vmware.vmware_guest_snapshot:
# name: "{{ ova_deploy.instance.hw_name }}"
# state: present
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
- name: Mark deployed VM as templates
community.vmware.vmware_guest:
name: "{{ ova_deploy.instance.hw_name }}"
is_template: yes
- name: Store inventory path of deployed template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
when: existing_ova is failed
vars:
filename: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | first }}"
module_defaults:
group/vmware:
hostname: "{{ vapp['hv.fqdn'] }}"
validate_certs: no
username: "{{ vapp['hv.username'] }}"
password: "{{ vapp['hv.password'] }}"
datacenter: "{{ vcenter_info.datacenter }}"
folder: "{{ vcenter_info.folder }}"

View File

@ -4,34 +4,6 @@ resources:
- cluster-template.yaml
patches:
- patch: |-
apiVersion: v1
kind: Secret
metadata:
name: csi-vsphere-config
namespace: '${NAMESPACE}'
stringData:
data: |
apiVersion: v1
kind: Secret
metadata:
name: csi-vsphere-config
namespace: kube-system
stringData:
csi-vsphere.conf: |+
[Global]
insecure-flag = true
thumbprint = "${VSPHERE_TLS_THUMBPRINT}"
cluster-id = "${NAMESPACE}/${CLUSTER_NAME}"
[VirtualCenter "${VSPHERE_SERVER}"]
user = "${VSPHERE_USERNAME}"
password = "${VSPHERE_PASSWORD}"
datacenters = "${VSPHERE_DATACENTER}"
[Network]
public-network = "${VSPHERE_NETWORK}"
type: Opaque
- patch: |-
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
@ -95,6 +67,7 @@ patches:
spec:
template:
spec:
diskGiB: 60
network:
devices:
- dhcp4: false
@ -114,6 +87,7 @@ patches:
spec:
template:
spec:
diskGiB: 60
network:
devices:
- dhcp4: false
@ -125,6 +99,25 @@ patches:
- {{ _template.network.dnsserver }}
networkName: '${VSPHERE_NETWORK}'
- target:
group: addons.cluster.x-k8s.io
version: v1beta1
kind: ClusterResourceSet
name: \${CLUSTER_NAME}-crs-0
patch: |-
- op: replace
path: /spec/resources
value:
- kind: Secret
name: cloud-controller-manager
- kind: Secret
name: cloud-provider-vsphere-credentials
- kind: ConfigMap
name: cpi-manifests
- op: add
path: /spec/strategy
value: Reconcile
- target:
group: controlplane.cluster.x-k8s.io
version: v1beta1
@ -198,6 +191,8 @@ patches:
- op: replace
path: /metadata/name
value: ${CLUSTER_NAME}-master
- op: remove
path: /spec/template/spec/thumbprint
- target:
group: controlplane.cluster.x-k8s.io
version: v1beta1
@ -237,6 +232,8 @@ patches:
- op: replace
path: /spec/template/spec/memoryMiB
value: {{ _template.nodesize.memory }}
- op: remove
path: /spec/template/spec/thumbprint
- target:
group: cluster.x-k8s.io
version: v1beta1
@ -258,3 +255,12 @@ patches:
- op: replace
path: /metadata/name
value: ${CLUSTER_NAME}-worker
- target:
group: infrastructure.cluster.x-k8s.io
version: v1beta1
kind: VSphereCluster
name: .*
patch: |-
- op: remove
path: /spec/thumbprint

View File

@ -11,7 +11,7 @@
- attribute: cluster
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
jq -r '.[] | select(.name == "runtime").val.host | .type + ":" + .value')
part: (NF-1)
- attribute: datacenter
moref: VirtualMachine:{{ moref_id }}
@ -19,27 +19,27 @@
- attribute: datastore
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
jq -r '.[] | select(.name == "datastore").val._value | .[].type + ":" + .[].value')
part: NF
- attribute: folder
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
jq -r '.[] | select(.name == "parent").val | .type + ":" + .value')
part: 0
# - attribute: host
# moref: >-
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
# jq -r '.[] | select(.name == "runtime").val.host | .type + ":" + .value')
# part: NF
- attribute: network
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
jq -r '.[] | select(.name == "network").val._value | .[].type + ":" + .[].value')
part: NF
- attribute: resourcepool
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
jq -r '.[] | select(.name == "resourcePool").val | .type + ":" + .value')
part: 0
loop_control:
label: "{{ item.attribute }}"

View File

@ -0,0 +1,33 @@
- block:
- name: Check for existing template
community.vmware.vmware_guest_info:
name: "{{ vapp['workloadcluster.nodetemplate'] }}"
hostname: "{{ vapp['hv.fqdn'] }}"
validate_certs: false
username: "{{ vapp['hv.username'] }}"
password: "{{ vapp['hv.password'] }}"
datacenter: "{{ vcenter_info.datacenter }}"
folder: "{{ vcenter_info.folder }}"
register: nodetemplate
until:
- nodetemplate is not failed
retries: 600
delay: 30
# 600 retries x 30 seconds = 5 hours
vars:
color_reset: "\e[0m"
ansible_callback_diy_runner_retry_msg: >-
{%- set result = ansible_callback_diy.result.output -%}
{%- set retries_left = result.retries - result.attempts -%}
TEMPLATE '{{ vapp['workloadcluster.nodetemplate'] }}' NOT FOUND; PLEASE UPLOAD MANUALLY -- ({{ retries_left }} retries left)
ansible_callback_diy_runner_retry_msg_color: bright yellow
- name: Store inventory path of existing template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ nodetemplate.instance.hw_folder ~ '/' ~ nodetemplate.instance.hw_name }}"
rescue:
- name: CRITICAL ERROR
ansible.builtin.fail:
msg: Required node-template is not available; cannot continue

View File

@ -1,73 +0,0 @@
- block:
- name: Check for existing template on hypervisor
community.vmware.vmware_guest_info:
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
register: existing_ova
ignore_errors: yes
- name: Store inventory path of existing template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
when: existing_ova is not failed
- block:
- name: Parse OVA file for network mappings
ansible.builtin.shell:
cmd: govc import.spec -json {{ filename }}
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: ova_spec
- name: Deploy OVA template on hypervisor
community.vmware.vmware_deploy_ovf:
cluster: "{{ vcenter_info.cluster }}"
datastore: "{{ vcenter_info.datastore }}"
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
allow_duplicates: no
power_on: false
ovf: "{{ filename }}"
register: ova_deploy
- name: Add additional placeholder disk
community.vmware.vmware_guest_disk:
name: "{{ ova_deploy.instance.hw_name }}"
disk:
- size: 1Gb
scsi_controller: 1
scsi_type: paravirtual
unit_number: 0
# Disabled to allow disks to be resized; at the cost of cloning speed
# - name: Create snapshot on deployed VM
# community.vmware.vmware_guest_snapshot:
# name: "{{ ova_deploy.instance.hw_name }}"
# state: present
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
- name: Mark deployed VM as templates
community.vmware.vmware_guest:
name: "{{ ova_deploy.instance.hw_name }}"
is_template: yes
- name: Store inventory path of deployed template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
when: existing_ova is failed
vars:
filename: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/node-templates/*.ova') | first }}"
module_defaults:
group/vmware:
hostname: "{{ vapp['hv.fqdn'] }}"
validate_certs: no
username: "{{ vapp['hv.username'] }}"
password: "{{ vapp['hv.password'] }}"
datacenter: "{{ vcenter_info.datacenter }}"
folder: "{{ vcenter_info.folder }}"

View File

@ -37,9 +37,30 @@
state: directory
- name: Configure Ansible defaults
ansible.builtin.template:
src: ansible.j2
ansible.builtin.copy:
dest: /etc/ansible/ansible.cfg
content: |
[defaults]
callbacks_enabled = ansible.posix.profile_tasks
force_color = true
stdout_callback = community.general.diy
[callback_diy]
[callback_profile_tasks]
task_output_limit = 0
- name: Create default shell aliases
ansible.builtin.lineinfile:
path: ~/.bashrc
state: present
line: "{{ item }}"
insertafter: EOF
loop:
- alias k="kubectl"
- alias less="less -rf"
loop_control:
label: "{{ (item | regex_findall('([^ =\"]+)'))[2] }}"
- name: Cleanup
ansible.builtin.apt:
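The loop label expression above picks the third token of each alias line (splitting on spaces, '=' and double quotes), so the task output is labelled with the first word of the command being aliased; a rough shell equivalent, assuming GNU grep:

echo 'alias k="kubectl"' | grep -oE '[^ ="]+' | sed -n '3p'   # -> kubectl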

View File

@ -1,6 +0,0 @@
[defaults]
callbacks_enabled = ansible.posix.profile_tasks
force_color = true
[callback_profile_tasks]
task_output_limit = 5

View File

@ -58,7 +58,8 @@ components:
argo-cd:
helm:
version: 6.7.7 # (=ArgoCD v.2.10.5)
# Must match the version referenced at `dependencies.static_binaries[.filename==argo].url`
version: 6.7.7 # (=Argo CD v2.10.5)
chart: argo/argo-cd
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
chart_values: !unsafe |
@ -79,6 +80,33 @@ components:
ingress:
enabled: true
argo-workflows:
helm:
version: 0.41.8 # (=Argo Workflows v3.5.7)
chart: argo/argo-workflows
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
chart_values: !unsafe |
# workflow:
# serviceAccount:
# create: true
# name: "argo-workflows"
# rbac:
# create: true
controller:
workflowNamespaces:
- default
- firstboot
server:
authModes:
- server
ingress:
enabled: true
hosts:
- workflow.{{ vapp['metacluster.fqdn']}}
paths:
- /
pathType: Prefix
cert-manager:
helm:
version: 1.14.4
@ -103,8 +131,8 @@ components:
calico: v3.27.3
k8s: v1.30.1
node_template:
# url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2204-kube-v1.27.1.ova
url: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/templates%2Fv1.30.0/ubuntu-2204-kube-v1.30.0.ova
# Not used anymore; should be uploaded to hypervisor manually!
# https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/templates%2Fv1.30.0/
dex:
helm:
@ -336,6 +364,8 @@ dependencies:
- registry.k8s.io/sig-storage/livenessprobe:v2.10.0
static_binaries:
- filename: argo
url: https://github.com/argoproj/argo-workflows/releases/download/v3.5.7/argo-linux-amd64.gz
- filename: clusterctl
url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.3/clusterctl-linux-amd64
- filename: govc
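For reference, the `parse_logic` one-liners enumerate the container images a chart references; roughly, for the new argo-workflows component (assuming the `argo` chart repository has already been added to helm):

helm pull argo/argo-workflows --version 0.41.8 --untar && cd argo-workflows
helm template . \
  | yq --no-doc eval '.. | .image? | select(.)' \
  | sort -u \
  | awk '!/ /'   # drop matches containing spaces, i.e. anything that is not an image reference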

78
deployment/playbook.yml Normal file
View File

@ -0,0 +1,78 @@
- hosts: localhost
vars_files:
- vars/ova.bootstrap.yaml
- vars/hv.vcenter.yaml
- vars/pb.secrets.yaml
tasks:
- name: Retrieve target folder details
community.vmware.vmware_vm_info:
hostname: "{{ hv.hostname }}"
username: "{{ hv.username }}"
password: "{{ secrets.hv.password }}"
folder: "{{ hv.folder }}"
validate_certs: false
register: vm_info
- name: User prompt
ansible.builtin.pause:
prompt: Virtual machine '{{ appliance.id }}' already exists. Delete to continue [yes] or abort [no]?
register: prompt
until:
- prompt.user_input in ['yes', 'no']
delay: 0
when: (vm_info.virtual_machines | selectattr('guest_name', 'equalto', appliance.id) | list | length) > 0
- name: Destroy existing VM
community.vmware.vmware_guest:
hostname: "{{ hv.hostname }}"
username: "{{ hv.username }}"
password: "{{ secrets.hv.password }}"
folder: "{{ hv.folder }}"
name: "{{ appliance.id }}"
state: absent
when:
- (vm_info.virtual_machines | selectattr('guest_name', 'equalto', appliance.id) | list | length) > 0
- (prompt.user_input | bool) == true
- name: Deploy VM from OVA-template
community.vmware.vmware_deploy_ovf:
hostname: "{{ hv.hostname }}"
username: "{{ hv.username }}"
password: "{{ secrets.hv.password }}"
validate_certs: false
datacenter: "{{ hv.datacenter }}"
folder: "{{ hv.folder }}"
cluster: "{{ hv.cluster }}"
name: airgapped-k8s-meta1
datastore: "{{ hv.datastore }}"
disk_provisioning: thin
networks:
"LAN": "{{ hv.network }}"
power_on: yes
ovf: "{{ appliance.path }}/{{ appliance.filename }}"
deployment_option: cp1w1ws0
properties:
metacluster.fqdn: k8s.lab
metacluster.vip: 192.168.154.125
metacluster.token: "{{ secrets.appliance.installtoken }}"
# guestinfo.hostname: _default
metacluster.password: "{{ secrets.appliance.password }}"
guestinfo.ipaddress: 192.168.154.126
guestinfo.prefixlength: '24'
guestinfo.dnsserver: 192.168.154.225
guestinfo.gateway: 192.168.154.1
# workloadcluster.name: _default
workloadcluster.vip: 192.168.154.130
ippool.startip: 192.168.154.135
ippool.endip: 192.168.154.140
workloadcluster.nodetemplate: ubuntu-2204-kube-v1.30.0
workloadcluster.nodesize: small
# workloadcluster.additionaldisk: '75'
guestinfo.rootsshkey: ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAiRc7Og+cRJGFwdUzgpX9YqvVenTk54N4kqM7emEfYHdsJLMjKQyxr8hklHmsam5dzxx3itFzc6SLf/ldJJ2JZuzE5FiCqUXXv4UFwN6HF5xqn7PTLicvWZH93H4m1gOlD5Dfzi4Es34v5zRBwbMScOgekk/LweTgl35jGKDgMP5DjGTqkPf7Ndh9+iuQrz99JEr8egl3bj+jIlKjScfaQbbnu3AJIRwZwTKgw0AOkLliQdEPNLvG5/ZImxJG4oHV9/uNkfdJObLjT1plR1HbVNskV5fuRNE/vnUiWl9jAJ1RT83GOqV0sQ+Q7p214fkgqb3JPvci/s0Bb7RA85hBEQ== bessems.eu
hv.fqdn: "{{ hv.hostname }}"
hv.username: "{{ hv.username }}"
hv.password: "{{ secrets.hv.password }}"
ldap.fqdn: _unused
ldap.dn: _unused
ldap.password: _unused
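A minimal sketch of driving this playbook, with illustrative vars files matching the keys it references (the real files stay out of git per the new `.gitignore` entries; all values and paths below are examples):

ansible-galaxy collection install -r deployment/requirements.yml   # community.vmware (path assumed)
cat > deployment/vars/hv.vcenter.yaml <<'EOF'
hv:
  hostname: vcenter.example.lan
  username: administrator@vsphere.local
  datacenter: DC01
  cluster: CL01
  datastore: DS01
  network: LAN
  folder: /DC01/vm/appliances
EOF
cat > deployment/vars/pb.secrets.yaml <<'EOF'
secrets:
  hv:
    password: '<vcenter password>'
  appliance:
    installtoken: '<install token>'
    password: '<appliance password>'
EOF
cat > deployment/vars/ova.bootstrap.yaml <<'EOF'
appliance:
  id: airgapped-k8s-meta1
  path: /data/output
  filename: k8s-bootstrap-appliance.ova
EOF
ansible-playbook deployment/playbook.yml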

View File

@ -0,0 +1,5 @@
collections:
# - ansible.posix
# - ansible.utils
# - community.general
- community.vmware

View File

@ -58,4 +58,6 @@ source "vsphere-iso" "ubuntu" {
export {
output_directory = "/data/scratch"
}
destroy = true
}

View File

@ -162,6 +162,19 @@ PropertyCategories:
- cp1w1ws1
UserConfigurable: true
- Key: workloadcluster.nodetemplate
Type: string["ubuntu-2204-kube-v1.30.0", "photon-5-kube-v1.30.0.ova"]
Label: Workload-cluster node template
Description: |
All worker and worker-storage nodes for the workload-cluster will be provisioned with this node template.
Note:
Make sure that this exact template has been uploaded to the vCenter instance before powering on this appliance!
DefaultValue: ubuntu-2204-kube-v1.30.0
Configurations:
- cp1w1ws0
- cp1w1ws1
UserConfigurable: true
- Key: workloadcluster.nodesize
Type: string["small", "medium", "large"]
Label: Workload-cluster node size*

View File

@ -44,7 +44,7 @@ PropertyCategories:
Configurations: '*'
UserConfigurable: true
- Name: 2) Add meta-cluster node
- Name: 2) Meta-cluster new node
ProductProperties:
- Key: guestinfo.hostname
@ -95,7 +95,20 @@ PropertyCategories:
# Configurations: '*'
# UserConfigurable: true
- Name: 3) Common
- Name: 3) Workload-cluster
ProductProperties:
- Key: workloadcluster.nodetemplate
Type: string["ubuntu-2204-kube-v1.30.0", "photon-5-kube-v1.30.0.ova"]
Label: Workload-cluster node template
Description: |
All worker and worker-storage nodes for the workload-cluster will be provisioned with this node template.
Note:
Make sure that this exact template has been uploaded to the vCenter instance before powering on this appliance!
DefaultValue: ubuntu-2204-kube-v1.30.0
UserConfigurable: true
- Name: 4) Common
ProductProperties:
- Key: guestinfo.rootsshkey
@ -106,7 +119,7 @@ PropertyCategories:
Configurations: '*'
UserConfigurable: true
- Name: 4) Hypervisor
- Name: 5) Hypervisor
ProductProperties:
- Key: hv.fqdn