Compare commits
No commits in common. "master" and "Appliance.AirgappedK8s-1.27.x" have entirely different histories.
master
...
Appliance.AirgappedK8s-1.27.x
|
@ -0,0 +1,226 @@
|
||||||
|
kind: pipeline
|
||||||
|
type: kubernetes
|
||||||
|
name: 'Packer Build'
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
- name: output
|
||||||
|
claim:
|
||||||
|
name: flexvolsmb-drone-output
|
||||||
|
- name: scratch
|
||||||
|
claim:
|
||||||
|
name: flexvolsmb-drone-scratch
|
||||||
|
|
||||||
|
trigger:
|
||||||
|
event:
|
||||||
|
exclude:
|
||||||
|
- tag
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Debugging information
|
||||||
|
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||||
|
pull: always
|
||||||
|
commands:
|
||||||
|
- ansible --version
|
||||||
|
- ovftool --version
|
||||||
|
- packer --version
|
||||||
|
- yamllint --version
|
||||||
|
|
||||||
|
- name: Linting
|
||||||
|
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||||
|
pull: always
|
||||||
|
commands:
|
||||||
|
- |
|
||||||
|
yamllint -d "{extends: relaxed, rules: {line-length: disable}}" \
|
||||||
|
ansible \
|
||||||
|
packer/preseed/UbuntuServer22.04/user-data \
|
||||||
|
scripts
|
||||||
|
|
||||||
|
- name: Semantic Release (Dry-run)
|
||||||
|
image: bv11-cr01.bessems.eu/proxy/library/node:20-slim
|
||||||
|
pull: always
|
||||||
|
commands:
|
||||||
|
- |
|
||||||
|
apt-get update
|
||||||
|
- |
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
curl \
|
||||||
|
git-core \
|
||||||
|
jq \
|
||||||
|
ca-certificates
|
||||||
|
- |
|
||||||
|
curl -L https://api.github.com/repos/mikefarah/yq/releases/latest | \
|
||||||
|
jq -r '.assets[] | select(.name | endswith("yq_linux_amd64")) | .browser_download_url' | \
|
||||||
|
xargs -I {} curl -L -o /bin/yq {} && \
|
||||||
|
chmod +x /bin/yq
|
||||||
|
- |
|
||||||
|
npm install \
|
||||||
|
semantic-release \
|
||||||
|
@semantic-release/commit-analyzer \
|
||||||
|
@semantic-release/exec \
|
||||||
|
- |
|
||||||
|
export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
|
||||||
|
export GIT_CREDENTIALS=$${GIT_USERNAME}:$${GIT_APIKEY}
|
||||||
|
- |
|
||||||
|
npx semantic-release \
|
||||||
|
--package @semantic-release/exec \
|
||||||
|
--package semantic-release \
|
||||||
|
--branches ${DRONE_BRANCH} \
|
||||||
|
--tag-format "K8s_$${K8S_VERSION}-v\$${version}" \
|
||||||
|
--dry-run \
|
||||||
|
--plugins @semantic-release/commit-analyzer,@semantic-release/exec \
|
||||||
|
--analyzeCommits @semantic-release/commit-analyzer \
|
||||||
|
--verifyRelease @semantic-release/exec \
|
||||||
|
--verifyReleaseCmd 'echo "$${nextRelease.version}" > .version'
|
||||||
|
environment:
|
||||||
|
GIT_APIKEY:
|
||||||
|
from_secret: git_apikey
|
||||||
|
GIT_USERNAME: djpbessems
|
||||||
|
|
||||||
|
- name: Install Ansible Galaxy collections
|
||||||
|
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||||
|
pull: always
|
||||||
|
commands:
|
||||||
|
- |
|
||||||
|
ansible-galaxy collection install \
|
||||||
|
-r ansible/requirements.yml \
|
||||||
|
-p ./ansible/collections
|
||||||
|
|
||||||
|
- name: Kubernetes Bootstrap Appliance
|
||||||
|
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||||
|
pull: always
|
||||||
|
commands:
|
||||||
|
- |
|
||||||
|
sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
|
||||||
|
packer/preseed/UbuntuServer22.04/user-data
|
||||||
|
- |
|
||||||
|
export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
|
||||||
|
export APPLIANCE_VERSION=$(cat .version)
|
||||||
|
- |
|
||||||
|
packer init -upgrade \
|
||||||
|
./packer
|
||||||
|
- |
|
||||||
|
packer validate \
|
||||||
|
-only=vsphere-iso.bootstrap \
|
||||||
|
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||||
|
-var docker_username=$${DOCKER_USERNAME} \
|
||||||
|
-var docker_password=$${DOCKER_PASSWORD} \
|
||||||
|
-var repo_username=$${REPO_USERNAME} \
|
||||||
|
-var repo_password=$${REPO_PASSWORD} \
|
||||||
|
-var ssh_password=$${SSH_PASSWORD} \
|
||||||
|
-var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||||
|
-var k8s_version=$K8S_VERSION \
|
||||||
|
-var appliance_version=$APPLIANCE_VERSION \
|
||||||
|
./packer
|
||||||
|
- |
|
||||||
|
packer build \
|
||||||
|
-on-error=cleanup -timestamp-ui \
|
||||||
|
-only=vsphere-iso.bootstrap \
|
||||||
|
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||||
|
-var docker_username=$${DOCKER_USERNAME} \
|
||||||
|
-var docker_password=$${DOCKER_PASSWORD} \
|
||||||
|
-var repo_username=$${REPO_USERNAME} \
|
||||||
|
-var repo_password=$${REPO_PASSWORD} \
|
||||||
|
-var ssh_password=$${SSH_PASSWORD} \
|
||||||
|
-var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||||
|
-var k8s_version=$K8S_VERSION \
|
||||||
|
-var appliance_version=$APPLIANCE_VERSION \
|
||||||
|
./packer
|
||||||
|
environment:
|
||||||
|
DOCKER_USERNAME:
|
||||||
|
from_secret: docker_username
|
||||||
|
DOCKER_PASSWORD:
|
||||||
|
from_secret: docker_password
|
||||||
|
# PACKER_LOG: 1
|
||||||
|
REPO_USERNAME:
|
||||||
|
from_secret: repo_username
|
||||||
|
REPO_PASSWORD:
|
||||||
|
from_secret: repo_password
|
||||||
|
SSH_PASSWORD:
|
||||||
|
from_secret: ssh_password
|
||||||
|
VSPHERE_PASSWORD:
|
||||||
|
from_secret: vsphere_password
|
||||||
|
volumes:
|
||||||
|
- name: output
|
||||||
|
path: /output
|
||||||
|
- name: scratch
|
||||||
|
path: /scratch
|
||||||
|
|
||||||
|
- name: Kubernetes Upgrade Appliance
|
||||||
|
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||||
|
pull: always
|
||||||
|
commands:
|
||||||
|
- |
|
||||||
|
sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
|
||||||
|
packer/preseed/UbuntuServer22.04/user-data
|
||||||
|
- |
|
||||||
|
export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
|
||||||
|
export APPLIANCE_VERSION=$(cat .version)
|
||||||
|
- |
|
||||||
|
packer init -upgrade \
|
||||||
|
./packer
|
||||||
|
- |
|
||||||
|
packer validate \
|
||||||
|
-only=vsphere-iso.upgrade \
|
||||||
|
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||||
|
-var docker_username=$${DOCKER_USERNAME} \
|
||||||
|
-var docker_password=$${DOCKER_PASSWORD} \
|
||||||
|
-var repo_username=$${REPO_USERNAME} \
|
||||||
|
-var repo_password=$${REPO_PASSWORD} \
|
||||||
|
-var ssh_password=$${SSH_PASSWORD} \
|
||||||
|
-var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||||
|
-var k8s_version=$K8S_VERSION \
|
||||||
|
-var appliance_version=$APPLIANCE_VERSION \
|
||||||
|
./packer
|
||||||
|
- |
|
||||||
|
packer build \
|
||||||
|
-on-error=cleanup -timestamp-ui \
|
||||||
|
-only=vsphere-iso.upgrade \
|
||||||
|
-var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||||
|
-var docker_username=$${DOCKER_USERNAME} \
|
||||||
|
-var docker_password=$${DOCKER_PASSWORD} \
|
||||||
|
-var repo_username=$${REPO_USERNAME} \
|
||||||
|
-var repo_password=$${REPO_PASSWORD} \
|
||||||
|
-var ssh_password=$${SSH_PASSWORD} \
|
||||||
|
-var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||||
|
-var k8s_version=$K8S_VERSION \
|
||||||
|
-var appliance_version=$APPLIANCE_VERSION \
|
||||||
|
./packer
|
||||||
|
environment:
|
||||||
|
DOCKER_USERNAME:
|
||||||
|
from_secret: docker_username
|
||||||
|
DOCKER_PASSWORD:
|
||||||
|
from_secret: docker_password
|
||||||
|
# PACKER_LOG: 1
|
||||||
|
REPO_USERNAME:
|
||||||
|
from_secret: repo_username
|
||||||
|
REPO_PASSWORD:
|
||||||
|
from_secret: repo_password
|
||||||
|
SSH_PASSWORD:
|
||||||
|
from_secret: ssh_password
|
||||||
|
VSPHERE_PASSWORD:
|
||||||
|
from_secret: vsphere_password
|
||||||
|
volumes:
|
||||||
|
- name: output
|
||||||
|
path: /output
|
||||||
|
- name: scratch
|
||||||
|
path: /scratch
|
||||||
|
|
||||||
|
- name: Remove temporary resources
|
||||||
|
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||||
|
commands:
|
||||||
|
- |
|
||||||
|
pwsh -file scripts/Remove-Resources.ps1 \
|
||||||
|
-VMName $DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
|
||||||
|
-VSphereFQDN 'bv11-vc.bessems.lan' \
|
||||||
|
-VSphereUsername 'administrator@vsphere.local' \
|
||||||
|
-VSpherePassword $${VSPHERE_PASSWORD}
|
||||||
|
environment:
|
||||||
|
VSPHERE_PASSWORD:
|
||||||
|
from_secret: vsphere_password
|
||||||
|
volumes:
|
||||||
|
- name: scratch
|
||||||
|
path: /scratch
|
||||||
|
when:
|
||||||
|
status:
|
||||||
|
- success
|
||||||
|
- failure
|
16
README.md
16
README.md
|
@ -1,15 +1 @@
|
||||||
# Packer.Images
|
# Packer.Images [![Build Status](https://ci.spamasaurus.com/api/badges/djpbessems/Packer.Images/status.svg?ref=refs/heads/Kubernetes.Bootstrap.Appliance)](https://ci.spamasaurus.com/djpbessems/Packer.Images)
|
||||||
|
|
||||||
Opinionated set of packer templates for producing .OVA appliances, which can then be deployed (semi)unattended through the use of vApp properties:
|
|
||||||
|
|
||||||
## [![Build Status](https://ci.spamasaurus.com/api/badges/djpbessems/Packer.Images/status.svg?ref=refs/heads/UbuntuServer20.04) **Ubuntu Server 20.04**](https://code.spamasaurus.com/djpbessems/Packer.Images/src/branch/UbuntuServer20.04) - <small>LTS</small>
|
|
||||||
Lorem ipsum.
|
|
||||||
|
|
||||||
## [![Build Status](https://ci.spamasaurus.com/api/badges/djpbessems/Packer.Images/status.svg?ref=refs/heads/Server2019) **Windows Server 2019**](https://code.spamasaurus.com/djpbessems/Packer.Images/src/branch/Server2019) - <small>LTSC xx09</small>
|
|
||||||
This image in itself does not actually provide much benefit over other customization methods that are available during an unattended deployment; it serves primarily as a basis for the following images.
|
|
||||||
|
|
||||||
## [![Build Status](https://ci.spamasaurus.com/api/badges/djpbessems/Packer.Images/status.svg?ref=refs/heads/ADDS) **ADDS**](https://code.spamasaurus.com/djpbessems/Packer.Images/src/branch/ADDS) - <small>Active Directory Domain Services</small>
|
|
||||||
Lorem ipsum.
|
|
||||||
|
|
||||||
## [![Build Status](https://ci.spamasaurus.com/api/badges/djpbessems/Packer.Images/status.svg?ref=refs/heads/ADCS) **ADCS**](https://code.spamasaurus.com/djpbessems/Packer.Images/src/branch/ADCS) - <small>Active Directory Certificate Services</small>
|
|
||||||
Lorem ipsum.
|
|
||||||
|
|
|
@ -0,0 +1,3 @@
|
||||||
|
[defaults]
|
||||||
|
deprecation_warnings = False
|
||||||
|
remote_tmp = /tmp/.ansible-${USER}/tmp
|
|
@ -0,0 +1,11 @@
|
||||||
|
---
|
||||||
|
- hosts: all
|
||||||
|
gather_facts: false
|
||||||
|
vars_files:
|
||||||
|
- metacluster.yml
|
||||||
|
- workloadcluster.yml
|
||||||
|
become: true
|
||||||
|
roles:
|
||||||
|
- os
|
||||||
|
- firstboot
|
||||||
|
- assets
|
|
@ -0,0 +1,9 @@
|
||||||
|
collections:
|
||||||
|
- name: https://github.com/ansible-collections/ansible.posix
|
||||||
|
type: git
|
||||||
|
- name: https://github.com/ansible-collections/ansible.utils
|
||||||
|
type: git
|
||||||
|
- name: https://github.com/ansible-collections/community.general
|
||||||
|
type: git
|
||||||
|
- name: https://github.com/ansible-collections/kubernetes.core
|
||||||
|
type: git
|
|
@ -0,0 +1,75 @@
|
||||||
|
- name: Parse Cluster-API manifests for container images
|
||||||
|
ansible.builtin.shell:
|
||||||
|
# This set of commands is necessary to deal with multi-line scalar values
|
||||||
|
# eg.:
|
||||||
|
# key: |
|
||||||
|
# multi-line
|
||||||
|
# value
|
||||||
|
cmd: >-
|
||||||
|
cat {{ item.dest }} | yq --no-doc eval '.. | .image? | select(.)' | awk '!/ /';
|
||||||
|
cat {{ item.dest }} | yq eval '.data.data' | yq --no-doc eval '.. | .image? | select(.)';
|
||||||
|
cat {{ item.dest }} | yq --no-doc eval '.. | .files? | with_entries(select(.value.path == "*.yaml")).[0].content' | awk '!/null/' | yq eval '.. | .image? | select(.)'
|
||||||
|
register: clusterapi_parsedmanifests
|
||||||
|
loop: "{{ clusterapi_manifests.results }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.dest | basename }}"
|
||||||
|
|
||||||
|
- name: Parse pinniped manifest for container images
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
cat {{ pinniped_manifest.dest }} | yq --no-doc eval '.. | .image? | select(.)' | awk '!/ /';
|
||||||
|
register: pinniped_parsedmanifest
|
||||||
|
|
||||||
|
- name: Parse metacluster helm charts for container images
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: "{{ item.value.helm.parse_logic }}"
|
||||||
|
chdir: /opt/metacluster/helm-charts/{{ item.key }}
|
||||||
|
register: chartimages_metacluster
|
||||||
|
when: item.value.helm is defined
|
||||||
|
loop: "{{ query('ansible.builtin.dict', components) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
|
||||||
|
- name: Parse workloadcluster helm charts for container images
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: "{{ item.value.parse_logic }}"
|
||||||
|
chdir: /opt/workloadcluster/helm-charts/{{ item.value.namespace }}/{{ item.key }}
|
||||||
|
register: chartimages_workloadcluster
|
||||||
|
loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
|
||||||
|
- name: Store container images in dicts
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
containerimages_{{ item.source }}: "{{ item.results }}"
|
||||||
|
loop:
|
||||||
|
- source: charts
|
||||||
|
results: "{{ (chartimages_metacluster | json_query('results[*].stdout_lines')) + (chartimages_workloadcluster | json_query('results[*].stdout_lines')) | select() | flatten | list }}"
|
||||||
|
- source: kubeadm
|
||||||
|
results: "{{ kubeadmimages.stdout_lines }}"
|
||||||
|
- source: clusterapi
|
||||||
|
results: "{{ clusterapi_parsedmanifests | json_query('results[*].stdout_lines') | select() | flatten | list }}"
|
||||||
|
- source: pinniped
|
||||||
|
results: "{{ pinniped_parsedmanifest.stdout_lines }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.source }}"
|
||||||
|
|
||||||
|
- name: Log in to container registry
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
skopeo login \
|
||||||
|
docker.io \
|
||||||
|
--username={{ docker_username }} \
|
||||||
|
--password={{ docker_password }}
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Pull and store containerimages
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
skopeo copy \
|
||||||
|
--insecure-policy \
|
||||||
|
--retry-times=5 \
|
||||||
|
docker://{{ item }} \
|
||||||
|
docker-archive:./{{ ( item | regex_findall('[^/:]+'))[-2] }}_{{ lookup('ansible.builtin.password', '/dev/null length=5 chars=ascii_lowercase,digits seed={{ item }}') }}.tar:{{ item }}
|
||||||
|
chdir: /opt/metacluster/container-images
|
||||||
|
loop: "{{ (containerimages_charts + containerimages_kubeadm + containerimages_clusterapi + containerimages_pinniped + dependencies.container_images) | flatten | unique | sort }}"
|
|
@ -0,0 +1,31 @@
|
||||||
|
---
|
||||||
|
- name: Initialize tempfolder
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: directory
|
||||||
|
register: archive
|
||||||
|
|
||||||
|
- name: Download & extract archived static binary
|
||||||
|
ansible.builtin.unarchive:
|
||||||
|
src: "{{ item.url }}"
|
||||||
|
dest: "{{ archive.path }}"
|
||||||
|
remote_src: yes
|
||||||
|
extra_opts: "{{ item.extra_opts | default(omit) }}"
|
||||||
|
register: staticbinary_download
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: staticbinary_download is not failed
|
||||||
|
|
||||||
|
- name: Install extracted binary
|
||||||
|
ansible.builtin.copy:
|
||||||
|
src: "{{ archive.path }}/{{ item.filename }}"
|
||||||
|
dest: /usr/local/bin/{{ item.filename }}
|
||||||
|
remote_src: yes
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
|
||||||
|
- name: Cleanup tempfolder
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ archive.path }}"
|
||||||
|
state: absent
|
||||||
|
when: archive.path is defined
|
|
@ -0,0 +1,54 @@
|
||||||
|
- name: Download & install static binaries
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: "{{ item.url }}"
|
||||||
|
url_username: "{{ item.username | default(omit) }}"
|
||||||
|
url_password: "{{ item.password | default(omit) }}"
|
||||||
|
dest: /usr/local/bin/{{ item.filename }}
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
register: staticbinary_download
|
||||||
|
loop: "{{ dependencies.static_binaries | selectattr('archive', 'undefined') }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.filename }}"
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: staticbinary_download is not failed
|
||||||
|
|
||||||
|
- name: Download, extract & install archived static binaries
|
||||||
|
include_tasks: dependencies.archive_compressed.yml
|
||||||
|
loop: "{{ dependencies.static_binaries | rejectattr('archive', 'undefined') | selectattr('archive', 'equalto', 'compressed') }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.filename }}"
|
||||||
|
|
||||||
|
- name: Install ansible-galaxy collections
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: ansible-galaxy collection install {{ item }}
|
||||||
|
register: collections
|
||||||
|
loop: "{{ dependencies.ansible_galaxy_collections }}"
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: collections is not failed
|
||||||
|
|
||||||
|
- name: Install distro packages
|
||||||
|
ansible.builtin.apt:
|
||||||
|
pkg: "{{ dependencies.packages.apt }}"
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
install_recommends: no
|
||||||
|
|
||||||
|
- name: Upgrade all packages
|
||||||
|
ansible.builtin.apt:
|
||||||
|
name: '*'
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
|
||||||
|
- name: Install additional python packages
|
||||||
|
ansible.builtin.pip:
|
||||||
|
name: "{{ dependencies.packages.pip }}"
|
||||||
|
state: latest
|
||||||
|
|
||||||
|
- name: Cleanup apt cache
|
||||||
|
ansible.builtin.apt:
|
||||||
|
autoremove: yes
|
||||||
|
purge: yes
|
|
@ -0,0 +1,31 @@
|
||||||
|
- name: Add helm repositories
|
||||||
|
kubernetes.core.helm_repository:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
repo_url: "{{ item.url }}"
|
||||||
|
state: present
|
||||||
|
loop: "{{ platform.helm_repositories + downstream.helm_repositories }}"
|
||||||
|
|
||||||
|
- name: Fetch helm charts for metacluster
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: helm fetch {{ item.value.helm.chart }} --untar --version {{ item.value.helm.version }}
|
||||||
|
chdir: /opt/metacluster/helm-charts
|
||||||
|
when: item.value.helm is defined
|
||||||
|
register: helmcharts_metacluster
|
||||||
|
loop: "{{ query('ansible.builtin.dict', components) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: helmcharts_metacluster is not failed
|
||||||
|
|
||||||
|
- name: Fetch helm charts for workloadcluster
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: helm fetch {{ item.value.chart }} --untardir ./{{ item.value.namespace }} --untar --version {{ item.value.version }}
|
||||||
|
chdir: /opt/workloadcluster/helm-charts
|
||||||
|
register: helmcharts_workloadcluster
|
||||||
|
loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: helmcharts_workloadcluster is not failed
|
|
@ -0,0 +1,43 @@
|
||||||
|
- name: Download & install K3s binary
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://github.com/k3s-io/k3s/releases/download/{{ platform.k3s.version }}/k3s
|
||||||
|
dest: /usr/local/bin/k3s
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
register: download
|
||||||
|
until: download is not failed
|
||||||
|
retries: 3
|
||||||
|
delay: 10
|
||||||
|
|
||||||
|
- name: Download K3s images tarball
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://github.com/k3s-io/k3s/releases/download/{{ platform.k3s.version }}/k3s-airgap-images-amd64.tar.gz
|
||||||
|
dest: /var/lib/rancher/k3s/agent/images
|
||||||
|
register: download
|
||||||
|
until: download is not failed
|
||||||
|
retries: 3
|
||||||
|
delay: 10
|
||||||
|
|
||||||
|
- name: Download K3s install script
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://raw.githubusercontent.com/k3s-io/k3s/{{ platform.k3s.version | urlencode }}/install.sh
|
||||||
|
dest: /opt/metacluster/k3s/install.sh
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
register: download
|
||||||
|
until: download is not failed
|
||||||
|
retries: 3
|
||||||
|
delay: 10
|
||||||
|
|
||||||
|
- name: Inject manifests
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: helmchartconfig.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ item.name }}-config.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
loop: "{{ platform.packaged_components }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.name }}"
|
|
@ -0,0 +1,26 @@
|
||||||
|
- name: Initialize tempfile
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: directory
|
||||||
|
register: kubeadm
|
||||||
|
|
||||||
|
- name: Download kubeadm binary
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://dl.k8s.io/release/{{ components.clusterapi.workload.version.k8s }}/bin/linux/amd64/kubeadm
|
||||||
|
dest: "{{ kubeadm.path }}/kubeadm"
|
||||||
|
mode: u+x
|
||||||
|
|
||||||
|
- name: Retrieve container images list
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: "{{ kubeadm.path }}/kubeadm config images list --kubernetes-version {{ components.clusterapi.workload.version.k8s }}"
|
||||||
|
register: kubeadmimages
|
||||||
|
|
||||||
|
- name: Store list of container images for reference
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /opt/metacluster/cluster-api/imagelist
|
||||||
|
content: "{{ kubeadmimages.stdout }}"
|
||||||
|
|
||||||
|
- name: Cleanup tempfile
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ kubeadm.path }}"
|
||||||
|
state: absent
|
||||||
|
when: kubeadm.path is defined
|
|
@ -0,0 +1,32 @@
|
||||||
|
- name: Create folder structure(s)
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
loop:
|
||||||
|
- /etc/rancher/k3s
|
||||||
|
- /opt/metacluster/cluster-api/bootstrap-kubeadm/{{ components.clusterapi.management.version.base }}
|
||||||
|
- /opt/metacluster/cluster-api/cert-manager/{{ components.clusterapi.management.version.cert_manager }}
|
||||||
|
- /opt/metacluster/cluster-api/cluster-api/{{ components.clusterapi.management.version.base }}
|
||||||
|
- /opt/metacluster/cluster-api/cni-calico/{{ components.clusterapi.workload.version.calico }}
|
||||||
|
- /opt/metacluster/cluster-api/control-plane-kubeadm/{{ components.clusterapi.management.version.base }}
|
||||||
|
- /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}
|
||||||
|
- /opt/metacluster/cluster-api/ipam-in-cluster/{{ components.clusterapi.management.version.ipam_incluster }}
|
||||||
|
- /opt/metacluster/container-images
|
||||||
|
- /opt/metacluster/git-repositories
|
||||||
|
- /opt/metacluster/helm-charts
|
||||||
|
- /opt/metacluster/k3s
|
||||||
|
- /opt/metacluster/kube-vip
|
||||||
|
- /opt/metacluster/pinniped
|
||||||
|
- /opt/workloadcluster/helm-charts
|
||||||
|
- /opt/workloadcluster/node-templates
|
||||||
|
- /var/lib/rancher/k3s/agent/images
|
||||||
|
- /var/lib/rancher/k3s/server/manifests
|
||||||
|
|
||||||
|
- import_tasks: dependencies.yml
|
||||||
|
- import_tasks: k3s.yml
|
||||||
|
- import_tasks: helm.yml
|
||||||
|
# - import_tasks: git.yml
|
||||||
|
- import_tasks: manifests.yml
|
||||||
|
- import_tasks: kubeadm.yml
|
||||||
|
- import_tasks: containerimages.yml
|
||||||
|
- import_tasks: nodetemplates.yml
|
|
@ -0,0 +1,137 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Aggregate meta-cluster chart_values into dict
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
metacluster_chartvalues: "{{ metacluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.helm.chart_values | from_yaml) } }) }}"
|
||||||
|
when: item.value.helm.chart_values is defined
|
||||||
|
loop: "{{ query('ansible.builtin.dict', components) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
|
||||||
|
- name: Combine and write dict to vars_file
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /opt/firstboot/ansible/vars/metacluster.yml
|
||||||
|
content: >-
|
||||||
|
{{
|
||||||
|
{ 'components': (
|
||||||
|
metacluster_chartvalues |
|
||||||
|
combine({ 'clusterapi' : components['clusterapi'] }) |
|
||||||
|
combine({ 'kubevip' : components['kubevip'] }) |
|
||||||
|
combine({ 'localuserauthenticator': components['pinniped']['local-user-authenticator'] })),
|
||||||
|
'appliance': {
|
||||||
|
'version': (applianceversion)
|
||||||
|
}
|
||||||
|
} | to_nice_yaml(indent=2, width=4096)
|
||||||
|
}}
|
||||||
|
|
||||||
|
- name: Aggregate workload-cluster chart_values into dict
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
workloadcluster_chartvalues: |
|
||||||
|
{{
|
||||||
|
workloadcluster_chartvalues | default({}) | combine({
|
||||||
|
item.key: {
|
||||||
|
'chart_values': (item.value.chart_values | default('') | from_yaml),
|
||||||
|
'extra_manifests': (item.value.extra_manifests | default([])),
|
||||||
|
'namespace': (item.value.namespace)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}}
|
||||||
|
loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
|
||||||
|
- name: Write dict to vars_file
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /opt/firstboot/ansible/vars/workloadcluster.yml
|
||||||
|
content: >-
|
||||||
|
{{
|
||||||
|
{ 'downstream_components': ( workloadcluster_chartvalues )
|
||||||
|
} | to_nice_yaml(indent=2, width=4096)
|
||||||
|
}}
|
||||||
|
|
||||||
|
- name: Download Cluster-API manifests
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: "{{ item.url }}"
|
||||||
|
dest: /opt/metacluster/cluster-api/{{ item.dest }}
|
||||||
|
register: clusterapi_manifests
|
||||||
|
loop:
|
||||||
|
# This list is based on `clusterctl config repositories`
|
||||||
|
# Note: Each manifest also needs a `metadata.yaml` file stored in the respective folder
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api/releases/download/{{ components.clusterapi.management.version.base }}/bootstrap-components.yaml
|
||||||
|
dest: bootstrap-kubeadm/{{ components.clusterapi.management.version.base }}/bootstrap-components.yaml
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api/releases/download/{{ components.clusterapi.management.version.base }}/core-components.yaml
|
||||||
|
dest: cluster-api/{{ components.clusterapi.management.version.base }}/core-components.yaml
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api/releases/download/{{ components.clusterapi.management.version.base }}/control-plane-components.yaml
|
||||||
|
dest: control-plane-kubeadm/{{ components.clusterapi.management.version.base }}/control-plane-components.yaml
|
||||||
|
# This downloads the same `metadata.yaml` file to three separate folders
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api/releases/download/{{ components.clusterapi.management.version.base }}/metadata.yaml
|
||||||
|
dest: bootstrap-kubeadm/{{ components.clusterapi.management.version.base }}/metadata.yaml
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api/releases/download/{{ components.clusterapi.management.version.base }}/metadata.yaml
|
||||||
|
dest: cluster-api/{{ components.clusterapi.management.version.base }}/metadata.yaml
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api/releases/download/{{ components.clusterapi.management.version.base }}/metadata.yaml
|
||||||
|
dest: control-plane-kubeadm/{{ components.clusterapi.management.version.base }}/metadata.yaml
|
||||||
|
# The vsphere infrastructure provider requires multiple files (`cluster-template.yaml` and `metadata.yaml` on top of default files)
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{{ components.clusterapi.management.version.infrastructure_vsphere }}/infrastructure-components.yaml
|
||||||
|
dest: infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/infrastructure-components.yaml
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{{ components.clusterapi.management.version.infrastructure_vsphere }}/cluster-template.yaml
|
||||||
|
dest: infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/cluster-template.yaml
|
||||||
|
- url: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{{ components.clusterapi.management.version.infrastructure_vsphere }}/metadata.yaml
|
||||||
|
dest: infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/metadata.yaml
|
||||||
|
# Additionally, cert-manager is a prerequisite
|
||||||
|
- url: https://github.com/cert-manager/cert-manager/releases/download/{{ components.clusterapi.management.version.cert_manager }}/cert-manager.yaml
|
||||||
|
dest: cert-manager/{{ components.clusterapi.management.version.cert_manager }}/cert-manager.yaml
|
||||||
|
# Finally, workload clusters will need a CNI plugin
|
||||||
|
- url: https://raw.githubusercontent.com/projectcalico/calico/{{ components.clusterapi.workload.version.calico }}/manifests/calico.yaml
|
||||||
|
dest: cni-calico/{{ components.clusterapi.workload.version.calico }}/calico.yaml
|
||||||
|
# IPAM in-cluster provider (w/ metadata.yaml)
|
||||||
|
- url: https://github.com/telekom/cluster-api-ipam-provider-in-cluster/releases/download/{{ components.clusterapi.management.version.ipam_incluster }}/ipam-components.yaml
|
||||||
|
dest: ipam-in-cluster/{{ components.clusterapi.management.version.ipam_incluster }}/ipam-components.yaml
|
||||||
|
- url: https://github.com/telekom/cluster-api-ipam-provider-in-cluster/releases/download/{{ components.clusterapi.management.version.ipam_incluster }}/metadata.yaml
|
||||||
|
dest: ipam-in-cluster/{{ components.clusterapi.management.version.ipam_incluster }}/metadata.yaml
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.url | basename }}"
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: clusterapi_manifests is not failed
|
||||||
|
|
||||||
|
- name: Update cluster-template with image tags
|
||||||
|
ansible.builtin.replace:
|
||||||
|
dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/cluster-template.yaml
|
||||||
|
regexp: ':\${CPI_IMAGE_K8S_VERSION}'
|
||||||
|
replace: ":{{ components.clusterapi.management.version.cpi_vsphere }}"
|
||||||
|
|
||||||
|
- name: Download kube-vip RBAC manifest
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://kube-vip.io/manifests/rbac.yaml
|
||||||
|
dest: /opt/metacluster/kube-vip/rbac.yaml
|
||||||
|
register: kubevip_manifest
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: kubevip_manifest is not failed
|
||||||
|
|
||||||
|
- name: Download pinniped local-user-authenticator manifest
|
||||||
|
ansible.builtin.get_url:
|
||||||
|
url: https://get.pinniped.dev/{{ components.pinniped['local-user-authenticator'].version }}/install-local-user-authenticator.yaml
|
||||||
|
dest: /opt/metacluster/pinniped/local-user-authenticator.yaml
|
||||||
|
register: pinniped_manifest
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: pinniped_manifest is not failed
|
||||||
|
|
||||||
|
- name: Trim image hash from manifest
|
||||||
|
ansible.builtin.replace:
|
||||||
|
path: /opt/metacluster/pinniped/local-user-authenticator.yaml
|
||||||
|
regexp: '([ ]*image: .*)@.*'
|
||||||
|
replace: '\1'
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
# - name: Inject manifests
|
||||||
|
# ansible.builtin.template:
|
||||||
|
# src: "{{ item.type }}.j2"
|
||||||
|
# dest: /var/lib/rancher/k3s/server/manifests/{{ item.name }}-manifest.yaml
|
||||||
|
# owner: root
|
||||||
|
# group: root
|
||||||
|
# mode: 0600
|
||||||
|
# loop: "{{ query('ansible.builtin.dict', components) | map(attribute='value.manifests') | list | select('defined') | flatten }}"
|
||||||
|
# loop_control:
|
||||||
|
# label: "{{ item.type ~ '/' ~ item.name }}"
|
|
@ -0,0 +1,4 @@
|
||||||
|
- name: Download node-template image
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: "{{ components.clusterapi.workload.node_template.url }}"
|
||||||
|
dest: /opt/workloadcluster/node-templates/{{ components.clusterapi.workload.node_template.url | basename}}
|
|
@ -0,0 +1,8 @@
|
||||||
|
apiVersion: helm.cattle.io/v1
|
||||||
|
kind: HelmChartConfig
|
||||||
|
metadata:
|
||||||
|
name: {{ item.name }}
|
||||||
|
namespace: {{ item.namespace }}
|
||||||
|
spec:
|
||||||
|
valuesContent: |-
|
||||||
|
{{ item.config }}
|
|
@ -0,0 +1,30 @@
|
||||||
|
---
|
||||||
|
- hosts: 127.0.0.1
|
||||||
|
connection: local
|
||||||
|
gather_facts: true
|
||||||
|
vars:
|
||||||
|
# Needed by some templating in various tasks
|
||||||
|
_newline: "\n"
|
||||||
|
vars_files:
|
||||||
|
- defaults.yml
|
||||||
|
- metacluster.yml
|
||||||
|
- workloadcluster.yml
|
||||||
|
# become: true
|
||||||
|
roles:
|
||||||
|
- vapp
|
||||||
|
- network
|
||||||
|
- preflight
|
||||||
|
- users
|
||||||
|
- disks
|
||||||
|
- metacluster
|
||||||
|
- workloadcluster
|
||||||
|
- tty
|
||||||
|
- cleanup
|
||||||
|
handlers:
|
||||||
|
- name: Apply manifests
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
src: "{{ item }}"
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
loop: "{{ query('ansible.builtin.fileglob', '/var/lib/rancher/k3s/server/manifests/*.yaml') | sort }}"
|
||||||
|
ignore_errors: yes
|
|
@ -0,0 +1,176 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Install dex
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: dex
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/dex
|
||||||
|
release_namespace: dex
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['dex'].chart_values }}"
|
||||||
|
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Install pinniped local-user-authenticator
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
src: /opt/metacluster/pinniped/local-user-authenticator.yaml
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
|
||||||
|
- name: Create local-user-authenticator accounts
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
template: secret.j2
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: "{{ item.username }}"
|
||||||
|
namespace: local-user-authenticator
|
||||||
|
type: ''
|
||||||
|
data:
|
||||||
|
- key: groups
|
||||||
|
value: "{{ 'group1,group2' | b64encode }}"
|
||||||
|
- key: passwordHash
|
||||||
|
value: "{{ item.password | b64encode }}"
|
||||||
|
loop: "{{ components['localuserauthenticator'].users }}"
|
||||||
|
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Install pinniped chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: pinniped
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/pinniped
|
||||||
|
release_namespace: pinniped-supervisor
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['pinniped'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Add ingress for supervisor
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
template: "{{ item.kind }}.j2"
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
namespace: "{{ item.namespace }}"
|
||||||
|
spec: "{{ item.spec }}"
|
||||||
|
loop:
|
||||||
|
- kind: ingressroute
|
||||||
|
name: pinniped-supervisor
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
spec: |2
|
||||||
|
entryPoints:
|
||||||
|
- web
|
||||||
|
- websecure
|
||||||
|
routes:
|
||||||
|
- kind: Rule
|
||||||
|
match: Host(`auth.{{ vapp['metacluster.fqdn'] }}`)
|
||||||
|
services:
|
||||||
|
- kind: Service
|
||||||
|
name: pinniped-supervisor
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
port: 443
|
||||||
|
scheme: https
|
||||||
|
serversTransport: pinniped-supervisor
|
||||||
|
- kind: serverstransport
|
||||||
|
name: pinniped-supervisor
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
spec: |2
|
||||||
|
insecureSkipVerify: true
|
||||||
|
serverName: auth.{{ vapp['metacluster.fqdn'] }}
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.kind ~ '/' ~ item.name ~ ' (' ~ item.namespace ~ ')' }}"
|
||||||
|
|
||||||
|
- name: Ensure pinniped API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://auth.{{ vapp['metacluster.fqdn'] }}/healthz
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.status == 200
|
||||||
|
- api_readycheck.msg is search("OK")
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
|
||||||
|
# TODO: Migrate to step-ca
|
||||||
|
- name: Initialize tempfile
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: directory
|
||||||
|
register: certificate
|
||||||
|
|
||||||
|
- name: Create private key (RSA, 4096 bits)
|
||||||
|
community.crypto.openssl_privatekey:
|
||||||
|
path: "{{ certificate.path }}/certificate.key"
|
||||||
|
|
||||||
|
- name: Create self-signed certificate
|
||||||
|
community.crypto.x509_certificate:
|
||||||
|
path: "{{ certificate.path }}/certificate.crt"
|
||||||
|
privatekey_path: "{{ certificate.path }}/certificate.key"
|
||||||
|
provider: selfsigned
|
||||||
|
|
||||||
|
- name: Store self-signed certificate for use by pinniped supervisor
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
template: secret.j2
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: pinniped-supervisor-tls
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
type: kubernetes.io/tls
|
||||||
|
data:
|
||||||
|
- key: tls.crt
|
||||||
|
value: "{{ lookup('ansible.builtin.file', certificate.path ~ '/certificate.crt') | b64encode }}"
|
||||||
|
- key: tls.key
|
||||||
|
value: "{{ lookup('ansible.builtin.file', certificate.path ~ '/certificate.key') | b64encode }}"
|
||||||
|
# TODO: Migrate to step-ca
|
||||||
|
|
||||||
|
- name: Create pinniped resources
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
template: "{{ item.kind }}.j2"
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
namespace: "{{ item.namespace }}"
|
||||||
|
type: "{{ item.type | default('') }}"
|
||||||
|
data: "{{ item.data | default(omit) }}"
|
||||||
|
spec: "{{ item.spec | default(omit) }}"
|
||||||
|
loop:
|
||||||
|
- kind: oidcidentityprovider
|
||||||
|
name: dex-staticpasswords
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
spec: |2
|
||||||
|
issuer: https://idps.{{ vapp['metacluster.fqdn'] }}
|
||||||
|
tls:
|
||||||
|
certificateAuthorityData: "{{ (stepca_cm_certs.resources[0].data['intermediate_ca.crt'] ~ _newline ~ stepca_cm_certs.resources[0].data['root_ca.crt']) | b64encode }}"
|
||||||
|
authorizationConfig:
|
||||||
|
additionalScopes: [offline_access, groups, email]
|
||||||
|
allowPasswordGrant: false
|
||||||
|
claims:
|
||||||
|
username: email
|
||||||
|
groups: groups
|
||||||
|
client:
|
||||||
|
secretName: dex-clientcredentials
|
||||||
|
- kind: secret
|
||||||
|
name: dex-clientcredentials
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
type: secrets.pinniped.dev/oidc-client
|
||||||
|
data:
|
||||||
|
- key: clientID
|
||||||
|
value: "{{ 'pinniped-supervisor' | b64encode }}"
|
||||||
|
- key: clientSecret
|
||||||
|
value: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['metacluster.fqdn']) | b64encode }}"
|
||||||
|
- kind: federationdomain
|
||||||
|
name: metacluster-sso
|
||||||
|
namespace: pinniped-supervisor
|
||||||
|
spec: |2
|
||||||
|
issuer: https://auth.{{ vapp['metacluster.fqdn'] }}/sso
|
||||||
|
tls:
|
||||||
|
secretName: pinniped-supervisor-tls
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.kind ~ '/' ~ item.name }}"
|
|
@ -0,0 +1,139 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Inject password into values file
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ stepconfig.path }}"
|
||||||
|
content: "{{ lookup('ansible.builtin.file', stepconfig.path) | regex_replace('(ca_password|provisioner_password):[ ]?\n', '\\1: ' ~ (vapp['metacluster.password'] | b64encode) ~ '\n') }}"
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Install step-ca chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: step-certificates
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/step-certificates
|
||||||
|
release_namespace: step-ca
|
||||||
|
create_namespace: true
|
||||||
|
# Unable to use REST api based readycheck due to lack of ingress
|
||||||
|
wait: true
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values_files:
|
||||||
|
- "{{ stepconfig.path }}"
|
||||||
|
|
||||||
|
- name: Retrieve configmap w/ root certificate
|
||||||
|
kubernetes.core.k8s_info:
|
||||||
|
kind: ConfigMap
|
||||||
|
name: step-certificates-certs
|
||||||
|
namespace: step-ca
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
register: stepca_cm_certs
|
||||||
|
|
||||||
|
- name: Create target namespaces
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
kind: Namespace
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
loop:
|
||||||
|
- argo-cd
|
||||||
|
- gitea
|
||||||
|
# - kube-system
|
||||||
|
|
||||||
|
- name: Store root certificate in namespaced configmaps/secrets
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
state: present
|
||||||
|
template: "{{ item.kind }}.j2"
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
namespace: "{{ item.namespace }}"
|
||||||
|
annotations: "{{ item.annotations | default('{}') | indent(width=4, first=True) }}"
|
||||||
|
labels: "{{ item.labels | default('{}') | indent(width=4, first=True) }}"
|
||||||
|
type: "{{ item.type | default('') }}"
|
||||||
|
data: "{{ item.data }}"
|
||||||
|
loop:
|
||||||
|
- name: argocd-tls-certs-cm
|
||||||
|
namespace: argo-cd
|
||||||
|
kind: configmap
|
||||||
|
annotations: |
|
||||||
|
meta.helm.sh/release-name: argo-cd
|
||||||
|
meta.helm.sh/release-namespace: argo-cd
|
||||||
|
labels: |
|
||||||
|
app.kubernetes.io/managed-by: Helm
|
||||||
|
app.kubernetes.io/name: argocd-cm
|
||||||
|
app.kubernetes.io/part-of: argocd
|
||||||
|
data:
|
||||||
|
- key: git.{{ vapp['metacluster.fqdn'] }}
|
||||||
|
value: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
|
||||||
|
- name: step-certificates-certs
|
||||||
|
namespace: gitea
|
||||||
|
kind: secret
|
||||||
|
data:
|
||||||
|
- key: ca_chain.crt
|
||||||
|
value: "{{ (stepca_cm_certs.resources[0].data['intermediate_ca.crt'] ~ _newline ~ stepca_cm_certs.resources[0].data['root_ca.crt']) | b64encode }}"
|
||||||
|
- name: step-certificates-certs
|
||||||
|
namespace: kube-system
|
||||||
|
kind: secret
|
||||||
|
data:
|
||||||
|
- key: root_ca.crt
|
||||||
|
value: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] | b64encode }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.kind ~ '/' ~ item.name ~ ' (' ~ item.namespace ~ ')' }}"
|
||||||
|
|
||||||
|
- name: Configure step-ca passthrough ingress
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: ingressroutetcp.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: step-ca
|
||||||
|
namespace: step-ca
|
||||||
|
spec: |2
|
||||||
|
entryPoints:
|
||||||
|
- websecure
|
||||||
|
routes:
|
||||||
|
- match: HostSNI(`ca.{{ vapp['metacluster.fqdn'] }}`)
|
||||||
|
services:
|
||||||
|
- name: step-certificates
|
||||||
|
port: 443
|
||||||
|
tls:
|
||||||
|
passthrough: true
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Inject step-ca certificate into traefik container
|
||||||
|
ansible.builtin.blockinfile:
|
||||||
|
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
|
||||||
|
block: |2
|
||||||
|
volumes:
|
||||||
|
- name: step-certificates-certs
|
||||||
|
mountPath: /step-ca
|
||||||
|
type: secret
|
||||||
|
env:
|
||||||
|
- name: LEGO_CA_CERTIFICATES
|
||||||
|
value: /step-ca/root_ca.crt
|
||||||
|
marker: ' # {mark} ANSIBLE MANAGED BLOCK [rootca]'
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
||||||
|
|
||||||
|
- name: Ensure step-ca API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://ca.{{ vapp['metacluster.fqdn'] }}/health
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'ok'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,151 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Install gitea chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: gitea
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/gitea
|
||||||
|
release_namespace: gitea
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['gitea'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure gitea API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/healthz
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'pass'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
- name: Configure additional SSH ingress
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: ingressroutetcp.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: gitea-ssh
|
||||||
|
namespace: gitea
|
||||||
|
spec: |2
|
||||||
|
entryPoints:
|
||||||
|
- ssh
|
||||||
|
routes:
|
||||||
|
- match: HostSNI(`*`)
|
||||||
|
services:
|
||||||
|
- name: gitea-ssh
|
||||||
|
port: 22
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
||||||
|
|
||||||
|
- name: Generate gitea API token
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/users/administrator/tokens
|
||||||
|
method: POST
|
||||||
|
user: administrator
|
||||||
|
password: "{{ vapp['metacluster.password'] }}"
|
||||||
|
force_basic_auth: yes
|
||||||
|
body:
|
||||||
|
name: token_init_{{ lookup('password', '/dev/null length=5 chars=ascii_letters,digits') }}
|
||||||
|
register: gitea_api_token
|
||||||
|
|
||||||
|
- name: Retrieve existing gitea configuration
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/repos/search
|
||||||
|
method: GET
|
||||||
|
register: gitea_existing_config
|
||||||
|
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Register SSH public key
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/user/keys
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: token {{ gitea_api_token.json.sha1 }}
|
||||||
|
body:
|
||||||
|
key: "{{ gitops_sshkey.public_key }}"
|
||||||
|
read_only: false
|
||||||
|
title: GitOps
|
||||||
|
|
||||||
|
- name: Create organization(s)
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/orgs
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: token {{ gitea_api_token.json.sha1 }}
|
||||||
|
body: "{{ item }}"
|
||||||
|
loop:
|
||||||
|
- full_name: Meta-cluster
|
||||||
|
description: Meta-cluster configuration items
|
||||||
|
username: mc
|
||||||
|
website: https://git.{{ vapp['metacluster.fqdn'] }}/mc
|
||||||
|
location: '[...]'
|
||||||
|
visibility: public
|
||||||
|
- full_name: Workload-cluster
|
||||||
|
description: Workload-cluster configuration items
|
||||||
|
username: wl
|
||||||
|
website: https://git.{{ vapp['metacluster.fqdn'] }}/wl
|
||||||
|
location: '[...]'
|
||||||
|
visibility: public
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.full_name }}"
|
||||||
|
|
||||||
|
- name: Create repositories
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/orgs/{{ item.organization }}/repos
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: token {{ gitea_api_token.json.sha1 }}
|
||||||
|
body: "{{ item.body }}"
|
||||||
|
loop:
|
||||||
|
- organization: mc
|
||||||
|
body:
|
||||||
|
name: GitOps.ClusterAPI
|
||||||
|
auto_init: true
|
||||||
|
default_branch: main
|
||||||
|
description: ClusterAPI manifests
|
||||||
|
- organization: mc
|
||||||
|
body:
|
||||||
|
name: GitOps.Config
|
||||||
|
# auto_init: true
|
||||||
|
# default_branch: main
|
||||||
|
description: GitOps manifests
|
||||||
|
- organization: wl
|
||||||
|
body:
|
||||||
|
name: GitOps.Config
|
||||||
|
auto_init: true
|
||||||
|
default_branch: main
|
||||||
|
description: GitOps manifests
|
||||||
|
- organization: wl
|
||||||
|
body:
|
||||||
|
name: ClusterAccess.Store
|
||||||
|
auto_init: true
|
||||||
|
default_branch: main
|
||||||
|
description: Kubeconfig files
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.organization ~ '/' ~ item.body.name }}"
|
||||||
|
|
||||||
|
# - name: Rebase/Push source gitops repository
|
||||||
|
# ansible.builtin.shell:
|
||||||
|
# cmd: |
|
||||||
|
# git config --local http.sslVerify false
|
||||||
|
# git remote set-url origin https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
|
||||||
|
# git push
|
||||||
|
# chdir: /opt/metacluster/git-repositories/gitops
|
||||||
|
|
||||||
|
when: (gitea_existing_config.json is undefined) or (gitea_existing_config.json.data | length == 0)
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,75 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Install argo-cd chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: argo-cd
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/argo-cd
|
||||||
|
release_namespace: argo-cd
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['argo-cd'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure argo-cd API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://gitops.{{ vapp['metacluster.fqdn'] }}/api/version
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.Version is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
- name: Generate argo-cd API token
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://gitops.{{ vapp['metacluster.fqdn'] }}/api/v1/session
|
||||||
|
method: POST
|
||||||
|
force_basic_auth: yes
|
||||||
|
body:
|
||||||
|
username: admin
|
||||||
|
password: "{{ vapp['metacluster.password'] }}"
|
||||||
|
register: argocd_api_token
|
||||||
|
|
||||||
|
- name: Configure metacluster-gitops repository
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: gitrepo.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: gitrepo-mc-gitopsconfig
|
||||||
|
namespace: argo-cd
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Create applicationset
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: applicationset.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
application:
|
||||||
|
name: applicationset-metacluster
|
||||||
|
namespace: argo-cd
|
||||||
|
cluster:
|
||||||
|
url: https://kubernetes.default.svc
|
||||||
|
repository:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
|
||||||
|
revision: main
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,44 @@
|
||||||
|
- name: Reconfigure traefik container for persistence
|
||||||
|
ansible.builtin.blockinfile:
|
||||||
|
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
|
||||||
|
block: |2
|
||||||
|
deployment:
|
||||||
|
initContainers:
|
||||||
|
- name: volume-permissions
|
||||||
|
image: busybox:1
|
||||||
|
command: ["sh", "-c", "touch /data/acme.json && chmod -Rv 600 /data/* && chown 65532:65532 /data/acme.json"]
|
||||||
|
volumeMounts:
|
||||||
|
- name: data
|
||||||
|
mountPath: /data
|
||||||
|
persistence:
|
||||||
|
enabled: true
|
||||||
|
marker: ' # {mark} ANSIBLE MANAGED BLOCK [persistence]'
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Configure traefik dashboard ingress
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: ingressroute.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: traefik-dashboard
|
||||||
|
namespace: kube-system
|
||||||
|
spec: |2
|
||||||
|
entryPoints:
|
||||||
|
- web
|
||||||
|
- websecure
|
||||||
|
routes:
|
||||||
|
- kind: Rule
|
||||||
|
match: Host(`ingress.{{ vapp['metacluster.fqdn'] }}`)
|
||||||
|
services:
|
||||||
|
- kind: TraefikService
|
||||||
|
name: api@internal
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
|
@ -0,0 +1,100 @@
|
||||||
|
- name: Configure fallback name resolution
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/hosts
|
||||||
|
line: "{{ vapp['guestinfo.ipaddress'] }} {{ item ~ '.' ~ vapp['metacluster.fqdn'] }}"
|
||||||
|
state: present
|
||||||
|
loop:
|
||||||
|
# TODO: Make this list dynamic
|
||||||
|
- ca
|
||||||
|
- git
|
||||||
|
- gitops
|
||||||
|
- ingress
|
||||||
|
- registry
|
||||||
|
- storage
|
||||||
|
|
||||||
|
- name: Create step-ca config dictionary
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
stepconfig: "{{ { 'path': ansible_env.HOME ~ '/.step/config/values.yaml' } }}"
|
||||||
|
|
||||||
|
- name: Create step-ca target folder
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ stepconfig.path | dirname }}"
|
||||||
|
state: directory
|
||||||
|
|
||||||
|
- name: Initialize tempfile
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: file
|
||||||
|
register: stepca_password
|
||||||
|
|
||||||
|
- name: Store password in tempfile
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ stepca_password.path }}"
|
||||||
|
content: "{{ vapp['metacluster.password'] }}"
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Generate step-ca helm chart values (including root CA certificate)
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
step ca init \
|
||||||
|
--helm \
|
||||||
|
--deployment-type=standalone \
|
||||||
|
--name=ca.{{ vapp['metacluster.fqdn'] }} \
|
||||||
|
--dns=ca.{{ vapp['metacluster.fqdn'] }} \
|
||||||
|
--dns=step-certificates.step-ca.svc.cluster.local \
|
||||||
|
--dns=127.0.0.1 \
|
||||||
|
--address=:9000 \
|
||||||
|
--provisioner=admin \
|
||||||
|
--acme \
|
||||||
|
--password-file={{ stepca_password.path }} | tee {{ stepconfig.path }}
|
||||||
|
creates: "{{ stepconfig.path }}"
|
||||||
|
|
||||||
|
- name: Cleanup tempfile
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ stepca_password.path }}"
|
||||||
|
state: absent
|
||||||
|
when: stepca_password.path is defined
|
||||||
|
|
||||||
|
- name: Store root CA certificate
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /usr/local/share/ca-certificates/root_ca.crt
|
||||||
|
content: "{{ (lookup('ansible.builtin.file', stepconfig.path) | from_yaml).inject.certificates.root_ca }}"
|
||||||
|
|
||||||
|
- name: Update certificate truststore
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: update-ca-certificates
|
||||||
|
|
||||||
|
- name: Extract container images (for idempotency purposes)
|
||||||
|
ansible.builtin.unarchive:
|
||||||
|
src: /opt/metacluster/container-images/image-tarballs.tgz
|
||||||
|
dest: /opt/metacluster/container-images
|
||||||
|
remote_src: no
|
||||||
|
when:
|
||||||
|
- lookup('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tgz') is match('.*image-tarballs.tgz')
|
||||||
|
|
||||||
|
- name: Get all stored fully qualified container image names
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
skopeo list-tags \
|
||||||
|
--insecure-policy \
|
||||||
|
docker-archive:./{{ item | basename }} | \
|
||||||
|
jq -r '.Tags[0]'
|
||||||
|
chdir: /opt/metacluster/container-images
|
||||||
|
register: registry_artifacts
|
||||||
|
loop: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tar') | sort }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item | basename }}"
|
||||||
|
|
||||||
|
- name: Get source registries of all artifacts
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
source_registries: "{{ (source_registries | default([]) + [(item | split('/'))[0]]) | unique | sort }}"
|
||||||
|
loop: "{{ registry_artifacts | json_query('results[*].stdout') | select | sort }}"
|
||||||
|
|
||||||
|
- name: Configure K3s node for private registry
|
||||||
|
ansible.builtin.template:
|
||||||
|
dest: /etc/rancher/k3s/registries.yaml
|
||||||
|
src: registries.j2
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
registries: "{{ source_registries }}"
|
||||||
|
hv:
|
||||||
|
fqdn: "{{ vapp['metacluster.fqdn'] }}"
|
|
@ -0,0 +1,91 @@
|
||||||
|
- name: Store custom configuration files
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ item.filename }}"
|
||||||
|
content: "{{ item.content }}"
|
||||||
|
loop:
|
||||||
|
- filename: /etc/rancher/k3s/config.yaml
|
||||||
|
content: |
|
||||||
|
kubelet-arg:
|
||||||
|
- "config=/etc/rancher/k3s/kubelet.config"
|
||||||
|
- "image-gc-high-threshold=95"
|
||||||
|
- filename: /etc/rancher/k3s/kubelet.config
|
||||||
|
content: |
|
||||||
|
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||||
|
kind: KubeletConfiguration
|
||||||
|
|
||||||
|
shutdownGracePeriod: 180s
|
||||||
|
shtudownGracePeriodCriticalPods: 60s
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.filename }}"
|
||||||
|
|
||||||
|
- name: Gather service facts
|
||||||
|
ansible.builtin.service_facts:
|
||||||
|
# Module requires no attributes
|
||||||
|
|
||||||
|
- name: Install K3s
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: ./install.sh
|
||||||
|
chdir: /opt/metacluster/k3s
|
||||||
|
environment:
|
||||||
|
INSTALL_K3S_SKIP_DOWNLOAD: 'true'
|
||||||
|
INSTALL_K3S_EXEC: "server --cluster-init --token {{ vapp['metacluster.token'] | trim }} --tls-san {{ vapp['metacluster.vip'] }} --disable local-storage --config /etc/rancher/k3s/config.yaml"
|
||||||
|
when: ansible_facts.services['k3s.service'] is undefined
|
||||||
|
|
||||||
|
- name: Ensure API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://{{ vapp['guestinfo.ipaddress'] }}:6443/livez?verbose
|
||||||
|
method: GET
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 401]
|
||||||
|
register: api_readycheck
|
||||||
|
until: api_readycheck.json.apiVersion is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
||||||
|
|
||||||
|
- name: Install tab-completion
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: |-
|
||||||
|
{{ item }} completion bash > /etc/bash_completion.d/{{ item }}
|
||||||
|
creates: /etc/bash_completion.d/{{ item }}
|
||||||
|
loop:
|
||||||
|
- kubectl
|
||||||
|
- helm
|
||||||
|
- step
|
||||||
|
|
||||||
|
- name: Create kubeconfig dictionary
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
kubeconfig: "{{ { 'path': ansible_env.HOME ~ '/.kube/config' } }}"
|
||||||
|
|
||||||
|
- name: Create kubeconfig target folder
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ kubeconfig.path | dirname }}"
|
||||||
|
state: directory
|
||||||
|
|
||||||
|
- name: Retrieve kubeconfig
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: kubectl config view --raw
|
||||||
|
register: kubectl_config
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Store kubeconfig in tempfile
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ kubeconfig.path }}"
|
||||||
|
content: "{{ kubectl_config.stdout }}"
|
||||||
|
mode: 0600
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Add label to node object
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
name: "{{ ansible_facts.nodename | lower }}"
|
||||||
|
kind: Node
|
||||||
|
state: patched
|
||||||
|
definition:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
ova.airgappedk8s/moref_id: "{{ moref_id }}"
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
register: k8snode_patch
|
||||||
|
until:
|
||||||
|
- k8snode_patch.result.metadata.labels['ova.airgappedk8s/moref_id'] is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
|
@ -0,0 +1,12 @@
|
||||||
|
- import_tasks: init.yml
|
||||||
|
- import_tasks: k3s.yml
|
||||||
|
- import_tasks: assets.yml
|
||||||
|
- import_tasks: virtualip.yml
|
||||||
|
- import_tasks: metadata.yml
|
||||||
|
- import_tasks: storage.yml
|
||||||
|
- import_tasks: ingress.yml
|
||||||
|
- import_tasks: certauthority.yml
|
||||||
|
- import_tasks: registry.yml
|
||||||
|
- import_tasks: git.yml
|
||||||
|
- import_tasks: gitops.yml
|
||||||
|
- import_tasks: authentication.yml
|
|
@ -0,0 +1,57 @@
|
||||||
|
- block:
|
||||||
|
- name: Aggregate manifest-component versions into dictionary
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
manifest_versions: "{{ manifest_versions | default([]) + [ item | combine( {'type': 'manifest', 'id': index } ) ] }}"
|
||||||
|
loop:
|
||||||
|
- name: cluster-api
|
||||||
|
versions:
|
||||||
|
management:
|
||||||
|
base: "{{ components.clusterapi.management.version.base }}"
|
||||||
|
cert_manager: "{{ components.clusterapi.management.version.cert_manager }}"
|
||||||
|
infrastructure_vsphere: "{{ components.clusterapi.management.version.infrastructure_vsphere }}"
|
||||||
|
ipam_incluster: "{{ components.clusterapi.management.version.ipam_incluster }}"
|
||||||
|
cpi_vsphere: "{{ components.clusterapi.management.version.cpi_vsphere }}"
|
||||||
|
workload:
|
||||||
|
calico: "{{ components.clusterapi.workload.version.calico }}"
|
||||||
|
k8s: "{{ components.clusterapi.workload.version.k8s }}"
|
||||||
|
- name: kube-vip
|
||||||
|
version: "{{ components.kubevip.version }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.name }}"
|
||||||
|
index_var: index
|
||||||
|
|
||||||
|
- name: Install json-server chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: json-server
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/json-server
|
||||||
|
release_namespace: json-server
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: |
|
||||||
|
{{
|
||||||
|
components['json-server'].chart_values |
|
||||||
|
combine(
|
||||||
|
{ 'jsonServer': { 'seedData': { 'configInline': (
|
||||||
|
{ 'appliance': { "version": appliance.version }, 'components': manifest_versions, 'healthz': { 'status': 'running' } }
|
||||||
|
) | to_json } } }
|
||||||
|
)
|
||||||
|
}}
|
||||||
|
|
||||||
|
- name: Ensure json-server API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://version.{{ vapp['metacluster.fqdn'] }}/healthz
|
||||||
|
method: GET
|
||||||
|
# This mock REST API -ironically- does not support json encoded body argument
|
||||||
|
body_format: raw
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'running'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
|
@ -0,0 +1,50 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Install harbor chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: harbor
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/harbor
|
||||||
|
release_namespace: harbor
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['harbor'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure harbor API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/health
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'healthy'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
- name: Push images to registry
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
skopeo copy \
|
||||||
|
--insecure-policy \
|
||||||
|
--dest-tls-verify=false \
|
||||||
|
--dest-creds admin:{{ vapp['metacluster.password'] }} \
|
||||||
|
docker-archive:./{{ item | basename }} \
|
||||||
|
docker://registry.{{ vapp['metacluster.fqdn'] }}/library/$( \
|
||||||
|
skopeo list-tags \
|
||||||
|
--insecure-policy \
|
||||||
|
docker-archive:./{{ item | basename }} | \
|
||||||
|
jq -r '.Tags[0]')
|
||||||
|
chdir: /opt/metacluster/container-images/
|
||||||
|
register: push_result
|
||||||
|
loop: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tar') | sort }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item | basename }}"
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
until: push_result is not failed
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201, 401]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,26 @@
|
||||||
|
- block:
|
||||||
|
- name: Install longhorn chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: longhorn
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/longhorn
|
||||||
|
release_namespace: longhorn-system
|
||||||
|
create_namespace: true
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['longhorn'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure longhorn API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://storage.{{ vapp['metacluster.fqdn'] }}/v1
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck is not failed
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,27 @@
|
||||||
|
- name: Generate kube-vip manifest
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:{{ components.kubevip.version }} vip \
|
||||||
|
/kube-vip manifest daemonset \
|
||||||
|
--interface eth0 \
|
||||||
|
--address {{ vapp['metacluster.vip'] }} \
|
||||||
|
--inCluster \
|
||||||
|
--taint \
|
||||||
|
--controlplane \
|
||||||
|
--services \
|
||||||
|
--arp \
|
||||||
|
--leaderElection
|
||||||
|
register: kubevip_manifest
|
||||||
|
|
||||||
|
- name: Inject manifests
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/kubevip-manifest.yaml
|
||||||
|
content: |
|
||||||
|
{{ lookup('ansible.builtin.file', '/opt/metacluster/kube-vip/rbac.yaml') }}
|
||||||
|
---
|
||||||
|
{{ kubevip_manifest.stdout | replace('imagePullPolicy: Always', 'imagePullPolicy: IfNotPresent') }}
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
|
@ -0,0 +1,25 @@
|
||||||
|
- name: Check for expected vApp properties
|
||||||
|
ansible.builtin.assert:
|
||||||
|
that:
|
||||||
|
- vapp[item] is defined
|
||||||
|
- (vapp[item] | length) > 0
|
||||||
|
quiet: true
|
||||||
|
loop:
|
||||||
|
- deployment.type
|
||||||
|
- guestinfo.dnsserver
|
||||||
|
- guestinfo.gateway
|
||||||
|
- guestinfo.hostname
|
||||||
|
- guestinfo.ipaddress
|
||||||
|
- guestinfo.prefixlength
|
||||||
|
- guestinfo.rootsshkey
|
||||||
|
- hv.fqdn
|
||||||
|
- hv.password
|
||||||
|
- hv.username
|
||||||
|
- ippool.endip
|
||||||
|
- ippool.startip
|
||||||
|
- metacluster.fqdn
|
||||||
|
- metacluster.password
|
||||||
|
- metacluster.token
|
||||||
|
- metacluster.vip
|
||||||
|
- workloadcluster.name
|
||||||
|
- workloadcluster.vip
|
|
@ -0,0 +1,40 @@
|
||||||
|
- name: Initialize tempfolder
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: directory
|
||||||
|
register: pinniped_kubeconfig
|
||||||
|
|
||||||
|
- name: Pull existing repository
|
||||||
|
ansible.builtin.git:
|
||||||
|
repo: https://git.{{ vapp['metacluster.fqdn'] }}/wl/ClusterAccess.Store.git
|
||||||
|
dest: "{{ pinniped_kubeconfig.path }}"
|
||||||
|
version: main
|
||||||
|
|
||||||
|
- name: Generate kubeconfig
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: pinniped get kubeconfig --kubeconfig {{ capi_kubeconfig.path }}
|
||||||
|
register: pinniped_config
|
||||||
|
until:
|
||||||
|
- pinniped_config is not failed
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
|
||||||
|
- name: Store kubeconfig in tempfile
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ pinniped_kubeconfig.path }}/kubeconfig"
|
||||||
|
content: "{{ pinniped_config.stdout }}"
|
||||||
|
mode: 0600
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Push git repository
|
||||||
|
lvrfrc87.git_acp.git_acp:
|
||||||
|
path: "{{ pinniped_kubeconfig.path }}"
|
||||||
|
branch: main
|
||||||
|
comment: "Upload kubeconfig files"
|
||||||
|
add:
|
||||||
|
- .
|
||||||
|
url: https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/ClusterAccess.Store.git
|
||||||
|
environment:
|
||||||
|
GIT_AUTHOR_NAME: administrator
|
||||||
|
GIT_AUTHOR_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||||
|
GIT_COMMITTER_NAME: administrator
|
||||||
|
GIT_COMMITTER_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
|
@ -0,0 +1,328 @@
|
||||||
|
- block:
|
||||||
|
# Below tasks circumvent usernames with `<domain>\<username>` format, which causes CAPV to
|
||||||
|
# incorrectly interpret the backslash (despite automatic escaping) as an escape sequence.
|
||||||
|
# `vcenter_session.user` will instead contain the username in `<username>@<domain>` format.
|
||||||
|
|
||||||
|
- name: Generate vCenter API token
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://{{ vapp['hv.fqdn'] }}/api/session
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: Basic {{ ( vapp['hv.username'] ~ ':' ~ vapp['hv.password'] ) | b64encode }}
|
||||||
|
register: vcenterapi_token
|
||||||
|
|
||||||
|
- name: Retrieve vCenter API session details
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://{{ vapp['hv.fqdn'] }}/api/session
|
||||||
|
method: GET
|
||||||
|
headers:
|
||||||
|
vmware-api-session-id: "{{ vcenterapi_token.json }}"
|
||||||
|
register: vcenter_session
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
||||||
|
|
||||||
|
- name: Configure clusterctl
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: clusterctl.j2
|
||||||
|
dest: /opt/metacluster/cluster-api/clusterctl.yaml
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
version:
|
||||||
|
base: "{{ components.clusterapi.management.version.base }}"
|
||||||
|
cert_manager: "{{ components.clusterapi.management.version.cert_manager }}"
|
||||||
|
infrastructure_vsphere: "{{ components.clusterapi.management.version.infrastructure_vsphere }}"
|
||||||
|
ipam_incluster: "{{ components.clusterapi.management.version.ipam_incluster }}"
|
||||||
|
hv:
|
||||||
|
fqdn: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
tlsthumbprint: "{{ tls_thumbprint.stdout }}"
|
||||||
|
username: "{{ vcenter_session.json.user }}"
|
||||||
|
password: "{{ vapp['hv.password'] }}"
|
||||||
|
datacenter: "{{ vcenter_info.datacenter }}"
|
||||||
|
datastore: "{{ vcenter_info.datastore }}"
|
||||||
|
network: "{{ vcenter_info.network }}"
|
||||||
|
resourcepool: "{{ vcenter_info.resourcepool }}"
|
||||||
|
folder: "{{ vcenter_info.folder }}"
|
||||||
|
cluster:
|
||||||
|
nodetemplate: "{{ nodetemplate_inventorypath }}"
|
||||||
|
publickey: "{{ vapp['guestinfo.rootsshkey'] }}"
|
||||||
|
version: "{{ components.clusterapi.workload.version.k8s }}"
|
||||||
|
vip: "{{ vapp['workloadcluster.vip'] }}"
|
||||||
|
|
||||||
|
- name: Generate cluster-template kustomization manifest
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: kustomization.cluster-template.j2
|
||||||
|
dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/kustomization.yaml
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
network:
|
||||||
|
fqdn: "{{ vapp['metacluster.fqdn'] }}"
|
||||||
|
dnsserver: "{{ vapp['guestinfo.dnsserver'] }}"
|
||||||
|
nodesize:
|
||||||
|
cpu: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].cpu }}"
|
||||||
|
memory: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].memory }}"
|
||||||
|
rootca: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
|
||||||
|
runcmds:
|
||||||
|
- update-ca-certificates
|
||||||
|
registries: "{{ source_registries }}"
|
||||||
|
|
||||||
|
- name: Store custom cluster-template
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /opt/metacluster/cluster-api/custom-cluster-template.yaml
|
||||||
|
content: "{{ lookup('kubernetes.core.kustomize', dir='/opt/metacluster/cluster-api/infrastructure-vsphere/' ~ components.clusterapi.management.version.infrastructure_vsphere ) }}"
|
||||||
|
|
||||||
|
- name: Initialize Cluster API management cluster
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
clusterctl init \
|
||||||
|
-v5 \
|
||||||
|
--infrastructure vsphere:{{ components.clusterapi.management.version.infrastructure_vsphere }} \
|
||||||
|
--ipam in-cluster:{{ components.clusterapi.management.version.ipam_incluster }} \
|
||||||
|
--config ./clusterctl.yaml \
|
||||||
|
--kubeconfig {{ kubeconfig.path }}
|
||||||
|
chdir: /opt/metacluster/cluster-api
|
||||||
|
|
||||||
|
- name: Initialize tempfolder
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: directory
|
||||||
|
register: capi_clustermanifest
|
||||||
|
|
||||||
|
- name: Pull existing repository
|
||||||
|
ansible.builtin.git:
|
||||||
|
repo: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||||
|
dest: "{{ capi_clustermanifest.path }}"
|
||||||
|
version: main
|
||||||
|
|
||||||
|
- name: Generate Cluster API provider manifests
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
clusterctl generate provider \
|
||||||
|
-v5 \
|
||||||
|
--{{ item.type }} {{ item.name }}:{{ item.version }} \
|
||||||
|
--config ./clusterctl.yaml > {{ capi_clustermanifest.path }}/provider-{{ item.name }}.yaml
|
||||||
|
chdir: /opt/metacluster/cluster-api
|
||||||
|
loop:
|
||||||
|
- type: infrastructure
|
||||||
|
name: vsphere
|
||||||
|
version: "{{ components.clusterapi.management.version.infrastructure_vsphere }}"
|
||||||
|
- type: ipam
|
||||||
|
name: in-cluster
|
||||||
|
version: "{{ components.clusterapi.management.version.ipam_incluster }}"
|
||||||
|
|
||||||
|
- name: Split cluster API provider manifests into separate files
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
awk 'BEGINFILE {print "---"}{print}' {{ capi_clustermanifest.path }}/provider-*.yaml |
|
||||||
|
kubectl slice \
|
||||||
|
-o {{ capi_clustermanifest.path }}/providers
|
||||||
|
|
||||||
|
- name: Ensure controller availability
|
||||||
|
kubernetes.core.k8s_info:
|
||||||
|
kind: Deployment
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
namespace: "{{ item.namespace }}"
|
||||||
|
wait: true
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
loop:
|
||||||
|
- name: caip-in-cluster-controller-manager
|
||||||
|
namespace: caip-in-cluster-system
|
||||||
|
- name: capi-controller-manager
|
||||||
|
namespace: capi-system
|
||||||
|
- name: capv-controller-manager
|
||||||
|
namespace: capv-system
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.name }}"
|
||||||
|
|
||||||
|
- name: Parse vApp for workload cluster sizing
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
clustersize: >-
|
||||||
|
{{ {
|
||||||
|
'controlplane': vapp['deployment.type'] | regex_findall('^cp(\d)+') | first,
|
||||||
|
'worker': vapp['deployment.type'] | regex_findall('w(\d)+') | first,
|
||||||
|
'workerstorage': vapp['deployment.type'] | regex_findall('ws(\d)+$') | first
|
||||||
|
} }}
|
||||||
|
|
||||||
|
- name: Generate workload cluster manifest
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
clusterctl generate cluster \
|
||||||
|
{{ vapp['workloadcluster.name'] | lower }} \
|
||||||
|
--control-plane-machine-count {{ clustersize.controlplane }} \
|
||||||
|
--worker-machine-count {{ clustersize.worker }} \
|
||||||
|
--from ./custom-cluster-template.yaml \
|
||||||
|
--config ./clusterctl.yaml \
|
||||||
|
--kubeconfig {{ kubeconfig.path }}
|
||||||
|
chdir: /opt/metacluster/cluster-api
|
||||||
|
register: clusterctl_newcluster
|
||||||
|
|
||||||
|
- name: Save workload cluster manifest
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ capi_clustermanifest.path }}/new-cluster.yaml"
|
||||||
|
content: "{{ clusterctl_newcluster.stdout }}"
|
||||||
|
|
||||||
|
- name: Split workload cluster manifest into separate files
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
kubectl slice \
|
||||||
|
-f {{ capi_clustermanifest.path }}/new-cluster.yaml \
|
||||||
|
-o {{ capi_clustermanifest.path }}/downstream-cluster
|
||||||
|
|
||||||
|
- name: Generate nodepool kustomization manifest
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: kustomization.nodepool.j2
|
||||||
|
dest: "{{ capi_clustermanifest.path }}/kustomization.yaml"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
cluster:
|
||||||
|
name: "{{ vapp['workloadcluster.name'] }}"
|
||||||
|
nodepool:
|
||||||
|
size: "{{ clustersize.workerstorage }}"
|
||||||
|
additionaldisk: "{{ vapp['workloadcluster.additionaldisk'] }}"
|
||||||
|
|
||||||
|
- name: Store nodepool manifest
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ capi_clustermanifest.path }}/nodepool-worker-storage.yaml"
|
||||||
|
content: "{{ lookup('kubernetes.core.kustomize', dir=capi_clustermanifest.path) }}"
|
||||||
|
|
||||||
|
- name: Split nodepool manifest into separate files
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
kubectl slice \
|
||||||
|
-f {{ capi_clustermanifest.path }}/nodepool-worker-storage.yaml \
|
||||||
|
-o {{ capi_clustermanifest.path }}/downstream-cluster
|
||||||
|
|
||||||
|
- name: Create in-cluster IpPool
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: ippool.j2
|
||||||
|
dest: "{{ capi_clustermanifest.path }}/downstream-cluster/inclusterippool-{{ _template.cluster.name }}.yml"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
cluster:
|
||||||
|
name: "{{ vapp['workloadcluster.name'] | lower }}"
|
||||||
|
namespace: default
|
||||||
|
network:
|
||||||
|
startip: "{{ vapp['ippool.startip'] }}"
|
||||||
|
endip: "{{ vapp['ippool.endip'] }}"
|
||||||
|
prefix: "{{ vapp['guestinfo.prefixlength'] }}"
|
||||||
|
gateway: "{{ vapp['guestinfo.gateway'] }}"
|
||||||
|
|
||||||
|
- name: Push git repository
|
||||||
|
lvrfrc87.git_acp.git_acp:
|
||||||
|
path: "{{ capi_clustermanifest.path }}"
|
||||||
|
branch: main
|
||||||
|
comment: "Upload manifests"
|
||||||
|
add:
|
||||||
|
- ./downstream-cluster
|
||||||
|
- ./providers
|
||||||
|
clean: untracked
|
||||||
|
url: https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||||
|
environment:
|
||||||
|
GIT_AUTHOR_NAME: administrator
|
||||||
|
GIT_AUTHOR_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||||
|
GIT_COMMITTER_NAME: administrator
|
||||||
|
GIT_COMMITTER_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||||
|
|
||||||
|
# - name: Cleanup tempfolder
|
||||||
|
# ansible.builtin.file:
|
||||||
|
# path: "{{ capi_clustermanifest.path }}"
|
||||||
|
# state: absent
|
||||||
|
# when: capi_clustermanifest.path is defined
|
||||||
|
|
||||||
|
- name: Configure Cluster API repository
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: gitrepo.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: gitrepo-mc-gitopsclusterapi
|
||||||
|
namespace: argo-cd
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: WORKAROUND - Wait for ingress ACME requests to complete
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
openssl s_client -connect registry.{{ vapp['metacluster.fqdn'] }}:443 -servername registry.{{ vapp['metacluster.fqdn'] }} 2>/dev/null </dev/null | \
|
||||||
|
openssl x509 -noout -subject | \
|
||||||
|
grep 'subject=CN = registry.{{ vapp['metacluster.fqdn'] }}'
|
||||||
|
register: certificate_subject
|
||||||
|
until: certificate_subject is not failed
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
||||||
|
|
||||||
|
- name: Create application
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: application.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
application:
|
||||||
|
name: application-clusterapi-workloadcluster
|
||||||
|
namespace: argo-cd
|
||||||
|
cluster:
|
||||||
|
name: https://kubernetes.default.svc
|
||||||
|
namespace: default
|
||||||
|
repository:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||||
|
path: downstream-cluster
|
||||||
|
revision: main
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
||||||
|
|
||||||
|
- name: Wait for cluster to be available
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
kubectl wait clusters.cluster.x-k8s.io/{{ vapp['workloadcluster.name'] | lower }} \
|
||||||
|
--for=condition=Ready \
|
||||||
|
--timeout 0s
|
||||||
|
register: cluster_readycheck
|
||||||
|
until: cluster_readycheck is succeeded
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
- name: Initialize tempfile
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: file
|
||||||
|
register: capi_kubeconfig
|
||||||
|
|
||||||
|
- name: Retrieve kubeconfig
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
clusterctl get kubeconfig \
|
||||||
|
{{ vapp['workloadcluster.name'] | lower }} \
|
||||||
|
--kubeconfig {{ kubeconfig.path }}
|
||||||
|
register: capi_kubectl_config
|
||||||
|
|
||||||
|
- name: Store kubeconfig in tempfile
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ capi_kubeconfig.path }}"
|
||||||
|
content: "{{ capi_kubectl_config.stdout }}"
|
||||||
|
mode: 0600
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
# TODO: move to git repo
|
||||||
|
- name: Apply cni plugin manifest
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
definition: |
|
||||||
|
{{
|
||||||
|
lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/cni-calico/' ~ components.clusterapi.workload.version.calico ~ '/calico.yaml') |
|
||||||
|
regex_replace('# - name: CALICO_IPV4POOL_CIDR', '- name: CALICO_IPV4POOL_CIDR') |
|
||||||
|
regex_replace('# value: "192.168.0.0/16"', ' value: "172.30.0.0/16"')
|
||||||
|
}}
|
||||||
|
state: present
|
||||||
|
wait: true
|
||||||
|
kubeconfig: "{{ capi_kubeconfig.path }}"
|
||||||
|
# TODO: move to git repo
|
|
@ -0,0 +1,132 @@
|
||||||
|
- name: Aggregate helm charts from filesystem
|
||||||
|
ansible.builtin.find:
|
||||||
|
path: /opt/workloadcluster/helm-charts
|
||||||
|
file_type: directory
|
||||||
|
recurse: false
|
||||||
|
register: helm_charts
|
||||||
|
|
||||||
|
- name: Pull existing repository
|
||||||
|
ansible.builtin.git:
|
||||||
|
repo: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||||
|
dest: /opt/workloadcluster/git-repositories/gitops
|
||||||
|
version: main
|
||||||
|
|
||||||
|
- name: Create folder structure within new git-repository
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
loop:
|
||||||
|
- /opt/workloadcluster/git-repositories/gitops/charts
|
||||||
|
- /opt/workloadcluster/git-repositories/gitops/values
|
||||||
|
|
||||||
|
- name: Create hard-links to populate new git-repository
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
cp -lr {{ item.path }}/ /opt/workloadcluster/git-repositories/gitops/charts
|
||||||
|
loop: "{{ helm_charts.files }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.path | basename }}"
|
||||||
|
|
||||||
|
- name: Write custom manifests to respective chart templates store
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "{{ src }}"
|
||||||
|
dest: /opt/workloadcluster/git-repositories/gitops/charts/{{ manifest.value.namespace }}/{{ manifest.key }}/templates/{{ (src | split('.'))[0] ~ '-' ~ _template.name ~ '.yaml' }}
|
||||||
|
vars:
|
||||||
|
manifest: "{{ item.0 }}"
|
||||||
|
src: "{{ item.1.src }}"
|
||||||
|
_template: "{{ item.1._template }}"
|
||||||
|
loop: "{{ query('ansible.builtin.subelements', query('ansible.builtin.dict', downstream_components), 'value.extra_manifests') }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ (src | split('.'))[0] ~ '-' ~ _template.name }}"
|
||||||
|
|
||||||
|
- name: Create subfolders
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}
|
||||||
|
state: directory
|
||||||
|
loop: "{{ query('ansible.builtin.dict', downstream_components) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
|
||||||
|
- name: Write chart values to file
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}/values.yaml
|
||||||
|
content: "{{ item.value.chart_values | default('# Empty') | to_nice_yaml(indent=2, width=4096) }}"
|
||||||
|
loop: "{{ query('ansible.builtin.dict', downstream_components) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.key }}"
|
||||||
|
|
||||||
|
- name: Push git repository
|
||||||
|
lvrfrc87.git_acp.git_acp:
|
||||||
|
path: /opt/workloadcluster/git-repositories/gitops
|
||||||
|
branch: main
|
||||||
|
comment: "Upload charts"
|
||||||
|
add:
|
||||||
|
- .
|
||||||
|
url: https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||||
|
environment:
|
||||||
|
GIT_AUTHOR_NAME: administrator
|
||||||
|
GIT_AUTHOR_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||||
|
GIT_COMMITTER_NAME: administrator
|
||||||
|
GIT_COMMITTER_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||||
|
|
||||||
|
- name: Retrieve workload-cluster kubeconfig
|
||||||
|
kubernetes.core.k8s_info:
|
||||||
|
kind: Secret
|
||||||
|
name: "{{ vapp['workloadcluster.name'] }}-kubeconfig"
|
||||||
|
namespace: default
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
register: secret_workloadcluster_kubeconfig
|
||||||
|
|
||||||
|
- name: Register workload-cluster in argo-cd
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
template: cluster.j2
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
cluster:
|
||||||
|
name: "{{ vapp['workloadcluster.name'] | lower }}"
|
||||||
|
secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }}
|
||||||
|
url: https://{{ vapp['workloadcluster.vip'] }}:6443
|
||||||
|
kubeconfig:
|
||||||
|
ca: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).clusters[0].cluster['certificate-authority-data'] }}"
|
||||||
|
certificate: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-certificate-data'] }}"
|
||||||
|
key: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-key-data'] }}"
|
||||||
|
|
||||||
|
- name: Configure workload-cluster GitOps repository
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: gitrepo.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
name: gitrepo-wl-gitopsconfig
|
||||||
|
namespace: argo-cd
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Create applicationset
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: applicationset.j2
|
||||||
|
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0600
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
application:
|
||||||
|
name: applicationset-workloadcluster
|
||||||
|
namespace: argo-cd
|
||||||
|
cluster:
|
||||||
|
url: https://{{ vapp['workloadcluster.vip'] }}:6443
|
||||||
|
repository:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||||
|
revision: main
|
||||||
|
notify:
|
||||||
|
- Apply manifests
|
||||||
|
|
||||||
|
- name: Trigger handlers
|
||||||
|
ansible.builtin.meta: flush_handlers
|
|
@ -0,0 +1,57 @@
|
||||||
|
- name: Gather hypervisor details
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: govc ls -L {{ item.moref }} | awk -F/ '{print ${{ item.part }}}'
|
||||||
|
environment:
|
||||||
|
GOVC_INSECURE: '1'
|
||||||
|
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||||
|
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||||
|
register: govc_inventory
|
||||||
|
loop:
|
||||||
|
- attribute: cluster
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||||
|
part: (NF-1)
|
||||||
|
- attribute: datacenter
|
||||||
|
moref: VirtualMachine:{{ moref_id }}
|
||||||
|
part: 2
|
||||||
|
- attribute: datastore
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||||
|
part: NF
|
||||||
|
- attribute: folder
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
|
||||||
|
part: 0
|
||||||
|
# - attribute: host
|
||||||
|
# moref: >-
|
||||||
|
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||||
|
# part: NF
|
||||||
|
- attribute: network
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||||
|
part: NF
|
||||||
|
- attribute: resourcepool
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
|
||||||
|
part: 0
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.attribute }}"
|
||||||
|
|
||||||
|
- name: Retrieve hypervisor TLS thumbprint
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: openssl s_client -connect {{ vapp['hv.fqdn'] }}:443 < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin | awk -F'=' '{print $2}'
|
||||||
|
register: tls_thumbprint
|
||||||
|
|
||||||
|
- name: Store hypervisor details in dictionary
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
vcenter_info: "{{ vcenter_info | default({}) | combine({ item.item.attribute : item.stdout }) }}"
|
||||||
|
loop: "{{ govc_inventory.results }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.item.attribute }}"
|
|
@ -0,0 +1,12 @@
|
||||||
|
- import_tasks: hypervisor.yml
|
||||||
|
- import_tasks: registry.yml
|
||||||
|
- import_tasks: nodetemplates.yml
|
||||||
|
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- import_tasks: clusterapi.yml
|
||||||
|
- import_tasks: gitops.yml
|
||||||
|
- import_tasks: authentication.yml
|
||||||
|
|
||||||
|
when:
|
||||||
|
- vapp['deployment.type'] != 'core'
|
|
@ -0,0 +1,73 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Check for existing template on hypervisor
|
||||||
|
community.vmware.vmware_guest_info:
|
||||||
|
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||||
|
register: existing_ova
|
||||||
|
ignore_errors: yes
|
||||||
|
|
||||||
|
- name: Store inventory path of existing template
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
|
||||||
|
when: existing_ova is not failed
|
||||||
|
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Parse OVA file for network mappings
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: govc import.spec -json {{ filename }}
|
||||||
|
environment:
|
||||||
|
GOVC_INSECURE: '1'
|
||||||
|
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||||
|
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||||
|
register: ova_spec
|
||||||
|
|
||||||
|
- name: Deploy OVA template on hypervisor
|
||||||
|
community.vmware.vmware_deploy_ovf:
|
||||||
|
cluster: "{{ vcenter_info.cluster }}"
|
||||||
|
datastore: "{{ vcenter_info.datastore }}"
|
||||||
|
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||||
|
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
|
||||||
|
allow_duplicates: no
|
||||||
|
power_on: false
|
||||||
|
ovf: "{{ filename }}"
|
||||||
|
register: ova_deploy
|
||||||
|
|
||||||
|
- name: Add additional placeholder disk
|
||||||
|
community.vmware.vmware_guest_disk:
|
||||||
|
name: "{{ ova_deploy.instance.hw_name }}"
|
||||||
|
disk:
|
||||||
|
- size: 1Mb
|
||||||
|
scsi_controller: 1
|
||||||
|
scsi_type: paravirtual
|
||||||
|
unit_number: 0
|
||||||
|
|
||||||
|
# Disabled to allow disks to be resized; at the cost of cloning speed
|
||||||
|
# - name: Create snapshot on deployed VM
|
||||||
|
# community.vmware.vmware_guest_snapshot:
|
||||||
|
# name: "{{ ova_deploy.instance.hw_name }}"
|
||||||
|
# state: present
|
||||||
|
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
|
||||||
|
|
||||||
|
- name: Mark deployed VM as templates
|
||||||
|
community.vmware.vmware_guest:
|
||||||
|
name: "{{ ova_deploy.instance.hw_name }}"
|
||||||
|
is_template: yes
|
||||||
|
|
||||||
|
- name: Store inventory path of deployed template
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
|
||||||
|
|
||||||
|
when: existing_ova is failed
|
||||||
|
|
||||||
|
vars:
|
||||||
|
filename: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | first }}"
|
||||||
|
module_defaults:
|
||||||
|
group/vmware:
|
||||||
|
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
validate_certs: no
|
||||||
|
username: "{{ vapp['hv.username'] }}"
|
||||||
|
password: "{{ vapp['hv.password'] }}"
|
||||||
|
datacenter: "{{ vcenter_info.datacenter }}"
|
||||||
|
folder: "{{ vcenter_info.folder }}"
|
|
@ -0,0 +1,40 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Create dedicated kubeadm project within container registry
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: "Basic {{ ('admin:' ~ vapp['metacluster.password']) | b64encode }}"
|
||||||
|
body:
|
||||||
|
project_name: kubeadm
|
||||||
|
public: true
|
||||||
|
storage_limit: 0
|
||||||
|
metadata:
|
||||||
|
enable_content_trust: 'false'
|
||||||
|
enable_content_trust_cosign: 'false'
|
||||||
|
auto_scan: 'true'
|
||||||
|
severity: none
|
||||||
|
prevent_vul: 'false'
|
||||||
|
public: 'true'
|
||||||
|
reuse_sys_cve_allowlist: 'true'
|
||||||
|
|
||||||
|
- name: Lookup kubeadm container images
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
kubeadm_images: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"
|
||||||
|
|
||||||
|
- name: Copy kubeadm container images to dedicated project
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects/kubeadm/repositories/{{ ( item | regex_findall('([^:/]+)') )[-2] }}/artifacts?from=library/{{ item | replace('/', '%2F') | replace(':', '%3A') }}
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: "Basic {{ ('admin:' ~ vapp['metacluster.password']) | b64encode }}"
|
||||||
|
body:
|
||||||
|
from: "{{ item }}"
|
||||||
|
loop: "{{ kubeadm_images }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201, 409]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,16 @@
|
||||||
|
apiVersion: argoproj.io/v1alpha1
|
||||||
|
kind: Application
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.application.name }}
|
||||||
|
namespace: {{ _template.application.namespace }}
|
||||||
|
spec:
|
||||||
|
destination:
|
||||||
|
namespace: {{ _template.cluster.namespace }}
|
||||||
|
server: {{ _template.cluster.name }}
|
||||||
|
project: default
|
||||||
|
source:
|
||||||
|
repoURL: {{ _template.repository.url }}
|
||||||
|
path: {{ _template.repository.path }}
|
||||||
|
targetRevision: {{ _template.repository.revision }}
|
||||||
|
syncPolicy:
|
||||||
|
automated: {}
|
|
@ -0,0 +1,33 @@
|
||||||
|
apiVersion: argoproj.io/v1alpha1
|
||||||
|
kind: ApplicationSet
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.application.name }}
|
||||||
|
namespace: {{ _template.application.namespace }}
|
||||||
|
spec:
|
||||||
|
generators:
|
||||||
|
- git:
|
||||||
|
repoURL: {{ _template.repository.url }}
|
||||||
|
revision: {{ _template.repository.revision }}
|
||||||
|
directories:
|
||||||
|
- path: charts/*/*
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
name: application-{% raw %}{{ path.basename }}{% endraw +%}
|
||||||
|
spec:
|
||||||
|
project: default
|
||||||
|
syncPolicy:
|
||||||
|
automated:
|
||||||
|
prune: true
|
||||||
|
selfHeal: true
|
||||||
|
syncOptions:
|
||||||
|
- CreateNamespace=true
|
||||||
|
sources:
|
||||||
|
- repoURL: {{ _template.repository.url }}
|
||||||
|
targetRevision: {{ _template.repository.revision }}
|
||||||
|
path: {% raw %}'{{ path }}'{% endraw +%}
|
||||||
|
helm:
|
||||||
|
valueFiles:
|
||||||
|
- /values/{% raw %}{{ path.basename }}{% endraw %}/values.yaml
|
||||||
|
destination:
|
||||||
|
server: {{ _template.cluster.url }}
|
||||||
|
namespace: {% raw %}'{{ path[1] }}'{% endraw +%}
|
|
@ -0,0 +1,20 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.cluster.secret }}
|
||||||
|
namespace: argo-cd
|
||||||
|
labels:
|
||||||
|
argocd.argoproj.io/secret-type: cluster
|
||||||
|
type: Opaque
|
||||||
|
stringData:
|
||||||
|
name: {{ _template.cluster.name }}
|
||||||
|
server: {{ _template.cluster.url }}
|
||||||
|
config: |
|
||||||
|
{
|
||||||
|
"tlsClientConfig": {
|
||||||
|
"insecure": false,
|
||||||
|
"caData": "{{ _template.kubeconfig.ca }}",
|
||||||
|
"certData": "{{ _template.kubeconfig.certificate }}",
|
||||||
|
"keyData": "{{ _template.kubeconfig.key }}"
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,42 @@
|
||||||
|
providers:
|
||||||
|
- name: "kubeadm"
|
||||||
|
url: "/opt/metacluster/cluster-api/bootstrap-kubeadm/{{ _template.version.base }}/bootstrap-components.yaml"
|
||||||
|
type: "BootstrapProvider"
|
||||||
|
- name: "cluster-api"
|
||||||
|
url: "/opt/metacluster/cluster-api/cluster-api/{{ _template.version.base }}/core-components.yaml"
|
||||||
|
type: "CoreProvider"
|
||||||
|
- name: "kubeadm"
|
||||||
|
url: "/opt/metacluster/cluster-api/control-plane-kubeadm/{{ _template.version.base }}/control-plane-components.yaml"
|
||||||
|
type: "ControlPlaneProvider"
|
||||||
|
- name: "vsphere"
|
||||||
|
url: "/opt/metacluster/cluster-api/infrastructure-vsphere/{{ _template.version.infrastructure_vsphere }}/infrastructure-components.yaml"
|
||||||
|
type: "InfrastructureProvider"
|
||||||
|
- name: "in-cluster"
|
||||||
|
url: "/opt/metacluster/cluster-api/ipam-in-cluster/{{ _template.version.ipam_incluster }}/ipam-components.yaml"
|
||||||
|
type: "IPAMProvider"
|
||||||
|
|
||||||
|
cert-manager:
|
||||||
|
url: "/opt/metacluster/cluster-api/cert-manager/{{ _template.version.cert_manager }}/cert-manager.yaml"
|
||||||
|
version: "{{ _template.version.cert_manager }}"
|
||||||
|
|
||||||
|
## -- Controller settings -- ##
|
||||||
|
VSPHERE_SERVER: "{{ _template.hv.fqdn }}"
|
||||||
|
VSPHERE_TLS_THUMBPRINT: "{{ _template.hv.tlsthumbprint }}"
|
||||||
|
VSPHERE_USERNAME: "{{ _template.hv.username }}"
|
||||||
|
VSPHERE_PASSWORD: "{{ _template.hv.password }}"
|
||||||
|
|
||||||
|
## -- Required workload cluster default settings -- ##
|
||||||
|
VSPHERE_DATACENTER: "{{ _template.hv.datacenter }}"
|
||||||
|
VSPHERE_DATASTORE: "{{ _template.hv.datastore }}"
|
||||||
|
VSPHERE_STORAGE_POLICY: ""
|
||||||
|
VSPHERE_NETWORK: "{{ _template.hv.network }}"
|
||||||
|
VSPHERE_RESOURCE_POOL: "{{ _template.hv.resourcepool }}"
|
||||||
|
VSPHERE_FOLDER: "{{ _template.hv.folder }}"
|
||||||
|
|
||||||
|
VSPHERE_TEMPLATE: "{{ _template.cluster.nodetemplate }}"
|
||||||
|
VSPHERE_SSH_AUTHORIZED_KEY: "{{ _template.cluster.publickey }}"
|
||||||
|
|
||||||
|
KUBERNETES_VERSION: "{{ _template.cluster.version }}"
|
||||||
|
CONTROL_PLANE_ENDPOINT_IP: "{{ _template.cluster.vip }}"
|
||||||
|
VIP_NETWORK_INTERFACE: ""
|
||||||
|
EXP_CLUSTER_RESOURCE_SET: "true"
|
|
@ -0,0 +1,14 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
annotations:
|
||||||
|
{{ _template.annotations }}
|
||||||
|
labels:
|
||||||
|
{{ _template.labels }}
|
||||||
|
data:
|
||||||
|
{% for kv_pair in _template.data %}
|
||||||
|
"{{ kv_pair.key }}": |
|
||||||
|
{{ kv_pair.value | indent(width=4, first=True) }}
|
||||||
|
{% endfor %}
|
|
@ -0,0 +1,7 @@
|
||||||
|
apiVersion: config.supervisor.pinniped.dev/v1alpha1
|
||||||
|
kind: FederationDomain
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
spec:
|
||||||
|
{{ _template.spec }}
|
|
@ -0,0 +1,9 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
labels:
|
||||||
|
argocd.argoproj.io/secret-type: repository
|
||||||
|
stringData:
|
||||||
|
url: {{ _template.url }}
|
|
@ -0,0 +1,7 @@
|
||||||
|
apiVersion: traefik.containo.us/v1alpha1
|
||||||
|
kind: IngressRoute
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
spec:
|
||||||
|
{{ _template.spec }}
|
|
@ -0,0 +1,7 @@
|
||||||
|
apiVersion: traefik.containo.us/v1alpha1
|
||||||
|
kind: IngressRouteTCP
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
spec:
|
||||||
|
{{ _template.spec }}
|
|
@ -0,0 +1,10 @@
|
||||||
|
apiVersion: ipam.cluster.x-k8s.io/v1alpha2
|
||||||
|
kind: InClusterIPPool
|
||||||
|
metadata:
|
||||||
|
name: inclusterippool-{{ _template.cluster.name }}
|
||||||
|
namespace: {{ _template.cluster.namespace }}
|
||||||
|
spec:
|
||||||
|
addresses:
|
||||||
|
- {{ _template.cluster.network.startip }}-{{ _template.cluster.network.endip }}
|
||||||
|
prefix: {{ _template.cluster.network.prefix }}
|
||||||
|
gateway: {{ _template.cluster.network.gateway }}
|
|
@ -0,0 +1,6 @@
|
||||||
|
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
|
||||||
|
kind: JWTAuthenticator
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
spec:
|
||||||
|
{{ _template.spec }}
|
|
@ -0,0 +1,260 @@
|
||||||
|
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||||
|
kind: Kustomization
|
||||||
|
resources:
|
||||||
|
- cluster-template.yaml
|
||||||
|
|
||||||
|
patches:
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: csi-vsphere-config
|
||||||
|
namespace: '${NAMESPACE}'
|
||||||
|
stringData:
|
||||||
|
data: |
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: csi-vsphere-config
|
||||||
|
namespace: kube-system
|
||||||
|
stringData:
|
||||||
|
csi-vsphere.conf: |+
|
||||||
|
[Global]
|
||||||
|
insecure-flag = true
|
||||||
|
thumbprint = "${VSPHERE_TLS_THUMBPRINT}"
|
||||||
|
cluster-id = "${NAMESPACE}/${CLUSTER_NAME}"
|
||||||
|
|
||||||
|
[VirtualCenter "${VSPHERE_SERVER}"]
|
||||||
|
user = "${VSPHERE_USERNAME}"
|
||||||
|
password = "${VSPHERE_PASSWORD}"
|
||||||
|
datacenters = "${VSPHERE_DATACENTER}"
|
||||||
|
|
||||||
|
[Network]
|
||||||
|
public-network = "${VSPHERE_NETWORK}"
|
||||||
|
type: Opaque
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: KubeadmControlPlane
|
||||||
|
metadata:
|
||||||
|
name: '${CLUSTER_NAME}'
|
||||||
|
namespace: '${NAMESPACE}'
|
||||||
|
spec:
|
||||||
|
kubeadmConfigSpec:
|
||||||
|
clusterConfiguration:
|
||||||
|
imageRepository: registry.{{ _template.network.fqdn }}/kubeadm
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: KubeadmConfigTemplate
|
||||||
|
metadata:
|
||||||
|
name: '${CLUSTER_NAME}-md-0'
|
||||||
|
namespace: '${NAMESPACE}'
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
clusterConfiguration:
|
||||||
|
imageRepository: registry.{{ _template.network.fqdn }}/kubeadm
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: KubeadmConfigTemplate
|
||||||
|
metadata:
|
||||||
|
name: '${CLUSTER_NAME}-md-0'
|
||||||
|
namespace: '${NAMESPACE}'
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
files:
|
||||||
|
- content: |
|
||||||
|
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||||
|
config_path = "/etc/containerd/certs.d"
|
||||||
|
append: true
|
||||||
|
path: /etc/containerd/config.toml
|
||||||
|
{% for registry in _template.registries %}
|
||||||
|
- content: |
|
||||||
|
server = "https://{{ registry }}"
|
||||||
|
|
||||||
|
[host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"]
|
||||||
|
capabilities = ["pull", "resolve"]
|
||||||
|
override_path = true
|
||||||
|
owner: root:root
|
||||||
|
path: /etc/containerd/certs.d/{{ registry }}/hosts.toml
|
||||||
|
{% endfor %}
|
||||||
|
- content: |
|
||||||
|
network: {config: disabled}
|
||||||
|
owner: root:root
|
||||||
|
path: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
|
||||||
|
- content: |
|
||||||
|
{{ _template.rootca | indent(width=14, first=False) | trim }}
|
||||||
|
owner: root:root
|
||||||
|
path: /usr/local/share/ca-certificates/root_ca.crt
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: VSphereMachineTemplate
|
||||||
|
metadata:
|
||||||
|
name: ${CLUSTER_NAME}
|
||||||
|
namespace: '${NAMESPACE}'
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
network:
|
||||||
|
devices:
|
||||||
|
- dhcp4: false
|
||||||
|
addressesFromPools:
|
||||||
|
- apiGroup: ipam.cluster.x-k8s.io
|
||||||
|
kind: InClusterIPPool
|
||||||
|
name: inclusterippool-${CLUSTER_NAME}
|
||||||
|
nameservers:
|
||||||
|
- {{ _template.network.dnsserver }}
|
||||||
|
networkName: '${VSPHERE_NETWORK}'
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: VSphereMachineTemplate
|
||||||
|
metadata:
|
||||||
|
name: ${CLUSTER_NAME}-worker
|
||||||
|
namespace: '${NAMESPACE}'
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
network:
|
||||||
|
devices:
|
||||||
|
- dhcp4: false
|
||||||
|
addressesFromPools:
|
||||||
|
- apiGroup: ipam.cluster.x-k8s.io
|
||||||
|
kind: InClusterIPPool
|
||||||
|
name: inclusterippool-${CLUSTER_NAME}
|
||||||
|
nameservers:
|
||||||
|
- {{ _template.network.dnsserver }}
|
||||||
|
networkName: '${VSPHERE_NETWORK}'
|
||||||
|
|
||||||
|
- target:
|
||||||
|
group: controlplane.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: KubeadmControlPlane
|
||||||
|
name: .*
|
||||||
|
patch: |-
|
||||||
|
- op: add
|
||||||
|
path: /spec/kubeadmConfigSpec/files/-
|
||||||
|
value:
|
||||||
|
content: |
|
||||||
|
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||||
|
config_path = "/etc/containerd/certs.d"
|
||||||
|
append: true
|
||||||
|
path: /etc/containerd/config.toml
|
||||||
|
{% for registry in _template.registries %}
|
||||||
|
- op: add
|
||||||
|
path: /spec/kubeadmConfigSpec/files/-
|
||||||
|
value:
|
||||||
|
content: |
|
||||||
|
server = "https://{{ registry }}"
|
||||||
|
|
||||||
|
[host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"]
|
||||||
|
capabilities = ["pull", "resolve"]
|
||||||
|
override_path = true
|
||||||
|
owner: root:root
|
||||||
|
path: /etc/containerd/certs.d/{{ registry }}/hosts.toml
|
||||||
|
{% endfor %}
|
||||||
|
- op: add
|
||||||
|
path: /spec/kubeadmConfigSpec/files/-
|
||||||
|
value:
|
||||||
|
content: |
|
||||||
|
network: {config: disabled}
|
||||||
|
owner: root:root
|
||||||
|
path: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
|
||||||
|
- op: add
|
||||||
|
path: /spec/kubeadmConfigSpec/files/-
|
||||||
|
value:
|
||||||
|
content: |
|
||||||
|
{{ _template.rootca | indent(width=10, first=False) | trim }}
|
||||||
|
owner: root:root
|
||||||
|
path: /usr/local/share/ca-certificates/root_ca.crt
|
||||||
|
- target:
|
||||||
|
group: bootstrap.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: KubeadmConfigTemplate
|
||||||
|
name: .*
|
||||||
|
patch: |-
|
||||||
|
{% for cmd in _template.runcmds %}
|
||||||
|
- op: add
|
||||||
|
path: /spec/template/spec/preKubeadmCommands/-
|
||||||
|
value: {{ cmd }}
|
||||||
|
{% endfor %}
|
||||||
|
- target:
|
||||||
|
group: controlplane.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: KubeadmControlPlane
|
||||||
|
name: .*
|
||||||
|
patch: |-
|
||||||
|
{% for cmd in _template.runcmds %}
|
||||||
|
- op: add
|
||||||
|
path: /spec/kubeadmConfigSpec/preKubeadmCommands/-
|
||||||
|
value: {{ cmd }}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
- target:
|
||||||
|
group: infrastructure.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: VSphereMachineTemplate
|
||||||
|
name: \${CLUSTER_NAME}
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: ${CLUSTER_NAME}-master
|
||||||
|
- target:
|
||||||
|
group: controlplane.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: KubeadmControlPlane
|
||||||
|
name: \${CLUSTER_NAME}
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: ${CLUSTER_NAME}-master
|
||||||
|
- op: replace
|
||||||
|
path: /spec/machineTemplate/infrastructureRef/name
|
||||||
|
value: ${CLUSTER_NAME}-master
|
||||||
|
- target:
|
||||||
|
group: cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: Cluster
|
||||||
|
name: \${CLUSTER_NAME}
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /spec/clusterNetwork/pods
|
||||||
|
value:
|
||||||
|
cidrBlocks:
|
||||||
|
- 172.30.0.0/16
|
||||||
|
- op: replace
|
||||||
|
path: /spec/controlPlaneRef/name
|
||||||
|
value: ${CLUSTER_NAME}-master
|
||||||
|
|
||||||
|
- target:
|
||||||
|
group: infrastructure.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: VSphereMachineTemplate
|
||||||
|
name: \${CLUSTER_NAME}-worker
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /spec/template/spec/numCPUs
|
||||||
|
value: {{ _template.nodesize.cpu }}
|
||||||
|
- op: replace
|
||||||
|
path: /spec/template/spec/memoryMiB
|
||||||
|
value: {{ _template.nodesize.memory }}
|
||||||
|
- target:
|
||||||
|
group: cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: MachineDeployment
|
||||||
|
name: \${CLUSTER_NAME}-md-0
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: ${CLUSTER_NAME}-worker
|
||||||
|
- op: replace
|
||||||
|
path: /spec/template/spec/bootstrap/configRef/name
|
||||||
|
value: ${CLUSTER_NAME}-worker
|
||||||
|
- target:
|
||||||
|
group: bootstrap.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: KubeadmConfigTemplate
|
||||||
|
name: \${CLUSTER_NAME}-md-0
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: ${CLUSTER_NAME}-worker
|
|
@ -0,0 +1,83 @@
|
||||||
|
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||||
|
kind: Kustomization
|
||||||
|
resources:
|
||||||
|
- downstream-cluster/kubeadmconfigtemplate-{{ _template.cluster.name }}-worker.yaml
|
||||||
|
- downstream-cluster/machinedeployment-{{ _template.cluster.name }}-worker.yaml
|
||||||
|
- downstream-cluster/vspheremachinetemplate-{{ _template.cluster.name }}-worker.yaml
|
||||||
|
|
||||||
|
patches:
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: KubeadmConfigTemplate
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.cluster.name }}-worker
|
||||||
|
namespace: default
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
diskSetup:
|
||||||
|
filesystems:
|
||||||
|
- device: /dev/sdb1
|
||||||
|
filesystem: ext4
|
||||||
|
label: blockstorage
|
||||||
|
partitions:
|
||||||
|
- device: /dev/sdb
|
||||||
|
layout: true
|
||||||
|
tableType: gpt
|
||||||
|
joinConfiguration:
|
||||||
|
nodeRegistration:
|
||||||
|
kubeletExtraArgs:
|
||||||
|
node-labels: "node.longhorn.io/create-default-disk=true"
|
||||||
|
mounts:
|
||||||
|
- - LABEL=blockstorage
|
||||||
|
- /mnt/blockstorage
|
||||||
|
- patch: |-
|
||||||
|
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||||
|
kind: VSphereMachineTemplate
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.cluster.name }}-worker
|
||||||
|
namespace: default
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
additionalDisksGiB:
|
||||||
|
- {{ _template.nodepool.additionaldisk }}
|
||||||
|
|
||||||
|
- target:
|
||||||
|
group: bootstrap.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: KubeadmConfigTemplate
|
||||||
|
name: {{ _template.cluster.name }}-worker
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: {{ _template.cluster.name }}-worker-storage
|
||||||
|
|
||||||
|
- target:
|
||||||
|
group: cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: MachineDeployment
|
||||||
|
name: {{ _template.cluster.name }}-worker
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: {{ _template.cluster.name }}-worker-storage
|
||||||
|
- op: replace
|
||||||
|
path: /spec/template/spec/bootstrap/configRef/name
|
||||||
|
value: {{ _template.cluster.name }}-worker-storage
|
||||||
|
- op: replace
|
||||||
|
path: /spec/template/spec/infrastructureRef/name
|
||||||
|
value: {{ _template.cluster.name }}-worker-storage
|
||||||
|
- op: replace
|
||||||
|
path: /spec/replicas
|
||||||
|
value: {{ _template.nodepool.size }}
|
||||||
|
|
||||||
|
- target:
|
||||||
|
group: infrastructure.cluster.x-k8s.io
|
||||||
|
version: v1beta1
|
||||||
|
kind: VSphereMachineTemplate
|
||||||
|
name: {{ _template.cluster.name }}-worker
|
||||||
|
patch: |-
|
||||||
|
- op: replace
|
||||||
|
path: /metadata/name
|
||||||
|
value: {{ _template.cluster.name }}-worker-storage
|
|
@ -0,0 +1,7 @@
|
||||||
|
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
|
||||||
|
kind: OIDCIdentityProvider
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
spec:
|
||||||
|
{{ _template.spec }}
|
|
@ -0,0 +1,10 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
type: {{ _template.type }}
|
||||||
|
data:
|
||||||
|
{% for kv_pair in _template.data %}
|
||||||
|
"{{ kv_pair.key }}": {{ kv_pair.value }}
|
||||||
|
{% endfor %}
|
|
@ -0,0 +1,7 @@
|
||||||
|
apiVersion: traefik.containo.us/v1alpha1
|
||||||
|
kind: ServersTransport
|
||||||
|
metadata:
|
||||||
|
name: {{ _template.name }}
|
||||||
|
namespace: {{ _template.namespace }}
|
||||||
|
spec:
|
||||||
|
{{ _template.spec }}
|
|
@ -0,0 +1,4 @@
|
||||||
|
- name: Disable crontab job
|
||||||
|
ansible.builtin.cron:
|
||||||
|
name: firstboot
|
||||||
|
state: absent
|
|
@ -0,0 +1,6 @@
|
||||||
|
- import_tasks: service.yml
|
||||||
|
- import_tasks: cron.yml
|
||||||
|
|
||||||
|
# - name: Reboot host
|
||||||
|
# ansible.builtin.shell:
|
||||||
|
# cmd: systemctl reboot
|
|
@ -0,0 +1,30 @@
|
||||||
|
- name: Create tarball compression service
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ item.dest }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "{{ item.mode | default(omit) }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
service:
|
||||||
|
name: compressTarballs
|
||||||
|
executable: /opt/firstboot/compresstarballs.sh
|
||||||
|
workingdir: /opt/metacluster/container-images/
|
||||||
|
loop:
|
||||||
|
- src: compresstarballs.j2
|
||||||
|
dest: "{{ _template.service.executable }}"
|
||||||
|
mode: o+x
|
||||||
|
- src: systemdunit.j2
|
||||||
|
dest: /etc/systemd/system/{{ _template.service.name }}.service
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.src }}"
|
||||||
|
|
||||||
|
- name: Enable/Start services
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
name: "{{ item }}"
|
||||||
|
enabled: yes
|
||||||
|
state: started
|
||||||
|
loop:
|
||||||
|
- compressTarballs
|
||||||
|
- ttyConsoleMessage
|
|
@ -0,0 +1,40 @@
|
||||||
|
- name: Create volume group
|
||||||
|
community.general.lvg:
|
||||||
|
vg: longhorn_vg
|
||||||
|
pvs:
|
||||||
|
- /dev/sdb
|
||||||
|
pvresize: yes
|
||||||
|
|
||||||
|
- name: Create logical volume
|
||||||
|
community.general.lvol:
|
||||||
|
vg: longhorn_vg
|
||||||
|
lv: longhorn_lv
|
||||||
|
size: 100%VG
|
||||||
|
|
||||||
|
- name: Store begin timestamp
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
start_time: "{{ lookup('pipe', 'date +%s') }}"
|
||||||
|
|
||||||
|
- name: Create filesystem
|
||||||
|
community.general.filesystem:
|
||||||
|
dev: /dev/mapper/longhorn_vg-longhorn_lv
|
||||||
|
fstype: ext4
|
||||||
|
|
||||||
|
- name: Store end timestamp
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
end_time: "{{ lookup('pipe', 'date +%s') }}"
|
||||||
|
|
||||||
|
- name: Calculate crude storage benchmark
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
storage_benchmark: "{{ [storage_benchmark, (end_time | int - start_time | int)] | max }}"
|
||||||
|
|
||||||
|
- name: Log benchmark actual duration
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: "Benchmark actual duration: {{ (end_time | int - start_time | int) }} second(s)"
|
||||||
|
|
||||||
|
- name: Mount dynamic disk
|
||||||
|
ansible.posix.mount:
|
||||||
|
path: /mnt/blockstorage
|
||||||
|
src: /dev/mapper/longhorn_vg-longhorn_lv
|
||||||
|
fstype: ext4
|
||||||
|
state: mounted
|
|
@ -0,0 +1,12 @@
|
||||||
|
- name: Import container images
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: k3s ctr image import {{ item }} --digests
|
||||||
|
chdir: /opt/metacluster/container-images
|
||||||
|
register: import_result
|
||||||
|
loop: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tar') | sort }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item | basename }}"
|
||||||
|
# Probably should add a task before that ensures K3s node is fully initialized before starting imports; currently K3s goes away briefly during this loop
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
until: import_result is not failed
|
|
@ -0,0 +1,19 @@
|
||||||
|
- name: Set hostname
|
||||||
|
ansible.builtin.hostname:
|
||||||
|
name: "{{ vapp['guestinfo.hostname'] }}"
|
||||||
|
|
||||||
|
- name: Create netplan configuration file
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: netplan.j2
|
||||||
|
dest: /etc/netplan/00-installer-config.yaml
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
macaddress: "{{ ansible_facts.default_ipv4.macaddress }}"
|
||||||
|
ipaddress: "{{ vapp['guestinfo.ipaddress'] }}"
|
||||||
|
prefixlength: "{{ vapp['guestinfo.prefixlength'] }}"
|
||||||
|
gateway: "{{ vapp['guestinfo.gateway'] }}"
|
||||||
|
dnsserver: "{{ vapp['guestinfo.dnsserver'] }}"
|
||||||
|
|
||||||
|
- name: Apply netplan configuration
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: /usr/sbin/netplan apply
|
|
@ -0,0 +1,13 @@
|
||||||
|
network:
|
||||||
|
version: 2
|
||||||
|
ethernets:
|
||||||
|
id0:
|
||||||
|
set-name: eth0
|
||||||
|
match:
|
||||||
|
macaddress: {{ _template.macaddress }}
|
||||||
|
addresses:
|
||||||
|
- {{ _template.ipaddress }}/{{ _template.prefixlength }}
|
||||||
|
gateway4: {{ _template.gateway }}
|
||||||
|
nameservers:
|
||||||
|
addresses:
|
||||||
|
- {{ _template.dnsserver }}
|
|
@ -0,0 +1,2 @@
|
||||||
|
- import_tasks: vapp.yml
|
||||||
|
- import_tasks: vcenter.yml
|
|
@ -0,0 +1,16 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Check for vCenter connectivity
|
||||||
|
community.vmware.vmware_vcenter_settings_info:
|
||||||
|
schema: vsphere
|
||||||
|
register: vcenter_info
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
until: vcenter_info is not failed
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
group/vmware:
|
||||||
|
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
validate_certs: no
|
||||||
|
username: "{{ vapp['hv.username'] }}"
|
||||||
|
password: "{{ vapp['hv.password'] }}"
|
|
@ -0,0 +1,50 @@
|
||||||
|
- name: Create folder structure(s)
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
loop:
|
||||||
|
- /opt/firstboot
|
||||||
|
|
||||||
|
- name: Create tty console message service
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ item.dest }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "{{ item.mode | default(omit) }}"
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
service:
|
||||||
|
name: ttyConsoleMessage
|
||||||
|
executable: /opt/firstboot/tty.sh
|
||||||
|
workingdir: /tmp/
|
||||||
|
metacluster:
|
||||||
|
components:
|
||||||
|
- name: ArgoCD
|
||||||
|
url: https://gitops.${FQDN}
|
||||||
|
healthcheck: https://gitops.${FQDN}
|
||||||
|
- name: Gitea
|
||||||
|
url: https://git.${FQDN}
|
||||||
|
healthcheck: https://git.${FQDN}
|
||||||
|
- name: Harbor
|
||||||
|
url: https://registry.${FQDN}
|
||||||
|
healthcheck: https://registry.${FQDN}
|
||||||
|
- name: Longhorn
|
||||||
|
url: https://storage.${FQDN}
|
||||||
|
healthcheck: https://storage.${FQDN}
|
||||||
|
- name: StepCA
|
||||||
|
url: ''
|
||||||
|
healthcheck: https://ca.${FQDN}/health
|
||||||
|
- name: Traefik
|
||||||
|
url: https://ingress.${FQDN}
|
||||||
|
healthcheck: https://ingress.${FQDN}
|
||||||
|
fqdn: "{{ vapp['metacluster.fqdn'] }}"
|
||||||
|
vip: "{{ vapp['metacluster.vip'] }}"
|
||||||
|
loop:
|
||||||
|
- src: tty.j2
|
||||||
|
dest: "{{ _template.service.executable }}"
|
||||||
|
mode: o+x
|
||||||
|
- src: systemdunit.j2
|
||||||
|
dest: /etc/systemd/system/{{ _template.service.name }}.service
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.src }}"
|
|
@ -0,0 +1,39 @@
|
||||||
|
- name: Set root password
|
||||||
|
ansible.builtin.user:
|
||||||
|
name: root
|
||||||
|
password: "{{ vapp['metacluster.password'] | password_hash('sha512', 65534 | random(seed=vapp['guestinfo.hostname']) | string) }}"
|
||||||
|
generate_ssh_key: yes
|
||||||
|
ssh_key_bits: 2048
|
||||||
|
ssh_key_file: .ssh/id_rsa
|
||||||
|
|
||||||
|
- name: Save root SSH publickey
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /root/.ssh/authorized_keys
|
||||||
|
line: "{{ vapp['guestinfo.rootsshkey'] }}"
|
||||||
|
|
||||||
|
- name: Disable SSH password authentication
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/ssh/sshd_config
|
||||||
|
regex: "{{ item.regex }}"
|
||||||
|
line: "{{ item.line }}"
|
||||||
|
state: "{{ item.state }}"
|
||||||
|
loop:
|
||||||
|
- regex: '^#PasswordAuthentication'
|
||||||
|
line: 'PasswordAuthentication no'
|
||||||
|
state: present
|
||||||
|
- regex: '^PasswordAuthentication yes'
|
||||||
|
line: 'PasswordAuthentication yes'
|
||||||
|
state: absent
|
||||||
|
loop_control:
|
||||||
|
label: "{{ '[' ~ item.regex ~ '] ' ~ item.state }}"
|
||||||
|
|
||||||
|
- name: Create dedicated SSH keypair
|
||||||
|
community.crypto.openssh_keypair:
|
||||||
|
path: /root/.ssh/git_rsa_id
|
||||||
|
register: gitops_sshkey
|
||||||
|
|
||||||
|
- name: Delete 'ubuntu' user
|
||||||
|
ansible.builtin.user:
|
||||||
|
name: ubuntu
|
||||||
|
state: absent
|
||||||
|
remove: yes
|
|
@ -0,0 +1,38 @@
|
||||||
|
- name: Store current ovfEnvironment
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: /usr/bin/vmtoolsd --cmd "info-get guestinfo.ovfEnv"
|
||||||
|
register: ovfenv
|
||||||
|
|
||||||
|
- name: Parse XML for MoRef ID
|
||||||
|
community.general.xml:
|
||||||
|
xmlstring: "{{ ovfenv.stdout }}"
|
||||||
|
namespaces:
|
||||||
|
ns: http://schemas.dmtf.org/ovf/environment/1
|
||||||
|
ve: http://www.vmware.com/schema/ovfenv
|
||||||
|
xpath: /ns:Environment
|
||||||
|
content: attribute
|
||||||
|
register: environment_attribute
|
||||||
|
|
||||||
|
- name: Store MoRef ID
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
moref_id: "{{ ((environment_attribute.matches[0].values() | list)[0].values() | list)[1] }}"
|
||||||
|
|
||||||
|
- name: Parse XML for vApp properties
|
||||||
|
community.general.xml:
|
||||||
|
xmlstring: "{{ ovfenv.stdout }}"
|
||||||
|
namespaces:
|
||||||
|
ns: http://schemas.dmtf.org/ovf/environment/1
|
||||||
|
xpath: /ns:Environment/ns:PropertySection/ns:Property
|
||||||
|
content: attribute
|
||||||
|
register: property_section
|
||||||
|
|
||||||
|
- name: Assign vApp properties to dictionary
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
vapp: >-
|
||||||
|
{{ vapp | default({}) | combine({
|
||||||
|
((item.values() | list)[0].values() | list)[0]:
|
||||||
|
((item.values() | list)[0].values() | list)[1]})
|
||||||
|
}}
|
||||||
|
loop: "{{ property_section.matches }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ ((item.values() | list)[0].values() | list)[0] }}"
|
|
@ -0,0 +1,10 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Change working directory
|
||||||
|
pushd {{ _template.service.workingdir }}
|
||||||
|
|
||||||
|
# Compress *.tar files
|
||||||
|
if tar -czf image-tarballs.tgz *.tar --remove-files; then
|
||||||
|
# Disable systemd unit
|
||||||
|
systemctl disable {{ _template.service.name }}
|
||||||
|
fi
|
|
@ -0,0 +1,8 @@
|
||||||
|
mirrors:
|
||||||
|
{% for registry in _template.registries %}
|
||||||
|
{{ registry }}:
|
||||||
|
endpoint:
|
||||||
|
- https://registry.{{ _template.hv.fqdn }}
|
||||||
|
rewrite:
|
||||||
|
"(.*)": "library/{{ registry }}/$1"
|
||||||
|
{% endfor %}
|
|
@ -0,0 +1,9 @@
|
||||||
|
[Unit]
|
||||||
|
Description={{ _template.service.name }}
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
ExecStart={{ _template.service.executable }}
|
||||||
|
Nice=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
|
@ -0,0 +1,50 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
export TERM=linux
|
||||||
|
|
||||||
|
BGRN='\033[1;92m'
|
||||||
|
BGRY='\033[1;30m'
|
||||||
|
BBLU='\033[1;34m'
|
||||||
|
BRED='\033[1;91m'
|
||||||
|
BWHI='\033[1;97m'
|
||||||
|
CBLA='\033[?16;0;30c' # Hide blinking cursor
|
||||||
|
DFLT='\033[0m' # Reset colour
|
||||||
|
LCLR='\033[K' # Clear to end of line
|
||||||
|
PRST='\033[0;0H' # Reset cursor position
|
||||||
|
|
||||||
|
FQDN='{{ _template.metacluster.fqdn }}'
|
||||||
|
IPADDRESS='{{ _template.metacluster.vip }}'
|
||||||
|
|
||||||
|
declare -A COMPONENTS
|
||||||
|
{% for component in _template.metacluster.components %}
|
||||||
|
COMPONENTS["{{ component.name }}\t({{ component.url }})"]="{{ component.healthcheck }}"
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
I=0
|
||||||
|
|
||||||
|
while /bin/true; do
|
||||||
|
if [[ $I -gt 59 ]]; then
|
||||||
|
clear > /dev/tty1
|
||||||
|
I=0
|
||||||
|
else
|
||||||
|
I=$(( $I + 1 ))
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "${PRST}" > /dev/tty1
|
||||||
|
echo -e "\n\n\t${DFLT}To manage this appliance, please connect to one of the following:${LCLR}\n" > /dev/tty1
|
||||||
|
|
||||||
|
for c in $( echo "${!COMPONENTS[@]}" | tr ' ' $'\n' | sort); do
|
||||||
|
STATUS=$(curl -kLs "${COMPONENTS[${c}]}" -o /dev/null -w '%{http_code}')
|
||||||
|
|
||||||
|
if [[ "${STATUS}" -eq "200" ]]; then
|
||||||
|
echo -e "\t [${BGRN}+${DFLT}] ${BBLU}${c}${DFLT}${LCLR}" > /dev/tty1
|
||||||
|
else
|
||||||
|
echo -e "\t [${BRED}-${DFLT}] ${BBLU}${c}${DFLT}${LCLR}" > /dev/tty1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo -e "\n\t${BGRY}Note that your DNS zone ${DFLT}must have${BGRY} respective records defined,\n\teach pointing to: ${DFLT}${IPADDRESS}${LCLR}" > /dev/tty1
|
||||||
|
|
||||||
|
echo -e "${CBLA}" > /dev/tty1
|
||||||
|
sleep 1
|
||||||
|
done
|
|
@ -0,0 +1,23 @@
|
||||||
|
playbook:
|
||||||
|
retries: 10
|
||||||
|
delay:
|
||||||
|
# These values are multiplied with the value of `storage_benchmark`
|
||||||
|
long: 2
|
||||||
|
medium: 1
|
||||||
|
short: 0.5
|
||||||
|
|
||||||
|
# This default value is updated during the playbook, based on an I/O intensive operation
|
||||||
|
storage_benchmark: 30
|
||||||
|
|
||||||
|
config:
|
||||||
|
clusterapi:
|
||||||
|
size_matrix:
|
||||||
|
small:
|
||||||
|
cpu: 2
|
||||||
|
memory: 6144
|
||||||
|
medium:
|
||||||
|
cpu: 4
|
||||||
|
memory: 8192
|
||||||
|
large:
|
||||||
|
cpu: 8
|
||||||
|
memory: 16384
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
- hosts: 127.0.0.1
|
||||||
|
connection: local
|
||||||
|
gather_facts: true
|
||||||
|
vars_files:
|
||||||
|
- defaults.yml
|
||||||
|
- metacluster.yml
|
||||||
|
# become: true
|
||||||
|
roles:
|
||||||
|
- vapp
|
||||||
|
- network
|
||||||
|
- preflight
|
||||||
|
- users
|
||||||
|
- disks
|
||||||
|
- metacluster
|
||||||
|
- workloadcluster
|
||||||
|
- decommission
|
||||||
|
- tty
|
||||||
|
- cleanup
|
||||||
|
handlers:
|
||||||
|
- name: Apply manifests
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
src: "{{ item }}"
|
||||||
|
state: present
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
loop: "{{ query('ansible.builtin.fileglob', '/var/lib/rancher/k3s/server/manifests/*.yaml') | sort }}"
|
||||||
|
ignore_errors: yes
|
|
@ -0,0 +1,35 @@
|
||||||
|
- name: Cordon node
|
||||||
|
kubernetes.core.k8s_drain:
|
||||||
|
name: "{{ decom_nodename }}"
|
||||||
|
state: cordon
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
|
||||||
|
- name: Drain node
|
||||||
|
# Currently does not work; returns with error "Failed to delete pod [...] due to: Too Many Requests"
|
||||||
|
# See also: https://github.com/ansible-collections/kubernetes.core/issues/474
|
||||||
|
# kubernetes.core.k8s_drain:
|
||||||
|
# name: "{{ decom_nodename }}"
|
||||||
|
# state: drain
|
||||||
|
# delete_options:
|
||||||
|
# ignore_daemonsets: true
|
||||||
|
# delete_emptydir_data: true
|
||||||
|
# kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
kubectl drain {{ decom_nodename }} \
|
||||||
|
--delete-emptydir-data \
|
||||||
|
--ignore-daemonsets
|
||||||
|
register: nodedrain_results
|
||||||
|
until:
|
||||||
|
- nodedrain_results is not failed
|
||||||
|
- (nodedrain_results.stdout_lines | last) is match('node/.* drained')
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
|
||||||
|
- name: Delete node
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
name: "{{ decom_nodename }}"
|
||||||
|
kind: node
|
||||||
|
state: absent
|
||||||
|
wait: true
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
|
@ -0,0 +1,18 @@
|
||||||
|
- name: Lookup node name and moref-id for decommissioning
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
decom_nodename: >-
|
||||||
|
{{
|
||||||
|
lookup('kubernetes.core.k8s', kind='Node', kubeconfig=(kubeconfig.path)) |
|
||||||
|
json_query('[? metadata.name != `' ~ ansible_facts.nodename ~ '`].metadata.name') |
|
||||||
|
first
|
||||||
|
}}
|
||||||
|
decom_morefid: >-
|
||||||
|
{{
|
||||||
|
lookup('kubernetes.core.k8s', kind='Node', kubeconfig=(kubeconfig.path)) |
|
||||||
|
json_query('[? metadata.name != `' ~ ansible_facts.nodename ~ '`].metadata.labels."ova.airgappedk8s/moref_id"') |
|
||||||
|
first
|
||||||
|
}}
|
||||||
|
|
||||||
|
- import_tasks: storage.yml
|
||||||
|
- import_tasks: k3s.yml
|
||||||
|
- import_tasks: virtualmachine.yml
|
|
@ -0,0 +1,27 @@
|
||||||
|
- name: Disable disk scheduling and evict replicas
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
api_version: longhorn.io/v1beta2
|
||||||
|
kind: lhn
|
||||||
|
name: "{{ decom_nodename }}"
|
||||||
|
namespace: longhorn-system
|
||||||
|
state: patched
|
||||||
|
definition: |
|
||||||
|
spec:
|
||||||
|
allowScheduling: false
|
||||||
|
evictionRequested: true
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
|
||||||
|
- name: Reduce replica amount for each volume
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
api_version: longhorn.io/v1beta2
|
||||||
|
kind: volume
|
||||||
|
name: "{{ item.metadata.name }}"
|
||||||
|
namespace: longhorn-system
|
||||||
|
state: patched
|
||||||
|
definition: |
|
||||||
|
spec:
|
||||||
|
numberOfReplicas: {{ (lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int) - 1 }}
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
loop: "{{ query('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.metadata.name }}"
|
|
@ -0,0 +1,26 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Lookup VM name
|
||||||
|
community.vmware.vmware_guest_info:
|
||||||
|
moid: "{{ decom_morefid }}"
|
||||||
|
register: virtualmachine_details
|
||||||
|
|
||||||
|
- name: Power off VM
|
||||||
|
community.vmware.vmware_guest:
|
||||||
|
name: "{{ virtualmachine_details.instance.hw_name }}"
|
||||||
|
folder: "{{ virtualmachine_details.instance.hw_folder }}"
|
||||||
|
state: poweredoff
|
||||||
|
|
||||||
|
# - name: Delete VM
|
||||||
|
# community.vmware.vmware_guest:
|
||||||
|
# name: "{{ virtualmachine_details.instance.hw_name }}"
|
||||||
|
# folder: "{{ virtualmachine_details.instance.hw_folder }}"
|
||||||
|
# state: absent
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
group/vmware:
|
||||||
|
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
validate_certs: no
|
||||||
|
username: "{{ vapp['hv.username'] }}"
|
||||||
|
password: "{{ vapp['hv.password'] }}"
|
||||||
|
datacenter: "{{ vcenter_info.datacenter }}"
|
|
@ -0,0 +1,52 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Initialize tempfile
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: file
|
||||||
|
register: values_file
|
||||||
|
|
||||||
|
- name: Lookup current chart values
|
||||||
|
kubernetes.core.helm_info:
|
||||||
|
name: step-certificates
|
||||||
|
namespace: step-ca
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
register: stepca_values
|
||||||
|
|
||||||
|
- name: Write chart values w/ password to tempfile
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ values_file.path }}"
|
||||||
|
content: "{{ stepca_values.status | json_query('values') | to_yaml }}"
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Upgrade step-ca chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: step-certificates
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/step-certificates
|
||||||
|
release_namespace: step-ca
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values_files:
|
||||||
|
- "{{ values_file.path }}"
|
||||||
|
|
||||||
|
- name: Cleanup tempfile
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ values_file.path }}"
|
||||||
|
state: absent
|
||||||
|
when: values_file.path is defined
|
||||||
|
|
||||||
|
- name: Ensure step-ca API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://ca.{{ vapp['metacluster.fqdn'] }}/health
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'ok'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,50 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Push images to registry
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: >-
|
||||||
|
skopeo copy \
|
||||||
|
--insecure-policy \
|
||||||
|
--dest-tls-verify=false \
|
||||||
|
--dest-creds admin:{{ vapp['metacluster.password'] }} \
|
||||||
|
docker-archive:./{{ item | basename }} \
|
||||||
|
docker://registry.{{ vapp['metacluster.fqdn'] }}/library/$( \
|
||||||
|
skopeo list-tags \
|
||||||
|
--insecure-policy \
|
||||||
|
docker-archive:./{{ item | basename }} | \
|
||||||
|
jq -r '.Tags[0]')
|
||||||
|
chdir: /opt/metacluster/container-images/
|
||||||
|
register: push_result
|
||||||
|
loop: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tar') | sort }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item | basename }}"
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||||
|
until: push_result is not failed
|
||||||
|
|
||||||
|
- name: Get all stored container images (=artifacts)
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/search?q=library
|
||||||
|
method: GET
|
||||||
|
register: registry_artifacts
|
||||||
|
|
||||||
|
- name: Get source registries of all artifacts
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
source_registries: "{{ (source_registries | default([]) + [(item | split('/'))[1]]) | unique | sort }}"
|
||||||
|
loop: "{{ registry_artifacts.json.repository | json_query('[*].repository_name') }}"
|
||||||
|
|
||||||
|
- name: Configure K3s node for private registry
|
||||||
|
ansible.builtin.template:
|
||||||
|
dest: /etc/rancher/k3s/registries.yaml
|
||||||
|
src: registries.j2
|
||||||
|
vars:
|
||||||
|
_template:
|
||||||
|
data: "{{ source_registries }}"
|
||||||
|
hv:
|
||||||
|
fqdn: "{{ vapp['metacluster.fqdn'] }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201, 401]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,27 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Upgrade gitea chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: gitea
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/gitea
|
||||||
|
release_namespace: gitea
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['gitea'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure gitea API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/healthz
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'pass'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,26 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Upgrade argo-cd chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: argo-cd
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/argo-cd
|
||||||
|
release_namespace: argo-cd
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['argo-cd'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure argo-cd API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://gitops.{{ vapp['metacluster.fqdn'] }}/api/version
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.Version is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,35 @@
|
||||||
|
- name: Configure fallback name resolution
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/hosts
|
||||||
|
line: "{{ vapp['metacluster.vip'] }} {{ item ~ '.' ~ vapp['metacluster.fqdn'] }}"
|
||||||
|
state: present
|
||||||
|
loop:
|
||||||
|
# TODO: Make this list dynamic
|
||||||
|
- ca
|
||||||
|
- git
|
||||||
|
- gitops
|
||||||
|
- ingress
|
||||||
|
- registry
|
||||||
|
- storage
|
||||||
|
|
||||||
|
- name: Retrieve root CA certificate
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://ca.{{ vapp['metacluster.fqdn'] }}/roots
|
||||||
|
validate_certs: no
|
||||||
|
method: GET
|
||||||
|
status_code: [200, 201]
|
||||||
|
register: rootca_certificate
|
||||||
|
|
||||||
|
- name: Store root CA certificate
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: /usr/local/share/ca-certificates/root_ca.crt
|
||||||
|
content: "{{ rootca_certificate.json.crts | list | join('\n') }}"
|
||||||
|
|
||||||
|
- name: Update certificate truststore
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: update-ca-certificates
|
||||||
|
|
||||||
|
- name: Remove redundant files
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
|
||||||
|
state: absent
|
|
@ -0,0 +1,80 @@
|
||||||
|
- name: Store custom configuration files
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ item.filename }}"
|
||||||
|
content: "{{ item.content }}"
|
||||||
|
loop:
|
||||||
|
- filename: /etc/rancher/k3s/config.yaml
|
||||||
|
content: |
|
||||||
|
kubelet-arg:
|
||||||
|
- "config=/etc/rancher/k3s/kubelet.config"
|
||||||
|
- filename: /etc/rancher/k3s/kubelet.config
|
||||||
|
content: |
|
||||||
|
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||||
|
kind: KubeletConfiguration
|
||||||
|
|
||||||
|
shutdownGracePeriod: 180s
|
||||||
|
shutdownGracePeriodCriticalPods: 60s
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.filename }}"
|
||||||
|
|
||||||
|
- name: Gather service facts
|
||||||
|
ansible.builtin.service_facts:
|
||||||
|
# Module requires no attributes
|
||||||
|
|
||||||
|
- name: Install K3s
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: ./install.sh
|
||||||
|
chdir: /opt/metacluster/k3s
|
||||||
|
environment:
|
||||||
|
INSTALL_K3S_SKIP_DOWNLOAD: 'true'
|
||||||
|
# To prevent from overwriting traefik's existing configuration, "disable" it on this new node
|
||||||
|
INSTALL_K3S_EXEC: "server --token {{ vapp['metacluster.token'] | trim }} --server https://{{ vapp['metacluster.vip'] }}:6443 --disable local-storage,traefik --config /etc/rancher/k3s/config.yaml"
|
||||||
|
when: ansible_facts.services['k3s.service'] is undefined
|
||||||
|
|
||||||
|
- name: Ensure API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://{{ vapp['guestinfo.ipaddress'] }}:6443/livez?verbose
|
||||||
|
method: GET
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 401]
|
||||||
|
register: api_readycheck
|
||||||
|
until: api_readycheck.json.apiVersion is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
||||||
|
|
||||||
|
- name: Install kubectl tab-completion
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: kubectl completion bash | tee /etc/bash_completion.d/kubectl
|
||||||
|
|
||||||
|
- name: Initialize tempfile
|
||||||
|
ansible.builtin.tempfile:
|
||||||
|
state: file
|
||||||
|
register: kubeconfig
|
||||||
|
|
||||||
|
- name: Retrieve kubeconfig
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: kubectl config view --raw
|
||||||
|
register: kubectl_config
|
||||||
|
|
||||||
|
- name: Store kubeconfig in tempfile
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ kubeconfig.path }}"
|
||||||
|
content: "{{ kubectl_config.stdout }}"
|
||||||
|
mode: 0600
|
||||||
|
no_log: true
|
||||||
|
|
||||||
|
- name: Add label to node object
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
name: "{{ ansible_facts.nodename | lower }}"
|
||||||
|
kind: Node
|
||||||
|
state: patched
|
||||||
|
definition:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
ova.airgappedk8s/moref_id: "{{ moref_id }}"
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
register: k8snode_patch
|
||||||
|
until:
|
||||||
|
- k8snode_patch.result.metadata.labels['ova.airgappedk8s/moref_id'] is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
|
@ -0,0 +1,9 @@
|
||||||
|
- import_tasks: init.yml
|
||||||
|
- import_tasks: containerimages.yml
|
||||||
|
- import_tasks: k3s.yml
|
||||||
|
- import_tasks: assets.yml
|
||||||
|
- import_tasks: storage.yml
|
||||||
|
- import_tasks: registry.yml
|
||||||
|
- import_tasks: certauthority.yml
|
||||||
|
- import_tasks: git.yml
|
||||||
|
- import_tasks: gitops.yml
|
|
@ -0,0 +1,27 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Upgrade harbor chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: harbor
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/harbor
|
||||||
|
release_namespace: harbor
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['harbor'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure harbor API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/health
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.status is defined
|
||||||
|
- api_readycheck.json.status == 'healthy'
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201, 401]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,53 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Increase replicas for each volume
|
||||||
|
kubernetes.core.k8s:
|
||||||
|
api_version: longhorn.io/v1beta2
|
||||||
|
kind: volume
|
||||||
|
name: "{{ item.metadata.name }}"
|
||||||
|
namespace: longhorn-system
|
||||||
|
state: patched
|
||||||
|
definition: |
|
||||||
|
spec:
|
||||||
|
numberOfReplicas: {{ lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int }}
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
loop: "{{ query('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.metadata.name }}"
|
||||||
|
|
||||||
|
- name: Wait for replica rebuilds to complete
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://storage.{{ vapp['metacluster.fqdn'] }}/v1/volumes
|
||||||
|
method: GET
|
||||||
|
register: volume_details
|
||||||
|
until:
|
||||||
|
- volume_details.json is defined
|
||||||
|
- (volume_details.json | json_query('data[? state==`attached`].robustness') | unique | length) == 1
|
||||||
|
- (volume_details.json | json_query('data[? state==`attached`].robustness') | first) == "healthy"
|
||||||
|
retries: "{{ ( playbook.retries * 2) | int }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
- name: Upgrade longhorn chart
|
||||||
|
kubernetes.core.helm:
|
||||||
|
name: longhorn
|
||||||
|
chart_ref: /opt/metacluster/helm-charts/longhorn
|
||||||
|
release_namespace: longhorn-system
|
||||||
|
wait: false
|
||||||
|
kubeconfig: "{{ kubeconfig.path }}"
|
||||||
|
values: "{{ components['longhorn'].chart_values }}"
|
||||||
|
|
||||||
|
- name: Ensure longhorn API availability
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://storage.{{ vapp['metacluster.fqdn'] }}/v1
|
||||||
|
method: GET
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck is not failed
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,3 @@
|
||||||
|
- import_tasks: vapp.yml
|
||||||
|
- import_tasks: vcenter.yml
|
||||||
|
- import_tasks: metacluster.yml
|
|
@ -0,0 +1,11 @@
|
||||||
|
- name: Check for metacluster connectivity
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://{{ vapp['metacluster.vip'] }}:6443/livez?verbose
|
||||||
|
method: GET
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 401]
|
||||||
|
register: api_readycheck
|
||||||
|
until:
|
||||||
|
- api_readycheck.json.apiVersion is defined
|
||||||
|
retries: "{{ playbook.retries }}"
|
||||||
|
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
|
@ -0,0 +1,20 @@
|
||||||
|
- name: Check for expected vApp properties
|
||||||
|
ansible.builtin.assert:
|
||||||
|
that:
|
||||||
|
- vapp[item] is defined
|
||||||
|
- (vapp[item] | length) > 0
|
||||||
|
quiet: true
|
||||||
|
loop:
|
||||||
|
- guestinfo.dnsserver
|
||||||
|
- guestinfo.gateway
|
||||||
|
- guestinfo.hostname
|
||||||
|
- guestinfo.ipaddress
|
||||||
|
- guestinfo.prefixlength
|
||||||
|
- guestinfo.rootsshkey
|
||||||
|
- hv.fqdn
|
||||||
|
- hv.password
|
||||||
|
- hv.username
|
||||||
|
- metacluster.fqdn
|
||||||
|
- metacluster.password
|
||||||
|
- metacluster.token
|
||||||
|
- metacluster.vip
|
|
@ -0,0 +1 @@
|
||||||
|
# ...
|
|
@ -0,0 +1,57 @@
|
||||||
|
- name: Gather hypervisor details
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: govc ls -L {{ item.moref }} | awk -F/ '{print ${{ item.part }}}'
|
||||||
|
environment:
|
||||||
|
GOVC_INSECURE: '1'
|
||||||
|
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||||
|
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||||
|
register: govc_inventory
|
||||||
|
loop:
|
||||||
|
- attribute: cluster
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||||
|
part: (NF-1)
|
||||||
|
- attribute: datacenter
|
||||||
|
moref: VirtualMachine:{{ moref_id }}
|
||||||
|
part: 2
|
||||||
|
- attribute: datastore
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||||
|
part: NF
|
||||||
|
- attribute: folder
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
|
||||||
|
part: 0
|
||||||
|
# - attribute: host
|
||||||
|
# moref: >-
|
||||||
|
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||||
|
# part: NF
|
||||||
|
- attribute: network
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||||
|
part: NF
|
||||||
|
- attribute: resourcepool
|
||||||
|
moref: >-
|
||||||
|
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||||
|
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
|
||||||
|
part: 0
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.attribute }}"
|
||||||
|
|
||||||
|
- name: Retrieve hypervisor TLS thumbprint
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: openssl s_client -connect {{ vapp['hv.fqdn'] }}:443 < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin | awk -F'=' '{print $2}'
|
||||||
|
register: tls_thumbprint
|
||||||
|
|
||||||
|
- name: Store hypervisor details in dictionary
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
vcenter_info: "{{ vcenter_info | default({}) | combine({ item.item.attribute : item.stdout }) }}"
|
||||||
|
loop: "{{ govc_inventory.results }}"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.item.attribute }}"
|
|
@ -0,0 +1,4 @@
|
||||||
|
- import_tasks: hypervisor.yml
|
||||||
|
- import_tasks: registry.yml
|
||||||
|
- import_tasks: nodetemplates.yml
|
||||||
|
# - import_tasks: clusterapi.yml
|
|
@ -0,0 +1,73 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Check for existing template on hypervisor
|
||||||
|
community.vmware.vmware_guest_info:
|
||||||
|
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||||
|
register: existing_ova
|
||||||
|
ignore_errors: yes
|
||||||
|
|
||||||
|
- name: Store inventory path of existing template
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
|
||||||
|
when: existing_ova is not failed
|
||||||
|
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Parse OVA file for network mappings
|
||||||
|
ansible.builtin.shell:
|
||||||
|
cmd: govc import.spec -json {{ filename }}
|
||||||
|
environment:
|
||||||
|
GOVC_INSECURE: '1'
|
||||||
|
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||||
|
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||||
|
register: ova_spec
|
||||||
|
|
||||||
|
- name: Deploy OVA template on hypervisor
|
||||||
|
community.vmware.vmware_deploy_ovf:
|
||||||
|
cluster: "{{ vcenter_info.cluster }}"
|
||||||
|
datastore: "{{ vcenter_info.datastore }}"
|
||||||
|
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||||
|
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
|
||||||
|
allow_duplicates: no
|
||||||
|
power_on: false
|
||||||
|
ovf: "{{ filename }}"
|
||||||
|
register: ova_deploy
|
||||||
|
|
||||||
|
- name: Add additional placeholder disk
|
||||||
|
community.vmware.vmware_guest_disk:
|
||||||
|
name: "{{ ova_deploy.instance.hw_name }}"
|
||||||
|
disk:
|
||||||
|
- size: 1Gb
|
||||||
|
scsi_controller: 1
|
||||||
|
scsi_type: paravirtual
|
||||||
|
unit_number: 0
|
||||||
|
|
||||||
|
# Disabled to allow disks to be resized; at the cost of cloning speed
|
||||||
|
# - name: Create snapshot on deployed VM
|
||||||
|
# community.vmware.vmware_guest_snapshot:
|
||||||
|
# name: "{{ ova_deploy.instance.hw_name }}"
|
||||||
|
# state: present
|
||||||
|
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
|
||||||
|
|
||||||
|
- name: Mark deployed VM as template
|
||||||
|
community.vmware.vmware_guest:
|
||||||
|
name: "{{ ova_deploy.instance.hw_name }}"
|
||||||
|
is_template: yes
|
||||||
|
|
||||||
|
- name: Store inventory path of deployed template
|
||||||
|
ansible.builtin.set_fact:
|
||||||
|
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
|
||||||
|
|
||||||
|
when: existing_ova is failed
|
||||||
|
|
||||||
|
vars:
|
||||||
|
filename: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/node-templates/*.ova') | first }}"
|
||||||
|
module_defaults:
|
||||||
|
group/vmware:
|
||||||
|
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||||
|
validate_certs: no
|
||||||
|
username: "{{ vapp['hv.username'] }}"
|
||||||
|
password: "{{ vapp['hv.password'] }}"
|
||||||
|
datacenter: "{{ vcenter_info.datacenter }}"
|
||||||
|
folder: "{{ vcenter_info.folder }}"
|
|
@ -0,0 +1,17 @@
|
||||||
|
- block:
|
||||||
|
|
||||||
|
- name: Copy kubeadm container images to dedicated project
|
||||||
|
ansible.builtin.uri:
|
||||||
|
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects/kubeadm/repositories/{{ ( item | regex_findall('([^:/]+)') )[-2] }}/artifacts?from=library/{{ item | replace('/', '%2F') | replace(':', '%3A') }}
|
||||||
|
method: POST
|
||||||
|
headers:
|
||||||
|
Authorization: "Basic {{ ('admin:' ~ vapp['metacluster.password']) | b64encode }}"
|
||||||
|
body:
|
||||||
|
from: "{{ item }}"
|
||||||
|
loop: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"
|
||||||
|
|
||||||
|
module_defaults:
|
||||||
|
ansible.builtin.uri:
|
||||||
|
validate_certs: no
|
||||||
|
status_code: [200, 201, 409]
|
||||||
|
body_format: json
|
|
@ -0,0 +1,34 @@
|
||||||
|
- name: Create destination folder
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /opt/firstboot
|
||||||
|
state: directory
|
||||||
|
|
||||||
|
- name: Create firstboot script file
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: firstboot.j2
|
||||||
|
dest: /opt/firstboot/firstboot.sh
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: o+x
|
||||||
|
|
||||||
|
- name: Create @reboot crontab job
|
||||||
|
ansible.builtin.cron:
|
||||||
|
name: firstboot
|
||||||
|
special_time: reboot
|
||||||
|
job: "/opt/firstboot/firstboot.sh >/dev/tty1 2>&1"
|
||||||
|
|
||||||
|
- name: Copy payload folder (common)
|
||||||
|
ansible.builtin.copy:
|
||||||
|
src: ansible_payload/common/
|
||||||
|
dest: /opt/firstboot/ansible/
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
|
||||||
|
- name: Copy payload folder (per appliancetype)
|
||||||
|
ansible.builtin.copy:
|
||||||
|
src: ansible_payload/{{ appliancetype }}/
|
||||||
|
dest: /opt/firstboot/ansible/
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
|
@ -0,0 +1,10 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Workaround for ansible output regression
|
||||||
|
export PYTHONUNBUFFERED=1
|
||||||
|
|
||||||
|
# Apply firstboot configuration w/ ansible
|
||||||
|
/usr/local/bin/ansible-playbook /opt/firstboot/ansible/playbook.yml | tee -a /var/log/firstboot.log > /dev/tty1 2>&1
|
||||||
|
|
||||||
|
# Cleanup console
|
||||||
|
clear > /dev/tty1
|
|
@ -0,0 +1,13 @@
|
||||||
|
- name: Delete cloud-init package
|
||||||
|
ansible.builtin.apt:
|
||||||
|
name: cloud-init
|
||||||
|
state: absent
|
||||||
|
purge: yes
|
||||||
|
|
||||||
|
- name: Delete cloud-init files
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: absent
|
||||||
|
loop:
|
||||||
|
- /etc/cloud
|
||||||
|
- /var/lib/cloud
|
|
@ -0,0 +1,5 @@
|
||||||
|
- name: Enable crontab logging
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/rsyslog.d/50-default.conf
|
||||||
|
regexp: '^#cron\.\*.*'
|
||||||
|
line: "cron.*\t\t\t\t./var/log/cron.log"
|
|
@ -0,0 +1,7 @@
|
||||||
|
- import_tasks: tty.yml
|
||||||
|
- import_tasks: snapd.yml
|
||||||
|
- import_tasks: cloud-init.yml
|
||||||
|
- import_tasks: logging.yml
|
||||||
|
- import_tasks: services.yml
|
||||||
|
- import_tasks: packages.yml
|
||||||
|
- import_tasks: sysctl.yml
|
|
@ -0,0 +1,47 @@
|
||||||
|
- name: Configure 'needrestart' package
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
path: /etc/needrestart/needrestart.conf
|
||||||
|
regexp: "{{ item.regexp }}"
|
||||||
|
line: "{{ item.line }}"
|
||||||
|
loop:
|
||||||
|
- regexp: "^#\\$nrconf\\{restart\\} = 'i';"
|
||||||
|
line: "$nrconf{restart} = 'a';"
|
||||||
|
- regexp: "^#\\$nrconf\\{kernelhints\\} = -1;"
|
||||||
|
line: "$nrconf{kernelhints} = -1;"
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.line }}"
|
||||||
|
|
||||||
|
- name: Install additional packages
|
||||||
|
ansible.builtin.apt:
|
||||||
|
pkg: "{{ packages.apt }}"
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
install_recommends: no
|
||||||
|
|
||||||
|
- name: Upgrade all packages
|
||||||
|
ansible.builtin.apt:
|
||||||
|
name: '*'
|
||||||
|
state: latest
|
||||||
|
update_cache: yes
|
||||||
|
|
||||||
|
- name: Install additional python packages
|
||||||
|
ansible.builtin.pip:
|
||||||
|
name: "{{ item }}"
|
||||||
|
executable: pip3
|
||||||
|
state: latest
|
||||||
|
loop: "{{ packages.pip }}"
|
||||||
|
|
||||||
|
- name: Create folder
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /etc/ansible
|
||||||
|
state: directory
|
||||||
|
|
||||||
|
- name: Configure Ansible defaults
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: ansible.j2
|
||||||
|
dest: /etc/ansible/ansible.cfg
|
||||||
|
|
||||||
|
- name: Cleanup
|
||||||
|
ansible.builtin.apt:
|
||||||
|
autoremove: yes
|
||||||
|
purge: yes
|
|
@ -0,0 +1,5 @@
|
||||||
|
- name: Disable & mask networkd-wait-online
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
name: systemd-networkd-wait-online
|
||||||
|
enabled: no
|
||||||
|
masked: yes
|
|
@ -0,0 +1,19 @@
|
||||||
|
- name: Delete snapd package
|
||||||
|
ansible.builtin.apt:
|
||||||
|
name: snapd
|
||||||
|
state: absent
|
||||||
|
purge: yes
|
||||||
|
|
||||||
|
- name: Delete leftover files
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /root/snap
|
||||||
|
state: absent
|
||||||
|
|
||||||
|
- name: Hold snapd package
|
||||||
|
ansible.builtin.dpkg_selections:
|
||||||
|
name: snapd
|
||||||
|
selection: hold
|
||||||
|
|
||||||
|
- name: Reload systemd unit configurations
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
daemon_reload: yes
|
|
@ -0,0 +1,11 @@
|
||||||
|
- name: Configure inotify limits
|
||||||
|
ansible.posix.sysctl:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
value: "{{ item.value }}"
|
||||||
|
loop:
|
||||||
|
- name: fs.inotify.max_user_instances
|
||||||
|
value: '512'
|
||||||
|
- name: fs.inotify.max_user_watches
|
||||||
|
value: '524288'
|
||||||
|
loop_control:
|
||||||
|
label: "{{ item.name ~ '=' ~ item.value }}"
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue