Compare commits
159 Commits
v1.0.0
...
Appliance.
Author | SHA1 | Date | |
---|---|---|---|
5740faeb9d | |||
e057f313ea | |||
ac38731dcf | |||
9cbb84a0f3 | |||
066ec9a967 | |||
dda14af238 | |||
2db1c4d623 | |||
1451e8f105 | |||
baf809159b | |||
066a21b1d2 | |||
46fe962e77 | |||
74070f266c | |||
20f28f7d8a | |||
2802b49d02 | |||
594e62cf71 | |||
544f98a8fb | |||
562e0b8167 | |||
88e37bb706 | |||
8323668aeb | |||
afe29e3407 | |||
2473aa05dc | |||
7870ef8cf0 | |||
e42479f214 | |||
d459c98045 | |||
bf7ccc8962 | |||
75309bdf11 | |||
1469ba08d8 | |||
8764634ea0 | |||
1b4c4b5d64 | |||
8c1016a231 | |||
663804e1b6 | |||
33ba3771cc | |||
c05b58e93a | |||
29c89e16ee | |||
201c6f8bca | |||
67f91f2962 | |||
be02f884ac | |||
8037d4d1c7 | |||
d99fca654f | |||
7d431d88c3 | |||
691dcee21f | |||
95c14fc385 | |||
33cd272d53 | |||
e46e5c0802 | |||
2d468d8b83 | |||
f45b96f42b | |||
4dc5a6ed39 | |||
34af03ca99 | |||
55c594d242 | |||
8555e677b3 | |||
3cefd67458 | |||
1a0e674aa8 | |||
6e37fd756b | |||
6568acf541 | |||
092ce5eabc | |||
a785e57126 | |||
71e9957122 | |||
877fc24235 | |||
778a7581c0 | |||
d1bce54a2d | |||
a8cb53429d | |||
e1f83f2245 | |||
1cfca1fa4a | |||
27cb500b8c | |||
720bc43546 | |||
49b8b80db0 | |||
3bc3da54be | |||
8e7e23c8bc | |||
e4cfc26e2c | |||
3b89aed52b | |||
5cdd6ef052 | |||
ef8766b5ca | |||
ab14a966e0 | |||
f6961b5e3a | |||
c1a8a35494 | |||
ba7e233c27 | |||
8c6a9f38ba | |||
bf3d7ed239 | |||
0509a7cb8a | |||
01601de897 | |||
a2198f1109 | |||
7cc8fbbccb | |||
da0558711c | |||
90082ca36a | |||
b2ae56e54b | |||
b21b8b5376 | |||
931eaf366c | |||
32dda728cb | |||
4c1f1fce5e | |||
bb58e287b7 | |||
ef58b823c2 | |||
5000c324e1 | |||
87e89cfa27 | |||
ac5d3e3668 | |||
616f8b9a53 | |||
2c5e8e10b5 | |||
17ad64013a | |||
eb2ada2164 | |||
3e3a92c344 | |||
d86f70a458 | |||
436995accc | |||
0310bb9d1a | |||
21f03ba048 | |||
b009395f62 | |||
2110eb9e2c | |||
423ecc2f95 | |||
1a1440f751 | |||
b17501ee1d | |||
87eb5e0dd7 | |||
f5ed60fa38 | |||
eab5cfc688 | |||
05b271214c | |||
455a2e14be | |||
f5154f6961 | |||
4bf5121086 | |||
393b1092e5 | |||
36c30ca646 | |||
8005b172a5 | |||
13f4965278 | |||
05f085aee7 | |||
072fc56050 | |||
5363eba1a3 | |||
a245cc3d48 | |||
51c477fb07 | |||
1446cba537 | |||
0501a035f2 | |||
6e942af974 | |||
89874d57ce | |||
2b497d4653 | |||
cfa4a5379a | |||
a2c2766ff7 | |||
76d3b6c742 | |||
a5248bd54c | |||
cbedc9679f | |||
740b6b3dc9 | |||
0ba87988bc | |||
aa14a8a3a8 | |||
1f55ff7cfa | |||
ba4a0148ff | |||
c177dbd03b | |||
2e8ce6cc00 | |||
7fd1cf73db | |||
cf001cd0ce | |||
438b944011 | |||
679a9457b5 | |||
8b4a1e380c | |||
0468cd6269 | |||
b808397d47 | |||
8fd0136bb7 | |||
479d896599 | |||
263f156eb1 | |||
f1dfc83d7c | |||
5b950a3834 | |||
978f39d45b | |||
9b9ab6b784 | |||
24dca2755a | |||
0d1db2f29f | |||
48c14afd0f | |||
2addda3f06 |
220
.drone.yml
220
.drone.yml
@ -1,220 +0,0 @@
|
||||
kind: pipeline
|
||||
type: kubernetes
|
||||
name: 'Packer Build'
|
||||
|
||||
volumes:
|
||||
- name: output
|
||||
claim:
|
||||
name: flexvolsmb-drone-output
|
||||
- name: scratch
|
||||
claim:
|
||||
name: flexvolsmb-drone-scratch
|
||||
|
||||
steps:
|
||||
- name: Debugging information
|
||||
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||
pull: always
|
||||
commands:
|
||||
- ansible --version
|
||||
- ovftool --version
|
||||
- packer --version
|
||||
- yamllint --version
|
||||
|
||||
- name: Linting
|
||||
depends_on:
|
||||
- Debugging information
|
||||
image: bv11-cr01.bessems.eu/library/packer-extended
|
||||
pull: always
|
||||
commands:
|
||||
- |
|
||||
yamllint -d "{extends: relaxed, rules: {line-length: disable}}" \
|
||||
ansible \
|
||||
packer/preseed/UbuntuServer22.04/user-data \
|
||||
scripts
|
||||
|
||||
- name: Semantic Release (Dry-run)
|
||||
depends_on:
|
||||
- Linting
|
||||
image: bv11-cr01.bessems.eu/proxy/library/node:20-slim
|
||||
pull: always
|
||||
commands:
|
||||
- |
|
||||
apt-get update
|
||||
- |
|
||||
apt-get install -y --no-install-recommends \
|
||||
git-core \
|
||||
ca-certificates
|
||||
- |
|
||||
npm install \
|
||||
semantic-release \
|
||||
@semantic-release/commit-analyzer \
|
||||
@semantic-release/release-notes-generator\
|
||||
@semantic-release/exec
|
||||
- |
|
||||
export GIT_CREDENTIALS=$${GIT_USERNAME}:$${GIT_APIKEY}
|
||||
- |
|
||||
npx semantic-release \
|
||||
--branches ${DRONE_BRANCH} \
|
||||
--plugins @semantic-release/commit-analyzer,@semantic-release/release-notes-generator,@semantic-release/exec \
|
||||
--dry-run
|
||||
environment:
|
||||
GIT_APIKEY:
|
||||
from_secret: git_apikey
|
||||
GIT_USERNAME: djpbessems
|
||||
|
||||
# Add random change for testing semantic release commit
|
||||
|
||||
# - name: Install Ansible Galaxy collections
|
||||
# depends_on:
|
||||
# - Semantic Release (Dry-run)
|
||||
# image: bv11-cr01.bessems.eu/library/packer-extended
|
||||
# pull: always
|
||||
# commands:
|
||||
# - |
|
||||
# ansible-galaxy collection install \
|
||||
# -r ansible/requirements.yml \
|
||||
# -p ./ansible/collections
|
||||
# volumes:
|
||||
# - name: scratch
|
||||
# path: /scratch
|
||||
|
||||
# - name: Kubernetes Bootstrap Appliance
|
||||
# depends_on:
|
||||
# - Install Ansible Galaxy collections
|
||||
# image: bv11-cr01.bessems.eu/library/packer-extended
|
||||
# pull: always
|
||||
# commands:
|
||||
# - |
|
||||
# sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
|
||||
# packer/preseed/UbuntuServer22.04/user-data
|
||||
# - |
|
||||
# export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
|
||||
# - |
|
||||
# packer init -upgrade \
|
||||
# ./packer
|
||||
# - |
|
||||
# packer validate \
|
||||
# -only=vsphere-iso.bootstrap \
|
||||
# -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||
# -var docker_username=$${DOCKER_USERNAME} \
|
||||
# -var docker_password=$${DOCKER_PASSWORD} \
|
||||
# -var repo_username=$${REPO_USERNAME} \
|
||||
# -var repo_password=$${REPO_PASSWORD} \
|
||||
# -var ssh_password=$${SSH_PASSWORD} \
|
||||
# -var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||
# -var k8s_version=$K8S_VERSION \
|
||||
# ./packer
|
||||
# - |
|
||||
# packer build \
|
||||
# -on-error=cleanup -timestamp-ui \
|
||||
# -only=vsphere-iso.bootstrap \
|
||||
# -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||
# -var docker_username=$${DOCKER_USERNAME} \
|
||||
# -var docker_password=$${DOCKER_PASSWORD} \
|
||||
# -var repo_username=$${REPO_USERNAME} \
|
||||
# -var repo_password=$${REPO_PASSWORD} \
|
||||
# -var ssh_password=$${SSH_PASSWORD} \
|
||||
# -var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||
# -var k8s_version=$K8S_VERSION \
|
||||
# ./packer
|
||||
# environment:
|
||||
# DOCKER_USERNAME:
|
||||
# from_secret: docker_username
|
||||
# DOCKER_PASSWORD:
|
||||
# from_secret: docker_password
|
||||
# # PACKER_LOG: 1
|
||||
# REPO_USERNAME:
|
||||
# from_secret: repo_username
|
||||
# REPO_PASSWORD:
|
||||
# from_secret: repo_password
|
||||
# SSH_PASSWORD:
|
||||
# from_secret: ssh_password
|
||||
# VSPHERE_PASSWORD:
|
||||
# from_secret: vsphere_password
|
||||
# volumes:
|
||||
# - name: output
|
||||
# path: /output
|
||||
# - name: scratch
|
||||
# path: /scratch
|
||||
|
||||
# - name: Kubernetes Upgrade Appliance
|
||||
# depends_on:
|
||||
# - Install Ansible Galaxy collections
|
||||
# image: bv11-cr01.bessems.eu/library/packer-extended
|
||||
# pull: always
|
||||
# commands:
|
||||
# - |
|
||||
# sed -i -e "s/<<img-password>>/$${SSH_PASSWORD}/g" \
|
||||
# packer/preseed/UbuntuServer22.04/user-data
|
||||
# - |
|
||||
# export K8S_VERSION=$(yq '.components.clusterapi.workload.version.k8s' < ./ansible/vars/metacluster.yml)
|
||||
# - |
|
||||
# packer init -upgrade \
|
||||
# ./packer
|
||||
# - |
|
||||
# packer validate \
|
||||
# -only=vsphere-iso.upgrade \
|
||||
# -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||
# -var docker_username=$${DOCKER_USERNAME} \
|
||||
# -var docker_password=$${DOCKER_PASSWORD} \
|
||||
# -var repo_username=$${REPO_USERNAME} \
|
||||
# -var repo_password=$${REPO_PASSWORD} \
|
||||
# -var ssh_password=$${SSH_PASSWORD} \
|
||||
# -var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||
# -var k8s_version=$K8S_VERSION \
|
||||
# ./packer
|
||||
# - |
|
||||
# packer build \
|
||||
# -on-error=cleanup -timestamp-ui \
|
||||
# -only=vsphere-iso.upgrade \
|
||||
# -var vm_name=${DRONE_BUILD_NUMBER}-${DRONE_COMMIT_SHA:0:10}-$(openssl rand -hex 3) \
|
||||
# -var docker_username=$${DOCKER_USERNAME} \
|
||||
# -var docker_password=$${DOCKER_PASSWORD} \
|
||||
# -var repo_username=$${REPO_USERNAME} \
|
||||
# -var repo_password=$${REPO_PASSWORD} \
|
||||
# -var ssh_password=$${SSH_PASSWORD} \
|
||||
# -var vsphere_password=$${VSPHERE_PASSWORD} \
|
||||
# -var k8s_version=$K8S_VERSION \
|
||||
# ./packer
|
||||
# environment:
|
||||
# DOCKER_USERNAME:
|
||||
# from_secret: docker_username
|
||||
# DOCKER_PASSWORD:
|
||||
# from_secret: docker_password
|
||||
# # PACKER_LOG: 1
|
||||
# REPO_USERNAME:
|
||||
# from_secret: repo_username
|
||||
# REPO_PASSWORD:
|
||||
# from_secret: repo_password
|
||||
# SSH_PASSWORD:
|
||||
# from_secret: ssh_password
|
||||
# VSPHERE_PASSWORD:
|
||||
# from_secret: vsphere_password
|
||||
# volumes:
|
||||
# - name: output
|
||||
# path: /output
|
||||
# - name: scratch
|
||||
# path: /scratch
|
||||
|
||||
# - name: Remove temporary resources
|
||||
# depends_on:
|
||||
# - Kubernetes Bootstrap Appliance
|
||||
# - Kubernetes Upgrade Appliance
|
||||
# image: bv11-cr01.bessems.eu/library/packer-extended
|
||||
# commands:
|
||||
# - |
|
||||
# pwsh -file scripts/Remove-Resources.ps1 \
|
||||
# -VMName $DRONE_BUILD_NUMBER-${DRONE_COMMIT_SHA:0:10} \
|
||||
# -VSphereFQDN 'bv11-vc.bessems.lan' \
|
||||
# -VSphereUsername 'administrator@vsphere.local' \
|
||||
# -VSpherePassword $${VSPHERE_PASSWORD}
|
||||
# environment:
|
||||
# VSPHERE_PASSWORD:
|
||||
# from_secret: vsphere_password
|
||||
# volumes:
|
||||
# - name: scratch
|
||||
# path: /scratch
|
||||
# when:
|
||||
# status:
|
||||
# - success
|
||||
# - failure
|
145
.gitea/workflows/actions.yaml
Normal file
145
.gitea/workflows/actions.yaml
Normal file
@ -0,0 +1,145 @@
|
||||
name: Container & Helm chart
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
linting:
|
||||
name: Linting
|
||||
runs-on: dind-rootless
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v4
|
||||
- name: yamllint
|
||||
uses: bewuethr/yamllint-action@v1
|
||||
with:
|
||||
config-file: .yamllint.yaml
|
||||
|
||||
semrel_dryrun:
|
||||
name: Semantic Release (Dry-run)
|
||||
runs-on: dind-rootless
|
||||
outputs:
|
||||
version: ${{ steps.sem_rel.outputs.version }}
|
||||
steps:
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 20
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
npm install \
|
||||
semantic-release \
|
||||
@semantic-release/commit-analyzer \
|
||||
@semantic-release/exec
|
||||
- name: Semantic Release (dry-run)
|
||||
id: sem_rel
|
||||
run: |
|
||||
npx semantic-release \
|
||||
--package @semantic-release/exec \
|
||||
--package semantic-release \
|
||||
--branches ${{ gitea.refname }} \
|
||||
--tag-format 'v${version}' \
|
||||
--dry-run \
|
||||
--plugins @semantic-release/commit-analyzer,@semantic-release/exec \
|
||||
--analyzeCommits @semantic-release/commit-analyzer \
|
||||
--verifyRelease @semantic-release/exec \
|
||||
--verifyReleaseCmd 'echo "version=${nextRelease.version}" >> $GITHUB_OUTPUT'
|
||||
env:
|
||||
GIT_CREDENTIALS: ${{ secrets.GIT_USERNAME }}:${{ secrets.GIT_APIKEY }}
|
||||
- name: Assert semantic release output
|
||||
run: |
|
||||
[[ -z "${{ steps.sem_rel.outputs.version }}" ]] && {
|
||||
echo 'No release tag - exiting'; exit 1
|
||||
} || {
|
||||
echo 'Release tag set correctly: ${{ steps.sem_rel.outputs.version }}'; exit 0
|
||||
}
|
||||
|
||||
build_image:
|
||||
name: Kubernetes Bootstrap Appliance
|
||||
container: code.spamasaurus.com/djpbessems/packer-extended:1.3.0
|
||||
runs-on: dind-rootless
|
||||
needs: [semrel_dryrun, linting]
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Parse Kubernetes version
|
||||
uses: mikefarah/yq@master
|
||||
id: get_k8sversion
|
||||
with:
|
||||
cmd: yq '.components.clusterapi.workload.version.k8s' ansible/vars/metacluster.yml
|
||||
- name: Set up packer
|
||||
uses: hashicorp/setup-packer@main
|
||||
id: setup
|
||||
with:
|
||||
version: "latest"
|
||||
- name: Prepare build environment
|
||||
id: init
|
||||
run: |
|
||||
packer init -upgrade ./packer
|
||||
|
||||
ansible-galaxy collection install \
|
||||
-r ansible/requirements.yml \
|
||||
-p ./ansible/collections
|
||||
|
||||
echo "BUILD_COMMIT=$(echo ${{ gitea.sha }} | cut -c 1-10)" >> $GITHUB_ENV
|
||||
echo "BUILD_SUFFIX=$(openssl rand -hex 3)" >> $GITHUB_ENV
|
||||
- name: Validate packer template files
|
||||
id: validate
|
||||
run: |
|
||||
packer validate \
|
||||
-only=vsphere-iso.bootstrap \
|
||||
-var vm_name=${{ gitea.run_number }}-${BUILD_COMMIT}-${BUILD_SUFFIX} \
|
||||
-var docker_username=${{ secrets.DOCKER_USERNAME }} \
|
||||
-var docker_password=${{ secrets.DOCKER_PASSWORD }} \
|
||||
-var repo_username=${{ secrets.REPO_USERNAME }} \
|
||||
-var repo_password=${{ secrets.REPO_PASSWORD }} \
|
||||
-var ssh_password=${{ secrets.SSH_PASSWORD }} \
|
||||
-var hv_password=${{ secrets.HV_PASSWORD }} \
|
||||
-var k8s_version=${{ steps.get_k8sversion.outputs.result }} \
|
||||
-var appliance_version=${{ needs.semrel_dryrun.outputs.version }} \
|
||||
./packer
|
||||
- name: Build packer template
|
||||
run: |
|
||||
packer build \
|
||||
-on-error=cleanup -timestamp-ui \
|
||||
-only=vsphere-iso.bootstrap \
|
||||
-var vm_name=${{ gitea.run_number }}-${BUILD_COMMIT}-${BUILD_SUFFIX} \
|
||||
-var docker_username=${{ secrets.DOCKER_USERNAME }} \
|
||||
-var docker_password=${{ secrets.DOCKER_PASSWORD }} \
|
||||
-var repo_username=${{ secrets.REPO_USERNAME }} \
|
||||
-var repo_password=${{ secrets.REPO_PASSWORD }} \
|
||||
-var ssh_password=${{ secrets.SSH_PASSWORD }} \
|
||||
-var hv_password=${{ secrets.HV_PASSWORD }} \
|
||||
-var k8s_version=${{ steps.get_k8sversion.outputs.result }} \
|
||||
-var appliance_version=${{ needs.semrel_dryrun.outputs.version }} \
|
||||
./packer
|
||||
# env:
|
||||
# PACKER_LOG: 1
|
||||
|
||||
# semrel:
|
||||
# name: Semantic Release
|
||||
# runs-on: dind-rootless
|
||||
# needs: [build_container, build_chart]
|
||||
# steps:
|
||||
# - name: Check out repository code
|
||||
# uses: actions/checkout@v3
|
||||
# - name: Setup Node
|
||||
# uses: actions/setup-node@v3
|
||||
# with:
|
||||
# node-version: 20
|
||||
# - name: Install dependencies
|
||||
# run: |
|
||||
# npm install \
|
||||
# semantic-release \
|
||||
# @semantic-release/changelog \
|
||||
# @semantic-release/commit-analyzer \
|
||||
# @semantic-release/git \
|
||||
# @semantic-release/release-notes-generator
|
||||
# - name: Semantic Release
|
||||
# run: |
|
||||
# npx semantic-release \
|
||||
# --branches ${{ gitea.refname }} \
|
||||
# --tag-format 'v${version}' \
|
||||
# --plugins @semantic-release/commit-analyzer,@semantic-release/release-notes-generator,@semantic-release/changelog,@semantic-release/git
|
||||
# env:
|
||||
# GIT_CREDENTIALS: ${{ secrets.GIT_USERNAME }}:${{ secrets.GIT_APIKEY }}
|
4
.gitignore
vendored
Normal file
4
.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
**/hv.vcenter.yaml
|
||||
**/ova.bootstrap.yaml
|
||||
**/pb.secrets.yaml
|
||||
**/pwdfile
|
4
.yamllint.yaml
Normal file
4
.yamllint.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
extends: relaxed
|
||||
|
||||
rules:
|
||||
line-length: disable
|
@ -1,4 +1,4 @@
|
||||
- name: Parse manifests for container images
|
||||
- name: Parse Cluster-API manifests for container images
|
||||
ansible.builtin.shell:
|
||||
# This set of commands is necessary to deal with multi-line scalar values
|
||||
# eg.:
|
||||
@ -9,11 +9,17 @@
|
||||
cat {{ item.dest }} | yq --no-doc eval '.. | .image? | select(.)' | awk '!/ /';
|
||||
cat {{ item.dest }} | yq eval '.data.data' | yq --no-doc eval '.. | .image? | select(.)';
|
||||
cat {{ item.dest }} | yq --no-doc eval '.. | .files? | with_entries(select(.value.path == "*.yaml")).[0].content' | awk '!/null/' | yq eval '.. | .image? | select(.)'
|
||||
register: parsedmanifests
|
||||
register: clusterapi_parsedmanifests
|
||||
loop: "{{ clusterapi_manifests.results }}"
|
||||
loop_control:
|
||||
label: "{{ item.dest | basename }}"
|
||||
|
||||
- name: Parse pinniped manifest for container images
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
cat {{ pinniped_manifest.dest }} | yq --no-doc eval '.. | .image? | select(.)' | awk '!/ /';
|
||||
register: pinniped_parsedmanifest
|
||||
|
||||
- name: Parse metacluster helm charts for container images
|
||||
ansible.builtin.shell:
|
||||
cmd: "{{ item.value.helm.parse_logic }}"
|
||||
@ -41,8 +47,10 @@
|
||||
results: "{{ (chartimages_metacluster | json_query('results[*].stdout_lines')) + (chartimages_workloadcluster | json_query('results[*].stdout_lines')) | select() | flatten | list }}"
|
||||
- source: kubeadm
|
||||
results: "{{ kubeadmimages.stdout_lines }}"
|
||||
- source: manifests
|
||||
results: "{{ parsedmanifests | json_query('results[*].stdout_lines') | select() | flatten | list }}"
|
||||
- source: clusterapi
|
||||
results: "{{ clusterapi_parsedmanifests | json_query('results[*].stdout_lines') | select() | flatten | list }}"
|
||||
- source: pinniped
|
||||
results: "{{ pinniped_parsedmanifest.stdout_lines }}"
|
||||
loop_control:
|
||||
label: "{{ item.source }}"
|
||||
|
||||
@ -64,4 +72,4 @@
|
||||
docker://{{ item }} \
|
||||
docker-archive:./{{ ( item | regex_findall('[^/:]+'))[-2] }}_{{ lookup('ansible.builtin.password', '/dev/null length=5 chars=ascii_lowercase,digits seed={{ item }}') }}.tar:{{ item }}
|
||||
chdir: /opt/metacluster/container-images
|
||||
loop: "{{ (containerimages_charts + containerimages_kubeadm + containerimages_manifests + dependencies.container_images) | flatten | unique | sort }}"
|
||||
loop: "{{ (containerimages_charts + containerimages_kubeadm + containerimages_clusterapi + containerimages_pinniped + dependencies.container_images) | flatten | unique | sort }}"
|
||||
|
@ -16,8 +16,7 @@
|
||||
- /opt/metacluster/helm-charts
|
||||
- /opt/metacluster/k3s
|
||||
- /opt/metacluster/kube-vip
|
||||
- /opt/workloadcluster/git-repositories/gitops/charts
|
||||
- /opt/workloadcluster/git-repositories/gitops/values
|
||||
- /opt/metacluster/pinniped
|
||||
- /opt/workloadcluster/helm-charts
|
||||
- /opt/workloadcluster/node-templates
|
||||
- /var/lib/rancher/k3s/agent/images
|
||||
@ -30,4 +29,3 @@
|
||||
- import_tasks: manifests.yml
|
||||
- import_tasks: kubeadm.yml
|
||||
- import_tasks: containerimages.yml
|
||||
- import_tasks: nodetemplates.yml
|
||||
|
@ -1,6 +1,6 @@
|
||||
- block:
|
||||
|
||||
- name: Aggregate chart_values into dict
|
||||
- name: Aggregate meta-cluster chart_values into dict
|
||||
ansible.builtin.set_fact:
|
||||
metacluster_chartvalues: "{{ metacluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.helm.chart_values | from_yaml) } }) }}"
|
||||
when: item.value.helm.chart_values is defined
|
||||
@ -8,22 +8,34 @@
|
||||
loop_control:
|
||||
label: "{{ item.key }}"
|
||||
|
||||
- name: Write dict to vars_file
|
||||
- name: Combine and write dict to vars_file
|
||||
ansible.builtin.copy:
|
||||
dest: /opt/firstboot/ansible/vars/metacluster.yml
|
||||
content: >-
|
||||
{{
|
||||
{ 'components': (
|
||||
metacluster_chartvalues |
|
||||
combine({ 'clusterapi': components.clusterapi }) |
|
||||
combine({ 'kubevip' : components.kubevip }) )
|
||||
combine({ 'clusterapi' : components['clusterapi'] }) |
|
||||
combine({ 'kubevip' : components['kubevip'] }) |
|
||||
combine({ 'localuserauthenticator': components['pinniped']['local-user-authenticator'] })),
|
||||
'appliance': {
|
||||
'version': (applianceversion)
|
||||
}
|
||||
} | to_nice_yaml(indent=2, width=4096)
|
||||
}}
|
||||
|
||||
- name: Aggregate chart_values into dict
|
||||
- name: Aggregate workload-cluster chart_values into dict
|
||||
ansible.builtin.set_fact:
|
||||
workloadcluster_chartvalues: "{{ workloadcluster_chartvalues | default({}) | combine({ item.key: { 'chart_values': (item.value.chart_values | default('') | from_yaml) } }) }}"
|
||||
# when: item.value.chart_values is defined
|
||||
workloadcluster_chartvalues: |
|
||||
{{
|
||||
workloadcluster_chartvalues | default({}) | combine({
|
||||
item.key: {
|
||||
'chart_values': (item.value.chart_values | default('') | from_yaml),
|
||||
'extra_manifests': (item.value.extra_manifests | default([])),
|
||||
'namespace': (item.value.namespace)
|
||||
}
|
||||
})
|
||||
}}
|
||||
loop: "{{ query('ansible.builtin.dict', downstream.helm_charts) }}"
|
||||
loop_control:
|
||||
label: "{{ item.key }}"
|
||||
@ -37,7 +49,7 @@
|
||||
} | to_nice_yaml(indent=2, width=4096)
|
||||
}}
|
||||
|
||||
- name: Download ClusterAPI manifests
|
||||
- name: Download Cluster-API manifests
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ item.url }}"
|
||||
dest: /opt/metacluster/cluster-api/{{ item.dest }}
|
||||
@ -97,6 +109,22 @@
|
||||
delay: 5
|
||||
until: kubevip_manifest is not failed
|
||||
|
||||
- name: Download pinniped local-user-authenticator manifest
|
||||
ansible.builtin.get_url:
|
||||
url: https://get.pinniped.dev/{{ components.pinniped['local-user-authenticator'].version }}/install-local-user-authenticator.yaml
|
||||
dest: /opt/metacluster/pinniped/local-user-authenticator.yaml
|
||||
register: pinniped_manifest
|
||||
retries: 5
|
||||
delay: 5
|
||||
until: pinniped_manifest is not failed
|
||||
|
||||
- name: Trim image hash from manifest
|
||||
ansible.builtin.replace:
|
||||
path: /opt/metacluster/pinniped/local-user-authenticator.yaml
|
||||
regexp: '([ ]*image: .*)@.*'
|
||||
replace: '\1'
|
||||
no_log: true
|
||||
|
||||
# - name: Inject manifests
|
||||
# ansible.builtin.template:
|
||||
# src: "{{ item.type }}.j2"
|
||||
|
@ -1,4 +0,0 @@
|
||||
- name: Download node-template image
|
||||
ansible.builtin.uri:
|
||||
url: "{{ components.clusterapi.workload.node_template.url }}"
|
||||
dest: /opt/workloadcluster/node-templates/{{ components.clusterapi.workload.node_template.url | basename}}
|
@ -2,6 +2,9 @@
|
||||
- hosts: 127.0.0.1
|
||||
connection: local
|
||||
gather_facts: true
|
||||
vars:
|
||||
# Needed by some templating in various tasks
|
||||
_newline: "\n"
|
||||
vars_files:
|
||||
- defaults.yml
|
||||
- metacluster.yml
|
||||
|
@ -0,0 +1,176 @@
|
||||
- block:
|
||||
|
||||
- name: Install dex
|
||||
kubernetes.core.helm:
|
||||
name: dex
|
||||
chart_ref: /opt/metacluster/helm-charts/dex
|
||||
release_namespace: dex
|
||||
create_namespace: true
|
||||
wait: false
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
values: "{{ components['dex'].chart_values }}"
|
||||
|
||||
- block:
|
||||
|
||||
- name: Install pinniped local-user-authenticator
|
||||
kubernetes.core.k8s:
|
||||
src: /opt/metacluster/pinniped/local-user-authenticator.yaml
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
|
||||
- name: Create local-user-authenticator accounts
|
||||
kubernetes.core.k8s:
|
||||
template: secret.j2
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
vars:
|
||||
_template:
|
||||
name: "{{ item.username }}"
|
||||
namespace: local-user-authenticator
|
||||
type: ''
|
||||
data:
|
||||
- key: groups
|
||||
value: "{{ 'group1,group2' | b64encode }}"
|
||||
- key: passwordHash
|
||||
value: "{{ item.password | b64encode }}"
|
||||
loop: "{{ components['localuserauthenticator'].users }}"
|
||||
|
||||
- block:
|
||||
|
||||
- name: Install pinniped chart
|
||||
kubernetes.core.helm:
|
||||
name: pinniped
|
||||
chart_ref: /opt/metacluster/helm-charts/pinniped
|
||||
release_namespace: pinniped-supervisor
|
||||
create_namespace: true
|
||||
wait: false
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
values: "{{ components['pinniped'].chart_values }}"
|
||||
|
||||
- name: Add ingress for supervisor
|
||||
kubernetes.core.k8s:
|
||||
template: "{{ item.kind }}.j2"
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
vars:
|
||||
_template:
|
||||
name: "{{ item.name }}"
|
||||
namespace: "{{ item.namespace }}"
|
||||
spec: "{{ item.spec }}"
|
||||
loop:
|
||||
- kind: ingressroute
|
||||
name: pinniped-supervisor
|
||||
namespace: pinniped-supervisor
|
||||
spec: |2
|
||||
entryPoints:
|
||||
- web
|
||||
- websecure
|
||||
routes:
|
||||
- kind: Rule
|
||||
match: Host(`auth.{{ vapp['metacluster.fqdn'] }}`)
|
||||
services:
|
||||
- kind: Service
|
||||
name: pinniped-supervisor
|
||||
namespace: pinniped-supervisor
|
||||
port: 443
|
||||
scheme: https
|
||||
serversTransport: pinniped-supervisor
|
||||
- kind: serverstransport
|
||||
name: pinniped-supervisor
|
||||
namespace: pinniped-supervisor
|
||||
spec: |2
|
||||
insecureSkipVerify: true
|
||||
serverName: auth.{{ vapp['metacluster.fqdn'] }}
|
||||
loop_control:
|
||||
label: "{{ item.kind ~ '/' ~ item.name ~ ' (' ~ item.namespace ~ ')' }}"
|
||||
|
||||
- name: Ensure pinniped API availability
|
||||
ansible.builtin.uri:
|
||||
url: https://auth.{{ vapp['metacluster.fqdn'] }}/healthz
|
||||
method: GET
|
||||
register: api_readycheck
|
||||
until:
|
||||
- api_readycheck.status == 200
|
||||
- api_readycheck.msg is search("OK")
|
||||
retries: "{{ playbook.retries }}"
|
||||
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||
|
||||
# TODO: Migrate to step-ca
|
||||
- name: Initialize tempfile
|
||||
ansible.builtin.tempfile:
|
||||
state: directory
|
||||
register: certificate
|
||||
|
||||
- name: Create private key (RSA, 4096 bits)
|
||||
community.crypto.openssl_privatekey:
|
||||
path: "{{ certificate.path }}/certificate.key"
|
||||
|
||||
- name: Create self-signed certificate
|
||||
community.crypto.x509_certificate:
|
||||
path: "{{ certificate.path }}/certificate.crt"
|
||||
privatekey_path: "{{ certificate.path }}/certificate.key"
|
||||
provider: selfsigned
|
||||
|
||||
- name: Store self-signed certificate for use by pinniped supervisor
|
||||
kubernetes.core.k8s:
|
||||
template: secret.j2
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
vars:
|
||||
_template:
|
||||
name: pinniped-supervisor-tls
|
||||
namespace: pinniped-supervisor
|
||||
type: kubernetes.io/tls
|
||||
data:
|
||||
- key: tls.crt
|
||||
value: "{{ lookup('ansible.builtin.file', certificate.path ~ '/certificate.crt') | b64encode }}"
|
||||
- key: tls.key
|
||||
value: "{{ lookup('ansible.builtin.file', certificate.path ~ '/certificate.key') | b64encode }}"
|
||||
# TODO: Migrate to step-ca
|
||||
|
||||
- name: Create pinniped resources
|
||||
kubernetes.core.k8s:
|
||||
template: "{{ item.kind }}.j2"
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
vars:
|
||||
_template:
|
||||
name: "{{ item.name }}"
|
||||
namespace: "{{ item.namespace }}"
|
||||
type: "{{ item.type | default('') }}"
|
||||
data: "{{ item.data | default(omit) }}"
|
||||
spec: "{{ item.spec | default(omit) }}"
|
||||
loop:
|
||||
- kind: oidcidentityprovider
|
||||
name: dex-staticpasswords
|
||||
namespace: pinniped-supervisor
|
||||
spec: |2
|
||||
issuer: https://idps.{{ vapp['metacluster.fqdn'] }}
|
||||
tls:
|
||||
certificateAuthorityData: "{{ (stepca_cm_certs.resources[0].data['intermediate_ca.crt'] ~ _newline ~ stepca_cm_certs.resources[0].data['root_ca.crt']) | b64encode }}"
|
||||
authorizationConfig:
|
||||
additionalScopes: [offline_access, groups, email]
|
||||
allowPasswordGrant: false
|
||||
claims:
|
||||
username: email
|
||||
groups: groups
|
||||
client:
|
||||
secretName: dex-clientcredentials
|
||||
- kind: secret
|
||||
name: dex-clientcredentials
|
||||
namespace: pinniped-supervisor
|
||||
type: secrets.pinniped.dev/oidc-client
|
||||
data:
|
||||
- key: clientID
|
||||
value: "{{ 'pinniped-supervisor' | b64encode }}"
|
||||
- key: clientSecret
|
||||
value: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['metacluster.fqdn']) | b64encode }}"
|
||||
- kind: federationdomain
|
||||
name: metacluster-sso
|
||||
namespace: pinniped-supervisor
|
||||
spec: |2
|
||||
issuer: https://auth.{{ vapp['metacluster.fqdn'] }}/sso
|
||||
tls:
|
||||
secretName: pinniped-supervisor-tls
|
||||
loop_control:
|
||||
label: "{{ item.kind ~ '/' ~ item.name }}"
|
@ -1,15 +1,40 @@
|
||||
- block:
|
||||
|
||||
- name: Initialize tempfile
|
||||
ansible.builtin.tempfile:
|
||||
state: file
|
||||
register: values_file
|
||||
- name: Import generated values file into dictionary and combine with custom values
|
||||
ansible.builtin.set_fact:
|
||||
values_initial: |
|
||||
{{
|
||||
lookup('ansible.builtin.file', stepconfig.path) | from_yaml |
|
||||
combine( components['step-certificates'].chart_values | from_yaml, recursive=True, list_merge='append')
|
||||
}}
|
||||
|
||||
- name: Write chart values w/ password to tempfile
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ values_file.path }}"
|
||||
content: "{{ stepca_values.stdout | regex_replace('(ca_password|provisioner_password): ', '\\1: ' ~ (vapp['metacluster.password'] | b64encode)) }}"
|
||||
no_log: true
|
||||
- name: Duplicate default provisioner with modified claims
|
||||
ansible.builtin.set_fact:
|
||||
values_new: |
|
||||
{{
|
||||
values_initial |
|
||||
combine({'inject':{'config':{'files':{'ca.json':{'authority': {'provisioners': [
|
||||
values_initial.inject.config.files['ca.json'].authority.provisioners[0] | combine({'name':'long-lived', 'claims':{'maxTLSCertDuration':'87660h'}})
|
||||
]}}}}}}, list_merge='append_rp', recursive=true)
|
||||
}}
|
||||
|
||||
# We're facing several bugs or niche cases that result in incorrect output, despite being behaviour by design:
|
||||
# - Ansible's `to_yaml` filter, sees `\n` escape sequences in PEM certificate strings and correctly converts them to actual newlines - without any way to prevent this
|
||||
# So we cannot rely on Ansible to (re)create the helm chart values file
|
||||
# - Python's yaml interpreter sees strings with a value of `y` as short for `yes` or `true`, even when that string is a key name.
|
||||
# So we cannot use a straightforward yaml document as input for the Ansible helm module (which is written in Python)
|
||||
#
|
||||
# Lets explain the following workaround steps:
|
||||
# - First we convert the dictionary to a json-object (through Ansible), so that yq can read it
|
||||
# - Second we convert the json-object in its entirety to yaml (through yq), so that yq can actually manipulate it.
|
||||
# - Finally, we take one specific subkey's contents (list of dictionaries) and iterate over each with the following steps (with `map`):
|
||||
# - Convert the dictionary to json with `tojson`
|
||||
# - Remove newlines (and spaces) with `sub`
|
||||
# - Remove outer quotes (') with `sed`
|
||||
- name: Save updated values file
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
echo '{{ values_new | to_nice_json }}' | yq -p json -o yaml | yq e '.inject.config.files["ca.json"].authority.provisioners |= map(tojson | sub("[\n ]";""))' | sed -e "s/- '/- /;s/'$//" > {{ stepconfig.path }}
|
||||
|
||||
- name: Install step-ca chart
|
||||
kubernetes.core.helm:
|
||||
@ -21,13 +46,7 @@
|
||||
wait: true
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
values_files:
|
||||
- "{{ values_file.path }}"
|
||||
|
||||
- name: Cleanup tempfile
|
||||
ansible.builtin.file:
|
||||
path: "{{ values_file.path }}"
|
||||
state: absent
|
||||
when: values_file.path is defined
|
||||
- "{{ stepconfig.path }}"
|
||||
|
||||
- name: Retrieve configmap w/ root certificate
|
||||
kubernetes.core.k8s_info:
|
||||
@ -45,6 +64,7 @@
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
loop:
|
||||
- argo-cd
|
||||
- gitea
|
||||
# - kube-system
|
||||
|
||||
- name: Store root certificate in namespaced configmaps/secrets
|
||||
@ -58,6 +78,7 @@
|
||||
namespace: "{{ item.namespace }}"
|
||||
annotations: "{{ item.annotations | default('{}') | indent(width=4, first=True) }}"
|
||||
labels: "{{ item.labels | default('{}') | indent(width=4, first=True) }}"
|
||||
type: "{{ item.type | default('') }}"
|
||||
data: "{{ item.data }}"
|
||||
loop:
|
||||
- name: argocd-tls-certs-cm
|
||||
@ -73,6 +94,12 @@
|
||||
data:
|
||||
- key: git.{{ vapp['metacluster.fqdn'] }}
|
||||
value: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
|
||||
- name: step-certificates-certs
|
||||
namespace: gitea
|
||||
kind: secret
|
||||
data:
|
||||
- key: ca_chain.crt
|
||||
value: "{{ (stepca_cm_certs.resources[0].data['intermediate_ca.crt'] ~ _newline ~ stepca_cm_certs.resources[0].data['root_ca.crt']) | b64encode }}"
|
||||
- name: step-certificates-certs
|
||||
namespace: kube-system
|
||||
kind: secret
|
||||
@ -93,7 +120,7 @@
|
||||
_template:
|
||||
name: step-ca
|
||||
namespace: step-ca
|
||||
config: |2
|
||||
spec: |2
|
||||
entryPoints:
|
||||
- websecure
|
||||
routes:
|
||||
|
@ -32,7 +32,7 @@
|
||||
_template:
|
||||
name: gitea-ssh
|
||||
namespace: gitea
|
||||
config: |2
|
||||
spec: |2
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
@ -55,6 +55,7 @@
|
||||
force_basic_auth: yes
|
||||
body:
|
||||
name: token_init_{{ lookup('password', '/dev/null length=5 chars=ascii_letters,digits') }}
|
||||
scopes: ["write:user","write:organization"]
|
||||
register: gitea_api_token
|
||||
|
||||
- name: Retrieve existing gitea configuration
|
||||
@ -110,8 +111,8 @@
|
||||
- organization: mc
|
||||
body:
|
||||
name: GitOps.ClusterAPI
|
||||
# auto_init: true
|
||||
# default_branch: main
|
||||
auto_init: true
|
||||
default_branch: main
|
||||
description: ClusterAPI manifests
|
||||
- organization: mc
|
||||
body:
|
||||
@ -122,15 +123,15 @@
|
||||
- organization: wl
|
||||
body:
|
||||
name: GitOps.Config
|
||||
# auto_init: true
|
||||
# default_branch: main
|
||||
auto_init: true
|
||||
default_branch: main
|
||||
description: GitOps manifests
|
||||
- organization: wl
|
||||
body:
|
||||
name: GitOps.HelmCharts
|
||||
# auto_init: true
|
||||
# default_branch: main
|
||||
description: Helm charts
|
||||
name: ClusterAccess.Store
|
||||
auto_init: true
|
||||
default_branch: main
|
||||
description: Kubeconfig files
|
||||
loop_control:
|
||||
label: "{{ item.organization ~ '/' ~ item.body.name }}"
|
||||
|
||||
|
@ -6,7 +6,11 @@
|
||||
initContainers:
|
||||
- name: volume-permissions
|
||||
image: busybox:1
|
||||
command: ["sh", "-c", "touch /data/acme.json && chmod -Rv 600 /data/* && chown 65532:65532 /data/acme.json"]
|
||||
command: ["sh", "-c", "touch /data/acme.json; chown 65532 /data/acme.json; chmod -v 600 /data/acme.json"]
|
||||
securityContext:
|
||||
runAsNonRoot: false
|
||||
runAsGroup: 0
|
||||
runAsUser: 0
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
@ -27,7 +31,7 @@
|
||||
_template:
|
||||
name: traefik-dashboard
|
||||
namespace: kube-system
|
||||
config: |2
|
||||
spec: |2
|
||||
entryPoints:
|
||||
- web
|
||||
- websecure
|
||||
|
@ -12,6 +12,15 @@
|
||||
- registry
|
||||
- storage
|
||||
|
||||
- name: Create step-ca config dictionary
|
||||
ansible.builtin.set_fact:
|
||||
stepconfig: "{{ { 'path': ansible_env.HOME ~ '/.step/config/values.yaml' } }}"
|
||||
|
||||
- name: Create step-ca target folder
|
||||
ansible.builtin.file:
|
||||
path: "{{ stepconfig.path | dirname }}"
|
||||
state: directory
|
||||
|
||||
- name: Initialize tempfile
|
||||
ansible.builtin.tempfile:
|
||||
state: file
|
||||
@ -36,8 +45,8 @@
|
||||
--address=:9000 \
|
||||
--provisioner=admin \
|
||||
--acme \
|
||||
--password-file={{ stepca_password.path }}
|
||||
register: stepca_values
|
||||
--password-file={{ stepca_password.path }} | tee {{ stepconfig.path }}
|
||||
creates: "{{ stepconfig.path }}"
|
||||
|
||||
- name: Cleanup tempfile
|
||||
ansible.builtin.file:
|
||||
@ -48,12 +57,20 @@
|
||||
- name: Store root CA certificate
|
||||
ansible.builtin.copy:
|
||||
dest: /usr/local/share/ca-certificates/root_ca.crt
|
||||
content: "{{ (stepca_values.stdout | from_yaml).inject.certificates.root_ca }}"
|
||||
content: "{{ (lookup('ansible.builtin.file', stepconfig.path) | from_yaml).inject.certificates.root_ca }}"
|
||||
|
||||
- name: Update certificate truststore
|
||||
ansible.builtin.command:
|
||||
cmd: update-ca-certificates
|
||||
|
||||
- name: Extract container images (for idempotency purposes)
|
||||
ansible.builtin.unarchive:
|
||||
src: /opt/metacluster/container-images/image-tarballs.tgz
|
||||
dest: /opt/metacluster/container-images
|
||||
remote_src: no
|
||||
when:
|
||||
- lookup('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tgz') is match('.*image-tarballs.tgz')
|
||||
|
||||
- name: Get all stored fully qualified container image names
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
|
@ -42,19 +42,30 @@
|
||||
retries: "{{ playbook.retries }}"
|
||||
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
|
||||
|
||||
- name: Install kubectl tab-completion
|
||||
- name: Install tab-completion
|
||||
ansible.builtin.shell:
|
||||
cmd: kubectl completion bash | tee /etc/bash_completion.d/kubectl
|
||||
cmd: |-
|
||||
{{ item }} completion bash > /etc/bash_completion.d/{{ item }}
|
||||
creates: /etc/bash_completion.d/{{ item }}
|
||||
loop:
|
||||
- kubectl
|
||||
- helm
|
||||
- step
|
||||
|
||||
- name: Initialize tempfile
|
||||
ansible.builtin.tempfile:
|
||||
state: file
|
||||
register: kubeconfig
|
||||
- name: Create kubeconfig dictionary
|
||||
ansible.builtin.set_fact:
|
||||
kubeconfig: "{{ { 'path': ansible_env.HOME ~ '/.kube/config' } }}"
|
||||
|
||||
- name: Create kubeconfig target folder
|
||||
ansible.builtin.file:
|
||||
path: "{{ kubeconfig.path | dirname }}"
|
||||
state: directory
|
||||
|
||||
- name: Retrieve kubeconfig
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl config view --raw
|
||||
register: kubectl_config
|
||||
no_log: true
|
||||
|
||||
- name: Store kubeconfig in tempfile
|
||||
ansible.builtin.copy:
|
||||
|
@ -1,10 +1,13 @@
|
||||
- import_tasks: init.yml
|
||||
- import_tasks: k3s.yml
|
||||
- import_tasks: assets.yml
|
||||
- import_tasks: kube-vip.yml
|
||||
- import_tasks: workflow.yml
|
||||
- import_tasks: virtualip.yml
|
||||
- import_tasks: metadata.yml
|
||||
- import_tasks: storage.yml
|
||||
- import_tasks: ingress.yml
|
||||
- import_tasks: certauthority.yml
|
||||
- import_tasks: registry.yml
|
||||
- import_tasks: git.yml
|
||||
- import_tasks: gitops.yml
|
||||
- import_tasks: authentication.yml
|
||||
|
@ -0,0 +1,57 @@
|
||||
- block:
|
||||
- name: Aggregate manifest-component versions into dictionary
|
||||
ansible.builtin.set_fact:
|
||||
manifest_versions: "{{ manifest_versions | default([]) + [ item | combine( {'type': 'manifest', 'id': index } ) ] }}"
|
||||
loop:
|
||||
- name: cluster-api
|
||||
versions:
|
||||
management:
|
||||
base: "{{ components.clusterapi.management.version.base }}"
|
||||
cert_manager: "{{ components.clusterapi.management.version.cert_manager }}"
|
||||
infrastructure_vsphere: "{{ components.clusterapi.management.version.infrastructure_vsphere }}"
|
||||
ipam_incluster: "{{ components.clusterapi.management.version.ipam_incluster }}"
|
||||
cpi_vsphere: "{{ components.clusterapi.management.version.cpi_vsphere }}"
|
||||
workload:
|
||||
calico: "{{ components.clusterapi.workload.version.calico }}"
|
||||
k8s: "{{ components.clusterapi.workload.version.k8s }}"
|
||||
- name: kube-vip
|
||||
version: "{{ components.kubevip.version }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
index_var: index
|
||||
|
||||
- name: Install json-server chart
|
||||
kubernetes.core.helm:
|
||||
name: json-server
|
||||
chart_ref: /opt/metacluster/helm-charts/json-server
|
||||
release_namespace: json-server
|
||||
create_namespace: true
|
||||
wait: false
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
values: |
|
||||
{{
|
||||
components['json-server'].chart_values |
|
||||
combine(
|
||||
{ 'jsonServer': { 'seedData': { 'configInline': (
|
||||
{ 'appliance': { "version": appliance.version }, 'components': manifest_versions, 'healthz': { 'status': 'running' } }
|
||||
) | to_json } } }
|
||||
)
|
||||
}}
|
||||
|
||||
- name: Ensure json-server API availability
|
||||
ansible.builtin.uri:
|
||||
url: https://version.{{ vapp['metacluster.fqdn'] }}/healthz
|
||||
method: GET
|
||||
# This mock REST API -ironically- does not support json encoded body argument
|
||||
body_format: raw
|
||||
register: api_readycheck
|
||||
until:
|
||||
- api_readycheck.json.status is defined
|
||||
- api_readycheck.json.status == 'running'
|
||||
retries: "{{ playbook.retries }}"
|
||||
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||
|
||||
module_defaults:
|
||||
ansible.builtin.uri:
|
||||
validate_certs: no
|
||||
status_code: [200, 201]
|
@ -0,0 +1,54 @@
|
||||
- block:
|
||||
|
||||
- name: Create target namespace(s)
|
||||
kubernetes.core.k8s:
|
||||
name: "{{ item }}"
|
||||
kind: Namespace
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
loop:
|
||||
# - argo-workflows
|
||||
- firstboot
|
||||
|
||||
- name: Create ClusterRoleBinding for default serviceaccount
|
||||
kubernetes.core.k8s:
|
||||
state: present
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
definition: |
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: argo-workflows-firstboot-clusteradmin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: firstboot
|
||||
|
||||
- name: Install argo-workflows chart
|
||||
kubernetes.core.helm:
|
||||
name: argo-workflows
|
||||
chart_ref: /opt/metacluster/helm-charts/argo-workflows
|
||||
release_namespace: argo-workflows
|
||||
create_namespace: true
|
||||
wait: false
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
values: "{{ components['argo-workflows'].chart_values }}"
|
||||
|
||||
- name: Ensure argo workflows API availability
|
||||
ansible.builtin.uri:
|
||||
url: https://workflow.{{ vapp['metacluster.fqdn'] }}/api/v1/version
|
||||
method: GET
|
||||
register: api_readycheck
|
||||
until:
|
||||
- api_readycheck.json.version is defined
|
||||
retries: "{{ playbook.retries }}"
|
||||
delay: "{{ (storage_benchmark | int) * (playbook.delay.long | int) }}"
|
||||
|
||||
module_defaults:
|
||||
ansible.builtin.uri:
|
||||
validate_certs: no
|
||||
status_code: [200, 201]
|
||||
body_format: json
|
@ -0,0 +1,40 @@
|
||||
- name: Initialize tempfolder
|
||||
ansible.builtin.tempfile:
|
||||
state: directory
|
||||
register: pinniped_kubeconfig
|
||||
|
||||
- name: Pull existing repository
|
||||
ansible.builtin.git:
|
||||
repo: https://git.{{ vapp['metacluster.fqdn'] }}/wl/ClusterAccess.Store.git
|
||||
dest: "{{ pinniped_kubeconfig.path }}"
|
||||
version: main
|
||||
|
||||
- name: Generate kubeconfig
|
||||
ansible.builtin.shell:
|
||||
cmd: pinniped get kubeconfig --kubeconfig {{ capi_kubeconfig.path }}
|
||||
register: pinniped_config
|
||||
until:
|
||||
- pinniped_config is not failed
|
||||
retries: "{{ playbook.retries }}"
|
||||
delay: "{{ ((storage_benchmark | float) * playbook.delay.short) | int }}"
|
||||
|
||||
- name: Store kubeconfig in tempfile
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ pinniped_kubeconfig.path }}/kubeconfig"
|
||||
content: "{{ pinniped_config.stdout }}"
|
||||
mode: 0600
|
||||
no_log: true
|
||||
|
||||
- name: Push git repository
|
||||
lvrfrc87.git_acp.git_acp:
|
||||
path: "{{ pinniped_kubeconfig.path }}"
|
||||
branch: main
|
||||
comment: "Upload kubeconfig files"
|
||||
add:
|
||||
- .
|
||||
url: https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/ClusterAccess.Store.git
|
||||
environment:
|
||||
GIT_AUTHOR_NAME: administrator
|
||||
GIT_AUTHOR_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||
GIT_COMMITTER_NAME: administrator
|
||||
GIT_COMMITTER_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
@ -85,6 +85,40 @@
|
||||
--kubeconfig {{ kubeconfig.path }}
|
||||
chdir: /opt/metacluster/cluster-api
|
||||
|
||||
- name: Initialize tempfolder
|
||||
ansible.builtin.tempfile:
|
||||
state: directory
|
||||
register: capi_clustermanifest
|
||||
|
||||
- name: Pull existing repository
|
||||
ansible.builtin.git:
|
||||
repo: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||
dest: "{{ capi_clustermanifest.path }}"
|
||||
version: main
|
||||
|
||||
- name: Generate Cluster API provider manifests
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
clusterctl generate provider \
|
||||
-v5 \
|
||||
--{{ item.type }} {{ item.name }}:{{ item.version }} \
|
||||
--config ./clusterctl.yaml > {{ capi_clustermanifest.path }}/provider-{{ item.name }}.yaml
|
||||
chdir: /opt/metacluster/cluster-api
|
||||
loop:
|
||||
- type: infrastructure
|
||||
name: vsphere
|
||||
version: "{{ components.clusterapi.management.version.infrastructure_vsphere }}"
|
||||
- type: ipam
|
||||
name: in-cluster
|
||||
version: "{{ components.clusterapi.management.version.ipam_incluster }}"
|
||||
|
||||
- name: Split cluster API provider manifests into separate files
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
awk 'BEGINFILE {print "---"}{print}' {{ capi_clustermanifest.path }}/provider-*.yaml |
|
||||
kubectl slice \
|
||||
-o {{ capi_clustermanifest.path }}/providers
|
||||
|
||||
- name: Ensure controller availability
|
||||
kubernetes.core.k8s_info:
|
||||
kind: Deployment
|
||||
@ -93,8 +127,8 @@
|
||||
wait: true
|
||||
kubeconfig: "{{ kubeconfig.path }}"
|
||||
loop:
|
||||
- name: caip-in-cluster-controller-manager
|
||||
namespace: caip-in-cluster-system
|
||||
- name: capi-ipam-in-cluster-controller-manager
|
||||
namespace: capi-ipam-in-cluster-system
|
||||
- name: capi-controller-manager
|
||||
namespace: capi-system
|
||||
- name: capv-controller-manager
|
||||
@ -124,26 +158,21 @@
|
||||
chdir: /opt/metacluster/cluster-api
|
||||
register: clusterctl_newcluster
|
||||
|
||||
- name: Initialize tempfolder
|
||||
ansible.builtin.tempfile:
|
||||
state: directory
|
||||
register: capi_clustermanifest
|
||||
|
||||
- name: Save workload cluster manifest
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ capi_clustermanifest.path }}/new-cluster.yaml"
|
||||
content: "{{ clusterctl_newcluster.stdout }}"
|
||||
|
||||
- name: Split manifest into separate files
|
||||
- name: Split workload cluster manifest into separate files
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
kubectl slice \
|
||||
-f {{ capi_clustermanifest.path }}/new-cluster.yaml \
|
||||
-o {{ capi_clustermanifest.path }}/manifests
|
||||
-o {{ capi_clustermanifest.path }}/downstream-cluster
|
||||
|
||||
- name: Generate nodepool kustomization manifest
|
||||
ansible.builtin.template:
|
||||
src: kustomization.nodepool.j2
|
||||
src: kustomization.longhorn-storage.j2
|
||||
dest: "{{ capi_clustermanifest.path }}/kustomization.yaml"
|
||||
vars:
|
||||
_template:
|
||||
@ -155,13 +184,20 @@
|
||||
|
||||
- name: Store nodepool manifest
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ capi_clustermanifest.path }}/manifests/nodepool-worker-storage.yaml"
|
||||
dest: "{{ capi_clustermanifest.path }}/nodepool-worker-storage.yaml"
|
||||
content: "{{ lookup('kubernetes.core.kustomize', dir=capi_clustermanifest.path) }}"
|
||||
|
||||
- name: Split nodepool manifest into separate files
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
kubectl slice \
|
||||
-f {{ capi_clustermanifest.path }}/nodepool-worker-storage.yaml \
|
||||
-o {{ capi_clustermanifest.path }}/downstream-cluster
|
||||
|
||||
- name: Create in-cluster IpPool
|
||||
ansible.builtin.template:
|
||||
src: ippool.j2
|
||||
dest: "{{ capi_clustermanifest.path }}/manifests/inclusterippool-{{ _template.cluster.name }}.yml"
|
||||
dest: "{{ capi_clustermanifest.path }}/downstream-cluster/inclusterippool-{{ _template.cluster.name }}.yml"
|
||||
vars:
|
||||
_template:
|
||||
cluster:
|
||||
@ -173,24 +209,27 @@
|
||||
prefix: "{{ vapp['guestinfo.prefixlength'] }}"
|
||||
gateway: "{{ vapp['guestinfo.gateway'] }}"
|
||||
|
||||
- name: Initialize/Push git repository
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
git init
|
||||
git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}"
|
||||
git config --global user.name "administrator"
|
||||
git checkout -b main
|
||||
git add ./manifests
|
||||
git commit -m "Upload manifests"
|
||||
git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||
git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git --all
|
||||
chdir: "{{ capi_clustermanifest.path }}"
|
||||
|
||||
- name: Cleanup tempfolder
|
||||
ansible.builtin.file:
|
||||
- name: Push git repository
|
||||
lvrfrc87.git_acp.git_acp:
|
||||
path: "{{ capi_clustermanifest.path }}"
|
||||
state: absent
|
||||
when: capi_clustermanifest.path is defined
|
||||
branch: main
|
||||
comment: "Upload manifests"
|
||||
add:
|
||||
- ./downstream-cluster
|
||||
- ./providers
|
||||
clean: untracked
|
||||
url: https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||
environment:
|
||||
GIT_AUTHOR_NAME: administrator
|
||||
GIT_AUTHOR_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||
GIT_COMMITTER_NAME: administrator
|
||||
GIT_COMMITTER_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||
|
||||
# - name: Cleanup tempfolder
|
||||
# ansible.builtin.file:
|
||||
# path: "{{ capi_clustermanifest.path }}"
|
||||
# state: absent
|
||||
# when: capi_clustermanifest.path is defined
|
||||
|
||||
- name: Configure Cluster API repository
|
||||
ansible.builtin.template:
|
||||
@ -235,7 +274,7 @@
|
||||
namespace: default
|
||||
repository:
|
||||
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
|
||||
path: manifests
|
||||
path: downstream-cluster
|
||||
revision: main
|
||||
notify:
|
||||
- Apply manifests
|
||||
@ -277,7 +316,12 @@
|
||||
# TODO: move to git repo
|
||||
- name: Apply cni plugin manifest
|
||||
kubernetes.core.k8s:
|
||||
src: /opt/metacluster/cluster-api/cni-calico/{{ components.clusterapi.workload.version.calico }}/calico.yaml
|
||||
definition: |
|
||||
{{
|
||||
lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/cni-calico/' ~ components.clusterapi.workload.version.calico ~ '/calico.yaml') |
|
||||
regex_replace('# - name: CALICO_IPV4POOL_CIDR', '- name: CALICO_IPV4POOL_CIDR') |
|
||||
regex_replace('# value: "192.168.0.0/16"', ' value: "172.30.0.0/16"')
|
||||
}}
|
||||
state: present
|
||||
wait: true
|
||||
kubeconfig: "{{ capi_kubeconfig.path }}"
|
||||
|
@ -5,6 +5,20 @@
|
||||
recurse: false
|
||||
register: helm_charts
|
||||
|
||||
- name: Pull existing repository
|
||||
ansible.builtin.git:
|
||||
repo: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||
dest: /opt/workloadcluster/git-repositories/gitops
|
||||
version: main
|
||||
|
||||
- name: Create folder structure within new git-repository
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
loop:
|
||||
- /opt/workloadcluster/git-repositories/gitops/charts
|
||||
- /opt/workloadcluster/git-repositories/gitops/values
|
||||
|
||||
- name: Create hard-links to populate new git-repository
|
||||
ansible.builtin.shell:
|
||||
cmd: >-
|
||||
@ -13,6 +27,18 @@
|
||||
loop_control:
|
||||
label: "{{ item.path | basename }}"
|
||||
|
||||
- name: Write custom manifests to respective chart templates store
|
||||
ansible.builtin.template:
|
||||
src: "{{ src }}"
|
||||
dest: /opt/workloadcluster/git-repositories/gitops/charts/{{ manifest.value.namespace }}/{{ manifest.key }}/templates/{{ (src | split('.'))[0] ~ '-' ~ _template.name ~ '.yaml' }}
|
||||
vars:
|
||||
manifest: "{{ item.0 }}"
|
||||
src: "{{ item.1.src }}"
|
||||
_template: "{{ item.1._template }}"
|
||||
loop: "{{ query('ansible.builtin.subelements', query('ansible.builtin.dict', downstream_components), 'value.extra_manifests') }}"
|
||||
loop_control:
|
||||
label: "{{ (src | split('.'))[0] ~ '-' ~ _template.name }}"
|
||||
|
||||
- name: Create subfolders
|
||||
ansible.builtin.file:
|
||||
path: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}
|
||||
@ -29,18 +55,19 @@
|
||||
loop_control:
|
||||
label: "{{ item.key }}"
|
||||
|
||||
- name: Initialize/Push git repository
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
git init
|
||||
git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}"
|
||||
git config --global user.name "administrator"
|
||||
git checkout -b main
|
||||
git add .
|
||||
git commit -m "Upload charts"
|
||||
git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||
git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git --all
|
||||
chdir: /opt/workloadcluster/git-repositories/gitops
|
||||
- name: Push git repository
|
||||
lvrfrc87.git_acp.git_acp:
|
||||
path: /opt/workloadcluster/git-repositories/gitops
|
||||
branch: main
|
||||
comment: "Upload charts"
|
||||
add:
|
||||
- .
|
||||
url: https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
|
||||
environment:
|
||||
GIT_AUTHOR_NAME: administrator
|
||||
GIT_AUTHOR_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||
GIT_COMMITTER_NAME: administrator
|
||||
GIT_COMMITTER_EMAIL: administrator@{{ vapp['metacluster.fqdn'] }}
|
||||
|
||||
- name: Retrieve workload-cluster kubeconfig
|
||||
kubernetes.core.k8s_info:
|
||||
|
@ -1,57 +0,0 @@
|
||||
- name: Gather hypervisor details
|
||||
ansible.builtin.shell:
|
||||
cmd: govc ls -L {{ item.moref }} | awk -F/ '{print ${{ item.part }}}'
|
||||
environment:
|
||||
GOVC_INSECURE: '1'
|
||||
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||
register: govc_inventory
|
||||
loop:
|
||||
- attribute: cluster
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||
part: (NF-1)
|
||||
- attribute: datacenter
|
||||
moref: VirtualMachine:{{ moref_id }}
|
||||
part: 2
|
||||
- attribute: datastore
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||
part: NF
|
||||
- attribute: folder
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
|
||||
part: 0
|
||||
# - attribute: host
|
||||
# moref: >-
|
||||
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||
# part: NF
|
||||
- attribute: network
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||
part: NF
|
||||
- attribute: resourcepool
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
|
||||
part: 0
|
||||
loop_control:
|
||||
label: "{{ item.attribute }}"
|
||||
|
||||
- name: Retrieve hypervisor TLS thumbprint
|
||||
ansible.builtin.shell:
|
||||
cmd: openssl s_client -connect {{ vapp['hv.fqdn'] }}:443 < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin | awk -F'=' '{print $2}'
|
||||
register: tls_thumbprint
|
||||
|
||||
- name: Store hypervisor details in dictionary
|
||||
ansible.builtin.set_fact:
|
||||
vcenter_info: "{{ vcenter_info | default({}) | combine({ item.item.attribute : item.stdout }) }}"
|
||||
loop: "{{ govc_inventory.results }}"
|
||||
loop_control:
|
||||
label: "{{ item.item.attribute }}"
|
@ -6,6 +6,7 @@
|
||||
|
||||
- import_tasks: clusterapi.yml
|
||||
- import_tasks: gitops.yml
|
||||
- import_tasks: authentication.yml
|
||||
|
||||
when:
|
||||
- vapp['deployment.type'] != 'core'
|
||||
|
@ -1,73 +0,0 @@
|
||||
- block:
|
||||
|
||||
- name: Check for existing template on hypervisor
|
||||
community.vmware.vmware_guest_info:
|
||||
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||
register: existing_ova
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Store inventory path of existing template
|
||||
ansible.builtin.set_fact:
|
||||
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
|
||||
when: existing_ova is not failed
|
||||
|
||||
- block:
|
||||
|
||||
- name: Parse OVA file for network mappings
|
||||
ansible.builtin.shell:
|
||||
cmd: govc import.spec -json {{ filename }}
|
||||
environment:
|
||||
GOVC_INSECURE: '1'
|
||||
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||
register: ova_spec
|
||||
|
||||
- name: Deploy OVA template on hypervisor
|
||||
community.vmware.vmware_deploy_ovf:
|
||||
cluster: "{{ vcenter_info.cluster }}"
|
||||
datastore: "{{ vcenter_info.datastore }}"
|
||||
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
|
||||
allow_duplicates: no
|
||||
power_on: false
|
||||
ovf: "{{ filename }}"
|
||||
register: ova_deploy
|
||||
|
||||
- name: Add additional placeholder disk
|
||||
community.vmware.vmware_guest_disk:
|
||||
name: "{{ ova_deploy.instance.hw_name }}"
|
||||
disk:
|
||||
- size: 1Mb
|
||||
scsi_controller: 1
|
||||
scsi_type: paravirtual
|
||||
unit_number: 0
|
||||
|
||||
# Disabled to allow disks to be resized; at the cost of cloning speed
|
||||
# - name: Create snapshot on deployed VM
|
||||
# community.vmware.vmware_guest_snapshot:
|
||||
# name: "{{ ova_deploy.instance.hw_name }}"
|
||||
# state: present
|
||||
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
|
||||
|
||||
- name: Mark deployed VM as templates
|
||||
community.vmware.vmware_guest:
|
||||
name: "{{ ova_deploy.instance.hw_name }}"
|
||||
is_template: yes
|
||||
|
||||
- name: Store inventory path of deployed template
|
||||
ansible.builtin.set_fact:
|
||||
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
|
||||
|
||||
when: existing_ova is failed
|
||||
|
||||
vars:
|
||||
filename: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | first }}"
|
||||
module_defaults:
|
||||
group/vmware:
|
||||
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||
validate_certs: no
|
||||
username: "{{ vapp['hv.username'] }}"
|
||||
password: "{{ vapp['hv.password'] }}"
|
||||
datacenter: "{{ vcenter_info.datacenter }}"
|
||||
folder: "{{ vcenter_info.folder }}"
|
@ -0,0 +1,7 @@
|
||||
apiVersion: config.supervisor.pinniped.dev/v1alpha1
|
||||
kind: FederationDomain
|
||||
metadata:
|
||||
name: {{ _template.name }}
|
||||
namespace: {{ _template.namespace }}
|
||||
spec:
|
||||
{{ _template.spec }}
|
@ -4,4 +4,4 @@ metadata:
|
||||
name: {{ _template.name }}
|
||||
namespace: {{ _template.namespace }}
|
||||
spec:
|
||||
{{ _template.config }}
|
||||
{{ _template.spec }}
|
||||
|
@ -4,4 +4,4 @@ metadata:
|
||||
name: {{ _template.name }}
|
||||
namespace: {{ _template.namespace }}
|
||||
spec:
|
||||
{{ _template.config }}
|
||||
{{ _template.spec }}
|
||||
|
@ -1,10 +1,10 @@
|
||||
apiVersion: ipam.cluster.x-k8s.io/v1alpha1
|
||||
apiVersion: ipam.cluster.x-k8s.io/v1alpha2
|
||||
kind: InClusterIPPool
|
||||
metadata:
|
||||
name: inclusterippool-{{ _template.cluster.name }}
|
||||
namespace: {{ _template.cluster.namespace }}
|
||||
spec:
|
||||
start: {{ _template.cluster.network.startip }}
|
||||
end: {{ _template.cluster.network.endip }}
|
||||
addresses:
|
||||
- {{ _template.cluster.network.startip }}-{{ _template.cluster.network.endip }}
|
||||
prefix: {{ _template.cluster.network.prefix }}
|
||||
gateway: {{ _template.cluster.network.gateway }}
|
||||
|
@ -0,0 +1,6 @@
|
||||
apiVersion: authentication.concierge.pinniped.dev/v1alpha1
|
||||
kind: JWTAuthenticator
|
||||
metadata:
|
||||
name: {{ _template.name }}
|
||||
spec:
|
||||
{{ _template.spec }}
|
@ -3,36 +3,8 @@ kind: Kustomization
|
||||
resources:
|
||||
- cluster-template.yaml
|
||||
|
||||
patchesStrategicMerge:
|
||||
- |-
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: csi-vsphere-config
|
||||
namespace: '${NAMESPACE}'
|
||||
stringData:
|
||||
data: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: csi-vsphere-config
|
||||
namespace: kube-system
|
||||
stringData:
|
||||
csi-vsphere.conf: |+
|
||||
[Global]
|
||||
insecure-flag = true
|
||||
thumbprint = "${VSPHERE_TLS_THUMBPRINT}"
|
||||
cluster-id = "${NAMESPACE}/${CLUSTER_NAME}"
|
||||
|
||||
[VirtualCenter "${VSPHERE_SERVER}"]
|
||||
user = "${VSPHERE_USERNAME}"
|
||||
password = "${VSPHERE_PASSWORD}"
|
||||
datacenters = "${VSPHERE_DATACENTER}"
|
||||
|
||||
[Network]
|
||||
public-network = "${VSPHERE_NETWORK}"
|
||||
type: Opaque
|
||||
- |-
|
||||
patches:
|
||||
- patch: |-
|
||||
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
metadata:
|
||||
@ -42,7 +14,7 @@ patchesStrategicMerge:
|
||||
kubeadmConfigSpec:
|
||||
clusterConfiguration:
|
||||
imageRepository: registry.{{ _template.network.fqdn }}/kubeadm
|
||||
- |-
|
||||
- patch: |-
|
||||
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
metadata:
|
||||
@ -53,7 +25,7 @@ patchesStrategicMerge:
|
||||
spec:
|
||||
clusterConfiguration:
|
||||
imageRepository: registry.{{ _template.network.fqdn }}/kubeadm
|
||||
- |-
|
||||
- patch: |-
|
||||
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
metadata:
|
||||
@ -86,7 +58,7 @@ patchesStrategicMerge:
|
||||
{{ _template.rootca | indent(width=14, first=False) | trim }}
|
||||
owner: root:root
|
||||
path: /usr/local/share/ca-certificates/root_ca.crt
|
||||
- |-
|
||||
- patch: |-
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
metadata:
|
||||
@ -95,6 +67,7 @@ patchesStrategicMerge:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
diskGiB: 60
|
||||
network:
|
||||
devices:
|
||||
- dhcp4: false
|
||||
@ -105,7 +78,7 @@ patchesStrategicMerge:
|
||||
nameservers:
|
||||
- {{ _template.network.dnsserver }}
|
||||
networkName: '${VSPHERE_NETWORK}'
|
||||
- |-
|
||||
- patch: |-
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
metadata:
|
||||
@ -114,6 +87,7 @@ patchesStrategicMerge:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
diskGiB: 60
|
||||
network:
|
||||
devices:
|
||||
- dhcp4: false
|
||||
@ -125,132 +99,168 @@ patchesStrategicMerge:
|
||||
- {{ _template.network.dnsserver }}
|
||||
networkName: '${VSPHERE_NETWORK}'
|
||||
|
||||
patchesJson6902:
|
||||
- target:
|
||||
group: controlplane.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
name: .*
|
||||
patch: |-
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "/etc/containerd/certs.d"
|
||||
append: true
|
||||
path: /etc/containerd/config.toml
|
||||
- target:
|
||||
group: addons.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: ClusterResourceSet
|
||||
name: \${CLUSTER_NAME}-crs-0
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/resources
|
||||
value:
|
||||
- kind: Secret
|
||||
name: cloud-controller-manager
|
||||
- kind: Secret
|
||||
name: cloud-provider-vsphere-credentials
|
||||
- kind: ConfigMap
|
||||
name: cpi-manifests
|
||||
- op: add
|
||||
path: /spec/strategy
|
||||
value: Reconcile
|
||||
|
||||
- target:
|
||||
group: controlplane.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
name: .*
|
||||
patch: |-
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "/etc/containerd/certs.d"
|
||||
append: true
|
||||
path: /etc/containerd/config.toml
|
||||
{% for registry in _template.registries %}
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
server = "https://{{ registry }}"
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
server = "https://{{ registry }}"
|
||||
|
||||
[host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"]
|
||||
capabilities = ["pull", "resolve"]
|
||||
override_path = true
|
||||
owner: root:root
|
||||
path: /etc/containerd/certs.d/{{ registry }}/hosts.toml
|
||||
[host."https://registry.{{ _template.network.fqdn }}/v2/library/{{ registry }}"]
|
||||
capabilities = ["pull", "resolve"]
|
||||
override_path = true
|
||||
owner: root:root
|
||||
path: /etc/containerd/certs.d/{{ registry }}/hosts.toml
|
||||
{% endfor %}
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
network: {config: disabled}
|
||||
owner: root:root
|
||||
path: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
{{ _template.rootca | indent(width=12, first=False) | trim }}
|
||||
owner: root:root
|
||||
path: /usr/local/share/ca-certificates/root_ca.crt
|
||||
- target:
|
||||
group: bootstrap.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: .*
|
||||
patch: |-
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
network: {config: disabled}
|
||||
owner: root:root
|
||||
path: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/files/-
|
||||
value:
|
||||
content: |
|
||||
{{ _template.rootca | indent(width=10, first=False) | trim }}
|
||||
owner: root:root
|
||||
path: /usr/local/share/ca-certificates/root_ca.crt
|
||||
- target:
|
||||
group: bootstrap.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: .*
|
||||
patch: |-
|
||||
{% for cmd in _template.runcmds %}
|
||||
- op: add
|
||||
path: /spec/template/spec/preKubeadmCommands/-
|
||||
value: {{ cmd }}
|
||||
- op: add
|
||||
path: /spec/template/spec/preKubeadmCommands/-
|
||||
value: {{ cmd }}
|
||||
{% endfor %}
|
||||
- target:
|
||||
group: controlplane.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
name: .*
|
||||
patch: |-
|
||||
- target:
|
||||
group: controlplane.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
name: .*
|
||||
patch: |-
|
||||
{% for cmd in _template.runcmds %}
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/preKubeadmCommands/-
|
||||
value: {{ cmd }}
|
||||
- op: add
|
||||
path: /spec/kubeadmConfigSpec/preKubeadmCommands/-
|
||||
value: {{ cmd }}
|
||||
{% endfor %}
|
||||
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: \${CLUSTER_NAME}
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- target:
|
||||
group: controlplane.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
name: \${CLUSTER_NAME}
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- op: replace
|
||||
path: /spec/machineTemplate/infrastructureRef/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- target:
|
||||
group: cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: Cluster
|
||||
name: \${CLUSTER_NAME}
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/controlPlaneRef/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: \${CLUSTER_NAME}
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- op: remove
|
||||
path: /spec/template/spec/thumbprint
|
||||
- target:
|
||||
group: controlplane.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmControlPlane
|
||||
name: \${CLUSTER_NAME}
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- op: replace
|
||||
path: /spec/machineTemplate/infrastructureRef/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
- target:
|
||||
group: cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: Cluster
|
||||
name: \${CLUSTER_NAME}
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/clusterNetwork/pods
|
||||
value:
|
||||
cidrBlocks:
|
||||
- 172.30.0.0/16
|
||||
- op: replace
|
||||
path: /spec/controlPlaneRef/name
|
||||
value: ${CLUSTER_NAME}-master
|
||||
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: \${CLUSTER_NAME}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/numCPUs
|
||||
value: {{ _template.nodesize.cpu }}
|
||||
- op: replace
|
||||
path: /spec/template/spec/memoryMiB
|
||||
value: {{ _template.nodesize.memory }}
|
||||
- target:
|
||||
group: cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: MachineDeployment
|
||||
name: \${CLUSTER_NAME}-md-0
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-worker
|
||||
- op: replace
|
||||
path: /spec/template/spec/bootstrap/configRef/name
|
||||
value: ${CLUSTER_NAME}-worker
|
||||
- target:
|
||||
group: bootstrap.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: \${CLUSTER_NAME}-md-0
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-worker
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: \${CLUSTER_NAME}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/numCPUs
|
||||
value: {{ _template.nodesize.cpu }}
|
||||
- op: replace
|
||||
path: /spec/template/spec/memoryMiB
|
||||
value: {{ _template.nodesize.memory }}
|
||||
- op: remove
|
||||
path: /spec/template/spec/thumbprint
|
||||
- target:
|
||||
group: cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: MachineDeployment
|
||||
name: \${CLUSTER_NAME}-md-0
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-worker
|
||||
- op: replace
|
||||
path: /spec/template/spec/bootstrap/configRef/name
|
||||
value: ${CLUSTER_NAME}-worker
|
||||
- target:
|
||||
group: bootstrap.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: \${CLUSTER_NAME}-md-0
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: ${CLUSTER_NAME}-worker
|
||||
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereCluster
|
||||
name: .*
|
||||
patch: |-
|
||||
- op: remove
|
||||
path: /spec/thumbprint
|
||||
|
@ -0,0 +1,83 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- downstream-cluster/kubeadmconfigtemplate-{{ _template.cluster.name }}-worker.yaml
|
||||
- downstream-cluster/machinedeployment-{{ _template.cluster.name }}-worker.yaml
|
||||
- downstream-cluster/vspheremachinetemplate-{{ _template.cluster.name }}-worker.yaml
|
||||
|
||||
patches:
|
||||
- patch: |-
|
||||
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
metadata:
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
namespace: default
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
diskSetup:
|
||||
filesystems:
|
||||
- device: /dev/sdb1
|
||||
filesystem: ext4
|
||||
label: blockstorage
|
||||
partitions:
|
||||
- device: /dev/sdb
|
||||
layout: true
|
||||
tableType: gpt
|
||||
joinConfiguration:
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "node.longhorn.io/create-default-disk=true"
|
||||
mounts:
|
||||
- - LABEL=blockstorage
|
||||
- /mnt/blockstorage
|
||||
- patch: |-
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
metadata:
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
namespace: default
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
additionalDisksGiB:
|
||||
- {{ _template.nodepool.additionaldisk }}
|
||||
|
||||
- target:
|
||||
group: bootstrap.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
|
||||
- target:
|
||||
group: cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: MachineDeployment
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
- op: replace
|
||||
path: /spec/template/spec/bootstrap/configRef/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
- op: replace
|
||||
path: /spec/template/spec/infrastructureRef/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
- op: replace
|
||||
path: /spec/replicas
|
||||
value: {{ _template.nodepool.size }}
|
||||
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
@ -1,84 +0,0 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- manifests/kubeadmconfigtemplate-{{ _template.cluster.name }}-worker.yaml
|
||||
- manifests/machinedeployment-{{ _template.cluster.name }}-worker.yaml
|
||||
- manifests/vspheremachinetemplate-{{ _template.cluster.name }}-worker.yaml
|
||||
|
||||
patchesStrategicMerge:
|
||||
- |-
|
||||
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
metadata:
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
namespace: default
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
diskSetup:
|
||||
filesystems:
|
||||
- device: /dev/sdb1
|
||||
filesystem: ext4
|
||||
label: blockstorage
|
||||
partitions:
|
||||
- device: /dev/sdb
|
||||
layout: true
|
||||
tableType: gpt
|
||||
joinConfiguration:
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "node.longhorn.io/create-default-disk=true"
|
||||
mounts:
|
||||
- - LABEL=blockstorage
|
||||
- /mnt/blockstorage
|
||||
- |-
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
metadata:
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
namespace: default
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
additionalDisksGiB:
|
||||
- {{ _template.nodepool.additionaldisk }}
|
||||
|
||||
patchesJson6902:
|
||||
- target:
|
||||
group: bootstrap.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: KubeadmConfigTemplate
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
|
||||
- target:
|
||||
group: cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: MachineDeployment
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
- op: replace
|
||||
path: /spec/template/spec/bootstrap/configRef/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
- op: replace
|
||||
path: /spec/template/spec/infrastructureRef/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
||||
- op: replace
|
||||
path: /spec/replicas
|
||||
value: {{ _template.nodepool.size }}
|
||||
|
||||
- target:
|
||||
group: infrastructure.cluster.x-k8s.io
|
||||
version: v1beta1
|
||||
kind: VSphereMachineTemplate
|
||||
name: {{ _template.cluster.name }}-worker
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/name
|
||||
value: {{ _template.cluster.name }}-worker-storage
|
@ -0,0 +1,7 @@
|
||||
apiVersion: idp.supervisor.pinniped.dev/v1alpha1
|
||||
kind: OIDCIdentityProvider
|
||||
metadata:
|
||||
name: {{ _template.name }}
|
||||
namespace: {{ _template.namespace }}
|
||||
spec:
|
||||
{{ _template.spec }}
|
@ -3,6 +3,7 @@ kind: Secret
|
||||
metadata:
|
||||
name: {{ _template.name }}
|
||||
namespace: {{ _template.namespace }}
|
||||
type: {{ _template.type }}
|
||||
data:
|
||||
{% for kv_pair in _template.data %}
|
||||
"{{ kv_pair.key }}": {{ kv_pair.value }}
|
||||
|
@ -0,0 +1,7 @@
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: ServersTransport
|
||||
metadata:
|
||||
name: {{ _template.name }}
|
||||
namespace: {{ _template.namespace }}
|
||||
spec:
|
||||
{{ _template.spec }}
|
@ -1,12 +1,6 @@
|
||||
- import_tasks: service.yml
|
||||
- import_tasks: cron.yml
|
||||
|
||||
- name: Cleanup tempfile
|
||||
ansible.builtin.file:
|
||||
path: "{{ kubeconfig.path }}"
|
||||
state: absent
|
||||
when: kubeconfig.path is defined
|
||||
|
||||
# - name: Reboot host
|
||||
# ansible.builtin.shell:
|
||||
# cmd: systemctl reboot
|
||||
|
@ -11,7 +11,7 @@
|
||||
- attribute: cluster
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||
jq -r '.[] | select(.name == "runtime").val.host | .type + ":" + .value')
|
||||
part: (NF-1)
|
||||
- attribute: datacenter
|
||||
moref: VirtualMachine:{{ moref_id }}
|
||||
@ -19,27 +19,27 @@
|
||||
- attribute: datastore
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||
jq -r '.[] | select(.name == "datastore").val._value | .[].type + ":" + .[].value')
|
||||
part: NF
|
||||
- attribute: folder
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
|
||||
jq -r '.[] | select(.name == "parent").val | .type + ":" + .value')
|
||||
part: 0
|
||||
# - attribute: host
|
||||
# moref: >-
|
||||
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
|
||||
# jq -r '.[] | select(.name == "runtime").val.host | .type + ":" + .value')
|
||||
# part: NF
|
||||
- attribute: network
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
|
||||
jq -r '.[] | select(.name == "network").val._value | .[].type + ":" + .[].value')
|
||||
part: NF
|
||||
- attribute: resourcepool
|
||||
moref: >-
|
||||
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
|
||||
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
|
||||
jq -r '.[] | select(.name == "resourcePool").val | .type + ":" + .value')
|
||||
part: 0
|
||||
loop_control:
|
||||
label: "{{ item.attribute }}"
|
@ -0,0 +1,33 @@
|
||||
- block:
|
||||
|
||||
- name: Check for existing template
|
||||
community.vmware.vmware_guest_info:
|
||||
name: "{{ vapp['workloadcluster.nodetemplate'] }}"
|
||||
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||
validate_certs: false
|
||||
username: "{{ vapp['hv.username'] }}"
|
||||
password: "{{ vapp['hv.password'] }}"
|
||||
datacenter: "{{ vcenter_info.datacenter }}"
|
||||
folder: "{{ vcenter_info.folder }}"
|
||||
register: nodetemplate
|
||||
until:
|
||||
- nodetemplate is not failed
|
||||
retries: 600
|
||||
delay: 30
|
||||
#wait for 5 hr.
|
||||
vars:
|
||||
color_reset: "\e[0m"
|
||||
ansible_callback_diy_runner_retry_msg: >-
|
||||
{%- set result = ansible_callback_diy.result.output -%}
|
||||
{%- set retries_left = result.retries - result.attempts -%}
|
||||
TEMPLATE '{{ vapp['workloadcluster.nodetemplate'] }}' NOT FOUND; PLEASE UPLOAD MANUALLY -- ({{ retries_left }} retries left)
|
||||
ansible_callback_diy_runner_retry_msg_color: bright yellow
|
||||
|
||||
- name: Store inventory path of existing template
|
||||
ansible.builtin.set_fact:
|
||||
nodetemplate_inventorypath: "{{ nodetemplate.instance.hw_folder ~ '/' ~ nodetemplate.instance.hw_name }}"
|
||||
|
||||
rescue:
|
||||
- name: CRITICAL ERROR
|
||||
ansible.builtin.fail:
|
||||
msg: Required node-template is not available; cannot continue
|
@ -1,73 +0,0 @@
|
||||
- block:
|
||||
|
||||
- name: Check for existing template on hypervisor
|
||||
community.vmware.vmware_guest_info:
|
||||
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||
register: existing_ova
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Store inventory path of existing template
|
||||
ansible.builtin.set_fact:
|
||||
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
|
||||
when: existing_ova is not failed
|
||||
|
||||
- block:
|
||||
|
||||
- name: Parse OVA file for network mappings
|
||||
ansible.builtin.shell:
|
||||
cmd: govc import.spec -json {{ filename }}
|
||||
environment:
|
||||
GOVC_INSECURE: '1'
|
||||
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
|
||||
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
|
||||
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
|
||||
register: ova_spec
|
||||
|
||||
- name: Deploy OVA template on hypervisor
|
||||
community.vmware.vmware_deploy_ovf:
|
||||
cluster: "{{ vcenter_info.cluster }}"
|
||||
datastore: "{{ vcenter_info.datastore }}"
|
||||
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
|
||||
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
|
||||
allow_duplicates: no
|
||||
power_on: false
|
||||
ovf: "{{ filename }}"
|
||||
register: ova_deploy
|
||||
|
||||
- name: Add additional placeholder disk
|
||||
community.vmware.vmware_guest_disk:
|
||||
name: "{{ ova_deploy.instance.hw_name }}"
|
||||
disk:
|
||||
- size: 1Gb
|
||||
scsi_controller: 1
|
||||
scsi_type: paravirtual
|
||||
unit_number: 0
|
||||
|
||||
# Disabled to allow disks to be resized; at the cost of cloning speed
|
||||
# - name: Create snapshot on deployed VM
|
||||
# community.vmware.vmware_guest_snapshot:
|
||||
# name: "{{ ova_deploy.instance.hw_name }}"
|
||||
# state: present
|
||||
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
|
||||
|
||||
- name: Mark deployed VM as templates
|
||||
community.vmware.vmware_guest:
|
||||
name: "{{ ova_deploy.instance.hw_name }}"
|
||||
is_template: yes
|
||||
|
||||
- name: Store inventory path of deployed template
|
||||
ansible.builtin.set_fact:
|
||||
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
|
||||
|
||||
when: existing_ova is failed
|
||||
|
||||
vars:
|
||||
filename: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/node-templates/*.ova') | first }}"
|
||||
module_defaults:
|
||||
group/vmware:
|
||||
hostname: "{{ vapp['hv.fqdn'] }}"
|
||||
validate_certs: no
|
||||
username: "{{ vapp['hv.username'] }}"
|
||||
password: "{{ vapp['hv.password'] }}"
|
||||
datacenter: "{{ vcenter_info.datacenter }}"
|
||||
folder: "{{ vcenter_info.folder }}"
|
@ -37,9 +37,30 @@
|
||||
state: directory
|
||||
|
||||
- name: Configure Ansible defaults
|
||||
ansible.builtin.template:
|
||||
src: ansible.j2
|
||||
ansible.builtin.copy:
|
||||
dest: /etc/ansible/ansible.cfg
|
||||
content: |
|
||||
[defaults]
|
||||
callbacks_enabled = ansible.posix.profile_tasks
|
||||
force_color = true
|
||||
stdout_callback = community.general.diy
|
||||
|
||||
[callback_diy]
|
||||
|
||||
[callback_profile_tasks]
|
||||
task_output_limit = 0
|
||||
|
||||
- name: Create default shell aliases
|
||||
ansible.builtin.lineinfile:
|
||||
path: ~/.bashrc
|
||||
state: present
|
||||
line: "{{ item }}"
|
||||
insertafter: EOF
|
||||
loop:
|
||||
- alias k="kubectl"
|
||||
- alias less="less -rf"
|
||||
loop_control:
|
||||
label: "{{ (item | regex_findall('([^ =\"]+)'))[2] }}"
|
||||
|
||||
- name: Cleanup
|
||||
ansible.builtin.apt:
|
||||
|
@ -1,2 +0,0 @@
|
||||
[defaults]
|
||||
callbacks_enabled = ansible.posix.profile_tasks
|
@ -1,7 +1,8 @@
|
||||
platform:
|
||||
|
||||
k3s:
|
||||
version: v1.25.9+k3s1
|
||||
version: v1.30.0+k3s1
|
||||
# version: v1.27.1+k3s1
|
||||
|
||||
packaged_components:
|
||||
- name: traefik
|
||||
@ -22,7 +23,8 @@ platform:
|
||||
port: 8022
|
||||
protocol: TCP
|
||||
web:
|
||||
redirectTo: websecure
|
||||
redirectTo:
|
||||
port: websecure
|
||||
websecure:
|
||||
tls:
|
||||
certResolver: stepca
|
||||
@ -33,12 +35,10 @@ platform:
|
||||
helm_repositories:
|
||||
- name: argo
|
||||
url: https://argoproj.github.io/argo-helm
|
||||
- name: authentik
|
||||
url: https://charts.goauthentik.io
|
||||
# - name: codecentric
|
||||
# url: https://codecentric.github.io/helm-charts
|
||||
# - name: dex
|
||||
# url: https://charts.dexidp.io
|
||||
- name: bitnami
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
- name: dexidp
|
||||
url: https://charts.dexidp.io
|
||||
- name: gitea-charts
|
||||
url: https://dl.gitea.io/charts/
|
||||
- name: harbor
|
||||
@ -51,151 +51,168 @@ platform:
|
||||
url: https://prometheus-community.github.io/helm-charts
|
||||
- name: smallstep
|
||||
url: https://smallstep.github.io/helm-charts/
|
||||
- name: spamasaurus
|
||||
url: https://code.spamasaurus.com/api/packages/djpbessems/helm
|
||||
|
||||
components:
|
||||
|
||||
argo-cd:
|
||||
helm:
|
||||
version: 5.27.4 # (= ArgoCD v2.6.7)
|
||||
# Must match the version referenced at `dependencies.static_binaries[.filename==argo].url`
|
||||
version: 6.7.7 # (=Argo CD v2.10.5)
|
||||
chart: argo/argo-cd
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
configs:
|
||||
cm:
|
||||
resource.compareoptions: |
|
||||
ignoreAggregatedRoles: true
|
||||
resource.customizations.ignoreDifferences.all: |
|
||||
jsonPointers:
|
||||
- /spec/conversion/webhook/clientConfig/caBundle
|
||||
params:
|
||||
server.insecure: true
|
||||
secret:
|
||||
argocdServerAdminPassword: "{{ vapp['metacluster.password'] | password_hash('bcrypt') }}"
|
||||
global:
|
||||
domain: gitops.{{ vapp['metacluster.fqdn'] | lower }}
|
||||
server:
|
||||
extraArgs:
|
||||
- --insecure
|
||||
ingress:
|
||||
enabled: true
|
||||
|
||||
argo-workflows:
|
||||
helm:
|
||||
version: 0.41.8 # (=Argo Workflows v3.5.7)
|
||||
chart: argo/argo-workflows
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
# workflow:
|
||||
# serviceAccount:
|
||||
# create: true
|
||||
# name: "argo-workflows"
|
||||
# rbac:
|
||||
# create: true
|
||||
controller:
|
||||
workflowNamespaces:
|
||||
- default
|
||||
- firstboot
|
||||
server:
|
||||
authModes:
|
||||
- server
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- gitops.{{ vapp['metacluster.fqdn'] }}
|
||||
|
||||
authentik:
|
||||
helm:
|
||||
version: 2023.3.1
|
||||
chart: authentik/authentik
|
||||
parse_logic: helm template . --set postgresql.enabled=true,redis.enabled=true | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
authentik:
|
||||
avatars: none
|
||||
secret_key: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
|
||||
postgresql:
|
||||
password: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
|
||||
env:
|
||||
AUTHENTIK_BOOTSTRAP_PASSWORD: "{{ vapp['metacluster.password'] }}"
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: auth.{{ vapp['metacluster.fqdn'] }}
|
||||
paths:
|
||||
- path: "/"
|
||||
pathType: Prefix
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: "{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_lowercase,digits seed=' ~ vapp['guestinfo.hostname']) }}"
|
||||
redis:
|
||||
enabled: true
|
||||
- workflow.{{ vapp['metacluster.fqdn']}}
|
||||
paths:
|
||||
- /
|
||||
pathType: Prefix
|
||||
|
||||
cert-manager:
|
||||
helm:
|
||||
version: 1.11.0
|
||||
version: 1.14.4
|
||||
chart: jetstack/cert-manager
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
# chart_values: !unsafe |
|
||||
# installCRDs: true
|
||||
chart_values: !unsafe |
|
||||
installCRDs: true
|
||||
|
||||
clusterapi:
|
||||
management:
|
||||
version:
|
||||
# Must match the version referenced at `dependencies.static_binaries[.filename==clusterctl].url`
|
||||
base: v1.4.0
|
||||
base: v1.6.3
|
||||
# Must match the version referenced at `components.cert-manager.helm.version`
|
||||
cert_manager: v1.11.0
|
||||
infrastructure_vsphere: v1.6.0
|
||||
ipam_incluster: v0.1.0-alpha.2
|
||||
cert_manager: v1.14.4
|
||||
infrastructure_vsphere: v1.9.2
|
||||
ipam_incluster: v0.1.0
|
||||
# Refer to `https://console.cloud.google.com/gcr/images/cloud-provider-vsphere/GLOBAL/cpi/release/manager` for available tags
|
||||
cpi_vsphere: v1.25.2
|
||||
cpi_vsphere: v1.30.1
|
||||
workload:
|
||||
version:
|
||||
calico: v3.25.0
|
||||
k8s: v1.25.9
|
||||
calico: v3.27.3
|
||||
k8s: v1.30.1
|
||||
node_template:
|
||||
url: https://{{ repo_username }}:{{ repo_password }}@sn.itch.fyi/Repository/rel/ubuntu-2204-kube-v1.25.9.ova
|
||||
# Not used anymore; should be uploaded to hypervisor manually!
|
||||
# https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/templates%2Fv1.30.0/
|
||||
|
||||
# dex:
|
||||
# helm:
|
||||
# version: 0.13.0 # (= Dex 2.35.3)
|
||||
# chart: dex/dex
|
||||
# parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
# chart_values: !unsafe |
|
||||
# config:
|
||||
# connectors:
|
||||
# - type: ldap
|
||||
# id: ldap
|
||||
# name: "LDAP"
|
||||
# config:
|
||||
# host: "{{ vapp['ldap.fqdn'] }}:636"
|
||||
# insecureNoSSL: false
|
||||
# insecureSkipVerify: true
|
||||
# bindDN: "{{ vapp['ldap.dn'] }}"
|
||||
# bindPW: "{{ vapp['ldap.password'] }}"
|
||||
|
||||
# usernamePrompt: "Username"
|
||||
# userSearch:
|
||||
# baseDN: OU=Administrators,OU=Useraccounts,DC=bessems,DC=eu
|
||||
# filter: "(objectClass=person)"
|
||||
# username: userPrincipalName
|
||||
# idAttr: DN
|
||||
# emailAttr: userPrincipalName
|
||||
# nameAttr: cn
|
||||
|
||||
# groupSearch:
|
||||
# baseDN: OU=Roles,OU=Groups,DC=bessems,DC=eu
|
||||
# filter: "(objectClass=group)"
|
||||
# userMatchers:
|
||||
# - userAttr: DN
|
||||
# groupAttr: member
|
||||
# nameAttr: cn
|
||||
# enablePasswordDB: true
|
||||
# issuer: https://oidc.{{ vapp['metacluster.fqdn'] }}
|
||||
# storage:
|
||||
# type: kubernetes
|
||||
# config:
|
||||
# inCluster: true
|
||||
# ingress:
|
||||
# enabled: true
|
||||
# hosts:
|
||||
# - host: oidc.{{ vapp['metacluster.fqdn'] }}
|
||||
# paths:
|
||||
# - path: /
|
||||
# pathType: Prefix
|
||||
dex:
|
||||
helm:
|
||||
version: 0.15.3 # (= Dex 2.37.0)
|
||||
chart: dexidp/dex
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
config:
|
||||
issuer: https://idps.{{ vapp['metacluster.fqdn'] }}
|
||||
storage:
|
||||
type: kubernetes
|
||||
config:
|
||||
inCluster: true
|
||||
staticClients:
|
||||
- id: pinniped-supervisor
|
||||
secret: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['metacluster.fqdn']) }}"
|
||||
name: Pinniped Supervisor client
|
||||
redirectURIs:
|
||||
- https://auth.{{ vapp['metacluster.fqdn'] }}/sso/callback
|
||||
enablePasswordDB: true
|
||||
staticPasswords:
|
||||
- email: user@{{ vapp['metacluster.fqdn'] }}
|
||||
hash: "{{ vapp['metacluster.password'] | password_hash('bcrypt') }}"
|
||||
username: user
|
||||
userID: "{{ lookup('ansible.builtin.password', '/dev/null length=64 chars=ascii_lowercase,digits seed=' ~ vapp['metacluster.fqdn']) | to_uuid }}"
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: idps.{{ vapp['metacluster.fqdn'] }}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
|
||||
gitea:
|
||||
helm:
|
||||
version: v7.0.2 # (= Gitea v1.18.3)
|
||||
version: v10.1.3 # (= Gitea v1.21.7)
|
||||
chart: gitea-charts/gitea
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | sed '/:/!s/$/:latest/'
|
||||
chart_values: !unsafe |
|
||||
extraVolumes:
|
||||
- secret:
|
||||
defaultMode: 420
|
||||
secretName: step-certificates-certs
|
||||
name: step-certificates-certs
|
||||
extraVolumeMounts:
|
||||
- mountPath: /etc/ssl/certs/ca-chain.crt
|
||||
name: step-certificates-certs
|
||||
readOnly: true
|
||||
subPath: ca_chain.crt
|
||||
gitea:
|
||||
admin:
|
||||
username: administrator
|
||||
password: "{{ vapp['metacluster.password'] }}"
|
||||
email: admin@{{ vapp['metacluster.fqdn'] }}
|
||||
email: administrator@{{ vapp['metacluster.fqdn'] | lower }}
|
||||
config:
|
||||
cache:
|
||||
ADAPTER: memory
|
||||
server:
|
||||
OFFLINE_MODE: true
|
||||
PROTOCOL: http
|
||||
ROOT_URL: https://git.{{ vapp['metacluster.fqdn'] }}/
|
||||
ROOT_URL: https://git.{{ vapp['metacluster.fqdn'] | lower }}/
|
||||
session:
|
||||
PROVIDER: db
|
||||
image:
|
||||
pullPolicy: IfNotPresent
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: git.{{ vapp['metacluster.fqdn'] }}
|
||||
- host: git.{{ vapp['metacluster.fqdn'] | lower }}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
postgresql:
|
||||
enabled: true
|
||||
image:
|
||||
tag: 16.1.0-debian-11-r25
|
||||
postgresql-ha:
|
||||
enabled: false
|
||||
redis-cluster:
|
||||
enabled: false
|
||||
service:
|
||||
ssh:
|
||||
type: ClusterIP
|
||||
@ -204,7 +221,7 @@ components:
|
||||
|
||||
harbor:
|
||||
helm:
|
||||
version: 1.11.0 # (= Harbor v2.7.0)
|
||||
version: 1.14.1 # (= Harbor v2.10.1)
|
||||
chart: harbor/harbor
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
@ -212,11 +229,11 @@ components:
|
||||
ingress:
|
||||
annotations: {}
|
||||
hosts:
|
||||
core: registry.{{ vapp['metacluster.fqdn'] }}
|
||||
core: registry.{{ vapp['metacluster.fqdn'] | lower }}
|
||||
tls:
|
||||
certSource: none
|
||||
enabled: false
|
||||
externalURL: https://registry.{{ vapp['metacluster.fqdn'] }}
|
||||
externalURL: https://registry.{{ vapp['metacluster.fqdn'] | lower }}
|
||||
harborAdminPassword: "{{ vapp['metacluster.password'] }}"
|
||||
notary:
|
||||
enabled: false
|
||||
@ -225,37 +242,28 @@ components:
|
||||
registry:
|
||||
size: 25Gi
|
||||
|
||||
# keycloakx:
|
||||
# helm:
|
||||
# version: 2.1.1 # (= Keycloak 20.0.3)
|
||||
# chart: codecentric/keycloakx
|
||||
# parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
# chart_values: !unsafe |
|
||||
# command:
|
||||
# - "/opt/keycloak/bin/kc.sh"
|
||||
# - "start"
|
||||
# - "--http-enabled=true"
|
||||
# - "--http-port=8080"
|
||||
# - "--hostname-strict=false"
|
||||
# - "--hostname-strict-https=false"
|
||||
# extraEnv: |
|
||||
# - name: KEYCLOAK_ADMIN
|
||||
# value: admin
|
||||
# - name: KEYCLOAK_ADMIN_PASSWORD
|
||||
# value: {{ vapp['metacluster.password'] }}
|
||||
# - name: KC_PROXY
|
||||
# value: "passthrough"
|
||||
# - name: JAVA_OPTS_APPEND
|
||||
# value: >-
|
||||
# -Djgroups.dns.query={% raw %}{{ include "keycloak.fullname" . }}{% endraw %}-headless
|
||||
# ingress:
|
||||
# enabled: true
|
||||
# rules:
|
||||
# - host: keycloak.{{ vapp['metacluster.fqdn'] }}
|
||||
# paths:
|
||||
# - path: /
|
||||
# pathType: Prefix
|
||||
# tls: []
|
||||
json-server:
|
||||
helm:
|
||||
version: v0.8.4
|
||||
chart: spamasaurus/json-server
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: version.{{ vapp['metacluster.fqdn'] }}
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
jsonServer:
|
||||
image:
|
||||
repository: code.spamasaurus.com/djpbessems/json-server
|
||||
seedData:
|
||||
configInline: {}
|
||||
sidecar:
|
||||
targetUrl: version.{{ vapp['metacluster.fqdn'] }}
|
||||
image:
|
||||
repository: code.spamasaurus.com/djpbessems/json-server
|
||||
|
||||
kube-prometheus-stack:
|
||||
helm:
|
||||
@ -270,42 +278,62 @@ components:
|
||||
|
||||
kubevip:
|
||||
# Must match the version referenced at `dependencies.container_images`
|
||||
version: v0.5.8
|
||||
version: v0.6.3
|
||||
|
||||
longhorn:
|
||||
helm:
|
||||
version: 1.4.1
|
||||
version: 1.5.4
|
||||
chart: longhorn/longhorn
|
||||
parse_logic: cat values.yaml | yq eval '.. | select(has("repository")) | .repository + ":" + .tag'
|
||||
chart_values: !unsafe |
|
||||
defaultSettings:
|
||||
allowNodeDrainWithLastHealthyReplica: true
|
||||
concurrentReplicaRebuildPerNodeLimit: 10
|
||||
defaultDataPath: /mnt/blockstorage
|
||||
defaultReplicaCount: 1
|
||||
logLevel: Info
|
||||
nodeDrainPolicy: block-for-eviction-if-contains-last-replica
|
||||
replicaSoftAntiAffinity: true
|
||||
priorityClass: system-node-critical
|
||||
storageOverProvisioningPercentage: 200
|
||||
storageReservedPercentageForDefaultDisk: 0
|
||||
ingress:
|
||||
enabled: true
|
||||
host: storage.{{ vapp['metacluster.fqdn'] }}
|
||||
persistence:
|
||||
defaultClassReplicaCount: 1
|
||||
host: storage.{{ vapp['metacluster.fqdn'] | lower }}
|
||||
longhornManager:
|
||||
priorityClass: system-node-critical
|
||||
longhornDriver:
|
||||
priorityClass: system-node-critical
|
||||
|
||||
pinniped:
|
||||
helm:
|
||||
version: 1.3.10 # (= Pinniped v0.27.0)
|
||||
chart: bitnami/pinniped
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
concierge:
|
||||
enabled: false
|
||||
supervisor:
|
||||
service:
|
||||
public:
|
||||
type: ClusterIP
|
||||
local-user-authenticator:
|
||||
# Must match the appVersion (!=chart version) referenced at `components.pinniped.helm.version`
|
||||
version: v0.27.0
|
||||
users:
|
||||
- username: metauser
|
||||
password: !unsafe "{{ vapp['metacluster.password'] | password_hash('bcrypt') }}"
|
||||
- username: metaguest
|
||||
password: !unsafe "{{ vapp['metacluster.password'] | password_hash('bcrypt') }}"
|
||||
|
||||
step-certificates:
|
||||
helm:
|
||||
version: 1.23.0
|
||||
version: 1.25.2 # (= step-ca v0.25.2)
|
||||
chart: smallstep/step-certificates
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sed '/:/!s/$/:latest/' | sort -u
|
||||
chart_values: !unsafe |
|
||||
ca:
|
||||
dns: ca.{{ vapp['metacluster.fqdn'] }},step-certificates.step-ca.svc.cluster.local,127.0.0.1
|
||||
password: "{{ vapp['metacluster.password'] }}"
|
||||
provisioner:
|
||||
name: admin
|
||||
password: "{{ vapp['metacluster.password'] }}"
|
||||
inject:
|
||||
secrets:
|
||||
ca_password: "{{ vapp['metacluster.password'] | b64encode }}"
|
||||
provisioner_password: "{{ vapp['metacluster.password'] | b64encode }}"
|
||||
service:
|
||||
targetPort: 9000
|
||||
|
||||
dependencies:
|
||||
|
||||
@ -316,43 +344,50 @@ dependencies:
|
||||
- community.general
|
||||
- community.vmware
|
||||
- kubernetes.core
|
||||
- lvrfrc87.git_acp
|
||||
|
||||
container_images:
|
||||
# This should match the image tag referenced at `platform.packaged_components[.name==traefik].config`
|
||||
- busybox:1
|
||||
- ghcr.io/kube-vip/kube-vip:v0.5.8
|
||||
- ghcr.io/kube-vip/kube-vip:v0.6.3
|
||||
# The following list is generated by running the following commands:
|
||||
# $ clusterctl init -i vsphere:<version> [...]
|
||||
# $ clusterctl generate cluster <name> [...] | yq eval '.data.data' | yq --no-doc eval '.. | .image? | select(.)' | sort -u
|
||||
- gcr.io/cloud-provider-vsphere/cpi/release/manager:v1.18.1
|
||||
- gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0
|
||||
- gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0
|
||||
- quay.io/k8scsi/csi-attacher:v3.0.0
|
||||
- quay.io/k8scsi/csi-node-driver-registrar:v2.0.1
|
||||
- quay.io/k8scsi/csi-provisioner:v2.0.0
|
||||
- quay.io/k8scsi/livenessprobe:v2.1.0
|
||||
- gcr.io/cloud-provider-vsphere/cpi/release/manager:v1.27.0
|
||||
- gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0
|
||||
- gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.1.0
|
||||
- registry.k8s.io/sig-storage/csi-attacher:v4.3.0
|
||||
- registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
|
||||
- registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
|
||||
- registry.k8s.io/sig-storage/csi-resizer:v1.8.0
|
||||
- registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
|
||||
- registry.k8s.io/sig-storage/livenessprobe:v2.10.0
|
||||
|
||||
static_binaries:
|
||||
- filename: argo
|
||||
url: https://github.com/argoproj/argo-workflows/releases/download/v3.5.7/argo-linux-amd64.gz
|
||||
- filename: clusterctl
|
||||
url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-linux-amd64
|
||||
url: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.3/clusterctl-linux-amd64
|
||||
- filename: govc
|
||||
url: https://github.com/vmware/govmomi/releases/download/v0.29.0/govc_Linux_x86_64.tar.gz
|
||||
url: https://github.com/vmware/govmomi/releases/download/v0.36.3/govc_Linux_x86_64.tar.gz
|
||||
archive: compressed
|
||||
- filename: helm
|
||||
url: https://get.helm.sh/helm-v3.10.2-linux-amd64.tar.gz
|
||||
url: https://get.helm.sh/helm-v3.14.3-linux-amd64.tar.gz
|
||||
archive: compressed
|
||||
extra_opts: --strip-components=1
|
||||
- filename: kubectl-slice
|
||||
url: https://github.com/patrickdappollonio/kubectl-slice/releases/download/v1.2.5/kubectl-slice_linux_x86_64.tar.gz
|
||||
url: https://github.com/patrickdappollonio/kubectl-slice/releases/download/v1.2.9/kubectl-slice_linux_x86_64.tar.gz
|
||||
archive: compressed
|
||||
- filename: pinniped
|
||||
url: https://github.com/vmware-tanzu/pinniped/releases/download/v0.25.0/pinniped-cli-linux-amd64
|
||||
- filename: skopeo
|
||||
url: https://code.spamasaurus.com/api/packages/djpbessems/generic/skopeo/v1.12.0/skopeo_linux_amd64
|
||||
- filename: step
|
||||
url: https://dl.step.sm/gh-release/cli/gh-release-header/v0.23.0/step_linux_0.23.0_amd64.tar.gz
|
||||
url: https://dl.step.sm/gh-release/cli/gh-release-header/v0.25.2/step_linux_0.25.2_amd64.tar.gz
|
||||
archive: compressed
|
||||
extra_opts: --strip-components=2
|
||||
- filename: yq
|
||||
url: http://github.com/mikefarah/yq/releases/download/v4.30.5/yq_linux_amd64
|
||||
url: https://github.com/mikefarah/yq/releases/download/v4.43.1/yq_linux_amd64
|
||||
|
||||
packages:
|
||||
apt:
|
||||
|
@ -1,6 +1,8 @@
|
||||
downstream:
|
||||
|
||||
helm_repositories:
|
||||
- name: bitnami
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
- name: longhorn
|
||||
url: https://charts.longhorn.io
|
||||
- name: sealed-secrets
|
||||
@ -9,7 +11,7 @@ downstream:
|
||||
helm_charts:
|
||||
|
||||
longhorn:
|
||||
version: 1.4.1
|
||||
version: 1.5.4
|
||||
chart: longhorn/longhorn
|
||||
namespace: longhorn-system
|
||||
parse_logic: cat values.yaml | yq eval '.. | select(has("repository")) | .repository + ":" + .tag'
|
||||
@ -18,6 +20,24 @@ downstream:
|
||||
createDefaultDiskLabeledNodes: true
|
||||
defaultDataPath: /mnt/blockstorage
|
||||
|
||||
pinniped:
|
||||
version: 1.3.10 # (= Pinniped v0.27.0)
|
||||
chart: bitnami/pinniped
|
||||
namespace: pinniped-concierge
|
||||
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
|
||||
chart_values: !unsafe |
|
||||
supervisor:
|
||||
enabled: false
|
||||
extra_manifests:
|
||||
- src: jwtauthenticator.j2
|
||||
_template:
|
||||
name: metacluster-sso
|
||||
spec: !unsafe |2
|
||||
issuer: https://auth.{{ vapp['metacluster.fqdn'] }}/sso
|
||||
audience: "{{ vapp['workloadcluster.name'] | lower }}"
|
||||
tls:
|
||||
certificateAuthorityData: "{{ (stepca_cm_certs.resources[0].data['intermediate_ca.crt'] ~ _newline ~ stepca_cm_certs.resources[0].data['root_ca.crt']) | b64encode }}"
|
||||
|
||||
sealed-secrets:
|
||||
version: 2.8.1 # (= Sealed Secrets v0.20.2)
|
||||
chart: sealed-secrets/sealed-secrets
|
||||
|
78
deployment/playbook.yml
Normal file
78
deployment/playbook.yml
Normal file
@ -0,0 +1,78 @@
|
||||
- hosts: localhost
|
||||
vars_files:
|
||||
- vars/ova.bootstrap.yaml
|
||||
- vars/hv.vcenter.yaml
|
||||
- vars/pb.secrets.yaml
|
||||
tasks:
|
||||
|
||||
- name: Retrieve target folder details
|
||||
community.vmware.vmware_vm_info:
|
||||
hostname: "{{ hv.hostname }}"
|
||||
username: "{{ hv.username }}"
|
||||
password: "{{ secrets.hv.password }}"
|
||||
folder: "{{ hv.folder }}"
|
||||
validate_certs: false
|
||||
register: vm_info
|
||||
|
||||
- name: User prompt
|
||||
ansible.builtin.pause:
|
||||
prompt: Virtual machine '{{ appliance.id }}' already exists. Delete to continue [yes] or abort [no]?"
|
||||
register: prompt
|
||||
until:
|
||||
- prompt.user_input in ['yes', 'no']
|
||||
delay: 0
|
||||
when: (vm_info | selectattr('guest_name', 'equalto', appliance.id) | length) > 0
|
||||
|
||||
- name: Destroy existing VM
|
||||
community.vmware.vmware_guest:
|
||||
hostname: "{{ hv.hostname }}"
|
||||
username: "{{ hv.username }}"
|
||||
password: "{{ secrets.hv.password }}"
|
||||
folder: "{{ hv.folder }}"
|
||||
name: appliance.id
|
||||
state: absent
|
||||
when:
|
||||
- (vm_info | selectattr('guest_name', 'equalto', appliance.id) | length) > 0
|
||||
- (prompt.user_input | bool) == true
|
||||
|
||||
- name: Deploy VM from OVA-template
|
||||
community.vmware.vmware_deploy_ovf:
|
||||
hostname: "{{ hv.hostname }}"
|
||||
username: "{{ hv.username }}"
|
||||
password: "{{ secrets.hv.password }}"
|
||||
validate_certs: false
|
||||
datacenter: "{{ hv.datacenter }}"
|
||||
folder: "{{ hv.folder }}"
|
||||
cluster: "{{ hv.cluster }}"
|
||||
name: airgapped-k8s-meta1
|
||||
datastore: "{{ hv.datastore }}"
|
||||
disk_provisioning: thin
|
||||
networks:
|
||||
"LAN": "{{ hv.network }}"
|
||||
power_on: yes
|
||||
ovf: "{{ appliance.path }}/{{ appliance.filename }}"
|
||||
deployment_option: cp1w1ws0
|
||||
properties:
|
||||
metacluster.fqdn: k8s.lab
|
||||
metacluster.vip: 192.168.154.125
|
||||
metacluster.token: "{{ secrets.appliance.installtoken }}"
|
||||
# guestinfo.hostname: _default
|
||||
metacluster.password: "{{ secrets.appliance.password }}"
|
||||
guestinfo.ipaddress: 192.168.154.126
|
||||
guestinfo.prefixlength: '24'
|
||||
guestinfo.dnsserver: 192.168.154.225
|
||||
guestinfo.gateway: 192.168.154.1
|
||||
# workloadcluster.name: _default
|
||||
workloadcluster.vip: 192.168.154.130
|
||||
ippool.startip: 192.168.154.135
|
||||
ippool.endip: 192.168.154.140
|
||||
workloadcluster.nodetemplate: ubuntu-2204-kube-v1.30.0
|
||||
workloadcluster.nodesize: small
|
||||
# workloadcluster.additionaldisk: '75'
|
||||
guestinfo.rootsshkey: ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAiRc7Og+cRJGFwdUzgpX9YqvVenTk54N4kqM7emEfYHdsJLMjKQyxr8hklHmsam5dzxx3itFzc6SLf/ldJJ2JZuzE5FiCqUXXv4UFwN6HF5xqn7PTLicvWZH93H4m1gOlD5Dfzi4Es34v5zRBwbMScOgekk/LweTgl35jGKDgMP5DjGTqkPf7Ndh9+iuQrz99JEr8egl3bj+jIlKjScfaQbbnu3AJIRwZwTKgw0AOkLliQdEPNLvG5/ZImxJG4oHV9/uNkfdJObLjT1plR1HbVNskV5fuRNE/vnUiWl9jAJ1RT83GOqV0sQ+Q7p214fkgqb3JPvci/s0Bb7RA85hBEQ== bessems.eu
|
||||
hv.fqdn: "{{ hv.hostname }}"
|
||||
hv.username: "{{ hv.username }}"
|
||||
hv.password: "{{ secrets.hv.password }}"
|
||||
ldap.fqdn: _unused
|
||||
ldap.dn: _unused
|
||||
ldap.password: _unused
|
5
deployment/requirements.yaml
Normal file
5
deployment/requirements.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
collections:
|
||||
# - ansible.posix
|
||||
# - ansible.utils
|
||||
# - community.general
|
||||
- community.vmware
|
@ -1,5 +1,14 @@
|
||||
packer {
|
||||
required_plugins {
|
||||
vsphere = {
|
||||
source = "github.com/hashicorp/vsphere"
|
||||
version = "~> 1"
|
||||
}
|
||||
|
||||
ansible = {
|
||||
source = "github.com/hashicorp/ansible"
|
||||
version = "~> 1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,6 +37,7 @@ build {
|
||||
|
||||
extra_arguments = [
|
||||
"--extra-vars", "appliancetype=${source.name}",
|
||||
"--extra-vars", "applianceversion=${var.appliance_version}",
|
||||
"--extra-vars", "ansible_ssh_pass=${var.ssh_password}",
|
||||
"--extra-vars", "docker_username=${var.docker_username}",
|
||||
"--extra-vars", "docker_password=${var.docker_password}",
|
||||
@ -40,12 +50,12 @@ build {
|
||||
inline = [
|
||||
"pwsh -command \"& scripts/Update-OvfConfiguration.ps1 \\",
|
||||
" -ApplianceType '${source.name}' \\",
|
||||
" -OVFFile '/scratch/bld_${var.vm_name}_${source.name}.ovf' \"",
|
||||
" -OVFFile '/data/scratch/bld_${var.vm_name}_${source.name}.ovf' \"",
|
||||
"pwsh -file scripts/Update-Manifest.ps1 \\",
|
||||
" -ManifestFileName '/scratch/bld_${var.vm_name}_${source.name}.mf'",
|
||||
" -ManifestFileName '/data/scratch/bld_${var.vm_name}_${source.name}.mf'",
|
||||
"ovftool --acceptAllEulas --allowExtraConfig --overwrite \\",
|
||||
" '/scratch/bld_${var.vm_name}_${source.name}.ovf' \\",
|
||||
" /output/airgapped-k8s-${var.k8s_version}.${source.name}.ova"
|
||||
" '/data/scratch/bld_${var.vm_name}_${source.name}.ovf' \\",
|
||||
" /output/airgapped-k8s-${var.appliance_version}+${var.k8s_version}-${source.name}.ova"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
iso_url = "sn.itch.fyi/Repository/iso/Canonical/Ubuntu%20Server%2022.04/ubuntu-22.04.1-live-server-amd64.iso"
|
||||
iso_checksum = "sha256:10F19C5B2B8D6DB711582E0E27F5116296C34FE4B313BA45F9B201A5007056CB"
|
||||
iso_url = "sn.itch.fyi/Repository/iso/Canonical/Ubuntu%20Server%2022.04/ubuntu-22.04.3-live-server-amd64.iso"
|
||||
iso_checksum = "sha256:A4ACFDA10B18DA50E2EC50CCAF860D7F20B389DF8765611142305C0E911D16FD"
|
||||
|
||||
// iso_url = "sn.itch.fyi/Repository/iso/Canonical/Ubuntu%20Server%2022.04/ubuntu-22.04-live-server-amd64.iso"
|
||||
// iso_checksum = "sha256:84AEAF7823C8C61BAA0AE862D0A06B03409394800000B3235854A6B38EB4856F"
|
||||
// iso_url = "sn.itch.fyi/Repository/iso/Canonical/Ubuntu%20Server%2022.04/ubuntu-22.04.1-live-server-amd64.iso"
|
||||
// iso_checksum = "sha256:10F19C5B2B8D6DB711582E0E27F5116296C34FE4B313BA45F9B201A5007056CB"
|
||||
|
@ -1,10 +1,19 @@
|
||||
#cloud-config
|
||||
autoinstall:
|
||||
version: 1
|
||||
apt:
|
||||
geoip: true
|
||||
preserve_sources_list: false
|
||||
primary:
|
||||
- arches: [amd64, i386]
|
||||
uri: http://archive.ubuntu.com/ubuntu
|
||||
- arches: [default]
|
||||
uri: http://ports.ubuntu.com/ubuntu-ports
|
||||
early-commands:
|
||||
- sudo systemctl stop ssh
|
||||
locale: en_US
|
||||
keyboard:
|
||||
layout: en
|
||||
variant: us
|
||||
layout: us
|
||||
network:
|
||||
network:
|
||||
version: 2
|
||||
@ -16,14 +25,18 @@ autoinstall:
|
||||
layout:
|
||||
name: direct
|
||||
identity:
|
||||
hostname: packer-template
|
||||
hostname: ubuntu-server
|
||||
username: ubuntu
|
||||
# password: $6$ZThRyfmSMh9499ar$KSZus58U/l58Efci0tiJEqDKFCpoy.rv25JjGRv5.iL33AQLTY2aljumkGiDAiX6LsjzVsGTgH85Tx4S.aTfx0
|
||||
password: $6$rounds=4096$ZKfzRoaQOtc$M.fhOsI0gbLnJcCONXz/YkPfSoefP4i2/PQgzi2xHEi2x9CUhush.3VmYKL0XVr5JhoYvnLfFwqwR/1YYEqZy/
|
||||
ssh:
|
||||
install-server: yes
|
||||
install-server: true
|
||||
allow-pw: true
|
||||
packages:
|
||||
- openssh-server
|
||||
- open-vm-tools
|
||||
- cloud-init
|
||||
user-data:
|
||||
disable_root: false
|
||||
late-commands:
|
||||
- echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > /target/etc/sudoers.d/ubuntu
|
||||
- curtin in-target --target=/target -- chmod 440 /etc/sudoers.d/ubuntu
|
||||
|
@ -1,61 +1,63 @@
|
||||
source "vsphere-iso" "ubuntu" {
|
||||
vcenter_server = var.vcenter_server
|
||||
username = var.vsphere_username
|
||||
password = var.vsphere_password
|
||||
insecure_connection = "true"
|
||||
vcenter_server = var.hv_fqdn
|
||||
username = var.hv_username
|
||||
password = var.hv_password
|
||||
insecure_connection = "true"
|
||||
|
||||
datacenter = var.vsphere_datacenter
|
||||
cluster = var.vsphere_cluster
|
||||
host = var.vsphere_host
|
||||
folder = var.vsphere_folder
|
||||
datastore = var.vsphere_datastore
|
||||
datacenter = var.hv_datacenter
|
||||
cluster = var.hv_cluster
|
||||
host = var.hv_host
|
||||
folder = var.hv_folder
|
||||
datastore = var.hv_datastore
|
||||
|
||||
guest_os_type = "ubuntu64Guest"
|
||||
guest_os_type = "ubuntu64Guest"
|
||||
|
||||
boot_order = "disk,cdrom"
|
||||
boot_command = [
|
||||
boot_order = "disk,cdrom"
|
||||
boot_command = [
|
||||
"e<down><down><down><end>",
|
||||
" autoinstall ds=nocloud;",
|
||||
" autoinstall network-config=disabled ds=nocloud;",
|
||||
"<F10>"
|
||||
]
|
||||
boot_wait = "2s"
|
||||
boot_wait = "2s"
|
||||
|
||||
communicator = "ssh"
|
||||
ssh_username = "ubuntu"
|
||||
ssh_password = var.ssh_password
|
||||
ssh_timeout = "20m"
|
||||
ssh_handshake_attempts = "100"
|
||||
ssh_pty = true
|
||||
communicator = "ssh"
|
||||
ssh_username = "ubuntu"
|
||||
ssh_password = var.ssh_password
|
||||
ssh_timeout = "20m"
|
||||
ssh_handshake_attempts = "100"
|
||||
ssh_pty = true
|
||||
|
||||
CPUs = 4
|
||||
RAM = 8192
|
||||
CPUs = 4
|
||||
RAM = 8192
|
||||
|
||||
network_adapters {
|
||||
network = var.vsphere_network
|
||||
network_card = "vmxnet3"
|
||||
network = var.hv_network
|
||||
network_card = "vmxnet3"
|
||||
}
|
||||
storage {
|
||||
disk_size = 76800
|
||||
disk_thin_provisioned = true
|
||||
disk_size = 76800
|
||||
disk_thin_provisioned = true
|
||||
}
|
||||
disk_controller_type = ["pvscsi"]
|
||||
usb_controller = ["xhci"]
|
||||
disk_controller_type = ["pvscsi"]
|
||||
usb_controller = ["xhci"]
|
||||
|
||||
cd_files = [
|
||||
set_host_for_datastore_uploads = true
|
||||
cd_files = [
|
||||
"packer/preseed/UbuntuServer22.04/user-data",
|
||||
"packer/preseed/UbuntuServer22.04/meta-data"
|
||||
]
|
||||
cd_label = "cidata"
|
||||
iso_url = local.iso_authenticatedurl
|
||||
iso_checksum = var.iso_checksum
|
||||
cd_label = "cidata"
|
||||
iso_url = local.iso_authenticatedurl
|
||||
iso_checksum = var.iso_checksum
|
||||
|
||||
shutdown_command = "echo '${var.ssh_password}' | sudo -S shutdown -P now"
|
||||
shutdown_timeout = "5m"
|
||||
shutdown_command = "echo '${var.ssh_password}' | sudo -S shutdown -P now"
|
||||
shutdown_timeout = "5m"
|
||||
|
||||
remove_cdrom = true
|
||||
remove_cdrom = true
|
||||
|
||||
export {
|
||||
images = false
|
||||
output_directory = "/scratch"
|
||||
output_directory = "/data/scratch"
|
||||
}
|
||||
|
||||
destroy = true
|
||||
}
|
||||
|
@ -1,17 +1,17 @@
|
||||
variable "vcenter_server" {}
|
||||
variable "vsphere_username" {}
|
||||
variable "vsphere_password" {
|
||||
variable "hv_fqdn" {}
|
||||
variable "hv_username" {}
|
||||
variable "hv_password" {
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "vsphere_host" {}
|
||||
variable "vsphere_datacenter" {}
|
||||
variable "vsphere_cluster" {}
|
||||
variable "hv_host" {}
|
||||
variable "hv_datacenter" {}
|
||||
variable "hv_cluster" {}
|
||||
|
||||
variable "vsphere_templatefolder" {}
|
||||
variable "vsphere_folder" {}
|
||||
variable "vsphere_datastore" {}
|
||||
variable "vsphere_network" {}
|
||||
variable "hv_templatefolder" {}
|
||||
variable "hv_folder" {}
|
||||
variable "hv_datastore" {}
|
||||
variable "hv_network" {}
|
||||
|
||||
variable "vm_name" {}
|
||||
variable "ssh_password" {
|
||||
@ -34,4 +34,5 @@ variable "docker_password" {
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "appliance_version" {}
|
||||
variable "k8s_version" {}
|
||||
|
@ -1,9 +1,10 @@
|
||||
vcenter_server = "bv11-vc.bessems.lan"
|
||||
vsphere_username = "administrator@vsphere.local"
|
||||
vsphere_datacenter = "DeSchakel"
|
||||
vsphere_cluster = "Cluster.01"
|
||||
vsphere_host = "bv11-esx02.bessems.lan"
|
||||
vsphere_datastore = "ESX02.SSD02"
|
||||
vsphere_folder = "/Packer"
|
||||
vsphere_templatefolder = "/Templates"
|
||||
vsphere_network = "LAN"
|
||||
hv_fqdn = "lab-vc-01.bessems.lan"
|
||||
hv_username = "administrator@vsphere.local"
|
||||
# urlencoded "4/55-Clydebank-Rd"
|
||||
hv_datacenter = "4%2f55-Clydebank-Rd"
|
||||
hv_cluster = "Cluster.01"
|
||||
hv_host = "lab-esx-02.bessems.lan"
|
||||
hv_datastore = "ESX02.SSD02"
|
||||
hv_folder = "/Packer"
|
||||
hv_templatefolder = "/Templates"
|
||||
hv_network = "LAN"
|
||||
|
@ -162,6 +162,19 @@ PropertyCategories:
|
||||
- cp1w1ws1
|
||||
UserConfigurable: true
|
||||
|
||||
- Key: workloadcluster.nodetemplate
|
||||
Type: string["ubuntu-2204-kube-v1.30.0", "photon-5-kube-v1.30.0.ova"]
|
||||
Label: Workload-cluster node template
|
||||
Description: |
|
||||
All worker and worker-storage nodes for the workload-cluster will be provisioned with this node template.
|
||||
Note:
|
||||
Make sure that this exact template has been uploaded to the vCenter instance before powering on this appliance!
|
||||
DefaultValue: ubuntu-2204-kube-v1.30.0
|
||||
Configurations:
|
||||
- cp1w1ws0
|
||||
- cp1w1ws1
|
||||
UserConfigurable: true
|
||||
|
||||
- Key: workloadcluster.nodesize
|
||||
Type: string["small", "medium", "large"]
|
||||
Label: Workload-cluster node size*
|
||||
|
@ -44,7 +44,7 @@ PropertyCategories:
|
||||
Configurations: '*'
|
||||
UserConfigurable: true
|
||||
|
||||
- Name: 2) Add meta-cluster node
|
||||
- Name: 2) Meta-cluster new node
|
||||
ProductProperties:
|
||||
|
||||
- Key: guestinfo.hostname
|
||||
@ -95,7 +95,20 @@ PropertyCategories:
|
||||
# Configurations: '*'
|
||||
# UserConfigurable: true
|
||||
|
||||
- Name: 3) Common
|
||||
- Name: 3) Workload-cluster
|
||||
ProductProperties:
|
||||
|
||||
- Key: workloadcluster.nodetemplate
|
||||
Type: string["ubuntu-2204-kube-v1.30.0", "photon-5-kube-v1.30.0.ova"]
|
||||
Label: Workload-cluster node template
|
||||
Description: |
|
||||
All worker and worker-storage nodes for the workload-cluster will be provisioned with this node template.
|
||||
Note:
|
||||
Make sure that this exact template has been uploaded to the vCenter instance before powering on this appliance!
|
||||
DefaultValue: ubuntu-2204-kube-v1.30.0
|
||||
UserConfigurable: true
|
||||
|
||||
- Name: 4) Common
|
||||
ProductProperties:
|
||||
|
||||
- Key: guestinfo.rootsshkey
|
||||
@ -106,7 +119,7 @@ PropertyCategories:
|
||||
Configurations: '*'
|
||||
UserConfigurable: true
|
||||
|
||||
- Name: 4) Hypervisor
|
||||
- Name: 5) Hypervisor
|
||||
ProductProperties:
|
||||
|
||||
- Key: hv.fqdn
|
||||
|
Reference in New Issue
Block a user