Replay upstream changes; Upgrade to latest minor K8s version
Some checks failed
continuous-integration/drone/push Build is failing

2023-05-19 11:38:53 +02:00
41 changed files with 957 additions and 442 deletions

View File

@@ -1,14 +0,0 @@
import netaddr


def netaddr_iter_iprange(ip_start, ip_end):
    return [str(ip) for ip in netaddr.iter_iprange(ip_start, ip_end)]


class FilterModule(object):
    ''' Ansible filter. Interface to netaddr methods.
    https://pypi.org/project/netaddr/
    '''

    def filters(self):
        return {
            'netaddr_iter_iprange': netaddr_iter_iprange
        }
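For context, this removed filter plugin exposed netaddr.iter_iprange to playbooks; a minimal usage sketch (the task below and the 192.168.1.x range are illustrative, not taken from this repository):

- name: Expand an IP range into individual addresses
  ansible.builtin.debug:
    msg: "{{ '192.168.1.10' | netaddr_iter_iprange('192.168.1.13') }}"
  # renders ['192.168.1.10', '192.168.1.11', '192.168.1.12', '192.168.1.13']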

View File

@@ -8,7 +8,7 @@
create_namespace: true
wait: false
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.gitea.chart_values }}"
values: "{{ components['gitea'].chart_values }}"
- name: Ensure gitea API availability
ansible.builtin.uri:
@@ -109,16 +109,28 @@
loop:
- organization: mc
body:
name: GitOps.Config
auto_init: true
default_branch: main
description: GitOps manifests
- organization: wl
name: GitOps.ClusterAPI
# auto_init: true
# default_branch: main
description: ClusterAPI manifests
- organization: mc
body:
name: Template.GitOps.Config
name: GitOps.Config
# auto_init: true
# default_branch: main
description: GitOps manifests
- organization: wl
body:
name: GitOps.Config
# auto_init: true
# default_branch: main
description: GitOps manifests
- organization: wl
body:
name: GitOps.HelmCharts
# auto_init: true
# default_branch: main
description: Helm charts
loop_control:
label: "{{ item.organization ~ '/' ~ item.body.name }}"

View File

@@ -8,7 +8,7 @@
create_namespace: true
wait: false
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.argocd.chart_values }}"
values: "{{ components['argo-cd'].chart_values }}"
- name: Ensure argo-cd API availability
ansible.builtin.uri:
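The move from dot notation to bracket notation is needed because the new key contains a hyphen; in Jinja2, components.argo-cd would be evaluated as "components.argo minus cd" rather than a key lookup. A minimal sketch, assuming a simplified components dictionary (the localhost play and empty chart_values are illustrative only):

- hosts: localhost
  gather_facts: false
  vars:
    components:
      argo-cd:
        chart_values: {}
  tasks:
    - name: Access a hyphenated key
      ansible.builtin.debug:
        # components.argo-cd would fail here; bracket notation works for any key
        msg: "{{ components['argo-cd'].chart_values }}"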
@@ -39,24 +39,29 @@
mode: 0600
vars:
_template:
name: argocd-gitrepo-metacluster
name: gitrepo-mc-gitopsconfig
namespace: argo-cd
uid: "{{ lookup('ansible.builtin.password', '/dev/null length=5 chars=ascii_lowercase,digits seed=inventory_hostname') }}"
privatekey: "{{ lookup('ansible.builtin.file', '~/.ssh/git_rsa_id') | indent(4, true) }}"
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
notify:
- Apply manifests
- name: Create applicationset
ansible.builtin.template:
src: applicationset.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: argocd-applicationset-metacluster
namespace: argo-cd
application:
name: applicationset-metacluster
namespace: argo-cd
cluster:
url: https://kubernetes.default.svc
repository:
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
revision: main
notify:
- Apply manifests

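applicationset.j2 itself is not part of this diff; as a rough orientation, an Argo CD ApplicationSet that consumes the variables passed above (application name and namespace, cluster URL, repository URL and revision) could look like the sketch below. The git directory generator layout is an assumption based on Argo CD's documented ApplicationSet format, not on the actual template, and git.example.org stands in for the templated metacluster FQDN:

apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: applicationset-metacluster     # _template.application.name
  namespace: argo-cd                   # _template.application.namespace
spec:
  generators:
    - git:
        repoURL: https://git.example.org/mc/GitOps.Config.git   # _template.repository.url
        revision: main                                          # _template.repository.revision
        directories:
          - path: '*'
  template:
    metadata:
      name: '{{path.basename}}'
    spec:
      project: default
      source:
        repoURL: https://git.example.org/mc/GitOps.Config.git
        targetRevision: main
        path: '{{path}}'
      destination:
        server: https://kubernetes.default.svc                  # _template.cluster.url
        namespace: '{{path.basename}}'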
View File

@@ -78,6 +78,6 @@
src: registries.j2
vars:
_template:
data: "{{ source_registries }}"
registries: "{{ source_registries }}"
hv:
fqdn: "{{ vapp['metacluster.fqdn'] }}"

View File

@@ -8,7 +8,7 @@
create_namespace: true
wait: false
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.harbor.chart_values }}"
values: "{{ components['harbor'].chart_values }}"
- name: Ensure harbor API availability
ansible.builtin.uri:

View File

@@ -7,7 +7,7 @@
create_namespace: true
wait: false
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.longhorn.chart_values }}"
values: "{{ components['longhorn'].chart_values }}"
- name: Ensure longhorn API availability
ansible.builtin.uri:

View File

@@ -47,41 +47,27 @@
resourcepool: "{{ vcenter_info.resourcepool }}"
folder: "{{ vcenter_info.folder }}"
cluster:
nodetemplate: "{{ (components.clusterapi.workload.node_template.url | basename | split('.'))[:-1] | join('.') }}"
nodetemplate: "{{ nodetemplate_inventorypath }}"
publickey: "{{ vapp['guestinfo.rootsshkey'] }}"
version: "{{ components.clusterapi.workload.version.k8s }}"
vip: "{{ vapp['workloadcluster.vip'] }}"
- name: WORKAROUND - Update image references to use local registry
ansible.builtin.replace:
dest: "{{ item }}"
regexp: '([ ]+image:[ "]+)(?!({{ _template.pattern }}|"{{ _template.pattern }}))'
replace: '\1{{ _template.pattern }}'
vars:
fileglobs:
- "{{ query('ansible.builtin.fileglob', '/opt/metacluster/cluster-api/cni-calico/' ~ components.clusterapi.workload.version.calico ~ '/*.yaml') }}"
- "{{ query('ansible.builtin.fileglob', '/opt/metacluster/cluster-api/infrastructure-vsphere/' ~ components.clusterapi.management.version.infrastructure_vsphere ~ '/*.yaml') }}"
_template:
pattern: registry.{{ vapp['metacluster.fqdn'] }}/library/
loop: "{{ fileglobs[0:] | flatten | select }}"
loop_control:
label: "{{ item | basename }}"
when:
- item is not search("components.yaml|metadata.yaml")
- name: Generate kustomization template
- name: Generate cluster-template kustomization manifest
ansible.builtin.template:
src: kustomization.cluster-template.j2
dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/kustomization.yaml
vars:
_template:
additionaldisk: "{{ vapp['workloadcluster.additionaldisk'] }}"
network:
fqdn: "{{ vapp['metacluster.fqdn'] }}"
dnsserver: "{{ vapp['guestinfo.dnsserver'] }}"
nodesize:
cpu: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].cpu }}"
memory: "{{ config.clusterapi.size_matrix[ vapp['workloadcluster.nodesize'] ].memory }}"
rootca: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
runcmds:
- update-ca-certificates
registries: "{{ source_registries }}"
- name: Store custom cluster-template
ansible.builtin.copy:
@@ -121,7 +107,8 @@
clustersize: >-
{{ {
'controlplane': vapp['deployment.type'] | regex_findall('^cp(\d)+') | first,
'workers': vapp['deployment.type'] | regex_findall('w(\d)+$') | first
'worker': vapp['deployment.type'] | regex_findall('w(\d)+') | first,
'workerstorage': vapp['deployment.type'] | regex_findall('ws(\d)+$') | first
} }}
- name: Generate workload cluster manifest
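As a worked example of the clustersize parsing above, the three regex_findall expressions split the sizing string into its parts; the deployment.type value 'cp1w2ws1' used here is hypothetical:

- name: Illustrate deployment.type parsing (hypothetical value)
  ansible.builtin.debug:
    msg: >-
      {{ {
        'controlplane': 'cp1w2ws1' | regex_findall('^cp(\d)+') | first,
        'worker': 'cp1w2ws1' | regex_findall('w(\d)+') | first,
        'workerstorage': 'cp1w2ws1' | regex_findall('ws(\d)+$') | first
      } }}
  # prints {'controlplane': '1', 'worker': '2', 'workerstorage': '1'}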
@@ -130,41 +117,51 @@
clusterctl generate cluster \
{{ vapp['workloadcluster.name'] | lower }} \
--control-plane-machine-count {{ clustersize.controlplane }} \
--worker-machine-count {{ clustersize.workers }} \
--worker-machine-count {{ clustersize.worker }} \
--from ./custom-cluster-template.yaml \
--config ./clusterctl.yaml \
--kubeconfig {{ kubeconfig.path }}
chdir: /opt/metacluster/cluster-api
register: clusterctl_newcluster
- name: Initialize tempfile
- name: Initialize tempfolder
ansible.builtin.tempfile:
state: file
state: directory
register: capi_clustermanifest
- name: Save workload cluster manifest
ansible.builtin.copy:
dest: "{{ capi_clustermanifest.path }}"
dest: "{{ capi_clustermanifest.path }}/new-cluster.yaml"
content: "{{ clusterctl_newcluster.stdout }}"
- name: Split manifest into separate files
ansible.builtin.shell:
cmd: >-
kubectl slice \
-f {{ capi_clustermanifest.path }} \
-o /opt/metacluster/cluster-api/new-cluster
-f {{ capi_clustermanifest.path }}/new-cluster.yaml \
-o {{ capi_clustermanifest.path }}/manifests
- name: Cleanup tempfile
ansible.builtin.file:
path: "{{ capi_clustermanifest.path }}"
state: absent
when: capi_clustermanifest.path is defined
- name: Generate nodepool kustomization manifest
ansible.builtin.template:
src: kustomization.nodepool.j2
dest: "{{ capi_clustermanifest.path }}/kustomization.yaml"
vars:
_template:
cluster:
name: "{{ vapp['workloadcluster.name'] }}"
nodepool:
size: "{{ clustersize.workerstorage }}"
additionaldisk: "{{ vapp['workloadcluster.additionaldisk'] }}"
- name: Store nodepool manifest
ansible.builtin.copy:
dest: "{{ capi_clustermanifest.path }}/manifests/nodepool-worker-storage.yaml"
content: "{{ lookup('kubernetes.core.kustomize', dir=capi_clustermanifest.path) }}"
- name: Create in-cluster IpPool
kubernetes.core.k8s:
template: ippool.j2
state: present
kubeconfig: "{{ kubeconfig.path }}"
ansible.builtin.template:
src: ippool.j2
dest: "{{ capi_clustermanifest.path }}/manifests/inclusterippool-{{ _template.cluster.name }}.yml"
vars:
_template:
cluster:
@@ -176,6 +173,40 @@
prefix: "{{ vapp['guestinfo.prefixlength'] }}"
gateway: "{{ vapp['guestinfo.gateway'] }}"
- name: Initialize/Push git repository
ansible.builtin.shell:
cmd: |
git init
git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}"
git config --global user.name "administrator"
git checkout -b main
git add ./manifests
git commit -m "Upload manifests"
git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git --all
chdir: "{{ capi_clustermanifest.path }}"
- name: Cleanup tempfolder
ansible.builtin.file:
path: "{{ capi_clustermanifest.path }}"
state: absent
when: capi_clustermanifest.path is defined
- name: Configure Cluster API repository
ansible.builtin.template:
src: gitrepo.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: gitrepo-mc-gitopsclusterapi
namespace: argo-cd
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
notify:
- Apply manifests
- name: WORKAROUND - Wait for ingress ACME requests to complete
ansible.builtin.shell:
cmd: >-
@@ -187,13 +218,30 @@
retries: "{{ playbook.retries }}"
delay: "{{ (storage_benchmark | int) * (playbook.delay.medium | int) }}"
- name: Apply workload cluster manifest
kubernetes.core.k8s:
definition: >-
{{ clusterctl_newcluster.stdout }}
wait: true
kubeconfig: "{{ kubeconfig.path }}"
# TODO: move to git repo
- name: Create application
ansible.builtin.template:
src: application.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
application:
name: application-clusterapi-workloadcluster
namespace: argo-cd
cluster:
name: https://kubernetes.default.svc
namespace: default
repository:
url: https://git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.ClusterAPI.git
path: manifests
revision: main
notify:
- Apply manifests
- name: Trigger handlers
ansible.builtin.meta: flush_handlers
- name: Wait for cluster to be available
ansible.builtin.shell:

View File

@@ -1,37 +1,105 @@
- block:
- name: Aggregate helm charts from filesystem
ansible.builtin.find:
path: /opt/workloadcluster/helm-charts
file_type: directory
recurse: false
register: helm_charts
- name: Generate service account in workload cluster
kubernetes.core.k8s:
template: serviceaccount.j2
state: present
- name: Create hard-links to populate new git-repository
ansible.builtin.shell:
cmd: >-
cp -lr {{ item.path }}/ /opt/workloadcluster/git-repositories/gitops/charts
loop: "{{ helm_charts.files }}"
loop_control:
label: "{{ item.path | basename }}"
- name: Retrieve service account bearer token
kubernetes.core.k8s_info:
kind: Secret
name: "{{ _template.account.name }}-secret"
namespace: "{{ _template.account.namespace }}"
register: workloadcluster_bearertoken
- name: Create subfolders
ansible.builtin.file:
path: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}
state: directory
loop: "{{ query('ansible.builtin.dict', downstream_components) }}"
loop_control:
label: "{{ item.key }}"
- name: Register workload cluster in argo-cd
kubernetes.core.k8s:
template: cluster.j2
state: present
kubeconfig: "{{ kubeconfig.path }}"
vars:
_template:
cluster:
name: "{{ vapp['workloadcluster.name'] | lower }}"
secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }}
url: https://{{ vapp['workloadcluster.vip'] }}:6443
token: "{{ workloadcluster_bearertoken.resources | json_query('[].data.token') }}"
- name: Write chart values to file
ansible.builtin.copy:
dest: /opt/workloadcluster/git-repositories/gitops/values/{{ item.key }}/values.yaml
content: "{{ item.value.chart_values | default('# Empty') | to_nice_yaml(indent=2, width=4096) }}"
loop: "{{ query('ansible.builtin.dict', downstream_components) }}"
loop_control:
label: "{{ item.key }}"
- name: Initialize/Push git repository
ansible.builtin.shell:
cmd: |
git init
git config --global user.email "administrator@{{ vapp['metacluster.fqdn'] }}"
git config --global user.name "administrator"
git checkout -b main
git add .
git commit -m "Upload charts"
git remote add origin https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
git push https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git --all
chdir: /opt/workloadcluster/git-repositories/gitops
- name: Retrieve workload-cluster kubeconfig
kubernetes.core.k8s_info:
kind: Secret
name: "{{ vapp['workloadcluster.name'] }}-kubeconfig"
namespace: default
kubeconfig: "{{ kubeconfig.path }}"
register: secret_workloadcluster_kubeconfig
- name: Register workload-cluster in argo-cd
kubernetes.core.k8s:
template: cluster.j2
state: present
kubeconfig: "{{ kubeconfig.path }}"
vars:
_template:
account:
name: argocd-sa
namespace: default
clusterrolebinding:
name: argocd-crb
module_defaults:
group/k8s:
kubeconfig: "{{ capi_kubeconfig.path }}"
cluster:
name: "{{ vapp['workloadcluster.name'] | lower }}"
secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }}
url: https://{{ vapp['workloadcluster.vip'] }}:6443
kubeconfig:
ca: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).clusters[0].cluster['certificate-authority-data'] }}"
certificate: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-certificate-data'] }}"
key: "{{ (secret_workloadcluster_kubeconfig.resources[0].data.value | b64decode | from_yaml).users[0].user['client-key-data'] }}"
- name: Configure workload-cluster GitOps repository
ansible.builtin.template:
src: gitrepo.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: gitrepo-wl-gitopsconfig
namespace: argo-cd
url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
notify:
- Apply manifests
- name: Create applicationset
ansible.builtin.template:
src: applicationset.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.application.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
application:
name: applicationset-workloadcluster
namespace: argo-cd
cluster:
url: https://{{ vapp['workloadcluster.vip'] }}:6443
repository:
url: https://git.{{ vapp['metacluster.fqdn'] }}/wl/GitOps.Config.git
revision: main
notify:
- Apply manifests
- name: Trigger handlers
ansible.builtin.meta: flush_handlers
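cluster.j2 is not included in this diff; for reference, Argo CD's documented declarative cluster registration is a labelled Secret, onto which the variables above (cluster name, secret name, API URL, and the ca/certificate/key extracted from the workload-cluster kubeconfig) map roughly as follows. All values shown are placeholders:

apiVersion: v1
kind: Secret
metadata:
  name: argocd-cluster-example                # _template.cluster.secret
  namespace: argo-cd
  labels:
    argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
  name: example                               # _template.cluster.name
  server: https://192.0.2.10:6443             # _template.cluster.url
  config: |
    {
      "tlsClientConfig": {
        "caData": "<certificate-authority-data>",
        "certData": "<client-certificate-data>",
        "keyData": "<client-key-data>"
      }
    }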

View File

@@ -1,77 +1,68 @@
- block:
- name: Check for existing templates on hypervisor
- name: Check for existing template on hypervisor
community.vmware.vmware_guest_info:
name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
register: existing_ova
loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
ignore_errors: yes
- name: Parse OVA files for network mappings
ansible.builtin.shell:
cmd: govc import.spec -json {{ item }}
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: ova_spec
when: existing_ova.results[index] is failed
loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
loop_control:
index_var: index
- name: Store inventory path of existing template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ existing_ova.instance.hw_folder ~ '/' ~ existing_ova.instance.hw_name }}"
when: existing_ova is not failed
- name: Deploy OVA templates on hypervisor
community.vmware.vmware_deploy_ovf:
cluster: "{{ vcenter_info.cluster }}"
datastore: "{{ vcenter_info.datastore }}"
name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
networks: "{u'{{ ova_spec.results[index].stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
allow_duplicates: no
power_on: false
ovf: "{{ item }}"
register: ova_deploy
when: existing_ova.results[index] is failed
loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
loop_control:
index_var: index
- block:
- name: Add additional placeholder disk
community.vmware.vmware_guest_disk:
name: "{{ item.instance.hw_name }}"
disk:
- size: 1Gb
scsi_controller: 1
scsi_type: paravirtual
unit_number: 0
when: ova_deploy.results[index] is not skipped
loop: "{{ ova_deploy.results }}"
loop_control:
index_var: index
label: "{{ item.item }}"
- name: Parse OVA file for network mappings
ansible.builtin.shell:
cmd: govc import.spec -json {{ filename }}
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: ova_spec
# Disabled to allow disks to be resized; at the cost of cloning speed
# - name: Create snapshot on deployed VM's
# community.vmware.vmware_guest_snapshot:
# name: "{{ item.instance.hw_name }}"
# state: present
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
# when: ova_deploy.results[index] is not skipped
# loop: "{{ ova_deploy.results }}"
# loop_control:
# index_var: index
# label: "{{ item.item }}"
- name: Deploy OVA template on hypervisor
community.vmware.vmware_deploy_ovf:
cluster: "{{ vcenter_info.cluster }}"
datastore: "{{ vcenter_info.datastore }}"
name: "{{ (filename | basename | split('.'))[:-1] | join('.') }}"
networks: "{u'{{ ova_spec.stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
allow_duplicates: no
power_on: false
ovf: "{{ filename }}"
register: ova_deploy
- name: Mark deployed VM's as templates
community.vmware.vmware_guest:
name: "{{ item.instance.hw_name }}"
is_template: yes
when: ova_deploy.results[index] is not skipped
loop: "{{ ova_deploy.results }}"
loop_control:
index_var: index
label: "{{ item.item }}"
- name: Add additional placeholder disk
community.vmware.vmware_guest_disk:
name: "{{ ova_deploy.instance.hw_name }}"
disk:
- size: 1Mb
scsi_controller: 1
scsi_type: paravirtual
unit_number: 0
# Disabled to allow disks to be resized; at the cost of cloning speed
# - name: Create snapshot on deployed VM
# community.vmware.vmware_guest_snapshot:
# name: "{{ ova_deploy.instance.hw_name }}"
# state: present
# snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
- name: Mark deployed VM as template
community.vmware.vmware_guest:
name: "{{ ova_deploy.instance.hw_name }}"
is_template: yes
- name: Store inventory path of deployed template
ansible.builtin.set_fact:
nodetemplate_inventorypath: "{{ ova_deploy.instance.hw_folder ~ '/' ~ ova_deploy.instance.hw_name }}"
when: existing_ova is failed
vars:
filename: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | first }}"
module_defaults:
group/vmware:
hostname: "{{ vapp['hv.fqdn'] }}"
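The json_query('NetworkMapping[0].Name') expression above picks the OVA's first network name out of the govc import.spec output; a trimmed, illustrative example of that JSON (actual fields vary per OVA file):

{
  "DiskProvisioning": "flat",
  "NetworkMapping": [
    {
      "Name": "nic0",
      "Network": ""
    }
  ]
}

With this sample, json_query('NetworkMapping[0].Name') returns "nic0", which the networks parameter then maps onto vcenter_info.network.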