Update hypervisor details;Upgrade components;Housekeeping;Add decom tasks;Prevent configuration reset #2;Add moref_id label
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
Danny Bessems 2023-02-03 13:11:54 +01:00
parent d874da0cb3
commit f74d94a5e0
18 changed files with 142 additions and 21 deletions

View File

@@ -16,9 +16,9 @@
name: step-certificates name: step-certificates
chart_ref: /opt/metacluster/helm-charts/step-certificates chart_ref: /opt/metacluster/helm-charts/step-certificates
release_namespace: step-ca release_namespace: step-ca
create_namespace: yes create_namespace: true
# Unable to use REST api based readycheck due to lack of ingress # Unable to use REST api based readycheck due to lack of ingress
wait: yes wait: true
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values_files: values_files:
- "{{ values_file.path }}" - "{{ values_file.path }}"

View File

@@ -5,8 +5,8 @@
name: gitea name: gitea
chart_ref: /opt/metacluster/helm-charts/gitea chart_ref: /opt/metacluster/helm-charts/gitea
release_namespace: gitea release_namespace: gitea
create_namespace: yes create_namespace: true
wait: no wait: false
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.gitea.chart_values }}" values: "{{ components.gitea.chart_values }}"

View File

@@ -5,8 +5,8 @@
name: argo-cd name: argo-cd
chart_ref: /opt/metacluster/helm-charts/argo-cd chart_ref: /opt/metacluster/helm-charts/argo-cd
release_namespace: argo-cd release_namespace: argo-cd
create_namespace: yes create_namespace: true
wait: no wait: false
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.argocd.chart_values }}" values: "{{ components.argocd.chart_values }}"

View File

@@ -62,3 +62,13 @@
content: "{{ kubectl_config.stdout }}" content: "{{ kubectl_config.stdout }}"
mode: 0600 mode: 0600
no_log: true no_log: true
- name: Add label to node object
kubernetes.core.k8s:
name: "{{ ansible_facts.nodename }}"
kind: Node
state: patched
definition:
metadata:
labels:
vm_id: "{{ moref_id }}"

View File

@@ -5,8 +5,8 @@
name: harbor name: harbor
chart_ref: /opt/metacluster/helm-charts/harbor chart_ref: /opt/metacluster/helm-charts/harbor
release_namespace: harbor release_namespace: harbor
create_namespace: yes create_namespace: true
wait: no wait: false
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.harbor.chart_values }}" values: "{{ components.harbor.chart_values }}"

View File

@@ -4,8 +4,8 @@
name: longhorn name: longhorn
chart_ref: /opt/metacluster/helm-charts/longhorn chart_ref: /opt/metacluster/helm-charts/longhorn
release_namespace: longhorn-system release_namespace: longhorn-system
create_namespace: yes create_namespace: true
wait: no wait: false
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.longhorn.chart_values }}" values: "{{ components.longhorn.chart_values }}"

View File

@@ -153,7 +153,7 @@
kubernetes.core.k8s: kubernetes.core.k8s:
definition: >- definition: >-
{{ clusterctl_newcluster.stdout }} {{ clusterctl_newcluster.stdout }}
wait: yes wait: true
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
# TODO: move to git repo # TODO: move to git repo
@@ -193,6 +193,6 @@
kubernetes.core.k8s: kubernetes.core.k8s:
src: /opt/metacluster/cluster-api/cni-calico/{{ components.clusterapi.workload.version.calico }}/calico.yaml src: /opt/metacluster/cluster-api/cni-calico/{{ components.clusterapi.workload.version.calico }}/calico.yaml
state: present state: present
wait: yes wait: true
kubeconfig: "{{ capi_kubeconfig.path }}" kubeconfig: "{{ capi_kubeconfig.path }}"
# TODO: move to git repo # TODO: move to git repo

View File

@@ -14,6 +14,7 @@
- disks - disks
- metacluster - metacluster
- workloadcluster - workloadcluster
- decommission
- tty - tty
- cleanup - cleanup
handlers: handlers:

View File

@@ -0,0 +1,24 @@
- name: Cordon node
kubernetes.core.k8s_drain:
name: "{{ decom_node }}"
state: cordon
kubeconfig: "{{ kubeconfig.path }}"
- name: Drain node
kubernetes.core.k8s_drain:
name: "{{ decom_node }}"
state: drain
delete_options:
ignore_daemonsets: true
delete_emptydir_data: true
wait_sleep: 10
wait_timeout: 0
kubeconfig: "{{ kubeconfig.path }}"
- name: Delete node
kubernetes.core.k8s:
name: "{{ decom_node }}"
kind: node
state: absent
wait: true
kubeconfig: "{{ kubeconfig.path }}"

View File

@@ -0,0 +1,18 @@
- name: Lookup node name and moref id for decommissioning
ansible.builtin.set_fact:
decom_node: >-
{{
lookup('kubernetes.core.k8s', kind='Node', kubeconfig=(kubeconfig.path)) |
json_query('[? metadata.name != `' ~ ansible_facts.nodename ~ '`].metadata.name') |
first
}}
decom_vmid: >-
{{
lookup('kubernetes.core.k8s', kind='Node', kubeconfig=(kubeconfig.path)) |
json_query('[? metadata.name != `' ~ ansible_facts.nodename ~ '`].metadata.labels.vm_id') |
first
}}
- import_tasks: storage.yml
- import_tasks: k3s.yml
- import_tasks: virtualmachine.yml

View File

@@ -0,0 +1,26 @@
- name: Disable disk scheduling and evict replicas
kubernetes.core.k8s:
name: "{{ decom_node }}"
namespace: longhorn-system
kind: nodes.longhorn.io
state: patched
definition: |
spec:
allowScheduling: false
evictionRequested: true
kubeconfig: "{{ kubeconfig.path }}"
- name: Reduce replica amount for each volume
kubernetes.core.k8s:
api_version: longhorn.io/v1beta2
kind: volume
name: "{{ item.metadata.name }}"
namespace: longhorn-system
state: patched
definition: |
spec:
numberOfReplicas: {{ (lookup('kubernetes.core.k8s', kind='node', kubeconfig=(kubeconfig.path)) | length | int) - 1 }}
kubeconfig: "{{ kubeconfig.path }}"
loop: "{{ lookup('kubernetes.core.k8s', api_version='longhorn.io/v1beta2', kind='volume', namespace='longhorn-system', kubeconfig=(kubeconfig.path)) }}"
loop_control:
label: "{{ item.metadata.name }}"

View File

@@ -0,0 +1,27 @@
- block:
- name: Lookup VM name
community.vmware.vmware_guest_info:
moid: "{{ decom_vmid }}"
register: virtualmachine_details
- name: Power off VM
community.vmware.vmware_guest:
name: "{{ virtualmachine_details.hw_name }}"
folder: "{{ virtualmachine_details.hw_folder }}"
state: poweredoff
# state_change_timeout: "{{ playbook.delay.long }}"
# - name: Delete VM
# community.vmware.vmware_guest:
# name: "{{ virtualmachine_details.hw_name }}"
# folder: "{{ virtualmachine_details.hw_folder }}"
# state: absent
module_defaults:
group/vmware:
hostname: "{{ vapp['hv.fqdn'] }}"
validate_certs: no
username: "{{ vapp['hv.username'] }}"
password: "{{ vapp['hv.password'] }}"
datacenter: "{{ vcenter_info.datacenter }}"

View File

@@ -28,3 +28,8 @@
- name: Update certificate truststore - name: Update certificate truststore
ansible.builtin.command: ansible.builtin.command:
cmd: update-ca-certificates cmd: update-ca-certificates
- name: Remove redundant files
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
state: absent

View File

@@ -62,3 +62,13 @@
content: "{{ kubectl_config.stdout }}" content: "{{ kubectl_config.stdout }}"
mode: 0600 mode: 0600
no_log: true no_log: true
- name: Add label to node object
kubernetes.core.k8s:
name: "{{ ansible_facts.nodename }}"
kind: Node
state: patched
definition:
metadata:
labels:
vm_id: "{{ moref_id }}"

View File

@@ -5,8 +5,8 @@
name: harbor name: harbor
chart_ref: /opt/metacluster/helm-charts/harbor chart_ref: /opt/metacluster/helm-charts/harbor
release_namespace: harbor release_namespace: harbor
create_namespace: yes create_namespace: true
wait: no wait: false
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.harbor.chart_values }}" values: "{{ components.harbor.chart_values }}"

View File

@@ -27,13 +27,13 @@
retries: "{{ playbook.retries }}" retries: "{{ playbook.retries }}"
delay: "{{ playbook.delay.long }}" delay: "{{ playbook.delay.long }}"
- name: Install longhorn chart - name: Upgrade longhorn chart
kubernetes.core.helm: kubernetes.core.helm:
name: longhorn name: longhorn
chart_ref: /opt/metacluster/helm-charts/longhorn chart_ref: /opt/metacluster/helm-charts/longhorn
release_namespace: longhorn-system release_namespace: longhorn-system
create_namespace: yes create_namespace: true
wait: no wait: false
kubeconfig: "{{ kubeconfig.path }}" kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.longhorn.chart_values }}" values: "{{ components.longhorn.chart_values }}"

View File

@@ -54,7 +54,7 @@ components:
argo-cd: argo-cd:
helm: helm:
version: 5.14.1 # (= ArgoCD v2.5.2) version: 5.19.14 # (= ArgoCD v2.5.10)
chart: argo/argo-cd chart: argo/argo-cd
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /' parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | awk '!/ /'
chart_values: !unsafe | chart_values: !unsafe |
@@ -98,7 +98,7 @@ components:
gitea: gitea:
helm: helm:
version: v6.0.3 # (= Gitea v1.17.3) version: v7.0.2 # (= Gitea v1.18.3)
chart: gitea-charts/gitea chart: gitea-charts/gitea
parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | sed '/:/!s/$/:latest/' parse_logic: helm template . | yq --no-doc eval '.. | .image? | select(.)' | sort -u | sed '/:/!s/$/:latest/'
chart_values: !unsafe | chart_values: !unsafe |

View File

@@ -2,8 +2,8 @@ vcenter_server = "bv11-vc.bessems.lan"
vsphere_username = "administrator@vsphere.local" vsphere_username = "administrator@vsphere.local"
vsphere_datacenter = "DeSchakel" vsphere_datacenter = "DeSchakel"
vsphere_cluster = "Cluster.01" vsphere_cluster = "Cluster.01"
vsphere_host = "bv11-esx01.bessems.lan" vsphere_host = "bv11-esx02.bessems.lan"
vsphere_datastore = "ESX01.SSD02" vsphere_datastore = "ESX02.SSD02"
vsphere_folder = "/Packer" vsphere_folder = "/Packer"
vsphere_templatefolder = "/Templates" vsphere_templatefolder = "/Templates"
vsphere_network = "LAN" vsphere_network = "LAN"