Parallel build of bootstrap/upgrade OVAs; split Ansible tasks accordingly
Some checks failed
continuous-integration/drone/push Build is failing

2023-01-18 15:09:32 +01:00
parent 8ba8b5aaab
commit c1bff94cd1
52 changed files with 274 additions and 74 deletions

View File

@@ -0,0 +1,26 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: true
vars_files:
- defaults.yml
- metacluster.yml
# become: true
roles:
- vapp
- network
- preflight
- users
- disks
- metacluster
- workloadcluster
- tty
- cleanup
handlers:
- name: Apply manifests
kubernetes.core.k8s:
src: "{{ item }}"
state: present
kubeconfig: "{{ kubeconfig.path }}"
loop: "{{ query('ansible.builtin.fileglob', '/var/lib/rancher/k3s/server/manifests/*.yaml') | sort }}"
ignore_errors: yes

View File

@@ -0,0 +1,14 @@
import netaddr


def netaddr_iter_iprange(ip_start, ip_end):
    # Expand an inclusive start..end range into a list of address strings
    return [str(ip) for ip in netaddr.iter_iprange(ip_start, ip_end)]


class FilterModule(object):
    ''' Ansible filter. Interface to netaddr methods.
    https://pypi.org/project/netaddr/
    '''

    def filters(self):
        return {
            'netaddr_iter_iprange': netaddr_iter_iprange
        }
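
Once the plugin is on Ansible's filter path, it can be invoked from any task; a minimal sketch with hypothetical addresses:

- name: Enumerate an IP range (sketch; addresses are examples)
  ansible.builtin.debug:
    msg: "{{ '192.168.1.10' | netaddr_iter_iprange('192.168.1.12') }}"
  # Yields: ['192.168.1.10', '192.168.1.11', '192.168.1.12']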

View File

@@ -0,0 +1,12 @@
- name: Import container images
ansible.builtin.command:
cmd: k3s ctr image import {{ item }} --digests
chdir: /opt/metacluster/container-images
register: import_result
loop: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tar') | sort }}"
loop_control:
label: "{{ item | basename }}"
# TODO: add a readiness check before this loop; K3s briefly becomes unavailable while images are being imported (see the sketch after this task)
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.short }}"
until: import_result is not failed
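
Per the TODO above, a readiness guard could run before the import loop; a minimal sketch, reusing the /livez probe that k3s.yml in this commit already relies on:

- name: Ensure K3s is ready before importing images (sketch)
  ansible.builtin.uri:
    url: https://{{ vapp['guestinfo.ipaddress'] }}:6443/livez?verbose
    method: GET
    validate_certs: no
    status_code: [200, 401]
  register: k3s_readycheck
  until: k3s_readycheck is not failed
  retries: "{{ playbook.retries }}"
  delay: "{{ playbook.delays.short }}"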

View File

@@ -0,0 +1,131 @@
- block:
- name: Install step-ca chart
kubernetes.core.helm:
name: step-certificates
chart_ref: /opt/metacluster/helm-charts/step-certificates
release_namespace: step-ca
create_namespace: yes
# A REST API-based readycheck is not possible here: the ingress does not exist at this point
wait: yes
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.stepcertificates.chart_values }}"
- name: Retrieve configmap w/ root certificate
kubernetes.core.k8s_info:
kind: ConfigMap
name: step-certificates-certs
namespace: step-ca
kubeconfig: "{{ kubeconfig.path }}"
register: stepca_cm_certs
- name: Create target namespaces
kubernetes.core.k8s:
kind: Namespace
name: "{{ item }}"
state: present
kubeconfig: "{{ kubeconfig.path }}"
loop:
- argo-cd
# - kube-system
- name: Store root certificate in namespaced configmaps/secrets
kubernetes.core.k8s:
state: present
template: "{{ item.kind }}.j2"
kubeconfig: "{{ kubeconfig.path }}"
vars:
_template:
name: "{{ item.name }}"
namespace: "{{ item.namespace }}"
annotations: "{{ item.annotations | default('{}') | indent(width=4, first=True) }}"
labels: "{{ item.labels | default('{}') | indent(width=4, first=True) }}"
data: "{{ item.data }}"
loop:
- name: argocd-tls-certs-cm
namespace: argo-cd
kind: configmap
annotations: |
meta.helm.sh/release-name: argo-cd
meta.helm.sh/release-namespace: argo-cd
labels: |
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
data:
- key: git.{{ vapp['metacluster.fqdn'] }}
value: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
- name: step-certificates-certs
namespace: kube-system
kind: secret
data:
- key: root_ca.crt
value: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] | b64encode }}"
loop_control:
label: "{{ item.kind + '/' + item.name + ' (' + item.namespace + ')' }}"
- name: Configure step-ca passthrough ingress
ansible.builtin.template:
src: ingressroutetcp.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: step-ca
namespace: step-ca
config: |2
entryPoints:
- websecure
routes:
- match: HostSNI(`ca.{{ vapp['metacluster.fqdn'] }}`)
services:
- name: step-certificates
port: 443
tls:
passthrough: true
notify:
- Apply manifests
- name: Inject step-ca certificate into traefik container
ansible.builtin.blockinfile:
path: /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
block: |2
volumes:
- name: step-certificates-certs
mountPath: /step-ca
type: secret
env:
- name: LEGO_CA_CERTIFICATES
value: /step-ca/root_ca.crt
marker: ' # {mark} ANSIBLE MANAGED BLOCK'
notify:
- Apply manifests
- name: Trigger handlers
ansible.builtin.meta: flush_handlers
- name: Retrieve step-ca configuration
kubernetes.core.k8s_info:
kind: ConfigMap
name: step-certificates-config
namespace: step-ca
kubeconfig: "{{ kubeconfig.path }}"
register: stepca_cm_config
- name: Install root CA in system truststore
ansible.builtin.shell:
cmd: |
step ca bootstrap \
--ca-url=https://ca.{{ vapp['metacluster.fqdn'] }} \
--fingerprint={{ stepca_cm_config.resources[0].data['defaults.json'] | from_json | json_query('fingerprint') }} \
--install \
--force
update-ca-certificates
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201]
body_format: json
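
As a follow-up, validating a TLS connection against the CA itself would confirm the root certificate actually landed in the system truststore; a sketch, assuming step-ca's /health endpoint:

- name: Verify the root CA is trusted (sketch)
  ansible.builtin.uri:
    url: https://ca.{{ vapp['metacluster.fqdn'] }}/health
    method: GET
    validate_certs: yes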

View File

@@ -0,0 +1,139 @@
- block:
- name: Install gitea chart
kubernetes.core.helm:
name: gitea
chart_ref: /opt/metacluster/helm-charts/gitea
release_namespace: gitea
create_namespace: yes
wait: no
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.gitea.chart_values }}"
- name: Ensure gitea API availability
ansible.builtin.uri:
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/healthz
method: GET
register: api_readycheck
until:
- api_readycheck.json.status is defined
- api_readycheck.json.status == 'pass'
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.long }}"
- name: Configure additional SSH ingress
ansible.builtin.template:
src: ingressroutetcp.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: gitea-ssh
namespace: gitea
config: |2
entryPoints:
- ssh
routes:
- match: HostSNI(`*`)
services:
- name: gitea-ssh
port: 22
notify:
- Apply manifests
- name: Trigger handlers
ansible.builtin.meta: flush_handlers
- name: Generate gitea API token
ansible.builtin.uri:
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/users/administrator/tokens
method: POST
user: administrator
password: "{{ vapp['metacluster.password'] }}"
force_basic_auth: yes
body:
name: token_init_{{ lookup('password', '/dev/null length=5 chars=ascii_letters,digits') }}
register: gitea_api_token
- name: Retrieve existing gitea configuration
ansible.builtin.uri:
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/repos/search
method: GET
register: gitea_existing_config
- block:
- name: Register SSH public key
ansible.builtin.uri:
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/user/keys
method: POST
headers:
Authorization: token {{ gitea_api_token.json.sha1 }}
body:
key: "{{ gitops_sshkey.public_key }}"
read_only: false
title: GitOps
- name: Create organization(s)
ansible.builtin.uri:
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/orgs
method: POST
headers:
Authorization: token {{ gitea_api_token.json.sha1 }}
body: "{{ item }}"
loop:
- full_name: Meta-cluster
description: Meta-cluster configuration items
username: mc
website: https://git.{{ vapp['metacluster.fqdn'] }}/mc
location: '[...]'
visibility: public
- full_name: Workload-cluster
description: Workload-cluster configuration items
username: wl
website: https://git.{{ vapp['metacluster.fqdn'] }}/wl
location: '[...]'
visibility: public
loop_control:
label: "{{ item.full_name }}"
- name: Create repositories
ansible.builtin.uri:
url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/orgs/{{ item.organization }}/repos
method: POST
headers:
Authorization: token {{ gitea_api_token.json.sha1 }}
body: "{{ item.body }}"
loop:
- organization: mc
body:
name: GitOps.Config
# auto_init: true
# default_branch: main
description: GitOps manifests
- organization: wl
body:
name: Template.GitOps.Config
# auto_init: true
# default_branch: main
description: GitOps manifests
loop_control:
label: "{{ item.organization + '/' + item.body.name }}"
- name: Rebase/Push source gitops repository
ansible.builtin.shell:
cmd: |
git config --local http.sslVerify false
git remote set-url origin https://administrator:{{ vapp['metacluster.password'] | urlencode }}@git.{{ vapp['metacluster.fqdn'] }}/mc/GitOps.Config.git
git push
chdir: /opt/metacluster/git-repositories/gitops
when: (gitea_existing_config.json is undefined) or (gitea_existing_config.json.data | length == 0)
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201]
body_format: json
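
To validate the generated token before relying on it, Gitea's /api/v1/user endpoint (which returns the authenticated user) can serve as a smoke test; a sketch:

- name: Verify gitea API token (sketch)
  ansible.builtin.uri:
    url: https://git.{{ vapp['metacluster.fqdn'] }}/api/v1/user
    method: GET
    headers:
      Authorization: token {{ gitea_api_token.json.sha1 }}
    validate_certs: no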

View File

@@ -0,0 +1,70 @@
- block:
- name: Install argo-cd chart
kubernetes.core.helm:
name: argo-cd
chart_ref: /opt/metacluster/helm-charts/argo-cd
release_namespace: argo-cd
create_namespace: yes
wait: no
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.argocd.chart_values }}"
- name: Ensure argo-cd API availability
ansible.builtin.uri:
url: https://gitops.{{ vapp['metacluster.fqdn'] }}/api/version
method: GET
register: api_readycheck
until:
- api_readycheck.json.Version is defined
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.long }}"
- name: Generate argo-cd API token
ansible.builtin.uri:
url: https://gitops.{{ vapp['metacluster.fqdn'] }}/api/v1/session
method: POST
force_basic_auth: yes
body:
username: admin
password: "{{ vapp['metacluster.password'] }}"
register: argocd_api_token
- name: Configure metacluster-gitops repository
ansible.builtin.template:
src: gitrepo.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: argocd-gitrepo-metacluster
namespace: argo-cd
uid: "{{ lookup('ansible.builtin.password', '/dev/null length=5 chars=ascii_lowercase,digits seed=inventory_hostname') }}"
privatekey: "{{ lookup('ansible.builtin.file', '~/.ssh/git_rsa_id') | indent(4, true) }}"
notify:
- Apply manifests
- name: Create applicationset
ansible.builtin.template:
src: applicationset.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: argocd-applicationset-metacluster
namespace: argo-cd
notify:
- Apply manifests
- name: Trigger handlers
ansible.builtin.meta: flush_handlers
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201]
body_format: json
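
The session token registered above can authenticate subsequent API calls; a hedged sketch that lists the applications argo-cd currently manages:

- name: List argo-cd applications (sketch)
  ansible.builtin.uri:
    url: https://gitops.{{ vapp['metacluster.fqdn'] }}/api/v1/applications
    method: GET
    headers:
      Authorization: Bearer {{ argocd_api_token.json.token }}
    validate_certs: no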

View File

@@ -0,0 +1,26 @@
- name: Configure traefik dashboard ingress
ansible.builtin.template:
src: ingressroute.j2
dest: /var/lib/rancher/k3s/server/manifests/{{ _template.name }}-manifest.yaml
owner: root
group: root
mode: 0600
vars:
_template:
name: traefik-dashboard
namespace: kube-system
config: |2
entryPoints:
- web
- websecure
routes:
- kind: Rule
match: Host(`ingress.{{ vapp['metacluster.fqdn'] }}`)
services:
- kind: TraefikService
name: api@internal
notify:
- Apply manifests
- name: Trigger handlers
ansible.builtin.meta: flush_handlers

View File

@@ -0,0 +1,13 @@
- name: Configure fallback name resolution
ansible.builtin.lineinfile:
path: /etc/hosts
line: "{{ vapp['guestinfo.ipaddress'] }} {{ item + '.' + vapp['metacluster.fqdn'] }}"
state: present
loop:
# TODO: Make this list dynamic (see the sketch after this task)
- ca
- git
- gitops
- ingress
- registry
- storage
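
Regarding the TODO above: one way to derive the labels dynamically is to keep them next to the component definitions; a sketch assuming a hypothetical dns_label key on each entry in components:

- name: Configure fallback name resolution (dynamic sketch)
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ vapp['guestinfo.ipaddress'] }} {{ item + '.' + vapp['metacluster.fqdn'] }}"
    state: present
  loop: "{{ components | dict2items | map(attribute='value.dns_label') | select('defined') | sort }}"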

View File

@@ -0,0 +1,74 @@
- name: Store custom configuration files
ansible.builtin.copy:
dest: "{{ item.filename }}"
content: "{{ item.content }}"
loop:
- filename: /etc/rancher/k3s/config.yaml
content: |
kubelet-arg:
- "config=/etc/rancher/k3s/kubelet.config"
- filename: /etc/rancher/k3s/kubelet.config
content: |
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriod: 180s
shutdownGracePeriodCriticalPods: 60s
loop_control:
label: "{{ item.filename }}"
- name: Gather service facts
ansible.builtin.service_facts:
# Module requires no attributes
- name: Install K3s
ansible.builtin.command:
cmd: ./install.sh
chdir: /opt/metacluster/k3s
environment:
INSTALL_K3S_SKIP_DOWNLOAD: 'true'
INSTALL_K3S_EXEC: 'server --cluster-init --disable local-storage --config /etc/rancher/k3s/config.yaml'
when: ansible_facts.services['k3s.service'] is undefined
- name: Debug possible taints on k3s node
ansible.builtin.shell:
cmd: >-
  while true; do
  kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints --no-headers | awk '{print strftime("%H:%M:%S"),$0;fflush();}' >> /var/log/taintlog;
  sleep 1;
  done
async: 1800
poll: 0
- name: Ensure API availability
ansible.builtin.uri:
url: https://{{ vapp['guestinfo.ipaddress'] }}:6443/livez?verbose
method: GET
validate_certs: no
status_code: [200, 401]
register: api_readycheck
until: api_readycheck.json.apiVersion is defined
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.medium }}"
- name: Install kubectl tab-completion
ansible.builtin.shell:
cmd: kubectl completion bash | tee /etc/bash_completion.d/kubectl
- name: Initialize tempfile
ansible.builtin.tempfile:
state: file
register: kubeconfig
- name: Retrieve kubeconfig
ansible.builtin.command:
cmd: kubectl config view --raw
register: kubectl_config
- name: Store kubeconfig in tempfile
ansible.builtin.copy:
dest: "{{ kubeconfig.path }}"
content: "{{ kubectl_config.stdout }}"
mode: 0600
no_log: true
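
To confirm the kubelet merged the custom KubeletConfiguration from above, its /configz endpoint can be inspected through the API server; a sketch (assumes the node name equals the lowercased hostname, as is usual for K3s):

- name: Verify kubelet graceful-shutdown settings (sketch)
  ansible.builtin.shell:
    cmd: kubectl get --raw /api/v1/nodes/$(hostname | tr '[:upper:]' '[:lower:]')/proxy/configz | jq '.kubeletconfig | {shutdownGracePeriod, shutdownGracePeriodCriticalPods}'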

View File

@@ -0,0 +1,9 @@
- import_tasks: init.yml
- import_tasks: k3s.yml
- import_tasks: assets.yml
- import_tasks: ingress.yml
- import_tasks: storage.yml
- import_tasks: certauthority.yml
- import_tasks: registry.yml
- import_tasks: git.yml
- import_tasks: gitops.yml

View File

@@ -0,0 +1,85 @@
- block:
- name: Install harbor chart
kubernetes.core.helm:
name: harbor
chart_ref: /opt/metacluster/helm-charts/harbor
release_namespace: harbor
create_namespace: yes
wait: no
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.harbor.chart_values }}"
- name: Ensure harbor API availability
ansible.builtin.uri:
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/health
method: GET
register: api_readycheck
until:
- api_readycheck.json.status is defined
- api_readycheck.json.status == 'healthy'
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.long }}"
- name: Push images to registry
ansible.builtin.shell:
cmd: >-
skopeo copy \
--insecure-policy \
--dest-tls-verify=false \
--dest-creds admin:{{ vapp['metacluster.password'] }} \
docker-archive:./{{ item | basename }} \
docker://registry.{{ vapp['metacluster.fqdn'] }}/library/$( \
skopeo list-tags \
--insecure-policy \
docker-archive:./{{ item | basename }} | \
jq -r '.Tags[0]')
chdir: /opt/metacluster/container-images/
register: push_result
loop: "{{ query('ansible.builtin.fileglob', '/opt/metacluster/container-images/*.tar') | sort }}"
loop_control:
label: "{{ item | basename }}"
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.short }}"
until: push_result is not failed
- name: Get all stored container images (=artifacts)
ansible.builtin.uri:
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/search?q=library
method: GET
register: registry_artifacts
- name: Get source registries of all artifacts
ansible.builtin.set_fact:
source_registries: "{{ (source_registries | default([]) + [(item | split('/'))[1]]) | unique | sort }}"
loop: "{{ registry_artifacts.json.repository | json_query('[*].repository_name') }}"
- name: Configure K3s node for private registry
ansible.builtin.template:
dest: /etc/rancher/k3s/registries.yaml
src: registries.j2
vars:
_template:
data: "{{ source_registries }}"
hv:
fqdn: "{{ vapp['metacluster.fqdn'] }}"
# - name: Restart kubelet (k3s) to pick up configured registries
# ansible.builtin.systemd:
# name: k3s
# state: restarted
# - name: Ensure k3s API availability
# ansible.builtin.uri:
# url: https://{{ vapp['guestinfo.ipaddress'] }}:6443/livez?verbose
# method: GET
# register: api_readycheck
# until: api_readycheck.json.apiVersion is defined
# retries: 5
# delay: 30
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201, 401]
body_format: json
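
Should the commented-out restart prove necessary for the registry mirrors to take effect, a guarded variant using the playbook's retry settings might look like this (a sketch built from the commented tasks above):

- name: Restart k3s to pick up configured registries (sketch)
  ansible.builtin.systemd:
    name: k3s
    state: restarted
- name: Ensure k3s API availability (sketch)
  ansible.builtin.uri:
    url: https://{{ vapp['guestinfo.ipaddress'] }}:6443/livez?verbose
    method: GET
    validate_certs: no
    status_code: [200, 401]
  register: api_readycheck
  until: api_readycheck.json.apiVersion is defined
  retries: "{{ playbook.retries }}"
  delay: "{{ playbook.delays.medium }}"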

View File

@@ -0,0 +1,26 @@
- block:
- name: Install longhorn chart
kubernetes.core.helm:
name: longhorn
chart_ref: /opt/metacluster/helm-charts/longhorn
release_namespace: longhorn-system
create_namespace: yes
wait: no
kubeconfig: "{{ kubeconfig.path }}"
values: "{{ components.longhorn.chart_values }}"
- name: Ensure longhorn API availability
ansible.builtin.uri:
url: https://storage.{{ vapp['metacluster.fqdn'] }}/v1
method: GET
register: api_readycheck
until:
- api_readycheck is not failed
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.long }}"
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201]
body_format: json

View File

@@ -0,0 +1,161 @@
- name: Configure clusterctl
ansible.builtin.template:
src: clusterctl.j2
dest: /opt/metacluster/cluster-api/clusterctl.yaml
vars:
_template:
version:
base: "{{ components.clusterapi.management.version.base }}"
cert_manager: "{{ components.clusterapi.management.version.cert_manager }}"
infrastructure_vsphere: "{{ components.clusterapi.management.version.infrastructure_vsphere }}"
ipam_incluster: "{{ components.clusterapi.management.version.ipam_incluster }}"
hv:
fqdn: "{{ vapp['hv.fqdn'] }}"
tlsthumbprint: "{{ tls_thumbprint.stdout }}"
username: "{{ vapp['hv.username'] }}"
password: "{{ vapp['hv.password'] }}"
datacenter: "{{ vcenter_info.datacenter }}"
datastore: "{{ vcenter_info.datastore }}"
network: "{{ vcenter_info.network }}"
resourcepool: "{{ vcenter_info.resourcepool }}"
folder: "{{ vcenter_info.folder }}"
cluster:
nodetemplate: "{{ (components.clusterapi.workload.node_template.url | basename | split('.'))[:-1] | join('.') }}"
publickey: "{{ vapp['guestinfo.rootsshkey'] }}"
version: "{{ components.clusterapi.workload.version.k8s }}"
vip: "{{ vapp['workloadcluster.vip'] }}"
- name: Update image references to use local registry
ansible.builtin.replace:
dest: "{{ item.root + '/' + item.path }}"
regexp: '([ ]+image:[ "]+)(?!({{ _template.pattern }}|"{{ _template.pattern }}))'
replace: '\1{{ _template.pattern }}'
vars:
_template:
pattern: registry.{{ vapp['metacluster.fqdn'] }}/library/
loop: "{{ lookup('community.general.filetree', '/opt/metacluster/cluster-api') }}"
loop_control:
label: "{{ item.path }}"
when:
- item.path is search('.yaml')
- item.path is not search("clusterctl.yaml|metadata.yaml")
- name: Generate kustomization template
ansible.builtin.template:
src: kustomization.cluster-template.j2
dest: /opt/metacluster/cluster-api/infrastructure-vsphere/{{ components.clusterapi.management.version.infrastructure_vsphere }}/kustomization.yaml
vars:
_template:
fqdn: "{{ vapp['metacluster.fqdn'] }}"
rootca: "{{ stepca_cm_certs.resources[0].data['root_ca.crt'] }}"
script:
# Base64 encoded; to avoid variable substitution when clusterctl parses the cluster-template.yml
encoded: IyEvYmluL2Jhc2gKdm10b29sc2QgLS1jbWQgJ2luZm8tZ2V0IGd1ZXN0aW5mby5vdmZFbnYnID4gL3RtcC9vdmZlbnYKCklQQWRkcmVzcz0kKHNlZCAtbiAncy8uKlByb3BlcnR5IG9lOmtleT0iZ3Vlc3RpbmZvLmludGVyZmFjZS4wLmlwLjAuYWRkcmVzcyIgb2U6dmFsdWU9IlwoW14iXSpcKS4qL1wxL3AnIC90bXAvb3ZmZW52KQpTdWJuZXRNYXNrPSQoc2VkIC1uICdzLy4qUHJvcGVydHkgb2U6a2V5PSJndWVzdGluZm8uaW50ZXJmYWNlLjAuaXAuMC5uZXRtYXNrIiBvZTp2YWx1ZT0iXChbXiJdKlwpLiovXDEvcCcgL3RtcC9vdmZlbnYpCkdhdGV3YXk9JChzZWQgLW4gJ3MvLipQcm9wZXJ0eSBvZTprZXk9Imd1ZXN0aW5mby5pbnRlcmZhY2UuMC5yb3V0ZS4wLmdhdGV3YXkiIG9lOnZhbHVlPSJcKFteIl0qXCkuKi9cMS9wJyAvdG1wL292ZmVudikKRE5TPSQoc2VkIC1uICdzLy4qUHJvcGVydHkgb2U6a2V5PSJndWVzdGluZm8uZG5zLnNlcnZlcnMiIG9lOnZhbHVlPSJcKFteIl0qXCkuKi9cMS9wJyAvdG1wL292ZmVudikKTUFDQWRkcmVzcz0kKHNlZCAtbiAncy8uKnZlOkFkYXB0ZXIgdmU6bWFjPSJcKFteIl0qXCkuKi9cMS9wJyAvdG1wL292ZmVudikKCm1hc2syY2lkcigpIHsKICBjPTAKICB4PTAkKCBwcmludGYgJyVvJyAkezEvLy4vIH0gKQoKICB3aGlsZSBbICR4IC1ndCAwIF07IGRvCiAgICBsZXQgYys9JCgoeCUyKSkgJ3g+Pj0xJwogIGRvbmUKCiAgZWNobyAkYwp9CgpQcmVmaXg9JChtYXNrMmNpZHIgJFN1Ym5ldE1hc2spCgpjYXQgPiAvZXRjL25ldHBsYW4vMDEtbmV0Y2ZnLnlhbWwgPDxFT0YKbmV0d29yazoKICB2ZXJzaW9uOiAyCiAgcmVuZGVyZXI6IG5ldHdvcmtkCiAgZXRoZXJuZXRzOgogICAgaWQwOgogICAgICBzZXQtbmFtZTogZXRoMAogICAgICBtYXRjaDoKICAgICAgICBtYWNhZGRyZXNzOiAkTUFDQWRkcmVzcwogICAgICBhZGRyZXNzZXM6CiAgICAgICAgLSAkSVBBZGRyZXNzLyRQcmVmaXgKICAgICAgZ2F0ZXdheTQ6ICRHYXRld2F5CiAgICAgIG5hbWVzZXJ2ZXJzOgogICAgICAgIGFkZHJlc3NlcyA6IFskRE5TXQpFT0YKcm0gL2V0Yy9uZXRwbGFuLzUwKi55YW1sIC1mCgpzdWRvIG5ldHBsYW4gYXBwbHk=
runcmds:
- update-ca-certificates
- bash /root/network.sh
- name: Store custom cluster-template
ansible.builtin.copy:
dest: /opt/metacluster/cluster-api/custom-cluster-template.yaml
content: "{{ lookup('kubernetes.core.kustomize', dir='/opt/metacluster/cluster-api/infrastructure-vsphere/' + components.clusterapi.management.version.infrastructure_vsphere ) }}"
- name: Initialize Cluster API management cluster
ansible.builtin.shell:
cmd: >-
clusterctl init \
-v5 \
--infrastructure vsphere:{{ components.clusterapi.management.version.infrastructure_vsphere }} \
--ipam in-cluster:{{ components.clusterapi.management.version.ipam_incluster }} \
--config ./clusterctl.yaml \
--kubeconfig {{ kubeconfig.path }}
chdir: /opt/metacluster/cluster-api
- name: Ensure CAPI/CAPV controller availability
kubernetes.core.k8s_info:
kind: Deployment
name: "{{ item.name }}"
namespace: "{{ item.namespace }}"
wait: true
kubeconfig: "{{ kubeconfig.path }}"
loop:
- name: capi-controller-manager
namespace: capi-system
- name: capv-controller-manager
namespace: capv-system
loop_control:
label: "{{ item.name }}"
- name: Parse vApp for workload cluster sizing
ansible.builtin.set_fact:
clustersize: >-
{{ {
'controlplane': vapp['deployment.type'] | regex_findall('^cp(\d+)') | first,
'workers': vapp['deployment.type'] | regex_findall('w(\d+)$') | first
} }}
- name: Generate workload cluster manifest
ansible.builtin.shell:
cmd: >-
clusterctl generate cluster \
{{ vapp['workloadcluster.name'] | lower }} \
--control-plane-machine-count {{ clustersize.controlplane }} \
--worker-machine-count {{ clustersize.workers }} \
--from ./custom-cluster-template.yaml \
--config ./clusterctl.yaml \
--kubeconfig {{ kubeconfig.path }}
chdir: /opt/metacluster/cluster-api
register: clusterctl_newcluster
# TODO: move to git repo
- name: Save workload cluster manifest
ansible.builtin.copy:
dest: /opt/metacluster/cluster-api/new-cluster.yaml
content: "{{ clusterctl_newcluster.stdout }}"
- name: Apply workload cluster manifest
kubernetes.core.k8s:
definition: >-
{{ clusterctl_newcluster.stdout }}
wait: yes
kubeconfig: "{{ kubeconfig.path }}"
# TODO: move to git repo
- name: Wait for cluster to be available
ansible.builtin.shell:
cmd: >-
kubectl wait clusters.cluster.x-k8s.io/{{ vapp['workloadcluster.name'] | lower }} \
--for=condition=Ready \
--timeout 0s
register: cluster_readycheck
until: cluster_readycheck is succeeded
retries: "{{ playbook.retries }}"
delay: "{{ playbook.delays.long }}"
- name: Initialize tempfile
ansible.builtin.tempfile:
state: file
register: capi_kubeconfig
- name: Retrieve kubeconfig
ansible.builtin.shell:
cmd: >-
clusterctl get kubeconfig \
{{ vapp['workloadcluster.name'] | lower }} \
--kubeconfig {{ kubeconfig.path }}
register: capi_kubectl_config
- name: Store kubeconfig in tempfile
ansible.builtin.copy:
dest: "{{ capi_kubeconfig.path }}"
content: "{{ capi_kubectl_config.stdout }}"
mode: 0600
no_log: true
# TODO: move to git repo
- name: Apply cni plugin manifest
kubernetes.core.k8s:
src: /opt/metacluster/cluster-api/cni-calico/{{ components.clusterapi.workload.version.calico }}/calico.yaml
state: present
wait: yes
kubeconfig: "{{ capi_kubeconfig.path }}"
# TODO: move to git repo
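
For reference, the sizing parse earlier in this file maps the vApp property as follows (hypothetical value), which a debug task can surface during a run:

# vapp['deployment.type'] == 'cp1w2'  ->  {'controlplane': '1', 'workers': '2'}
- name: Show parsed workload cluster sizing (sketch)
  ansible.builtin.debug:
    var: clustersize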

View File

@@ -0,0 +1,44 @@
- block:
- name: Generate service account in workload cluster
kubernetes.core.k8s:
template: serviceaccount.j2
state: present
- name: Retrieve service account details
kubernetes.core.k8s_info:
kind: ServiceAccount
name: "{{ _template.account.name }}"
namespace: "{{ _template.account.namespace }}"
register: workloadcluster_serviceaccount
- name: Retrieve service account bearer token
kubernetes.core.k8s_info:
kind: Secret
name: "{{ workloadcluster_serviceaccount.resources | json_query('[].secrets[].name') | first }}"
namespace: "{{ _template.account.namespace }}"
register: workloadcluster_bearertoken
- name: Register workload cluster in argo-cd
kubernetes.core.k8s:
template: cluster.j2
state: present
kubeconfig: "{{ kubeconfig.path }}"
vars:
_template:
cluster:
name: "{{ vapp['workloadcluster.name'] | lower }}"
secret: argocd-cluster-{{ vapp['workloadcluster.name'] | lower }}
url: https://{{ vapp['workloadcluster.vip'] }}:6443
token: "{{ workloadcluster_bearertoken.resources | json_query('[].data.token') | first | b64decode }}"
vars:
_template:
account:
name: argocd-sa
namespace: default
clusterrolebinding:
name: argocd-crb
module_defaults:
group/k8s:
kubeconfig: "{{ capi_kubeconfig.path }}"

View File

@@ -0,0 +1,75 @@
- name: Gather hypervisor details
ansible.builtin.shell:
cmd: govc ls -L {{ item.moref }} | awk -F/ '{print ${{ item.part }}}'
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: govc_inventory
loop:
- attribute: cluster
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
part: (NF-1)
- attribute: datacenter
moref: VirtualMachine:{{ moref_id }}
part: 2
- attribute: datastore
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "datastore").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
part: NF
- attribute: folder
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "parent").Val | .Type + ":" + .Value')
part: 0
# - attribute: host
# moref: >-
# $(govc object.collect -json VirtualMachine:{{ moref_id }} | \
# jq -r '.[] | select(.Name == "runtime").Val.Host | .Type + ":" + .Value')
# part: NF
- attribute: network
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "network").Val.ManagedObjectReference | .[].Type + ":" + .[].Value')
part: NF
- attribute: resourcepool
moref: >-
$(govc object.collect -json VirtualMachine:{{ moref_id }} | \
jq -r '.[] | select(.Name == "resourcePool").Val | .Type + ":" + .Value')
part: 0
loop_control:
label: "{{ item.attribute }}"
- name: Retrieve hypervisor TLS thumbprint
ansible.builtin.shell:
cmd: openssl s_client -connect {{ vapp['hv.fqdn'] }}:443 < /dev/null 2>/dev/null | openssl x509 -fingerprint -noout -in /dev/stdin | awk -F'=' '{print $2}'
register: tls_thumbprint
- name: Store hypervisor details in dictionary
ansible.builtin.set_fact:
vcenter_info: "{{ vcenter_info | default({}) | combine({ item.item.attribute : item.stdout }) }}"
loop: "{{ govc_inventory.results }}"
loop_control:
label: "{{ item.item.attribute }}"
- name: Configure network protocol profile on hypervisor
ansible.builtin.shell:
cmd: >-
npp-prepper \
--server "{{ vapp['hv.fqdn'] }}" \
--username "{{ vapp['hv.username'] }}" \
--password "{{ vapp['hv.password'] }}" \
dc \
--name "{{ vcenter_info.datacenter }}" \
--portgroup "{{ vcenter_info.network }}" \
--startaddress {{ vapp['ippool.startip'] }} \
--endaddress {{ vapp['ippool.endip'] }} \
--netmask {{ (vapp['guestinfo.ipaddress'] + '/' + vapp['guestinfo.prefixlength']) | ansible.utils.ipaddr('netmask') }} \
{{ vapp['guestinfo.dnsserver'] | split(',') | map('trim') | map('regex_replace', '^', '--dnsserver ') | join(' ') }} \
--dnsdomain {{ vapp['metacluster.fqdn'] }} \
--gateway {{ vapp['guestinfo.gateway'] }} \
--force
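
After the fact-gathering above, vcenter_info holds one entry per attribute; a debug task makes the collected values visible (sketch; output shape is illustrative):

- name: Show collected hypervisor details (sketch)
  ansible.builtin.debug:
    var: vcenter_info
  # e.g. {'cluster': 'Cluster01', 'datacenter': 'DC01', 'datastore': 'DS01', ...}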

View File

@@ -0,0 +1,5 @@
- import_tasks: hypervisor.yml
- import_tasks: registry.yml
- import_tasks: nodetemplates.yml
- import_tasks: clusterapi.yml
- import_tasks: gitops.yml

View File

@@ -0,0 +1,85 @@
- block:
- name: Check for existing templates on hypervisor
community.vmware.vmware_guest_info:
name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
register: existing_ova
loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
ignore_errors: yes
- name: Parse OVA files for network mappings
ansible.builtin.shell:
cmd: govc import.spec -json {{ item }}
environment:
GOVC_INSECURE: '1'
GOVC_URL: "{{ vapp['hv.fqdn'] }}"
GOVC_USERNAME: "{{ vapp['hv.username'] }}"
GOVC_PASSWORD: "{{ vapp['hv.password'] }}"
register: ova_spec
when: existing_ova.results[index] is failed
loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
loop_control:
index_var: index
- name: Deploy OVA templates on hypervisor
community.vmware.vmware_deploy_ovf:
cluster: "{{ vcenter_info.cluster }}"
datastore: "{{ vcenter_info.datastore }}"
folder: "{{ vcenter_info.folder }}"
name: "{{ (item | basename | split('.'))[:-1] | join('.') }}"
networks: "{u'{{ ova_spec.results[index].stdout | from_json | json_query('NetworkMapping[0].Name') }}':u'{{ vcenter_info.network }}'}"
allow_duplicates: no
power_on: false
ovf: "{{ item }}"
register: ova_deploy
when: existing_ova.results[index] is failed
loop: "{{ query('ansible.builtin.fileglob', '/opt/workloadcluster/node-templates/*.ova') | sort }}"
loop_control:
index_var: index
- name: Add vApp properties on deployed VMs
ansible.builtin.shell:
cmd: >-
npp-prepper \
--server "{{ vapp['hv.fqdn'] }}" \
--username "{{ vapp['hv.username'] }}" \
--password "{{ vapp['hv.password'] }}" \
vm \
--datacenter "{{ vcenter_info.datacenter }}" \
--portgroup "{{ vcenter_info.network }}" \
--name "{{ item.instance.hw_name }}"
when: existing_ova.results[index] is failed
loop: "{{ ova_deploy.results }}"
loop_control:
index_var: index
label: "{{ item.item }}"
- name: Create snapshot on deployed VMs
community.vmware.vmware_guest_snapshot:
folder: "{{ vcenter_info.folder }}"
name: "{{ item.instance.hw_name }}"
state: present
snapshot_name: "{{ ansible_date_time.iso8601_basic_short }}-base"
when: ova_deploy.results[index] is not skipped
loop: "{{ ova_deploy.results }}"
loop_control:
index_var: index
label: "{{ item.item }}"
- name: Mark deployed VMs as templates
community.vmware.vmware_guest:
name: "{{ item.instance.hw_name }}"
is_template: yes
when: ova_deploy.results[index] is not skipped
loop: "{{ ova_deploy.results }}"
loop_control:
index_var: index
label: "{{ item.item }}"
module_defaults:
group/vmware:
hostname: "{{ vapp['hv.fqdn'] }}"
validate_certs: no
username: "{{ vapp['hv.username'] }}"
password: "{{ vapp['hv.password'] }}"
datacenter: "{{ vcenter_info.datacenter }}"

View File

@@ -0,0 +1,40 @@
- block:
- name: Create dedicated kubeadm project within container registry
ansible.builtin.uri:
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects
method: POST
headers:
Authorization: "Basic {{ ('admin:' + vapp['metacluster.password']) | b64encode }}"
body:
project_name: kubeadm
public: true
storage_limit: 0
metadata:
enable_content_trust: 'false'
enable_content_trust_cosign: 'false'
auto_scan: 'true'
severity: none
prevent_vul: 'false'
public: 'true'
reuse_sys_cve_allowlist: 'true'
- name: Lookup kubeadm container images
ansible.builtin.set_fact:
kubeadm_images: "{{ lookup('ansible.builtin.file', '/opt/metacluster/cluster-api/imagelist').splitlines() }}"
- name: Copy kubeadm container images to dedicated project
ansible.builtin.uri:
url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects/kubeadm/repositories/{{ ( item | regex_findall('([^:/]+)') )[-2] }}/artifacts?from=library/{{ item | replace('/', '%2F') | replace(':', '%3A') }}
method: POST
headers:
Authorization: "Basic {{ ('admin:' + vapp['metacluster.password']) | b64encode }}"
body:
from: "{{ item }}"
loop: "{{ kubeadm_images }}"
module_defaults:
ansible.builtin.uri:
validate_certs: no
status_code: [200, 201, 409]
body_format: json
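
A follow-up check that the copies landed in the dedicated project could query Harbor's repository listing; a sketch:

- name: List repositories in kubeadm project (sketch)
  ansible.builtin.uri:
    url: https://registry.{{ vapp['metacluster.fqdn'] }}/api/v2.0/projects/kubeadm/repositories
    method: GET
    headers:
      Authorization: "Basic {{ ('admin:' + vapp['metacluster.password']) | b64encode }}"
    validate_certs: no
  register: kubeadm_repositories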

View File

@@ -0,0 +1,28 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: {{ _template.name }}
namespace: {{ _template.namespace }}
spec:
generators:
- git:
repoURL: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git
revision: HEAD
directories:
- path: metacluster-applicationset/*
template:
metadata:
name: {% raw %}'{{ path.basename }}'{% endraw +%}
spec:
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
source:
repoURL: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git
targetRevision: HEAD
path: {% raw %}'{{ path }}'{% endraw +%}
destination:
server: https://kubernetes.default.svc
namespace: default

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ _template.cluster.secret }}
namespace: argo-cd
labels:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
name: {{ _template.cluster.name }}
server: {{ _template.cluster.url }}
config: |
{
"bearerToken": "{{ _template.cluster.token }}",
"tlsClientConfig": {
"insecure": true
}
}

View File

@@ -0,0 +1,42 @@
providers:
- name: "kubeadm"
url: "/opt/metacluster/cluster-api/bootstrap-kubeadm/{{ _template.version.base }}/bootstrap-components.yaml"
type: "BootstrapProvider"
- name: "cluster-api"
url: "/opt/metacluster/cluster-api/cluster-api/{{ _template.version.base }}/core-components.yaml"
type: "CoreProvider"
- name: "kubeadm"
url: "/opt/metacluster/cluster-api/control-plane-kubeadm/{{ _template.version.base }}/control-plane-components.yaml"
type: "ControlPlaneProvider"
- name: "vsphere"
url: "/opt/metacluster/cluster-api/infrastructure-vsphere/{{ _template.version.infrastructure_vsphere }}/infrastructure-components.yaml"
type: "InfrastructureProvider"
- name: "in-cluster"
url: "/opt/metacluster/cluster-api/ipam-in-cluster/{{ _template.version.ipam_incluster }}/ipam-components.yaml"
type: "IPAMProvider"
cert-manager:
url: "/opt/metacluster/cluster-api/cert-manager/{{ _template.version.cert_manager }}/cert-manager.yaml"
version: "{{ _template.version.cert_manager }}"
## -- Controller settings -- ##
VSPHERE_SERVER: "{{ _template.hv.fqdn }}"
VSPHERE_TLS_THUMBPRINT: "{{ _template.hv.tlsthumbprint }}"
VSPHERE_USERNAME: "{{ _template.hv.username }}"
VSPHERE_PASSWORD: "{{ _template.hv.password }}"
## -- Required workload cluster default settings -- ##
VSPHERE_DATACENTER: "{{ _template.hv.datacenter }}"
VSPHERE_DATASTORE: "{{ _template.hv.datastore }}"
VSPHERE_STORAGE_POLICY: ""
VSPHERE_NETWORK: "{{ _template.hv.network }}"
VSPHERE_RESOURCE_POOL: "{{ _template.hv.resourcepool }}"
VSPHERE_FOLDER: "{{ _template.hv.folder }}"
VSPHERE_TEMPLATE: "{{ _template.cluster.nodetemplate }}"
VSPHERE_SSH_AUTHORIZED_KEY: "{{ _template.cluster.publickey }}"
KUBERNETES_VERSION: "{{ _template.cluster.version }}"
CONTROL_PLANE_ENDPOINT_IP: "{{ _template.cluster.vip }}"
VIP_NETWORK_INTERFACE: ""
EXP_CLUSTER_RESOURCE_SET: "true"

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Change working directory
pushd {{ _template.service.workingdir }}
# Compress *.tar files
if tar -czf image-tarballs.tgz *.tar --remove-files; then
# Disable systemd unit
systemctl disable {{ _template.service.name }}
fi

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ _template.name }}
namespace: {{ _template.namespace }}
annotations:
{{ _template.annotations }}
labels:
{{ _template.labels }}
data:
{% for kv_pair in _template.data %}
"{{ kv_pair.key }}": |
{{ kv_pair.value | indent(width=4, first=True) }}
{% endfor %}

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ _template.name }}-{{ _template.uid }}
namespace: {{ _template.namespace }}
labels:
argocd.argoproj.io/secret-type: repository
stringData:
url: ssh://git@gitea-ssh.gitea.svc.cluster.local/mc/GitOps.Config.git
name: {{ _template.name }}
insecure: 'true'
sshPrivateKey: |
{{ _template.privatekey }}

View File

@@ -0,0 +1,7 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: {{ _template.name }}
namespace: {{ _template.namespace }}
spec:
{{ _template.config }}

View File

@@ -0,0 +1,7 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: {{ _template.name }}
namespace: {{ _template.namespace }}
spec:
{{ _template.config }}

View File

@@ -0,0 +1,104 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cluster-template.yaml
patchesStrategicMerge:
- |-
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
name: '${CLUSTER_NAME}'
namespace: '${NAMESPACE}'
spec:
kubeadmConfigSpec:
clusterConfiguration:
imageRepository: registry.{{ _template.fqdn }}/kubeadm
- |-
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: '${CLUSTER_NAME}-md-0'
namespace: '${NAMESPACE}'
spec:
template:
spec:
clusterConfiguration:
imageRepository: registry.{{ _template.fqdn }}/kubeadm
- |-
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: '${CLUSTER_NAME}-md-0'
namespace: '${NAMESPACE}'
spec:
template:
spec:
files:
- encoding: base64
content: |
{{ _template.script.encoded }}
permissions: '0744'
owner: root:root
path: /root/network.sh
- content: |
network: {config: disabled}
owner: root:root
path: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
- content: |
{{ _template.rootca | indent(width=14, first=False) | trim }}
owner: root:root
path: /usr/local/share/ca-certificates/root_ca.crt
patchesJson6902:
- target:
group: controlplane.cluster.x-k8s.io
version: v1beta1
kind: KubeadmControlPlane
name: .*
patch: |-
- op: add
path: /spec/kubeadmConfigSpec/files/-
value:
encoding: base64
content: |
{{ _template.script.encoded }}
owner: root:root
path: /root/network.sh
permissions: '0744'
- op: add
path: /spec/kubeadmConfigSpec/files/-
value:
content: |
network: {config: disabled}
owner: root:root
path: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
- op: add
path: /spec/kubeadmConfigSpec/files/-
value:
content: |
{{ _template.rootca | indent(width=12, first=False) | trim }}
owner: root:root
path: /usr/local/share/ca-certificates/root_ca.crt
- target:
group: bootstrap.cluster.x-k8s.io
version: v1beta1
kind: KubeadmConfigTemplate
name: .*
patch: |-
{% for cmd in _template.runcmds %}
- op: add
path: /spec/template/spec/preKubeadmCommands/-
value: {{ cmd }}
{% endfor %}
- target:
group: controlplane.cluster.x-k8s.io
version: v1beta1
kind: KubeadmControlPlane
name: .*
patch: |-
{% for cmd in _template.runcmds %}
- op: add
path: /spec/kubeadmConfigSpec/preKubeadmCommands/-
value: {{ cmd }}
{% endfor %}

View File

@@ -0,0 +1,8 @@
mirrors:
{% for entry in _template.data %}
{{ entry }}:
endpoint:
- https://registry.{{ _template.hv.fqdn }}
rewrite:
"(.*)": "library/{{ entry }}/$1"
{% endfor %}

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ _template.name }}
namespace: {{ _template.namespace }}
data:
{% for kv_pair in _template.data %}
"{{ kv_pair.key }}": {{ kv_pair.value }}
{% endfor %}

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ _template.account.name }}
namespace: {{ _template.account.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ _template.clusterrolebinding.name }}
subjects:
- kind: ServiceAccount
name: {{ _template.account.name }}
namespace: {{ _template.account.namespace }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,9 @@
[Unit]
Description={{ _template.service.name }}
[Service]
ExecStart={{ _template.service.executable }}
Nice=10
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,47 @@
#!/bin/bash
export TERM=linux
BGRN='\033[1;92m'
BGRY='\033[1;30m'
BBLU='\033[1;34m'
BRED='\033[1;91m'
BWHI='\033[1;97m'
CBLA='\033[?16;0;30c' # Hide blinking cursor
DFLT='\033[0m' # Reset colour
LCLR='\033[K' # Clear to end of line
PRST='\033[0;0H' # Reset cursor position
# COMPONENTS=('ca' 'ingress' 'storage' 'registry' 'git' 'gitops')
COMPONENTS=('ca' 'storage' 'registry' 'git' 'gitops')
FQDN='{{ vapp['metacluster.fqdn'] }}'
IPADDRESS='{{ vapp['guestinfo.ipaddress'] }}'
I=60
while /bin/true; do
if [[ $I -gt 59 ]]; then
clear > /dev/tty1
I=0
else
I=$(( $I + 1 ))
fi
echo -e "${PRST}" > /dev/tty1
echo -e "\n\n\t${DFLT}To manage this appliance, please connect to one of the following:${LCLR}\n" > /dev/tty1
for c in "${COMPONENTS[@]}"; do
STATUS=$(curl -ks "https://${c}.${FQDN}" -o /dev/null -w '%{http_code}')
if [[ "${STATUS}" -eq "200" ]]; then
echo -e "\t [${BGRN}+${DFLT}] ${BBLU}https://${c}.${FQDN}${DFLT}${LCLR}" > /dev/tty1
else
echo -e "\t [${BRED}-${DFLT}] ${BBLU}https://${c}.${FQDN}${DFLT}${LCLR}" > /dev/tty1
fi
done
echo -e "\n\t${BGRY}Note that your DNS zone ${DFLT}must have${BGRY} respective records defined,\n\teach pointing to: ${DFLT}${IPADDRESS}${LCLR}" > /dev/tty1
echo -e "${CBLA}" > /dev/tty1
sleep 1
done