From 29a6005ff2b3d116292eeac9a121acd35d29712c Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Mon, 26 Jun 2023 13:55:08 +0200 Subject: [PATCH] latest updates --- README.rst | 2 +- inventory/base/hosts.yaml | 24 +-- inventory/local_certs/group_vars/all.yaml | 1 + inventory/local_certs/group_vars/vault.yaml | 2 + inventory/service/all.yaml | 42 ++++ inventory/service/groups.yaml | 59 +++--- .../host_vars/vault1.scs.otc-service.com.yaml | 2 +- kubernetes/zuul/base/kustomization.yaml | 4 +- .../nodepool-builder/statefulset.yaml | 12 +- .../nodepool-launcher/deployment.yaml | 8 +- .../components/nodepool-launcher/hpa.yaml | 23 +++ .../nodepool-launcher/kustomization.yaml | 1 + .../zuul/components/restarter/README.md | 8 + kubernetes/zuul/components/restarter/crb.yaml | 12 ++ .../job-restart-nodepool-launcher.yaml | 27 +++ .../restarter/job-restart-zuul-web.yaml | 27 +++ .../components/restarter/kustomization.yaml | 10 + .../zuul/components/restarter/role.yaml | 10 + kubernetes/zuul/components/restarter/sa.yaml | 5 + .../components/zookeeper/kustomization.yaml | 2 +- .../components/zookeeper/statefulset.yaml | 19 +- .../components/zuul-client/deployment.yaml | 12 +- .../components/zuul-config/deployment.yaml | 8 +- .../components/zuul-executor/statefulset.yaml | 37 +++- .../components/zuul-merger/deployment.yaml | 107 ++++++++++ .../zuul/components/zuul-merger/hpa.yaml | 23 +++ .../components/zuul-merger/kustomization.yaml | 3 +- .../components/zuul-scheduler/deployment.yaml | 110 +++++++++++ .../zuul-scheduler/kustomization.yaml | 2 +- .../zuul/components/zuul-web/deployment.yaml | 20 +- .../zuul/overlays/scs/configs/gitea.key | 50 +++++ .../overlays/scs/configs/zuul-gitea-ssh.key | 50 +++++ .../zuul/overlays/scs/configs/zuul.conf | 60 ++++++ .../zuul/overlays/scs/kustomization.yaml | 187 +++++++++--------- playbooks/acme-certs-local.yaml | 57 ++++++ playbooks/bootstrap-bridge.yaml | 19 +- playbooks/configure-keycloak.yaml | 9 - playbooks/distribute-tls-certs.yaml | 3 + .../roles/acme_request_certs/tasks/acme.yaml | 1 + playbooks/roles/hashivault/tasks/Debian.yaml | 14 +- playbooks/roles/hashivault/tasks/main.yaml | 2 +- 41 files changed, 872 insertions(+), 202 deletions(-) create mode 100644 inventory/local_certs/group_vars/all.yaml create mode 100644 inventory/local_certs/group_vars/vault.yaml create mode 100644 inventory/service/all.yaml create mode 100644 kubernetes/zuul/components/nodepool-launcher/hpa.yaml create mode 100644 kubernetes/zuul/components/restarter/README.md create mode 100644 kubernetes/zuul/components/restarter/crb.yaml create mode 100644 kubernetes/zuul/components/restarter/job-restart-nodepool-launcher.yaml create mode 100644 kubernetes/zuul/components/restarter/job-restart-zuul-web.yaml create mode 100644 kubernetes/zuul/components/restarter/kustomization.yaml create mode 100644 kubernetes/zuul/components/restarter/role.yaml create mode 100644 kubernetes/zuul/components/restarter/sa.yaml create mode 100644 kubernetes/zuul/components/zuul-merger/deployment.yaml create mode 100644 kubernetes/zuul/components/zuul-merger/hpa.yaml create mode 100644 kubernetes/zuul/components/zuul-scheduler/deployment.yaml create mode 100644 kubernetes/zuul/overlays/scs/configs/gitea.key create mode 100644 kubernetes/zuul/overlays/scs/configs/zuul-gitea-ssh.key create mode 100644 kubernetes/zuul/overlays/scs/configs/zuul.conf create mode 100644 playbooks/acme-certs-local.yaml delete mode 100644 playbooks/configure-keycloak.yaml create mode 100644 playbooks/distribute-tls-certs.yaml diff 
--git a/README.rst b/README.rst index 0f140ef..4ae3605 100644 --- a/README.rst +++ b/README.rst @@ -45,6 +45,6 @@ Bootstraping Vault therefore requires following steps 2. Checkout this repository and ensure `inventory/service/hosts.yaml` contain proper IP addresses as - well as those hosts are member of vault group as `inventory/service/groups.yaml` + well as those hosts are member of vault group as `inventory/service/all.yaml` 3. execute `ansible-playbook playbooks/service-vault.yaml` playbook. diff --git a/inventory/base/hosts.yaml b/inventory/base/hosts.yaml index 33e0ae2..9cd5d1a 100644 --- a/inventory/base/hosts.yaml +++ b/inventory/base/hosts.yaml @@ -7,26 +7,26 @@ all: host_keys: - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO3RHfleGRMVSNHSBg634EJwM1jYMrbsHTibECPttH1xc6Hdq5XSk/LWYYAeR8g3otMjxxwCVS13e/nMQNMlYvo=' vault1.scs.otc-service.com: - ansible_host: 10.10.0.29 + ansible_host: 10.10.0.210 ansible_user: automation - public_v4: 10.10.0.29 + public_v4: 10.10.0.210 host_keys: - - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFiPzNLi9kxmb4FeAjpQ8GfXpUqzZxs+1L1JqYmAhaNPdy6DwcKglWde/ce3DxFA3YXGGNw8B1euq+hI/zoNVxI=' + - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFfXq60z37tRXjTmwWlnLHFk/Udn1R2MbYe4jNo1DVDEf1lE44DzMDUkyTYo0lcDKSRTx6D/UlH0J4X/PN24Vp4=' vault2.scs.otc-service.com: - ansible_host: 10.10.0.120 + ansible_host: 10.10.0.231 ansible_user: automation - public_v4: 10.10.0.120 + public_v4: 10.10.0.231 host_keys: - - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNgVYQmU1AEzantVTjKpe1A6z22ve8/gMkdBFFwHgQicG6ppU+0L9LtVJsLd7xgSg8wnUGaZUotQ9sfKogwb2LQ=' + - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBH2ZcNK0OswZFCGc/hhEcLrZwaNng9qd+NAMFgCI+Z2en66n+nlonBlEmP9fbws84G0oBWfZ/+Z68dtAaMNVKZw=' vault3.scs.otc-service.com: - ansible_host: 10.10.0.113 + ansible_host: 10.10.0.251 ansible_user: automation - public_v4: 10.10.0.113 + public_v4: 10.10.0.251 host_keys: - - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE3Afc7X7kB5v6Rre0LJRC05R/KVW5iV6q+KKyHHQWMCXTdEHRDkgXiSDwxV7FPneZB7QT42QqNfoa43Zz4ptP0=' + - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAEHeofBIUQPW54/0B/p6Zmrxjfk6VqZYaCtWzfUMH4HqPZO/dFbza8MulKNprDSEDK4+KK2+9HvYunEYmvDvms=' gitea1.scs.otc-service.com: - ansible_host: 10.10.0.6 + ansible_host: 10.10.0.107 ansible_user: automation - public_v4: 10.10.0.6 + public_v4: 10.10.0.107 host_keys: - - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBA4L6C0gdxqrbueQf+cEWVHxmZmcewbYCGV5wqEayTXT4ceoktkyzHjOjk4fa91VmE5He+GkC1a88hDnWcwT2+w=' + - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIKrZwdNgGFNSozidYBIyFTla9Ho6ZksBQZja3jBhtnMabm2eUk0ITvaIvAhhhXGk2XeiRzvWpc/WtroIMLm+w0=' diff --git a/inventory/local_certs/group_vars/all.yaml b/inventory/local_certs/group_vars/all.yaml new file mode 100644 index 0000000..311be67 --- /dev/null +++ b/inventory/local_certs/group_vars/all.yaml @@ -0,0 +1 @@ +certs_path: "../certs" diff --git a/inventory/local_certs/group_vars/vault.yaml b/inventory/local_certs/group_vars/vault.yaml new file mode 100644 index 0000000..0b47184 --- /dev/null +++ b/inventory/local_certs/group_vars/vault.yaml @@ -0,0 +1,2 @@ +vault_tls_cert_content: "{{ lookup('ansible.builtin.file', certs_path + '/' + vault_cert + '-fullchain.crt') | default(omit) }}" +vault_tls_key_content: "{{ lookup('ansible.builtin.file', certs_path + '/' + 
vault_cert + '.pem') }}" diff --git a/inventory/service/all.yaml b/inventory/service/all.yaml new file mode 100644 index 0000000..8a8b38b --- /dev/null +++ b/inventory/service/all.yaml @@ -0,0 +1,42 @@ +--- +all: + vars: + ansible_ssh_user: ubuntu + ansible_ssh_private_key_file: /root/.ssh/id_rsa_scs + children: + bastion: + hosts: + bastion*.scs.otc-service.com: + bridge*.scs.otc-service.com: + + ssl_certs: + hosts: + bridge.scs.otc-service.com: + vault1.scs.otc-service.com: + vault2.scs.otc-service.com: + vault3.scs.otc-service.com: + gitea1.scs.otc-service.com: + + k8s-controller: + hosts: + bridge.scs.otc-service.com: + + vault: + hosts: + vault1.scs.otc-service.com: + vault2.scs.otc-service.com: + vault3.scs.otc-service.com: + + vault-controller: + hosts: + bridge.scs.otc-service.com: + + gitea: + hosts: + gitea1.scs.otc-service.com: + + prod_bastion: + hosts: + bridge.scs.otc-service.com: + + disabled: [] diff --git a/inventory/service/groups.yaml b/inventory/service/groups.yaml index c7addac..a9d1d0e 100644 --- a/inventory/service/groups.yaml +++ b/inventory/service/groups.yaml @@ -1,31 +1,42 @@ -plugin: yamlgroup -groups: - bastion: - - bastion*.scs.otc-service.com - - bridge*.scs.otc-service.com +plugin: yaml +all: + vars: + ansible_ssh_user: ubuntu + ansible_ssh_private_key_file: /root/.ssh/id_rsa_scs + children: + bastion: + hosts: + bastion*.scs.otc-service.com: + bridge*.scs.otc-service.com: - ssl_certs: - - bridge.scs.otc-service.com - - vault1.scs.otc-service.com - - vault2.scs.otc-service.com - - vault3.scs.otc-service.com - - gitea1.scs.otc-service.com + ssl_certs: + hosts: + bridge.scs.otc-service.com: + vault1.scs.otc-service.com: + vault2.scs.otc-service.com: + vault3.scs.otc-service.com: + gitea1.scs.otc-service.com: - k8s-controller: - - bridge.scs.otc-service.com + k8s-controller: + hosts: + bridge.scs.otc-service.com: - vault: - - vault1.scs.otc-service.com - - vault2.scs.otc-service.com - - vault3.scs.otc-service.com + vault: + hosts: + vault1.scs.otc-service.com: + vault2.scs.otc-service.com: + vault3.scs.otc-service.com: - vault-controller: - - bridge.scs.otc-service.com + vault-controller: + hosts: + bridge.scs.otc-service.com: - gitea: - - gitea1.scs.otc-service.com + gitea: + hosts: + gitea1.scs.otc-service.com: - prod_bastion: - - bridge.scs.otc-service.com + prod_bastion: + hosts: + bridge.scs.otc-service.com: - disabled: [] + disabled: [] diff --git a/inventory/service/host_vars/vault1.scs.otc-service.com.yaml b/inventory/service/host_vars/vault1.scs.otc-service.com.yaml index df0ddee..3fe6346 100644 --- a/inventory/service/host_vars/vault1.scs.otc-service.com.yaml +++ b/inventory/service/host_vars/vault1.scs.otc-service.com.yaml @@ -1,7 +1,7 @@ ssl_certs: vault: - "vault1.scs.otc-service.com" -vault_cert: "vault" +vault_cert: "vault1" vault_proxy_protocol_behavior: "allow_authorized" # vault_proxy_protocol_authorized_addrs: "192.168.110.151,192.168.110.160" diff --git a/kubernetes/zuul/base/kustomization.yaml b/kubernetes/zuul/base/kustomization.yaml index fac6715..bdd853c 100644 --- a/kubernetes/zuul/base/kustomization.yaml +++ b/kubernetes/zuul/base/kustomization.yaml @@ -12,9 +12,11 @@ components: - ../components/nodepool-launcher configMapGenerator: - - name: zuul-instance-config + - name: "zuul-instance-config" literals: - ZUUL_CONFIG_REPO=https://gitea.eco.tsi-dev.otc-service.com/scs/zuul-config.git + - name: "zuul-executor-vars" + literals: [] labels: - includeSelectors: true diff --git a/kubernetes/zuul/components/nodepool-builder/statefulset.yaml 
b/kubernetes/zuul/components/nodepool-builder/statefulset.yaml index d238518..a54c9e1 100644 --- a/kubernetes/zuul/components/nodepool-builder/statefulset.yaml +++ b/kubernetes/zuul/components/nodepool-builder/statefulset.yaml @@ -35,10 +35,10 @@ spec: resources: limits: cpu: "300m" - memory: "500Mi" + memory: "512Mi" requests: cpu: "100m" - memory: "200Mi" + memory: "256Mi" securityContext: privileged: true @@ -71,6 +71,10 @@ spec: serviceAccountName: "zuul" volumes: + - name: "nodepool-config" + secret: + secretName: "nodepool-config" + - name: "dev" hostPath: path: "/dev" @@ -81,10 +85,6 @@ - name: "dib-tmp" emptyDir: {} - - name: "nodepool-config" - secret: - secretName: "nodepool-config" - - name: "nodepool-containers" emptyDir: {} diff --git a/kubernetes/zuul/components/nodepool-launcher/deployment.yaml b/kubernetes/zuul/components/nodepool-launcher/deployment.yaml index 043544f..cfeb6e2 100644 --- a/kubernetes/zuul/components/nodepool-launcher/deployment.yaml +++ b/kubernetes/zuul/components/nodepool-launcher/deployment.yaml @@ -12,13 +12,13 @@ spec: selector: matchLabels: app.kubernetes.io/name: "zuul" - app.kubernetes.io/part-of: zuul + app.kubernetes.io/part-of: "zuul" app.kubernetes.io/component: "nodepool-launcher" template: metadata: labels: app.kubernetes.io/name: "zuul" - app.kubernetes.io/part-of: zuul + app.kubernetes.io/part-of: "zuul" app.kubernetes.io/component: "nodepool-launcher" spec: containers: @@ -33,10 +33,10 @@ spec: resources: limits: - cpu: "300m" + cpu: "100m" memory: "500Mi" requests: - cpu: "100m" + cpu: "50m" memory: "200Mi" securityContext: diff --git a/kubernetes/zuul/components/nodepool-launcher/hpa.yaml b/kubernetes/zuul/components/nodepool-launcher/hpa.yaml new file mode 100644 index 0000000..363cc72 --- /dev/null +++ b/kubernetes/zuul/components/nodepool-launcher/hpa.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: autoscaling/v2 +kind: "HorizontalPodAutoscaler" +metadata: + name: "nodepool-launcher" + labels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "nodepool-launcher" +spec: + scaleTargetRef: + kind: "Deployment" + name: "nodepool-launcher" + apiVersion: "apps/v1" + minReplicas: 1 + maxReplicas: 2 + metrics: + - type: "Resource" + resource: + name: "cpu" + target: + type: "Utilization" + averageUtilization: 70 diff --git a/kubernetes/zuul/components/nodepool-launcher/kustomization.yaml b/kubernetes/zuul/components/nodepool-launcher/kustomization.yaml index fbc3362..65e6931 100644 --- a/kubernetes/zuul/components/nodepool-launcher/kustomization.yaml +++ b/kubernetes/zuul/components/nodepool-launcher/kustomization.yaml @@ -4,3 +4,4 @@ kind: Component resources: - deployment.yaml + - hpa.yaml diff --git a/kubernetes/zuul/components/restarter/README.md b/kubernetes/zuul/components/restarter/README.md new file mode 100644 index 0000000..22f9bd2 --- /dev/null +++ b/kubernetes/zuul/components/restarter/README.md @@ -0,0 +1,8 @@ +# Zuul restarter + +Sometimes credentials stored in Vault are rotated outside of Zuul. Since Zuul +itself is not capable of reloading its general configuration, it is better to +simply restart certain parts of it periodically. + +This component implements a Kubernetes ServiceAccount with a Role and a few CronJobs +that restart some Zuul components.
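To sanity-check the RBAC wiring and to exercise a restart by hand once the manifests below are applied, something like the following should work (the commands are illustrative; the zuul-ci namespace is an assumption borrowed from the ZooKeeper address used in zuul.conf later in this patch):

    # Can the restarter ServiceAccount patch Deployments?
    kubectl -n zuul-ci auth can-i patch deployments --as=system:serviceaccount:zuul-ci:restart-deployment

    # Run one of the CronJobs immediately instead of waiting for its schedule
    kubectl -n zuul-ci create job restart-zuul-web-manual --from=cronjob/restart-zuul-web
    kubectl -n zuul-ci get jobs -w

The CronJobs themselves just run kubectl rollout restart against the target Deployment and wait for the rollout to finish, so a restart picks up re-templated secrets without touching the rest of the overlay.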
diff --git a/kubernetes/zuul/components/restarter/crb.yaml b/kubernetes/zuul/components/restarter/crb.yaml new file mode 100644 index 0000000..fb4d677 --- /dev/null +++ b/kubernetes/zuul/components/restarter/crb.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: "restart-deployment" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "restart-deployment" +subjects: + - kind: "ServiceAccount" + name: "restart-deployment" diff --git a/kubernetes/zuul/components/restarter/job-restart-nodepool-launcher.yaml b/kubernetes/zuul/components/restarter/job-restart-nodepool-launcher.yaml new file mode 100644 index 0000000..2fc2b48 --- /dev/null +++ b/kubernetes/zuul/components/restarter/job-restart-nodepool-launcher.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: "restart-nodepool-launcher" +spec: + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 2 + concurrencyPolicy: Forbid + schedule: '15 22 * * *' + jobTemplate: + spec: + backoffLimit: 2 + activeDeadlineSeconds: 600 + template: + spec: + serviceAccountName: "restart-deployment" + restartPolicy: Never + containers: + - name: "kubectl" + image: "bitnami/kubectl" + command: + - "bash" + - "-c" + - >- + kubectl rollout restart deployment/nodepool-launcher && + kubectl rollout status deployment/nodepool-launcher diff --git a/kubernetes/zuul/components/restarter/job-restart-zuul-web.yaml b/kubernetes/zuul/components/restarter/job-restart-zuul-web.yaml new file mode 100644 index 0000000..fd2b188 --- /dev/null +++ b/kubernetes/zuul/components/restarter/job-restart-zuul-web.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: "restart-zuul-web" +spec: + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 2 + concurrencyPolicy: Forbid + schedule: '0 0 * * *' + jobTemplate: + spec: + backoffLimit: 2 + activeDeadlineSeconds: 600 + template: + spec: + serviceAccountName: "restart-deployment" + restartPolicy: Never + containers: + - name: "kubectl" + image: "bitnami/kubectl" + command: + - "bash" + - "-c" + - >- + kubectl rollout restart deployment/zuul-web && + kubectl rollout status deployment/zuul-web diff --git a/kubernetes/zuul/components/restarter/kustomization.yaml b/kubernetes/zuul/components/restarter/kustomization.yaml new file mode 100644 index 0000000..cca5722 --- /dev/null +++ b/kubernetes/zuul/components/restarter/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1alpha1 +kind: Component + +resources: + - sa.yaml + - role.yaml + - crb.yaml + - job-restart-zuul-web.yaml + - job-restart-nodepool-launcher.yaml diff --git a/kubernetes/zuul/components/restarter/role.yaml b/kubernetes/zuul/components/restarter/role.yaml new file mode 100644 index 0000000..84d9e7d --- /dev/null +++ b/kubernetes/zuul/components/restarter/role.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: "restart-deployment" +rules: + - apiGroups: ["apps"] + resources: ["deployments"] + # resourceNames: ["test-pod"] + verbs: ["get", "patch", "list", "watch"] diff --git a/kubernetes/zuul/components/restarter/sa.yaml b/kubernetes/zuul/components/restarter/sa.yaml new file mode 100644 index 0000000..2a7426c --- /dev/null +++ b/kubernetes/zuul/components/restarter/sa.yaml @@ -0,0 +1,5 @@ +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "restart-deployment" diff --git a/kubernetes/zuul/components/zookeeper/kustomization.yaml 
b/kubernetes/zuul/components/zookeeper/kustomization.yaml index bf96e4f..b622376 100644 --- a/kubernetes/zuul/components/zookeeper/kustomization.yaml +++ b/kubernetes/zuul/components/zookeeper/kustomization.yaml @@ -19,7 +19,7 @@ labels: images: - name: "zookeeper" newName: "quay.io/opentelekomcloud/zookeeper" - newTag: "3.8.0" + newTag: "3.8.1" resources: - cert.yaml diff --git a/kubernetes/zuul/components/zookeeper/statefulset.yaml b/kubernetes/zuul/components/zookeeper/statefulset.yaml index 6959d58..9c98896 100644 --- a/kubernetes/zuul/components/zookeeper/statefulset.yaml +++ b/kubernetes/zuul/components/zookeeper/statefulset.yaml @@ -40,13 +40,6 @@ spec: runAsUser: 1000 runAsGroup: 1000 image: "zookeeper" - resources: - limits: - cpu: "500m" - memory: "4Gi" - requests: - cpu: "100m" - memory: "1Gi" command: - "/bin/bash" - "-xec" @@ -93,9 +86,9 @@ spec: value: "10" - name: ZK_TICK_TIME value: "2000" - - name: ZOO_AUTOPURGE_PURGEINTERVAL + - name: ZK_PURGE_INTERVAL value: "6" - - name: ZOO_AUTOPURGE_SNAPRETAINCOUNT + - name: ZK_SNAP_RETAIN_COUNT value: "3" - name: ZOO_INIT_LIMIT value: "5" @@ -108,6 +101,14 @@ spec: - name: ZOO_TICK_TIME value: "2000" + resources: + limits: + cpu: "100m" + memory: "2Gi" + requests: + cpu: "20m" + memory: "1Gi" + volumeMounts: - name: data mountPath: /data diff --git a/kubernetes/zuul/components/zuul-client/deployment.yaml b/kubernetes/zuul/components/zuul-client/deployment.yaml index 85f3c18..910be03 100644 --- a/kubernetes/zuul/components/zuul-client/deployment.yaml +++ b/kubernetes/zuul/components/zuul-client/deployment.yaml @@ -24,7 +24,7 @@ spec: # Zuul-client is a regular zuul-web image doing nothing. # We use it only to have completely independent pod serving as # zuul client for i.e. maintenance. - - name: "zuul-client" + - name: "zuul" image: "zuul/zuul-web" command: - "sh" @@ -34,17 +34,17 @@ spec: resources: limits: cpu: "50m" - memory: "200Mi" + memory: "128Mi" requests: - cpu: "20m" - memory: "100Mi" + cpu: "10m" + memory: "32Mi" securityContext: runAsUser: 10001 runAsGroup: 10001 volumeMounts: - - name: "zuul-config" + - name: "zuul-cfg" mountPath: "/etc/zuul" readOnly: true - name: "zookeeper-client-tls" @@ -54,7 +54,7 @@ spec: mountPath: "/etc/zuul-config" volumes: - - name: "zuul-config" + - name: "zuul-cfg" secret: secretName: "zuul-config" diff --git a/kubernetes/zuul/components/zuul-config/deployment.yaml b/kubernetes/zuul/components/zuul-config/deployment.yaml index abafa53..7b0a2fd 100644 --- a/kubernetes/zuul/components/zuul-config/deployment.yaml +++ b/kubernetes/zuul/components/zuul-config/deployment.yaml @@ -44,11 +44,11 @@ spec: image: "zuul/nodepool-builder" resources: limits: - cpu: "100m" - memory: "128Mi" + cpu: "50m" + memory: "64Mi" requests: cpu: "10m" - memory: "64Mi" + memory: "5Mi" volumeMounts: - name: "zuul-config-data" @@ -62,3 +62,5 @@ spec: - name: "zuul-config-data" persistentVolumeClaim: claimName: "zuul-config" + + revisionHistoryLimit: 2 diff --git a/kubernetes/zuul/components/zuul-executor/statefulset.yaml b/kubernetes/zuul/components/zuul-executor/statefulset.yaml index d280d75..384460e 100644 --- a/kubernetes/zuul/components/zuul-executor/statefulset.yaml +++ b/kubernetes/zuul/components/zuul-executor/statefulset.yaml @@ -40,7 +40,7 @@ spec: topologyKey: "kubernetes.io/hostname" containers: - - name: "executor" + - name: "zuul" image: "zuul/zuul-executor" args: ["/usr/local/bin/zuul-executor", "-f", "-d"] env: @@ -58,19 +58,36 @@ spec: - containerPort: 7900 name: "logs" protocol: "TCP" + - containerPort: 
9091 + name: "prometheus" + protocol: "TCP" + + # readinessProbe: + # httpGet: + # path: "/health/ready" + # port: "prometheus" + # failureThreshold: 20 + # periodSeconds: 10 + # livenessProbe: + # httpGet: + # path: "/health/live" + # port: "prometheus" + # initialDelaySeconds: 120 + # failureThreshold: 10 + # periodSeconds: 5 + # timeoutSeconds: 5 resources: limits: - cpu: "2" + cpu: "2000m" memory: "8G" requests: - cpu: "1" + cpu: "500m" memory: "1G" securityContext: privileged: true - volumeMounts: - name: "zuul-config" mountPath: "/etc/zuul" @@ -82,6 +99,11 @@ spec: mountPath: "/etc/zuul-config" - name: "zuul-var" mountPath: "/var/lib/zuul" + - name: "zuul-vars" + mountPath: "/var/run/zuul/vars" + - name: "zuul-trusted-ro" + mountPath: "/var/run/zuul/trusted-ro" + readOnly: true serviceAccountName: "zuul" terminationGracePeriodSeconds: 120 @@ -98,5 +120,12 @@ spec: persistentVolumeClaim: claimName: "zuul-config" + - name: "zuul-vars" + configMap: + name: "zuul-executor-vars" + + - name: "zuul-trusted-ro" + emptyDir: {} + - name: "zuul-var" emptyDir: {} diff --git a/kubernetes/zuul/components/zuul-merger/deployment.yaml b/kubernetes/zuul/components/zuul-merger/deployment.yaml new file mode 100644 index 0000000..04a5713 --- /dev/null +++ b/kubernetes/zuul/components/zuul-merger/deployment.yaml @@ -0,0 +1,107 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "zuul-merger" + labels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-merger" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-merger" + template: + metadata: + labels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-merger" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/name" + operator: "In" + values: + - "zuul" + - key: "app.kubernetes.io/component" + operator: "In" + values: + - "zuul-merger" + topologyKey: "kubernetes.io/hostname" + + containers: + - name: "zuul" + image: "zuul/zuul-merger" + args: ["/usr/local/bin/zuul-merger", "-f", "-d"] + + ports: + - containerPort: 9091 + name: "prometheus" + protocol: "TCP" + + readinessProbe: + httpGet: + path: "/health/ready" + port: "prometheus" + failureThreshold: 20 + periodSeconds: 10 + livenessProbe: + httpGet: + path: "/health/live" + port: "prometheus" + initialDelaySeconds: 120 + failureThreshold: 10 + periodSeconds: 5 + timeoutSeconds: 5 + + resources: + limits: + cpu: "200m" + memory: "600Mi" + requests: + cpu: "50m" + memory: "100Mi" + + securityContext: + runAsUser: 10001 + runAsGroup: 10001 + + volumeMounts: + - name: "zuul-config" + mountPath: "/etc/zuul" + readOnly: true + - name: "zookeeper-client-tls" + mountPath: "/tls/client" + readOnly: true + - name: "zuul-config-data" + mountPath: "/etc/zuul-config" + - name: "zuul-var" + mountPath: "/var/lib/zuul" + + serviceAccountName: "zuul" + terminationGracePeriodSeconds: 120 + volumes: + - name: "zuul-config" + secret: + secretName: "zuul-config" + + - name: "zookeeper-client-tls" + secret: + secretName: "zookeeper-client-tls" + + - name: "zuul-config-data" + persistentVolumeClaim: + claimName: "zuul-config" + + - name: "zuul-var" + emptyDir: {} + revisionHistoryLimit: 2 diff --git a/kubernetes/zuul/components/zuul-merger/hpa.yaml 
b/kubernetes/zuul/components/zuul-merger/hpa.yaml new file mode 100644 index 0000000..20d9a9d --- /dev/null +++ b/kubernetes/zuul/components/zuul-merger/hpa.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: autoscaling/v2 +kind: "HorizontalPodAutoscaler" +metadata: + name: "zuul-merger" + labels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-merger" +spec: + scaleTargetRef: + kind: "Deployment" + name: "zuul-merger" + apiVersion: "apps/v1" + minReplicas: 1 + maxReplicas: 4 + metrics: + - type: "Resource" + resource: + name: "cpu" + target: + type: "Utilization" + averageUtilization: 70 diff --git a/kubernetes/zuul/components/zuul-merger/kustomization.yaml b/kubernetes/zuul/components/zuul-merger/kustomization.yaml index 61f0ad9..65e6931 100644 --- a/kubernetes/zuul/components/zuul-merger/kustomization.yaml +++ b/kubernetes/zuul/components/zuul-merger/kustomization.yaml @@ -3,4 +3,5 @@ apiVersion: kustomize.config.k8s.io/v1alpha1 kind: Component resources: - - statefulset.yaml + - deployment.yaml + - hpa.yaml diff --git a/kubernetes/zuul/components/zuul-scheduler/deployment.yaml b/kubernetes/zuul/components/zuul-scheduler/deployment.yaml new file mode 100644 index 0000000..1b01447 --- /dev/null +++ b/kubernetes/zuul/components/zuul-scheduler/deployment.yaml @@ -0,0 +1,110 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zuul-scheduler + labels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-scheduler" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-scheduler" + template: + metadata: + labels: + app.kubernetes.io/name: "zuul" + app.kubernetes.io/part-of: "zuul" + app.kubernetes.io/component: "zuul-scheduler" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app.kubernetes.io/name" + operator: In + values: + - "zuul" + - key: "app.kubernetes.io/component" + operator: In + values: + - "zuul-scheduler" + topologyKey: "kubernetes.io/hostname" + + containers: + - name: "zuul" + image: "zuul/zuul-scheduler" + args: ["/usr/local/bin/zuul-scheduler", "-f", "-d"] + + ports: + - containerPort: 9091 + name: "prometheus" + protocol: "TCP" + + readinessProbe: + httpGet: + path: "/health/ready" + port: "prometheus" + failureThreshold: 20 + periodSeconds: 10 + livenessProbe: + httpGet: + path: "/health/live" + port: "prometheus" + initialDelaySeconds: 120 + failureThreshold: 10 + periodSeconds: 5 + timeoutSeconds: 5 + + resources: + limits: + cpu: "2" + memory: "2G" + requests: + cpu: "100m" + memory: "200Mi" + + securityContext: + runAsUser: 10001 + runAsGroup: 10001 + + volumeMounts: + - name: "zuul-config" + mountPath: "/etc/zuul" + readOnly: true + - name: "zookeeper-client-tls" + mountPath: "/tls/client" + readOnly: true + - name: "zuul-config-data" + mountPath: "/etc/zuul-config" + - name: "zuul-var-lib" + mountPath: "/var/lib/zuul" + - name: "zuul-var-run" + mountPath: "/var/run/zuul" + + serviceAccountName: "zuul" + volumes: + - name: "zuul-config" + secret: + secretName: "zuul-config" + + - name: "zookeeper-client-tls" + secret: + secretName: "zookeeper-client-tls" + + - name: "zuul-config-data" + persistentVolumeClaim: + claimName: "zuul-config" + + - name: "zuul-var-lib" + emptyDir: {} + + - name: "zuul-var-run" + emptyDir: {} diff --git 
a/kubernetes/zuul/components/zuul-scheduler/kustomization.yaml b/kubernetes/zuul/components/zuul-scheduler/kustomization.yaml index 61f0ad9..fbc3362 100644 --- a/kubernetes/zuul/components/zuul-scheduler/kustomization.yaml +++ b/kubernetes/zuul/components/zuul-scheduler/kustomization.yaml @@ -3,4 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1alpha1 kind: Component resources: - - statefulset.yaml + - deployment.yaml diff --git a/kubernetes/zuul/components/zuul-web/deployment.yaml b/kubernetes/zuul/components/zuul-web/deployment.yaml index 1e983a7..8d060ca 100644 --- a/kubernetes/zuul/components/zuul-web/deployment.yaml +++ b/kubernetes/zuul/components/zuul-web/deployment.yaml @@ -22,7 +22,7 @@ spec: app.kubernetes.io/component: "zuul-web" spec: containers: - - name: "web" + - name: "zuul" image: "zuul/zuul-web" args: ["/usr/local/bin/zuul-web", "-f", "-d"] @@ -30,6 +30,24 @@ spec: - containerPort: 9000 name: "web" protocol: "TCP" + - containerPort: 9091 + name: "prometheus" + protocol: "TCP" + + readinessProbe: + httpGet: + path: "/health/ready" + port: "prometheus" + failureThreshold: 30 + periodSeconds: 10 + livenessProbe: + httpGet: + path: "/health/live" + port: "prometheus" + initialDelaySeconds: 120 + failureThreshold: 10 + periodSeconds: 5 + timeoutSeconds: 5 resources: limits: diff --git a/kubernetes/zuul/overlays/scs/configs/gitea.key b/kubernetes/zuul/overlays/scs/configs/gitea.key new file mode 100644 index 0000000..35dfe4b --- /dev/null +++ b/kubernetes/zuul/overlays/scs/configs/gitea.key @@ -0,0 +1,50 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAgEArig9BW994crJrAfM3H9P/HH+yz7fJI+2SOsVaDVle+2tWD+rfpFJ +7SNLgXG1ipv/XnA0W7S/i0+7ShPieiakRuXqHrnfZVNf5Z/smH5aweZB62CgNxIH2fCCRI +lKJ8YbNDOdulpltbELjHieXU9mjIapYrLFx13xLjr2mcRNrkOs+N1JcHxiRoG4qez0KNlr +dn83c3Hda5lUi8O73ZxaGPzW5a9J89GLOiX7w+J6T3eDzMHOQqGOoC4S90QIRMya1UhP2J +8GU2FTOMav5ZlOFHTN4m+/iO0xe68rwAFuO+l0DN+nYUvEr/daK/YAAjZcDS0MvwRwpb9g +a+V6YoUCxnBZPa0GTqFe49UBUZzuwgdwoYznUYkKi1zodp0idR/VoDFwu/XmMM2OlhS4qT +vtDyyTRd2OJDCkVv2HHWp9vNmf8V6UIbPvvEpHwK0Ts1Z01aNMf9wmCxK3ORDdp1nrC3Uv +OjQ+AkxooZezpRwBEgXYfZH1XVMdrJDQSeMFln2/20BTYqrum2bpdheEbRfpwIT8YFfUsu +TiZZm4VRmDjtK+Zi+0IP4611M5Zeqpnrvxe4c4QeNJVR9/Euc8awhq78+8tfV+cOJygfu9 +3JD43eVgd3qoR3jRSRPznPMffSlPma/Nu+gXHQ1nAmU/ZmBcq4Yx9XpIUNb05AJzAsU79o +MAAAdQ1aGCR9WhgkcAAAAHc3NoLXJzYQAAAgEArig9BW994crJrAfM3H9P/HH+yz7fJI+2 +SOsVaDVle+2tWD+rfpFJ7SNLgXG1ipv/XnA0W7S/i0+7ShPieiakRuXqHrnfZVNf5Z/smH +5aweZB62CgNxIH2fCCRIlKJ8YbNDOdulpltbELjHieXU9mjIapYrLFx13xLjr2mcRNrkOs ++N1JcHxiRoG4qez0KNlrdn83c3Hda5lUi8O73ZxaGPzW5a9J89GLOiX7w+J6T3eDzMHOQq +GOoC4S90QIRMya1UhP2J8GU2FTOMav5ZlOFHTN4m+/iO0xe68rwAFuO+l0DN+nYUvEr/da +K/YAAjZcDS0MvwRwpb9ga+V6YoUCxnBZPa0GTqFe49UBUZzuwgdwoYznUYkKi1zodp0idR +/VoDFwu/XmMM2OlhS4qTvtDyyTRd2OJDCkVv2HHWp9vNmf8V6UIbPvvEpHwK0Ts1Z01aNM +f9wmCxK3ORDdp1nrC3UvOjQ+AkxooZezpRwBEgXYfZH1XVMdrJDQSeMFln2/20BTYqrum2 +bpdheEbRfpwIT8YFfUsuTiZZm4VRmDjtK+Zi+0IP4611M5Zeqpnrvxe4c4QeNJVR9/Euc8 +awhq78+8tfV+cOJygfu93JD43eVgd3qoR3jRSRPznPMffSlPma/Nu+gXHQ1nAmU/ZmBcq4 +Yx9XpIUNb05AJzAsU79oMAAAADAQABAAACAEhUKdOiFDO8Frm9m5VPwpZjeaBLgj0a+mea +So+27WjkswNdngm4qW01JVyjLvRcCVjdXMFhddOTz4Lac0qr1bokLnGIXIEmeUNSgd5rS6 +IP0PzCaoe0k1IuEswIAKY4HoA1l6IXfPpShytVxN+X5E0keCCngoBkQZAjqNr/rgtby/Cn +ZqKy5dXGdj0MTfLRKVJTT2JAvea8DWLmbZWCI+EQ0OcfP6VlN190+vTFkGqEhlZ5fwIpOq +THvdS3in+YQg2mNJMQqH3kg72mttKyMr1ILWGHa5Kgf9aQT6k0buWu9SWLlWZRI2S5Y9ey +GRrSHLTUKuECJQ6RRqhI6+USvK2hifFQQGcLhPc8hjT3S1dxrbUxfrMY6/f5v19AS8Sewf 
+RPLDK+NU+AigfbGj7rAqMwRfgSdvgs+7Cmx058fE5f6kIIyxpxmFloAv812Hwxc5cekDxH +hL24Y1OrK9Ij/FWZKUjK2q97Wv34p79kouwFVK8umfCSTaZoQWp03JgkSexjwT6rw+ULih +ChjNNMF7byWd+vBGXXbE6hAg7+cSpSmAI6vqB/29Fp4bnx3Dr7YT1guxmVlVsq0aqhWYcw +Mh6xgRHQgjBAVV1xi597e73b0JQz3fWTyYSX5jo+GlIGrzFDRDGs69QwHj93D3WPeL1QtP +BhmSjvKyILYLVaEPeRAAABAQDHOBm7iKv3cr4wOkHT6eUK0budnklJyVtktj0XBHjLFUpl +Ac4ViQjw6c1ev3gZ0vB9ykkDYNcJpqjBO6EDnq5iexDmfXMm6ZlVAYdPYb/wIdHiFTa+Pe +6Dbvcporu8ATl6EUMz1ZjZ1+752F1eybucoq3SiwgS7B0lz7lYwCH1VpocOmW1zlAPlfdd +YsRCjf0f31INQn3JPN9lb6BBdM2AB5lknjAmpZFOo/tKYDCUrKoSPyy14gqszHT0ah5x8C +Qvu0YhHq+uxMiadEAPwMuYXQf9K0Msd67Mh/0Z67keoLzAWz6iWsO9xuygQv2dXvdq74ww +f250qQnVSQhslaibAAABAQDjUq8q9j+Z2XDmdhaA8U8CwvTrYioFcEjlmOD33pK5vySRtg +XATgfzxhfrrek7LOuyK7i81lD3QMNtmxsi/c7NvqqU1av7nPKdVL25qi1KmIKbDD1PqQCE +BvkB+wRmPXLHae3HmAjSovayD1S9K2txx5mOJ17RHJfALADhnVdJBvP1kdqNJjI+rTCNku +cm8UcQy+TxmC8dErCy5Kh259JrrtShGMLAT0r27CEe3DDnLj46YDledj2W/3PDKSvPkDSL +2lwmrxrLGqoWnQO7jg2tsrtGFvDrze+peVtxvEshE2cED3qC1H0PcvfC32Fbra5KY4DIkj +4+v/VaTVANAQi/AAABAQDEIJVMCjvgL+06OeC1PbN0l/B4oDfKCkCbPwV7BlNt+6rF0Sd3 +kHfz5bi1Y/iEtuaFjG/Cdvour81m4GP8atDUqdwLSjbsC+EhLdwiuZVNfponzaLYUzs61m ++qEA2OW276t/FvFHLmm8zpKYPHC4T6uvAiy3ZjeMUAH2DRVcPVpoTICo1ki3lb3IWJqpjz +XnCEmo6w7zZDAPQdA95KrxEJML5bo78FM6Oh96Rvfq2MQz3iMwnPdUYlOX/F9jw3BHcFqM +uZnLViGhuDJnFrMsgFIRDVCyUi8icB+WreLWAvY2tmmUhmzrNeL8oZl0yuBKDuz3FNmlvh +Iv5vPJWXwl89AAAAE2ExMTc5MDU2OTRAUkRERTAzVU0BAgMEBQYH +-----END OPENSSH PRIVATE KEY----- + diff --git a/kubernetes/zuul/overlays/scs/configs/zuul-gitea-ssh.key b/kubernetes/zuul/overlays/scs/configs/zuul-gitea-ssh.key new file mode 100644 index 0000000..35dfe4b --- /dev/null +++ b/kubernetes/zuul/overlays/scs/configs/zuul-gitea-ssh.key @@ -0,0 +1,50 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAgEArig9BW994crJrAfM3H9P/HH+yz7fJI+2SOsVaDVle+2tWD+rfpFJ +7SNLgXG1ipv/XnA0W7S/i0+7ShPieiakRuXqHrnfZVNf5Z/smH5aweZB62CgNxIH2fCCRI +lKJ8YbNDOdulpltbELjHieXU9mjIapYrLFx13xLjr2mcRNrkOs+N1JcHxiRoG4qez0KNlr +dn83c3Hda5lUi8O73ZxaGPzW5a9J89GLOiX7w+J6T3eDzMHOQqGOoC4S90QIRMya1UhP2J +8GU2FTOMav5ZlOFHTN4m+/iO0xe68rwAFuO+l0DN+nYUvEr/daK/YAAjZcDS0MvwRwpb9g +a+V6YoUCxnBZPa0GTqFe49UBUZzuwgdwoYznUYkKi1zodp0idR/VoDFwu/XmMM2OlhS4qT +vtDyyTRd2OJDCkVv2HHWp9vNmf8V6UIbPvvEpHwK0Ts1Z01aNMf9wmCxK3ORDdp1nrC3Uv +OjQ+AkxooZezpRwBEgXYfZH1XVMdrJDQSeMFln2/20BTYqrum2bpdheEbRfpwIT8YFfUsu +TiZZm4VRmDjtK+Zi+0IP4611M5Zeqpnrvxe4c4QeNJVR9/Euc8awhq78+8tfV+cOJygfu9 +3JD43eVgd3qoR3jRSRPznPMffSlPma/Nu+gXHQ1nAmU/ZmBcq4Yx9XpIUNb05AJzAsU79o +MAAAdQ1aGCR9WhgkcAAAAHc3NoLXJzYQAAAgEArig9BW994crJrAfM3H9P/HH+yz7fJI+2 +SOsVaDVle+2tWD+rfpFJ7SNLgXG1ipv/XnA0W7S/i0+7ShPieiakRuXqHrnfZVNf5Z/smH +5aweZB62CgNxIH2fCCRIlKJ8YbNDOdulpltbELjHieXU9mjIapYrLFx13xLjr2mcRNrkOs ++N1JcHxiRoG4qez0KNlrdn83c3Hda5lUi8O73ZxaGPzW5a9J89GLOiX7w+J6T3eDzMHOQq +GOoC4S90QIRMya1UhP2J8GU2FTOMav5ZlOFHTN4m+/iO0xe68rwAFuO+l0DN+nYUvEr/da +K/YAAjZcDS0MvwRwpb9ga+V6YoUCxnBZPa0GTqFe49UBUZzuwgdwoYznUYkKi1zodp0idR +/VoDFwu/XmMM2OlhS4qTvtDyyTRd2OJDCkVv2HHWp9vNmf8V6UIbPvvEpHwK0Ts1Z01aNM +f9wmCxK3ORDdp1nrC3UvOjQ+AkxooZezpRwBEgXYfZH1XVMdrJDQSeMFln2/20BTYqrum2 +bpdheEbRfpwIT8YFfUsuTiZZm4VRmDjtK+Zi+0IP4611M5Zeqpnrvxe4c4QeNJVR9/Euc8 +awhq78+8tfV+cOJygfu93JD43eVgd3qoR3jRSRPznPMffSlPma/Nu+gXHQ1nAmU/ZmBcq4 +Yx9XpIUNb05AJzAsU79oMAAAADAQABAAACAEhUKdOiFDO8Frm9m5VPwpZjeaBLgj0a+mea +So+27WjkswNdngm4qW01JVyjLvRcCVjdXMFhddOTz4Lac0qr1bokLnGIXIEmeUNSgd5rS6 +IP0PzCaoe0k1IuEswIAKY4HoA1l6IXfPpShytVxN+X5E0keCCngoBkQZAjqNr/rgtby/Cn 
+ZqKy5dXGdj0MTfLRKVJTT2JAvea8DWLmbZWCI+EQ0OcfP6VlN190+vTFkGqEhlZ5fwIpOq +THvdS3in+YQg2mNJMQqH3kg72mttKyMr1ILWGHa5Kgf9aQT6k0buWu9SWLlWZRI2S5Y9ey +GRrSHLTUKuECJQ6RRqhI6+USvK2hifFQQGcLhPc8hjT3S1dxrbUxfrMY6/f5v19AS8Sewf +RPLDK+NU+AigfbGj7rAqMwRfgSdvgs+7Cmx058fE5f6kIIyxpxmFloAv812Hwxc5cekDxH +hL24Y1OrK9Ij/FWZKUjK2q97Wv34p79kouwFVK8umfCSTaZoQWp03JgkSexjwT6rw+ULih +ChjNNMF7byWd+vBGXXbE6hAg7+cSpSmAI6vqB/29Fp4bnx3Dr7YT1guxmVlVsq0aqhWYcw +Mh6xgRHQgjBAVV1xi597e73b0JQz3fWTyYSX5jo+GlIGrzFDRDGs69QwHj93D3WPeL1QtP +BhmSjvKyILYLVaEPeRAAABAQDHOBm7iKv3cr4wOkHT6eUK0budnklJyVtktj0XBHjLFUpl +Ac4ViQjw6c1ev3gZ0vB9ykkDYNcJpqjBO6EDnq5iexDmfXMm6ZlVAYdPYb/wIdHiFTa+Pe +6Dbvcporu8ATl6EUMz1ZjZ1+752F1eybucoq3SiwgS7B0lz7lYwCH1VpocOmW1zlAPlfdd +YsRCjf0f31INQn3JPN9lb6BBdM2AB5lknjAmpZFOo/tKYDCUrKoSPyy14gqszHT0ah5x8C +Qvu0YhHq+uxMiadEAPwMuYXQf9K0Msd67Mh/0Z67keoLzAWz6iWsO9xuygQv2dXvdq74ww +f250qQnVSQhslaibAAABAQDjUq8q9j+Z2XDmdhaA8U8CwvTrYioFcEjlmOD33pK5vySRtg +XATgfzxhfrrek7LOuyK7i81lD3QMNtmxsi/c7NvqqU1av7nPKdVL25qi1KmIKbDD1PqQCE +BvkB+wRmPXLHae3HmAjSovayD1S9K2txx5mOJ17RHJfALADhnVdJBvP1kdqNJjI+rTCNku +cm8UcQy+TxmC8dErCy5Kh259JrrtShGMLAT0r27CEe3DDnLj46YDledj2W/3PDKSvPkDSL +2lwmrxrLGqoWnQO7jg2tsrtGFvDrze+peVtxvEshE2cED3qC1H0PcvfC32Fbra5KY4DIkj +4+v/VaTVANAQi/AAABAQDEIJVMCjvgL+06OeC1PbN0l/B4oDfKCkCbPwV7BlNt+6rF0Sd3 +kHfz5bi1Y/iEtuaFjG/Cdvour81m4GP8atDUqdwLSjbsC+EhLdwiuZVNfponzaLYUzs61m ++qEA2OW276t/FvFHLmm8zpKYPHC4T6uvAiy3ZjeMUAH2DRVcPVpoTICo1ki3lb3IWJqpjz +XnCEmo6w7zZDAPQdA95KrxEJML5bo78FM6Oh96Rvfq2MQz3iMwnPdUYlOX/F9jw3BHcFqM +uZnLViGhuDJnFrMsgFIRDVCyUi8icB+WreLWAvY2tmmUhmzrNeL8oZl0yuBKDuz3FNmlvh +Iv5vPJWXwl89AAAAE2ExMTc5MDU2OTRAUkRERTAzVU0BAgMEBQYH +-----END OPENSSH PRIVATE KEY----- + diff --git a/kubernetes/zuul/overlays/scs/configs/zuul.conf b/kubernetes/zuul/overlays/scs/configs/zuul.conf new file mode 100644 index 0000000..d0eec57 --- /dev/null +++ b/kubernetes/zuul/overlays/scs/configs/zuul.conf @@ -0,0 +1,60 @@ +[zookeeper] +hosts=zookeeper.zuul-ci.svc.cluster.local:2281 +tls_cert=/tls/client/tls.crt +tls_key=/tls/client/tls.key +tls_ca=/tls/client/ca.crt +session_timeout=40 + +[scheduler] +tenant_config=/etc/zuul-config/zuul/main.yaml +state_dir=/var/lib/zuul +relative_priority=true +prometheus_port=9091 + +[web] +listen_address=0.0.0.0 +port=9000 +status_url=https://zuul.scs.otc-service.com +root=https://zuul.scs.otc-service.com +prometheus_port=9091 + +[fingergw] +port=9079 +user=zuul + +[keystore] +password=abc + +[merger] +git_dir=/var/lib/zuul/git +git_timeout=600 +git_user_email=zuul@zuul.scs.otc-service.com +git_user_name=OTC SCS Zuul +prometheus_port=9091 + +[executor] +manage_ansible=true +ansible_root=/var/lib/zuul/managed_ansible +private_key_file=/etc/zuul/sshkey +disk_limit_per_job=2000 +max_starting_builds=5 +prometheus_port=9091 + +[database] +dburi=postgresql://root:Holla_DieWaldfee2023@10.20.0.21:8635/zuul?sslmode=require + +# base zuul jobs +[connection "opendev"] +name=opendev +driver=git +baseurl=https://opendev.org + +[connection "gitea"] +name=gitea +driver=gitea +baseurl=http://10.10.0.119:3000 +server=10.10.0.119 +cloneurl=ssh://git@10.10.0.119:2222 +api_token=77142be0fe4644d5450652da17aff9ef0530993b +webhook_secret=TODO +sshkey=/etc/zuul/gitea.key diff --git a/kubernetes/zuul/overlays/scs/kustomization.yaml b/kubernetes/zuul/overlays/scs/kustomization.yaml index 8b274b1..e2fa52a 100644 --- a/kubernetes/zuul/overlays/scs/kustomization.yaml +++ b/kubernetes/zuul/overlays/scs/kustomization.yaml @@ -2,16 +2,16 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -components: - - 
../../components/zuul-client - - ../../components/zuul-merger - - ../../components/nodepool-builder +# components: +# - ../../components/zuul-client +# - ../../components/zuul-merger +# - ../../components/nodepool-builder configMapGenerator: - - name: "vault-agent-config" - files: - - "config-zuul.hcl=configs/vault-agent/config-zuul.hcl" - - "config-nodepool.hcl=configs/vault-agent/config-nodepool.hcl" + # - name: "vault-agent-config" + # files: + # - "config-zuul.hcl=configs/vault-agent/config-zuul.hcl" + # - "config-nodepool.hcl=configs/vault-agent/config-nodepool.hcl" - name: "zuul-instance-config" behavior: "replace" literals: @@ -76,71 +76,71 @@ patches: version: v1 # Path zuul components (replace config and enable vault) - - patch: |- - - op: replace - path: /spec/template/spec/volumes/0 - value: - name: "zuul-config" - emptyDir: {} - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-client,zuul-web)" - group: apps - version: v1 - kind: Deployment - - - path: patch-zuul.yaml - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-client,zuul-web)" - group: apps - version: v1 - kind: Deployment - - - patch: |- - - op: replace - path: /spec/template/spec/volumes/0 - value: - name: "zuul-config" - emptyDir: - medium: "Memory" - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-scheduler,zuul-executor,zuul-merger)" - group: apps - version: v1 - kind: StatefulSet - - - path: patch-zuul.yaml - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-scheduler,zuul-executor,zuul-merger)" - group: apps - version: v1 - kind: StatefulSet - - # Patching Nodepool components (replace config and enable vault) - - patch: |- - - op: replace - path: /spec/template/spec/volumes/0 - value: - name: "nodepool-config" - emptyDir: {} - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (nodepool-launcher)" - group: apps - version: v1 - kind: Deployment - - - path: patch-nodepool.yaml - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (nodepool-launcher)" - group: apps - version: v1 - kind: Deployment - - - path: patch-nodepool.yaml - target: - labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (nodepool-builder)" - group: apps - version: v1 - kind: StatefulSet + # - patch: |- + # - op: replace + # path: /spec/template/spec/volumes/0 + # value: + # name: "zuul-config" + # emptyDir: {} + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-client,zuul-web)" + # group: apps + # version: v1 + # kind: Deployment + # + # - path: patch-zuul.yaml + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-client,zuul-web)" + # group: apps + # version: v1 + # kind: Deployment + # + # - patch: |- + # - op: replace + # path: /spec/template/spec/volumes/0 + # value: + # name: "zuul-config" + # emptyDir: + # medium: "Memory" + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-scheduler,zuul-executor,zuul-merger)" + # group: apps + # version: v1 + # kind: StatefulSet + # + # - path: patch-zuul.yaml + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (zuul-scheduler,zuul-executor,zuul-merger)" + # group: apps + # version: v1 + # kind: StatefulSet + # + # # Patching Nodepool components (replace config and enable vault) 
+ # - patch: |- + # - op: replace + # path: /spec/template/spec/volumes/0 + # value: + # name: "nodepool-config" + # emptyDir: {} + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (nodepool-launcher)" + # group: apps + # version: v1 + # kind: Deployment + # + # - path: patch-nodepool.yaml + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (nodepool-launcher)" + # group: apps + # version: v1 + # kind: Deployment + # + # - path: patch-nodepool.yaml + # target: + # labelSelector: "app.kubernetes.io/name=zuul,app.kubernetes.io/component in (nodepool-builder)" + # group: apps + # version: v1 + # kind: StatefulSet # Patching web - patch: |- @@ -149,18 +149,18 @@ patches: value: nginx - op: replace path: /spec/rules/0/host - value: zuul.otc-service.com - value: zuul - - op: replace - path: /metadata/annotations - value: - cert-manager.io/cluster-issuer: letsencrypt-prod - - op: replace - path: /spec/tls - value: - - hosts: - - zuul - secretName: zuul-cert-prod + value: zuul.scs.otc-service.com + #- op: replace + # path: /metadata/annotations + # value: + # cert-manager.io/cluster-issuer: letsencrypt-prod + #- op: replace + # value: zuul + # path: /spec/tls + # value: + # - hosts: + # - zuul + # secretName: zuul-cert-prod target: group: networking.k8s.io kind: Ingress @@ -176,16 +176,17 @@ secretGenerator: - name: zuul-config behavior: "replace" files: - - "configs/zuul.conf.hcl" + - "configs/zuul.conf" + - "configs/gitea.key" - name: nodepool-config behavior: "replace" files: - "configs/openstack/clouds.yaml.hcl" # Vault configs - - name: vault-config-zuul - files: - - "configs/zuul.conf.hcl" - - name: vault-config-nodepool - files: - - "configs/openstack/clouds.yaml.hcl" - - "configs/kube.config.hcl" + # - name: vault-config-zuul + # files: + # - "configs/zuul.conf.hcl" + # - name: vault-config-nodepool + # files: + # - "configs/openstack/clouds.yaml.hcl" + # - "configs/kube.config.hcl" diff --git a/playbooks/acme-certs-local.yaml b/playbooks/acme-certs-local.yaml new file mode 100644 index 0000000..d672bf1 --- /dev/null +++ b/playbooks/acme-certs-local.yaml @@ -0,0 +1,57 @@ +--- +# This playbook is intended to be used i.e. during bootstraping +# of the system to generate required TLS certs using Acme +# (LetsEncrypt) on localhost. 
+# It expects the following variables: +# - :dns_cloud: name of the clouds.yaml entry on the localhost +# which is hosting the target DNS zone +# +# - :certs_path: path to the folder where certificates will be +# placed +# +# - :ssl_certs: a dictionary for the requested certificates +# key: cert name (file name) to be used +# value: list of DNS names +- hosts: localhost + become: true + vars_prompt: + + - name: certs_path + prompt: Path to the certs folder + default: "certs" + + vars: + ssl_certs: + vault1: + - vault1.scs.otc-service.com + - vault-lb.scs.otc-service.com + vault2: + - vault2.scs.otc-service.com + - vault-lb.scs.otc-service.com + vault3: + - vault3.scs.otc-service.com + - vault-lb.scs.otc-service.com + + tasks: + - name: Generate CSRs + ansible.builtin.include_role: + name: acme_request_certs + + - name: Add localhost into the ssl_certs group as expected by the role + ansible.builtin.add_host: + name: localhost + group: ssl_certs + ansible_connection: local + + - name: Install TXT records + ansible.builtin.include_role: + name: acme_install_txt_records + + - name: Generate Certs + ansible.builtin.include_role: + name: acme_create_certs + + - name: Remove TXT records + ansible.builtin.include_role: + name: acme_drop_txt_records + diff --git a/playbooks/bootstrap-bridge.yaml b/playbooks/bootstrap-bridge.yaml index 899ec18..ce455b9 100644 --- a/playbooks/bootstrap-bridge.yaml +++ b/playbooks/bootstrap-bridge.yaml @@ -11,7 +11,8 @@ # # In both cases, the "bastion" group has one entry, which is the # bastion host to run against. -- hosts: prod_bastion[0]:!disabled +- hosts: localhost + #prod_bastion[0]:!disabled name: "Bridge: boostrap the bastion host" become: true tasks: @@ -76,22 +77,6 @@ owner: root mode: 0755 - - name: Ensure sc2 folders - ansible.builtin.file: - path: "{{ item }}" - state: "directory" - loop: - - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/base" - - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/service" - - - name: Ensure sc2 files - ansible.builtin.file: - path: "{{ item }}" - state: "touch" - loop: - - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/base/hosts.yaml" - - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/service/groups.yaml" - - name: Setup global known_hosts ansible.builtin.include_role: name: add-inventory-known-hosts diff --git a/playbooks/configure-keycloak.yaml b/playbooks/configure-keycloak.yaml deleted file mode 100644 index 2709a35..0000000 --- a/playbooks/configure-keycloak.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: keycloak-controller:!disabled - name: "Configure Keycloak instances" - tasks: - - name: Configure Keycloak instance - include_role: - name: "configure_keycloak" - vars: - keycloak: "{{ item.value }}" - loop: "{{ keycloak_instances | dict2items }}" diff --git a/playbooks/distribute-tls-certs.yaml b/playbooks/distribute-tls-certs.yaml new file mode 100644 index 0000000..f3e2eae --- /dev/null +++ b/playbooks/distribute-tls-certs.yaml @@ -0,0 +1,3 @@ +--- +- hosts: ssl_certs:!disabled + become: true diff --git a/playbooks/roles/acme_request_certs/tasks/acme.yaml b/playbooks/roles/acme_request_certs/tasks/acme.yaml index a378828..d64fef4 100644 --- a/playbooks/roles/acme_request_certs/tasks/acme.yaml +++ b/playbooks/roles/acme_request_certs/tasks/acme.yaml @@ -1,3 +1,4 @@ +--- - include_tasks: common.yaml - name: Create acme challenge diff --git a/playbooks/roles/hashivault/tasks/Debian.yaml b/playbooks/roles/hashivault/tasks/Debian.yaml index adec99b..bf4525a 100644 ---
a/playbooks/roles/hashivault/tasks/Debian.yaml +++ b/playbooks/roles/hashivault/tasks/Debian.yaml @@ -1,12 +1,12 @@ --- - name: Add PPA GPG key - become: yes - apt_key: + become: true + ansible.builtin.apt_key: data: "{{ hashicorp_gpg_key }}" - name: Add hashicorp apt repo - become: yes - template: + become: true + ansible.builtin.template: dest: /etc/apt/sources.list.d/hashicorp.list group: root mode: 0644 @@ -14,8 +14,8 @@ src: sources.list.j2 - name: Install vault - become: yes - apt: + become: true + ansible.builtin.apt: name: vault state: present - update_cache: yes + update_cache: true diff --git a/playbooks/roles/hashivault/tasks/main.yaml b/playbooks/roles/hashivault/tasks/main.yaml index d3006df..e694798 100644 --- a/playbooks/roles/hashivault/tasks/main.yaml +++ b/playbooks/roles/hashivault/tasks/main.yaml @@ -13,7 +13,7 @@ file_list: "{{ distro_lookup_path }}" - name: Add PPA GPG key - become: yes + become: true apt_key: data: "{{ hashicorp_gpg_key }}"
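For context, a minimal sketch of how the new local-bootstrap pieces above are meant to be used together (the playbook and inventory paths come from this patch; the extra-vars and the exact order are assumptions, not something the patch itself prescribes):

    # 1. Request certificates locally via ACME; dns_cloud must name the clouds.yaml
    #    entry that manages the target DNS zone, certs land in the prompted certs_path.
    ansible-playbook playbooks/acme-certs-local.yaml -e dns_cloud=<clouds.yaml entry>

    # 2. Distribute the certificates to the ssl_certs hosts (the playbook added above
    #    is still a stub; the copy tasks/roles are expected to follow).
    ansible-playbook -i inventory/base/hosts.yaml -i inventory/service/all.yaml playbooks/distribute-tls-certs.yaml

    # 3. Bring up Vault as described in README.rst.
    ansible-playbook playbooks/service-vault.yaml

This matches the flow implied by README.rst and the new inventory/service/all.yaml; adjust inventories and variables to the actual environment.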