lots of updates #1
@@ -45,6 +45,6 @@ Bootstraping Vault therefore requires following steps

 2. Checkout this repository and ensure
    `inventory/service/hosts.yaml` contain proper IP addresses as
-   well as those hosts are member of vault group as `inventory/service/groups.yaml`
+   well as those hosts are member of vault group as `inventory/service/all.yaml`

 3. execute `ansible-playbook playbooks/service-vault.yaml` playbook.
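A minimal sketch of the bootstrap flow these steps describe, assuming the repository has already been checked out and that the hostnames match the inventory updated in this change (the working directory is a placeholder):

    # from a checkout of this system-config repository
    ansible-inventory -i inventory/service/all.yaml --graph vault    # confirm the vault hosts are in the vault group
    ansible-playbook -i inventory/service/all.yaml playbooks/service-vault.yaml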
@@ -7,26 +7,26 @@ all:
       host_keys:
         - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO3RHfleGRMVSNHSBg634EJwM1jYMrbsHTibECPttH1xc6Hdq5XSk/LWYYAeR8g3otMjxxwCVS13e/nMQNMlYvo='
     vault1.scs.otc-service.com:
-      ansible_host: 10.10.0.29
+      ansible_host: 10.10.0.210
       ansible_user: automation
-      public_v4: 10.10.0.29
+      public_v4: 10.10.0.210
       host_keys:
-        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFiPzNLi9kxmb4FeAjpQ8GfXpUqzZxs+1L1JqYmAhaNPdy6DwcKglWde/ce3DxFA3YXGGNw8B1euq+hI/zoNVxI='
+        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFfXq60z37tRXjTmwWlnLHFk/Udn1R2MbYe4jNo1DVDEf1lE44DzMDUkyTYo0lcDKSRTx6D/UlH0J4X/PN24Vp4='
     vault2.scs.otc-service.com:
-      ansible_host: 10.10.0.120
+      ansible_host: 10.10.0.231
       ansible_user: automation
-      public_v4: 10.10.0.120
+      public_v4: 10.10.0.231
       host_keys:
-        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNgVYQmU1AEzantVTjKpe1A6z22ve8/gMkdBFFwHgQicG6ppU+0L9LtVJsLd7xgSg8wnUGaZUotQ9sfKogwb2LQ='
+        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBH2ZcNK0OswZFCGc/hhEcLrZwaNng9qd+NAMFgCI+Z2en66n+nlonBlEmP9fbws84G0oBWfZ/+Z68dtAaMNVKZw='
     vault3.scs.otc-service.com:
-      ansible_host: 10.10.0.113
+      ansible_host: 10.10.0.251
       ansible_user: automation
-      public_v4: 10.10.0.113
+      public_v4: 10.10.0.251
       host_keys:
-        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE3Afc7X7kB5v6Rre0LJRC05R/KVW5iV6q+KKyHHQWMCXTdEHRDkgXiSDwxV7FPneZB7QT42QqNfoa43Zz4ptP0='
+        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAEHeofBIUQPW54/0B/p6Zmrxjfk6VqZYaCtWzfUMH4HqPZO/dFbza8MulKNprDSEDK4+KK2+9HvYunEYmvDvms='
     gitea1.scs.otc-service.com:
-      ansible_host: 10.10.0.6
+      ansible_host: 10.10.0.107
       ansible_user: automation
-      public_v4: 10.10.0.6
+      public_v4: 10.10.0.107
       host_keys:
-        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBA4L6C0gdxqrbueQf+cEWVHxmZmcewbYCGV5wqEayTXT4ceoktkyzHjOjk4fa91VmE5He+GkC1a88hDnWcwT2+w='
+        - 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIKrZwdNgGFNSozidYBIyFTla9Ho6ZksBQZja3jBhtnMabm2eUk0ITvaIvAhhhXGk2XeiRzvWpc/WtroIMLm+w0='
inventory/local_certs/group_vars/all.yaml (new file, 1 line):

certs_path: "../certs"
inventory/local_certs/group_vars/vault.yaml (new file, 2 lines):

vault_tls_cert_content: "{{ lookup('ansible.builtin.file', certs_path + '/' + vault_cert + '-fullchain.crt') | default(omit) }}"
vault_tls_key_content: "{{ lookup('ansible.builtin.file', certs_path + '/' + vault_cert + '.pem') }}"
inventory/service/all.yaml (new file, 42 lines):

---
all:
  vars:
    ansible_ssh_user: ubuntu
    ansible_ssh_private_key_file: /root/.ssh/id_rsa_scs
  children:
    bastion:
      hosts:
        bastion*.scs.otc-service.com:
        bridge*.scs.otc-service.com:

    ssl_certs:
      hosts:
        bridge.scs.otc-service.com:
        vault1.scs.otc-service.com:
        vault2.scs.otc-service.com:
        vault3.scs.otc-service.com:
        gitea1.scs.otc-service.com:

    k8s-controller:
      hosts:
        bridge.scs.otc-service.com:

    vault:
      hosts:
        vault1.scs.otc-service.com:
        vault2.scs.otc-service.com:
        vault3.scs.otc-service.com:

    vault-controller:
      hosts:
        bridge.scs.otc-service.com:

    gitea:
      hosts:
        gitea1.scs.otc-service.com:

    prod_bastion:
      hosts:
        bridge.scs.otc-service.com:

    disabled: []
@@ -1,31 +1,42 @@
-plugin: yamlgroup
-groups:
-  bastion:
-    - bastion*.scs.otc-service.com
-    - bridge*.scs.otc-service.com
-
-  ssl_certs:
-    - bridge.scs.otc-service.com
-    - vault1.scs.otc-service.com
-    - vault2.scs.otc-service.com
-    - vault3.scs.otc-service.com
-    - gitea1.scs.otc-service.com
-
-  k8s-controller:
-    - bridge.scs.otc-service.com
-
-  vault:
-    - vault1.scs.otc-service.com
-    - vault2.scs.otc-service.com
-    - vault3.scs.otc-service.com
-
-  vault-controller:
-    - bridge.scs.otc-service.com
-
-  gitea:
-    - gitea1.scs.otc-service.com
-
-  prod_bastion:
-    - bridge.scs.otc-service.com
-
-  disabled: []
+plugin: yaml
+all:
+  vars:
+    ansible_ssh_user: ubuntu
+    ansible_ssh_private_key_file: /root/.ssh/id_rsa_scs
+  children:
+    bastion:
+      hosts:
+        bastion*.scs.otc-service.com:
+        bridge*.scs.otc-service.com:
+
+    ssl_certs:
+      hosts:
+        bridge.scs.otc-service.com:
+        vault1.scs.otc-service.com:
+        vault2.scs.otc-service.com:
+        vault3.scs.otc-service.com:
+        gitea1.scs.otc-service.com:
+
+    k8s-controller:
+      hosts:
+        bridge.scs.otc-service.com:
+
+    vault:
+      hosts:
+        vault1.scs.otc-service.com:
+        vault2.scs.otc-service.com:
+        vault3.scs.otc-service.com:
+
+    vault-controller:
+      hosts:
+        bridge.scs.otc-service.com:
+
+    gitea:
+      hosts:
+        gitea1.scs.otc-service.com:
+
+    prod_bastion:
+      hosts:
+        bridge.scs.otc-service.com:
+
+    disabled: []
@@ -1,7 +1,7 @@
 ssl_certs:
   vault:
     - "vault1.scs.otc-service.com"
-vault_cert: "vault"
+vault_cert: "vault1"

 vault_proxy_protocol_behavior: "allow_authorized"
 # vault_proxy_protocol_authorized_addrs: "192.168.110.151,192.168.110.160"
@@ -12,9 +12,11 @@ components:
   - ../components/nodepool-launcher

 configMapGenerator:
-  - name: zuul-instance-config
+  - name: "zuul-instance-config"
     literals:
       - ZUUL_CONFIG_REPO=https://gitea.eco.tsi-dev.otc-service.com/scs/zuul-config.git
+  - name: "zuul-executor-vars"
+    literals: []

 labels:
   - includeSelectors: true
@@ -35,10 +35,10 @@ spec:
          resources:
            limits:
              cpu: "300m"
-              memory: "500Mi"
+              memory: "512Mi"
            requests:
              cpu: "100m"
-              memory: "200Mi"
+              memory: "256Mi"

          securityContext:
            privileged: true
@@ -71,6 +71,10 @@ spec:

      serviceAccountName: "zuul"
      volumes:
+        - name: "nodepool-config"
+          secret:
+            secretName: "nodepool-config"
+
        - name: "dev"
          hostPath:
            path: "/dev"
@@ -81,10 +85,6 @@ spec:
        - name: "dib-tmp"
          emptyDir: {}

-        - name: "nodepool-config"
-          secret:
-            secretName: "nodepool-config"
-
        - name: "nodepool-containers"
          emptyDir: {}

@@ -12,13 +12,13 @@ spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: "zuul"
-      app.kubernetes.io/part-of: zuul
+      app.kubernetes.io/part-of: "zuul"
      app.kubernetes.io/component: "nodepool-launcher"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: "zuul"
-        app.kubernetes.io/part-of: zuul
+        app.kubernetes.io/part-of: "zuul"
        app.kubernetes.io/component: "nodepool-launcher"
    spec:
      containers:
@@ -33,10 +33,10 @@ spec:

          resources:
            limits:
-              cpu: "300m"
+              cpu: "100m"
              memory: "500Mi"
            requests:
-              cpu: "100m"
+              cpu: "50m"
              memory: "200Mi"

          securityContext:
kubernetes/zuul/components/nodepool-launcher/hpa.yaml (new file, 23 lines):

---
apiVersion: autoscaling/v2
kind: "HorizontalPodAutoscaler"
metadata:
  name: "nodepool-launcher"
  labels:
    app.kubernetes.io/name: "zuul"
    app.kubernetes.io/part-of: "zuul"
    app.kubernetes.io/component: "nodepool-launcher"
spec:
  scaleTargetRef:
    kind: "Deployment"
    name: "nodepool-launcher"
    apiVersion: "apps/v1"
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: "Resource"
      resource:
        name: "cpu"
        target:
          type: "Utilization"
          averageUtilization: 70
@@ -4,3 +4,4 @@ kind: Component

 resources:
   - deployment.yaml
+  - hpa.yaml
kubernetes/zuul/components/restarter/README.md (new file, 8 lines):

# Zuul restarter

Sometimes credentials stored in Vault are rotated outside of Zuul. Since Zuul
itself is not capable of reloading its general configration it is better to
simply periodically restart certain parts of it.

This component is implementing K8 ServiceAccount with role and few CronJobs
that restart some Zuul components.
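For reference, the restart these CronJobs perform can also be run by hand; a hedged sketch (the zuul-ci namespace is inferred from the zuul.conf added later in this change and may differ in your deployment):

    kubectl -n zuul-ci rollout restart deployment/zuul-web
    kubectl -n zuul-ci rollout status deployment/zuul-web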
kubernetes/zuul/components/restarter/crb.yaml (new file, 12 lines):

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: "restart-deployment"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "restart-deployment"
subjects:
  - kind: "ServiceAccount"
    name: "restart-deployment"
New file (27 lines, the restart-nodepool-launcher CronJob):

---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: "restart-nodepool-launcher"
spec:
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 2
  concurrencyPolicy: Forbid
  schedule: '15 22 * * *'
  jobTemplate:
    spec:
      backoffLimit: 2
      activeDeadlineSeconds: 600
      template:
        spec:
          serviceAccountName: "restart-deployment"
          restartPolicy: Never
          containers:
            - name: "kubectl"
              image: "bitnami/kubectl"
              command:
                - "bash"
                - "-c"
                - >-
                  kubectl rollout restart deployment/nodepool-launcher &&
                  kubectl rollout status deployment/nodepool-launcher
New file (27 lines, the restart-zuul-web CronJob):

---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: "restart-zuul-web"
spec:
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 2
  concurrencyPolicy: Forbid
  schedule: '0 0 * * *'
  jobTemplate:
    spec:
      backoffLimit: 2
      activeDeadlineSeconds: 600
      template:
        spec:
          serviceAccountName: "restart-deployment"
          restartPolicy: Never
          containers:
            - name: "kubectl"
              image: "bitnami/kubectl"
              command:
                - "bash"
                - "-c"
                - >-
                  kubectl rollout restart deployment/zuul-web &&
                  kubectl rollout status deployment/zuul-web
kubernetes/zuul/components/restarter/kustomization.yaml (new file, 10 lines):

---
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component

resources:
  - sa.yaml
  - role.yaml
  - crb.yaml
  - job-restart-zuul-web.yaml
  - job-restart-nodepool-launcher.yaml
kubernetes/zuul/components/restarter/role.yaml (new file, 10 lines):

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: "restart-deployment"
rules:
  - apiGroups: ["apps"]
    resources: ["deployments"]
    # resourceNames: ["test-pod"]
    verbs: ["get", "patch", "list", "watch"]
kubernetes/zuul/components/restarter/sa.yaml (new file, 5 lines):

---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: "restart-deployment"
@@ -19,7 +19,7 @@ labels:
 images:
   - name: "zookeeper"
     newName: "quay.io/opentelekomcloud/zookeeper"
-    newTag: "3.8.0"
+    newTag: "3.8.1"

 resources:
   - cert.yaml
@@ -40,13 +40,6 @@ spec:
            runAsUser: 1000
            runAsGroup: 1000
          image: "zookeeper"
-          resources:
-            limits:
-              cpu: "500m"
-              memory: "4Gi"
-            requests:
-              cpu: "100m"
-              memory: "1Gi"
          command:
            - "/bin/bash"
            - "-xec"
@@ -93,9 +86,9 @@ spec:
              value: "10"
            - name: ZK_TICK_TIME
              value: "2000"
-            - name: ZOO_AUTOPURGE_PURGEINTERVAL
+            - name: ZK_PURGE_INTERVAL
              value: "6"
-            - name: ZOO_AUTOPURGE_SNAPRETAINCOUNT
+            - name: ZK_SNAP_RETAIN_COUNT
              value: "3"
            - name: ZOO_INIT_LIMIT
              value: "5"
@@ -108,6 +101,14 @@ spec:
            - name: ZOO_TICK_TIME
              value: "2000"

+          resources:
+            limits:
+              cpu: "100m"
+              memory: "2Gi"
+            requests:
+              cpu: "20m"
+              memory: "1Gi"
+
          volumeMounts:
            - name: data
              mountPath: /data
@@ -24,7 +24,7 @@ spec:
        # Zuul-client is a regular zuul-web image doing nothing.
        # We use it only to have completely independent pod serving as
        # zuul client for i.e. maintenance.
-        - name: "zuul-client"
+        - name: "zuul"
          image: "zuul/zuul-web"
          command:
            - "sh"
@@ -34,17 +34,17 @@ spec:
          resources:
            limits:
              cpu: "50m"
-              memory: "200Mi"
+              memory: "128Mi"
            requests:
-              cpu: "20m"
-              memory: "100Mi"
+              cpu: "10m"
+              memory: "32Mi"

          securityContext:
            runAsUser: 10001
            runAsGroup: 10001

          volumeMounts:
-            - name: "zuul-config"
+            - name: "zuul-cfg"
              mountPath: "/etc/zuul"
              readOnly: true
            - name: "zookeeper-client-tls"
@@ -54,7 +54,7 @@ spec:
              mountPath: "/etc/zuul-config"

      volumes:
-        - name: "zuul-config"
+        - name: "zuul-cfg"
          secret:
            secretName: "zuul-config"

@@ -44,11 +44,11 @@ spec:
          image: "zuul/nodepool-builder"
          resources:
            limits:
-              cpu: "100m"
-              memory: "128Mi"
+              cpu: "50m"
+              memory: "64Mi"
            requests:
              cpu: "10m"
-              memory: "64Mi"
+              memory: "5Mi"

          volumeMounts:
            - name: "zuul-config-data"
@@ -62,3 +62,5 @@ spec:
        - name: "zuul-config-data"
          persistentVolumeClaim:
            claimName: "zuul-config"
+
+  revisionHistoryLimit: 2
@@ -40,7 +40,7 @@ spec:
              topologyKey: "kubernetes.io/hostname"

      containers:
-        - name: "executor"
+        - name: "zuul"
          image: "zuul/zuul-executor"
          args: ["/usr/local/bin/zuul-executor", "-f", "-d"]
          env:
@@ -58,19 +58,36 @@ spec:
            - containerPort: 7900
              name: "logs"
              protocol: "TCP"
+            - containerPort: 9091
+              name: "prometheus"
+              protocol: "TCP"

+          # readinessProbe:
+          #   httpGet:
+          #     path: "/health/ready"
+          #     port: "prometheus"
+          #   failureThreshold: 20
+          #   periodSeconds: 10
+          # livenessProbe:
+          #   httpGet:
+          #     path: "/health/live"
+          #     port: "prometheus"
+          #   initialDelaySeconds: 120
+          #   failureThreshold: 10
+          #   periodSeconds: 5
+          #   timeoutSeconds: 5
+
          resources:
            limits:
-              cpu: "2"
+              cpu: "2000m"
              memory: "8G"
            requests:
-              cpu: "1"
+              cpu: "500m"
              memory: "1G"

          securityContext:
            privileged: true

          volumeMounts:
            - name: "zuul-config"
              mountPath: "/etc/zuul"
@@ -82,6 +99,11 @@ spec:
              mountPath: "/etc/zuul-config"
            - name: "zuul-var"
              mountPath: "/var/lib/zuul"
+            - name: "zuul-vars"
+              mountPath: "/var/run/zuul/vars"
+            - name: "zuul-trusted-ro"
+              mountPath: "/var/run/zuul/trusted-ro"
+              readOnly: true

      serviceAccountName: "zuul"
      terminationGracePeriodSeconds: 120
@@ -98,5 +120,12 @@ spec:
          persistentVolumeClaim:
            claimName: "zuul-config"

+        - name: "zuul-vars"
+          configMap:
+            name: "zuul-executor-vars"
+
+        - name: "zuul-trusted-ro"
+          emptyDir: {}
+
        - name: "zuul-var"
          emptyDir: {}
kubernetes/zuul/components/zuul-merger/deployment.yaml (new file, 107 lines): diff not shown (too large).
kubernetes/zuul/components/zuul-merger/hpa.yaml (new file, 23 lines):

---
apiVersion: autoscaling/v2
kind: "HorizontalPodAutoscaler"
metadata:
  name: "zuul-merger"
  labels:
    app.kubernetes.io/name: "zuul"
    app.kubernetes.io/part-of: "zuul"
    app.kubernetes.io/component: "zuul-merger"
spec:
  scaleTargetRef:
    kind: "Deployment"
    name: "zuul-merger"
    apiVersion: "apps/v1"
  minReplicas: 1
  maxReplicas: 4
  metrics:
    - type: "Resource"
      resource:
        name: "cpu"
        target:
          type: "Utilization"
          averageUtilization: 70
@@ -3,4 +3,5 @@ apiVersion: kustomize.config.k8s.io/v1alpha1
 kind: Component

 resources:
-  - statefulset.yaml
+  - deployment.yaml
+  - hpa.yaml
kubernetes/zuul/components/zuul-scheduler/deployment.yaml (new file, 110 lines): diff not shown (too large).
@@ -3,4 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1alpha1
 kind: Component

 resources:
-  - statefulset.yaml
+  - deployment.yaml
@@ -22,7 +22,7 @@ spec:
        app.kubernetes.io/component: "zuul-web"
    spec:
      containers:
-        - name: "web"
+        - name: "zuul"
          image: "zuul/zuul-web"
          args: ["/usr/local/bin/zuul-web", "-f", "-d"]

@@ -30,6 +30,24 @@ spec:
            - containerPort: 9000
              name: "web"
              protocol: "TCP"
+            - containerPort: 9091
+              name: "prometheus"
+              protocol: "TCP"
+
+          readinessProbe:
+            httpGet:
+              path: "/health/ready"
+              port: "prometheus"
+            failureThreshold: 30
+            periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: "/health/live"
+              port: "prometheus"
+            initialDelaySeconds: 120
+            failureThreshold: 10
+            periodSeconds: 5
+            timeoutSeconds: 5

          resources:
            limits:
kubernetes/zuul/overlays/scs/configs/gitea.key (new file, 50 lines):

-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAgEArig9BW994crJrAfM3H9P/HH+yz7fJI+2SOsVaDVle+2tWD+rfpFJ
7SNLgXG1ipv/XnA0W7S/i0+7ShPieiakRuXqHrnfZVNf5Z/smH5aweZB62CgNxIH2fCCRI
lKJ8YbNDOdulpltbELjHieXU9mjIapYrLFx13xLjr2mcRNrkOs+N1JcHxiRoG4qez0KNlr
dn83c3Hda5lUi8O73ZxaGPzW5a9J89GLOiX7w+J6T3eDzMHOQqGOoC4S90QIRMya1UhP2J
8GU2FTOMav5ZlOFHTN4m+/iO0xe68rwAFuO+l0DN+nYUvEr/daK/YAAjZcDS0MvwRwpb9g
a+V6YoUCxnBZPa0GTqFe49UBUZzuwgdwoYznUYkKi1zodp0idR/VoDFwu/XmMM2OlhS4qT
vtDyyTRd2OJDCkVv2HHWp9vNmf8V6UIbPvvEpHwK0Ts1Z01aNMf9wmCxK3ORDdp1nrC3Uv
OjQ+AkxooZezpRwBEgXYfZH1XVMdrJDQSeMFln2/20BTYqrum2bpdheEbRfpwIT8YFfUsu
TiZZm4VRmDjtK+Zi+0IP4611M5Zeqpnrvxe4c4QeNJVR9/Euc8awhq78+8tfV+cOJygfu9
3JD43eVgd3qoR3jRSRPznPMffSlPma/Nu+gXHQ1nAmU/ZmBcq4Yx9XpIUNb05AJzAsU79o
MAAAdQ1aGCR9WhgkcAAAAHc3NoLXJzYQAAAgEArig9BW994crJrAfM3H9P/HH+yz7fJI+2
SOsVaDVle+2tWD+rfpFJ7SNLgXG1ipv/XnA0W7S/i0+7ShPieiakRuXqHrnfZVNf5Z/smH
5aweZB62CgNxIH2fCCRIlKJ8YbNDOdulpltbELjHieXU9mjIapYrLFx13xLjr2mcRNrkOs
+N1JcHxiRoG4qez0KNlrdn83c3Hda5lUi8O73ZxaGPzW5a9J89GLOiX7w+J6T3eDzMHOQq
GOoC4S90QIRMya1UhP2J8GU2FTOMav5ZlOFHTN4m+/iO0xe68rwAFuO+l0DN+nYUvEr/da
K/YAAjZcDS0MvwRwpb9ga+V6YoUCxnBZPa0GTqFe49UBUZzuwgdwoYznUYkKi1zodp0idR
/VoDFwu/XmMM2OlhS4qTvtDyyTRd2OJDCkVv2HHWp9vNmf8V6UIbPvvEpHwK0Ts1Z01aNM
f9wmCxK3ORDdp1nrC3UvOjQ+AkxooZezpRwBEgXYfZH1XVMdrJDQSeMFln2/20BTYqrum2
bpdheEbRfpwIT8YFfUsuTiZZm4VRmDjtK+Zi+0IP4611M5Zeqpnrvxe4c4QeNJVR9/Euc8
awhq78+8tfV+cOJygfu93JD43eVgd3qoR3jRSRPznPMffSlPma/Nu+gXHQ1nAmU/ZmBcq4
Yx9XpIUNb05AJzAsU79oMAAAADAQABAAACAEhUKdOiFDO8Frm9m5VPwpZjeaBLgj0a+mea
So+27WjkswNdngm4qW01JVyjLvRcCVjdXMFhddOTz4Lac0qr1bokLnGIXIEmeUNSgd5rS6
IP0PzCaoe0k1IuEswIAKY4HoA1l6IXfPpShytVxN+X5E0keCCngoBkQZAjqNr/rgtby/Cn
ZqKy5dXGdj0MTfLRKVJTT2JAvea8DWLmbZWCI+EQ0OcfP6VlN190+vTFkGqEhlZ5fwIpOq
THvdS3in+YQg2mNJMQqH3kg72mttKyMr1ILWGHa5Kgf9aQT6k0buWu9SWLlWZRI2S5Y9ey
GRrSHLTUKuECJQ6RRqhI6+USvK2hifFQQGcLhPc8hjT3S1dxrbUxfrMY6/f5v19AS8Sewf
RPLDK+NU+AigfbGj7rAqMwRfgSdvgs+7Cmx058fE5f6kIIyxpxmFloAv812Hwxc5cekDxH
hL24Y1OrK9Ij/FWZKUjK2q97Wv34p79kouwFVK8umfCSTaZoQWp03JgkSexjwT6rw+ULih
ChjNNMF7byWd+vBGXXbE6hAg7+cSpSmAI6vqB/29Fp4bnx3Dr7YT1guxmVlVsq0aqhWYcw
Mh6xgRHQgjBAVV1xi597e73b0JQz3fWTyYSX5jo+GlIGrzFDRDGs69QwHj93D3WPeL1QtP
BhmSjvKyILYLVaEPeRAAABAQDHOBm7iKv3cr4wOkHT6eUK0budnklJyVtktj0XBHjLFUpl
Ac4ViQjw6c1ev3gZ0vB9ykkDYNcJpqjBO6EDnq5iexDmfXMm6ZlVAYdPYb/wIdHiFTa+Pe
6Dbvcporu8ATl6EUMz1ZjZ1+752F1eybucoq3SiwgS7B0lz7lYwCH1VpocOmW1zlAPlfdd
YsRCjf0f31INQn3JPN9lb6BBdM2AB5lknjAmpZFOo/tKYDCUrKoSPyy14gqszHT0ah5x8C
Qvu0YhHq+uxMiadEAPwMuYXQf9K0Msd67Mh/0Z67keoLzAWz6iWsO9xuygQv2dXvdq74ww
f250qQnVSQhslaibAAABAQDjUq8q9j+Z2XDmdhaA8U8CwvTrYioFcEjlmOD33pK5vySRtg
XATgfzxhfrrek7LOuyK7i81lD3QMNtmxsi/c7NvqqU1av7nPKdVL25qi1KmIKbDD1PqQCE
BvkB+wRmPXLHae3HmAjSovayD1S9K2txx5mOJ17RHJfALADhnVdJBvP1kdqNJjI+rTCNku
cm8UcQy+TxmC8dErCy5Kh259JrrtShGMLAT0r27CEe3DDnLj46YDledj2W/3PDKSvPkDSL
2lwmrxrLGqoWnQO7jg2tsrtGFvDrze+peVtxvEshE2cED3qC1H0PcvfC32Fbra5KY4DIkj
4+v/VaTVANAQi/AAABAQDEIJVMCjvgL+06OeC1PbN0l/B4oDfKCkCbPwV7BlNt+6rF0Sd3
kHfz5bi1Y/iEtuaFjG/Cdvour81m4GP8atDUqdwLSjbsC+EhLdwiuZVNfponzaLYUzs61m
+qEA2OW276t/FvFHLmm8zpKYPHC4T6uvAiy3ZjeMUAH2DRVcPVpoTICo1ki3lb3IWJqpjz
XnCEmo6w7zZDAPQdA95KrxEJML5bo78FM6Oh96Rvfq2MQz3iMwnPdUYlOX/F9jw3BHcFqM
uZnLViGhuDJnFrMsgFIRDVCyUi8icB+WreLWAvY2tmmUhmzrNeL8oZl0yuBKDuz3FNmlvh
Iv5vPJWXwl89AAAAE2ExMTc5MDU2OTRAUkRERTAzVU0BAgMEBQYH
-----END OPENSSH PRIVATE KEY-----
kubernetes/zuul/overlays/scs/configs/zuul-gitea-ssh.key (new file, 50 lines): OpenSSH private key, byte-for-byte identical to configs/gitea.key above.
kubernetes/zuul/overlays/scs/configs/zuul.conf (new file, 60 lines):

[zookeeper]
hosts=zookeeper.zuul-ci.svc.cluster.local:2281
tls_cert=/tls/client/tls.crt
tls_key=/tls/client/tls.key
tls_ca=/tls/client/ca.crt
session_timeout=40

[scheduler]
tenant_config=/etc/zuul-config/zuul/main.yaml
state_dir=/var/lib/zuul
relative_priority=true
prometheus_port=9091

[web]
listen_address=0.0.0.0
port=9000
status_url=https://zuul.scs.otc-service.com
root=https://zuul.scs.otc-service.com
prometheus_port=9091

[fingergw]
port=9079
user=zuul

[keystore]
password=abc

[merger]
git_dir=/var/lib/zuul/git
git_timeout=600
git_user_email=zuul@zuul.scs.otc-service.com
git_user_name=OTC SCS Zuul
prometheus_port=9091

[executor]
manage_ansible=true
ansible_root=/var/lib/zuul/managed_ansible
private_key_file=/etc/zuul/sshkey
disk_limit_per_job=2000
max_starting_builds=5
prometheus_port=9091

[database]
dburi=postgresql://root:Holla_DieWaldfee2023@10.20.0.21:8635/zuul?sslmode=require

# base zuul jobs
[connection "opendev"]
name=opendev
driver=git
baseurl=https://opendev.org

[connection "gitea"]
name=gitea
driver=gitea
baseurl=http://10.10.0.119:3000
server=10.10.0.119
cloneurl=ssh://git@10.10.0.119:2222
api_token=77142be0fe4644d5450652da17aff9ef0530993b
webhook_secret=TODO
sshkey=/etc/zuul/gitea.key

(One further file diff in this change is suppressed because it is too large.)
playbooks/acme-certs-local.yaml (new file, 57 lines):

---
# This playbook is intended to be used i.e. during bootstraping
# of the system to generate required TLS certs using Acme
# (LetsEncrypt) on localhost.
# It expects following variables:
# - :dns_cloud: name of the clouds.yaml entry on the localhost
#   which is hosting target DNS zone
#
# - :certs_path: path to the folder where certificates would be
#   places
#
# - :ssl_certs: a dictionary for the requested certificates
#   key: cert name (file name) to be used
#   value: list of DNS names
- hosts: localhost
  become: true
  vars_prompt:

    - name: certs_path
      prompt: Path to the certs folder
      default: "certs"

  vars:
    ssl_certs:
      vault1:
        - vault1.scs.otc-service.com
        - vault-lb.scs.otc-service.com
      vault2:
        - vault2.scs.otc-service.com
        - vault-lb.scs.otc-service.com
      vault3:
        - vault3.scs.otc-service.com
        - vault-lb.scs.otc-service.com

  tasks:
    - name: Generate CSRs
      ansible.builtin.include_role:
        name: acme_request_certs

    - name: Add localhost into the ssl_certs group as expected by the role
      ansible.builtin.add_host:
        name: localhost
        group: ssl_certs
        ansible_connection: local

    - name: Install TXT records
      ansible.builtin.include_role:
        name: acme_install_txt_records

    - name: Generate Certs
      ansible.builtin.include_role:
        name: acme_create_certs

    - name: Remove TXT records
      ansible.builtin.include_role:
        name: acme_drop_txt_records
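A hedged invocation sketch for the playbook above; the dns_cloud value is a placeholder and must name an entry in the local clouds.yaml that hosts the target DNS zone:

    ansible-playbook playbooks/acme-certs-local.yaml \
      -e dns_cloud=<clouds.yaml-entry-for-the-DNS-zone> \
      -e certs_path=certs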
@@ -11,7 +11,8 @@
 #
 # In both cases, the "bastion" group has one entry, which is the
 # bastion host to run against.
-- hosts: prod_bastion[0]:!disabled
+- hosts: localhost
+  #prod_bastion[0]:!disabled
   name: "Bridge: boostrap the bastion host"
   become: true
   tasks:
@@ -76,22 +77,6 @@
        owner: root
        mode: 0755

-    - name: Ensure sc2 folders
-      ansible.builtin.file:
-        path: "{{ item }}"
-        state: "directory"
-      loop:
-        - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/base"
-        - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/service"
-
-    - name: Ensure sc2 files
-      ansible.builtin.file:
-        path: "{{ item }}"
-        state: "touch"
-      loop:
-        - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/base/hosts.yaml"
-        - "/home/zuul/src/gitlab/ecosystem/system-config/inventory/service/groups.yaml"
-
    - name: Setup global known_hosts
      ansible.builtin.include_role:
        name: add-inventory-known-hosts
Deleted file (9 lines):

@@ -1,9 +0,0 @@
-- hosts: keycloak-controller:!disabled
-  name: "Configure Keycloak instances"
-  tasks:
-    - name: Configure Keycloak instance
-      include_role:
-        name: "configure_keycloak"
-      vars:
-        keycloak: "{{ item.value }}"
-      loop: "{{ keycloak_instances | dict2items }}"
playbooks/distribute-tls-certs.yaml (new file, 3 lines):

---
hosts: ssl_certs:!disabled
become: true
@@ -1,3 +1,4 @@
+---
 - include_tasks: common.yaml

 - name: Create acme challenge
@@ -1,12 +1,12 @@
 ---
 - name: Add PPA GPG key
-  become: yes
-  apt_key:
+  become: true
+  ansible.builtin.apt_key:
     data: "{{ hashicorp_gpg_key }}"

 - name: Add hashicorp apt repo
-  become: yes
-  template:
+  become: true
+  ansible.builtin.template:
     dest: /etc/apt/sources.list.d/hashicorp.list
     group: root
     mode: 0644
@@ -14,8 +14,8 @@
     src: sources.list.j2

 - name: Install vault
-  become: yes
-  apt:
+  become: true
+  ansible.builtin.apt:
     name: vault
     state: present
-    update_cache: yes
+    update_cache: true
@@ -13,7 +13,7 @@
     file_list: "{{ distro_lookup_path }}"

 - name: Add PPA GPG key
-  become: yes
+  become: true
   apt_key:
     data: "{{ hashicorp_gpg_key }}"
