first db deployment script
.woodpecker/db.yml  (new file, 78 lines)
@@ -0,0 +1,78 @@
when:
  - event: push
    branch: main

steps:
  - name: version
    image: alpine:latest
    commands:
      - apk add --no-cache git
      - GIT_REV="$(git rev-parse --short HEAD)"
      - BUILD_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
      - APP_V="$(cat version)"
      - printf "GIT_REV=%s\nBUILD_BRANCH=%s\nAPP_V=%s\n" "$GIT_REV" "$BUILD_BRANCH" "$APP_V" | tee .env.version

  - name: secrets
    image: alpine:latest
    depends_on: [ version ]
    environment:
      VAULT_ADDR: https://vault.sendico.io
      VAULT_ROLE_ID: { from_secret: VAULT_ROLE_ID }
      VAULT_SECRET_ID: { from_secret: VAULT_SECRET_ID }
    commands:
      - apk add --no-cache curl bash coreutils sed
      - mkdir -p secrets
      - set -a; . ./.env.version; set +a
      # fetch registry creds
      - ./ci/vlt kv_to_file kv sendico/registry user secrets/REGISTRY_USER 600
      - ./ci/vlt kv_to_file kv sendico/registry password secrets/REGISTRY_PASS 600
      # fetch SSH private key for deploy
      - ./ci/vlt kv_to_file kv sendico/ops/deploy/ssh_key private secrets/SSH_KEY 600

  - name: lock-db
    image: quay.io/skopeo/stable:latest
    depends_on: [ secrets ]
    environment:
      REGISTRY_URL: registry.sendico.io
      MONGO_VERSION: latest
    commands:
      # the skopeo "stable" image is Fedora-based: bash, coreutils and sed are
      # already present, and apk would not be available here anyway
      - mkdir -p ci/prod/env
      - set -a; . ./ci/prod/.env.runtime; set +a
      - set -a; . ./.env.version; set +a
      - test -s secrets/REGISTRY_USER && test -s secrets/REGISTRY_PASS
      - CREDS="$(cat secrets/REGISTRY_USER):$(cat secrets/REGISTRY_PASS)"
      # mirror the multi-arch image into the registry under the app version tag
      - skopeo copy --all docker://docker.io/library/mongo:${MONGO_VERSION} docker://${REGISTRY_URL}/mirror/mongo:${APP_V} --dest-creds "$CREDS"
      # inspect the mirrored image to capture its immutable digest
      - INSPECT=$(skopeo inspect docker://${REGISTRY_URL}/mirror/mongo:${APP_V} --creds "$CREDS")
      - DIGEST="$(printf '%s' "$INSPECT" | tr -d '\n' | sed -n 's/.*"Digest"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p')"
      - test -n "$DIGEST"
      # store the lock both as local deploy metadata and for rsync to the server
      - printf 'MONGO_TAG=%s\nMONGO_DIGEST=%s\n' "$APP_V" "$DIGEST" | tee .env.lock ci/prod/env/.env.lock.db
      - cat .env.lock

  - name: deploy
    image: alpine:latest
    depends_on: [ lock-db ]
    commands:
      - apk add --no-cache openssh-client rsync
      - set -a; . ./ci/prod/.env.runtime; set +a  # SSH_HOST, SSH_USER, REMOTE_BASE and DB_DIR come from the repo
      - set -a; . ./.env.version; set +a
      - mkdir -p /root/.ssh
      - install -m 600 secrets/SSH_KEY /root/.ssh/id_rsa
      # ensure target dir layout
      - ssh -o StrictHostKeyChecking=no ${SSH_USER}@${SSH_HOST} "mkdir -p ${REMOTE_BASE}/${DB_DIR}/compose ${REMOTE_BASE}/${DB_DIR}/env ${REMOTE_BASE}/${DB_DIR}/.woodpecker"
      # sync non-secret runtime files from repo → server
      - rsync -avz --delete ci/prod/compose/ ${SSH_USER}@${SSH_HOST}:${REMOTE_BASE}/${DB_DIR}/compose/
      - rsync -avz .woodpecker/ ${SSH_USER}@${SSH_HOST}:${REMOTE_BASE}/${DB_DIR}/.woodpecker/
      - rsync -avz ci/prod/.env.runtime ${SSH_USER}@${SSH_HOST}:${REMOTE_BASE}/${DB_DIR}/env/.env.runtime
      - rsync -avz ci/prod/env/.env.lock.db ${SSH_USER}@${SSH_HOST}:${REMOTE_BASE}/${DB_DIR}/env/.env.lock.db
      # upload the generated lock as deploy metadata
      - scp -o StrictHostKeyChecking=no .env.lock ${SSH_USER}@${SSH_HOST}:${REMOTE_BASE}/${DB_DIR}/.env.lock
      # deploy; the env files are passed explicitly so compose can interpolate
      # ${REGISTRY_URL}, ${MONGO_TAG}, ${MONGO_DIGEST}, ${MONGO_PORT} at parse time
      - |
        ssh -o StrictHostKeyChecking=no ${SSH_USER}@${SSH_HOST} "
          set -eu
          cd ${REMOTE_BASE}/${DB_DIR}
          docker compose --env-file env/.env.runtime --env-file env/.env.lock.db -f compose/db.yml pull
          docker compose --env-file env/.env.runtime --env-file env/.env.lock.db -f compose/db.yml up -d --remove-orphans
        "
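The lock-db step pins the mirrored image by digest, so a later upstream push to `latest` cannot silently change what `up -d` runs. A quick post-run sanity check (a sketch; assumes the workspace still holds secrets/ and .env.lock from the steps above):

    . ./.env.lock
    CREDS="$(cat secrets/REGISTRY_USER):$(cat secrets/REGISTRY_PASS)"
    # re-resolve the tag and compare against the locked digest
    skopeo inspect docker://registry.sendico.io/mirror/mongo:${MONGO_TAG} --creds "$CREDS" \
      | tr -d '\n' \
      | sed -n 's/.*"Digest"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' \
      | grep -qx "${MONGO_DIGEST}" && echo "lock OK"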
ci/prod/.env.runtime  (new file, 16 lines)
@@ -0,0 +1,16 @@
REGISTRY_URL=registry.sendico.io

VAULT_ADDR=https://vault.sendico.io

MONGO_PORT=27017
MONGO_REPLICA_SET=sendico-rs
MONGO_AUTH_SOURCE=admin

PBM_S3_ENDPOINT=https://s3.sendico.io
PBM_S3_REGION=eu-central-1
PBM_S3_BUCKET=backup

SSH_HOST=178.57.67.248
SSH_USER=cloud
REMOTE_BASE=/srv/sendico
DB_DIR=db
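Since this file doubles as the compose interpolation source on the server, a missing key surfaces at parse time rather than at container start. A preflight check on the host (a sketch; assumes the deploy layout above):

    cd /srv/sendico/db
    docker compose --env-file env/.env.runtime --env-file env/.env.lock.db \
      -f compose/db.yml config --quiet && echo "interpolation OK"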
ci/prod/compose/backup/config.yml  (new file, empty)
ci/prod/compose/db.yml  (new file, 193 lines)
@@ -0,0 +1,193 @@
# Compose v2

x-common-env: &common-env
  env_file:
    - ../env/.env.runtime
    - ../env/.env.lock.db

volumes:
  mongo1_data: {}
  mongo2_data: {}
  mongo3_data: {}
  vault_secrets:
    driver: local
    driver_opts: { type: tmpfs, device: tmpfs, o: size=32m,uid=999,gid=999,mode=0750 }
  pbm_cfg:
    driver: local
    driver_opts: { type: tmpfs, device: tmpfs, o: size=16m,uid=0,gid=0,mode=0750 }

services:
  vault-agent-sendico:
    <<: *common-env
    image: hashicorp/vault:latest
    container_name: vault-agent-sendico
    restart: unless-stopped
    cap_add: ["IPC_LOCK"]
    environment:
      VAULT_ADDR: ${VAULT_ADDR}
    volumes:
      - ./vault/agent.hcl:/etc/vault/agent.hcl:ro
      - ./vault/templates:/etc/vault/templates:ro
      - /opt/sendico/vault/sendico-db/role_id:/vault/role_id:ro
      - /opt/sendico/vault/sendico-db/secret_id:/vault/secret_id:ro
      - vault_secrets:/vault/secrets:rw
      - pbm_cfg:/etc/backup:rw
    command: sh -lc 'vault agent -config=/etc/vault/agent.hcl'
    healthcheck:
      test: ["CMD-SHELL", "test -s /vault/secrets/MONGO_INITDB_ROOT_USERNAME -a -s /vault/secrets/MONGO_INITDB_ROOT_PASSWORD -a -s /vault/secrets/mongo.kf -a -s /etc/backup/pbm.env -a -s /etc/backup/.u -a -s /etc/backup/.p"]
      interval: 5s
      timeout: 3s
      retries: 30
      start_period: 5s

  sendico_db1:
    <<: *common-env
    image: ${REGISTRY_URL}/mirror/mongo:${MONGO_TAG}@${MONGO_DIGEST}
    container_name: sendico_db1
    restart: unless-stopped
    depends_on: { vault-agent-sendico: { condition: service_healthy } }
    entrypoint: ["/usr/local/bin/mongo-entrypoint-wrapper.sh"]
    command: >
      mongod --replSet ${MONGO_REPLICA_SET} --bind_ip_all --auth
      --keyFile /vault/secrets/mongo.kf --port ${MONGO_PORT}
    volumes:
      - mongo1_data:/data/db
      - vault_secrets:/vault/secrets:ro
      - ./ops/mongo-entrypoint.sh:/usr/local/bin/mongo-entrypoint-wrapper.sh:ro
    healthcheck:
      test: ["CMD-SHELL", "mongosh --quiet --host localhost --port ${MONGO_PORT} --eval 'db.runCommand({ ping: 1 }).ok' || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s
    ports: [ "0.0.0.0:${MONGO_PORT}:${MONGO_PORT}" ]

  sendico_db2:
    <<: *common-env
    image: ${REGISTRY_URL}/mirror/mongo:${MONGO_TAG}@${MONGO_DIGEST}
    container_name: sendico_db2
    restart: unless-stopped
    depends_on: { vault-agent-sendico: { condition: service_healthy } }
    entrypoint: ["/usr/local/bin/mongo-entrypoint-wrapper.sh"]
    command: >
      mongod --replSet ${MONGO_REPLICA_SET} --bind_ip_all --auth
      --keyFile /vault/secrets/mongo.kf --port ${MONGO_PORT}
    volumes:
      - mongo2_data:/data/db
      - vault_secrets:/vault/secrets:ro
      - ./ops/mongo-entrypoint.sh:/usr/local/bin/mongo-entrypoint-wrapper.sh:ro
    healthcheck:
      test: ["CMD-SHELL", "mongosh --quiet --host localhost --port ${MONGO_PORT} --eval 'db.runCommand({ ping: 1 }).ok' || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s

  sendico_db3:
    <<: *common-env
    image: ${REGISTRY_URL}/mirror/mongo:${MONGO_TAG}@${MONGO_DIGEST}
    container_name: sendico_db3
    restart: unless-stopped
    depends_on: { vault-agent-sendico: { condition: service_healthy } }
    entrypoint: ["/usr/local/bin/mongo-entrypoint-wrapper.sh"]
    command: >
      mongod --replSet ${MONGO_REPLICA_SET} --bind_ip_all --auth
      --keyFile /vault/secrets/mongo.kf --port ${MONGO_PORT}
    volumes:
      - mongo3_data:/data/db
      - vault_secrets:/vault/secrets:ro
      - ./ops/mongo-entrypoint.sh:/usr/local/bin/mongo-entrypoint-wrapper.sh:ro
    healthcheck:
      test: ["CMD-SHELL", "mongosh --quiet --host localhost --port ${MONGO_PORT} --eval 'db.runCommand({ ping: 1 }).ok' || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s

  mongo_setup:
    <<: *common-env
    image: ${REGISTRY_URL}/mirror/mongo:${MONGO_TAG}@${MONGO_DIGEST}
    depends_on:
      sendico_db1: { condition: service_healthy }
      sendico_db2: { condition: service_healthy }
      sendico_db3: { condition: service_healthy }
    volumes:
      - vault_secrets:/vault/secrets:ro
    # note the $$ escapes: shell-level variables must survive compose
    # interpolation, while ${MONGO_PORT}/${MONGO_REPLICA_SET} are
    # interpolated by compose on purpose
    entrypoint: |
      bash -c '
        u=$$(cat /vault/secrets/MONGO_INITDB_ROOT_USERNAME)
        p=$$(cat /vault/secrets/MONGO_INITDB_ROOT_PASSWORD)
        until mongosh --quiet --host sendico_db1 --port ${MONGO_PORT} --eval "db.adminCommand({ ping: 1 })"; do
          echo "waiting for MongoDB…"; sleep 2;
        done
        mongosh --host sendico_db1 --port ${MONGO_PORT} -u "$$u" -p "$$p" --authenticationDatabase admin <<EOJS
        try { rs.status() } catch (e) {
          rs.initiate({
            _id: "${MONGO_REPLICA_SET}",
            members: [
              { _id: 0, host: "sendico_db1:${MONGO_PORT}", priority: 2 },
              { _id: 1, host: "sendico_db2:${MONGO_PORT}", priority: 1 },
              { _id: 2, host: "sendico_db3:${MONGO_PORT}", priority: 1 }
            ]
          })
        }
      EOJS
      '
    restart: "no"

  pbm-agent-1:
    <<: *common-env
    image: percona/percona-backup-mongodb:latest
    container_name: pbm-agent-1
    restart: unless-stopped
    depends_on:
      sendico_db1: { condition: service_healthy }
      vault-agent-sendico: { condition: service_healthy }
    volumes:
      - pbm_cfg:/etc/backup:ro
    command: |
      sh -lc '
        . /etc/backup/pbm.env
        U=$$(cat /etc/backup/.u) ; P=$$(cat /etc/backup/.p)
        export AWS_EC2_METADATA_DISABLED=true
        export PBM_MONGODB_URI="mongodb://$${U}:$${P}@sendico_db1:${MONGO_PORT}/?authSource=${MONGO_AUTH_SOURCE}&replicaSet=${MONGO_REPLICA_SET}"
        exec pbm-agent --config=/etc/backup/pbm-config.yaml
      '

  pbm-agent-2:
    <<: *common-env
    image: percona/percona-backup-mongodb:latest
    container_name: pbm-agent-2
    restart: unless-stopped
    depends_on:
      sendico_db2: { condition: service_healthy }
      vault-agent-sendico: { condition: service_healthy }
    volumes:
      - pbm_cfg:/etc/backup:ro
    command: |
      sh -lc '
        . /etc/backup/pbm.env
        U=$$(cat /etc/backup/.u) ; P=$$(cat /etc/backup/.p)
        export AWS_EC2_METADATA_DISABLED=true
        export PBM_MONGODB_URI="mongodb://$${U}:$${P}@sendico_db2:${MONGO_PORT}/?authSource=${MONGO_AUTH_SOURCE}&replicaSet=${MONGO_REPLICA_SET}"
        exec pbm-agent --config=/etc/backup/pbm-config.yaml
      '

  pbm-agent-3:
    <<: *common-env
    image: percona/percona-backup-mongodb:latest
    container_name: pbm-agent-3
    restart: unless-stopped
    depends_on:
      sendico_db3: { condition: service_healthy }
      vault-agent-sendico: { condition: service_healthy }
    volumes:
      - pbm_cfg:/etc/backup:ro
    command: |
      sh -lc '
        . /etc/backup/pbm.env
        U=$$(cat /etc/backup/.u) ; P=$$(cat /etc/backup/.p)
        export AWS_EC2_METADATA_DISABLED=true
        export PBM_MONGODB_URI="mongodb://$${U}:$${P}@sendico_db3:${MONGO_PORT}/?authSource=${MONGO_AUTH_SOURCE}&replicaSet=${MONGO_REPLICA_SET}"
        exec pbm-agent --config=/etc/backup/pbm-config.yaml
      '
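Once the stack is healthy, replica set membership can be verified from the host with the credentials the agent rendered (a sketch, not part of the deploy):

    docker exec sendico_db1 bash -c 'mongosh --quiet \
      -u "$(cat /vault/secrets/MONGO_INITDB_ROOT_USERNAME)" \
      -p "$(cat /vault/secrets/MONGO_INITDB_ROOT_PASSWORD)" \
      --authenticationDatabase admin \
      --eval "rs.status().members.forEach(m => print(m.name, m.stateStr))"'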
ci/prod/compose/ops/mongo-entrypoint.sh  (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -euo pipefail

wait_for_file() {
  local path="$1" name="$2" retries="${3:-30}" interval="${4:-2}"
  for _ in $(seq 1 "$retries"); do
    if [ -s "$path" ]; then
      return 0
    fi
    sleep "$interval"
  done
  echo "missing required secret: ${name} (${path})" >&2
  exit 1
}

wait_for_file /vault/secrets/MONGO_INITDB_ROOT_USERNAME "root username"
wait_for_file /vault/secrets/MONGO_INITDB_ROOT_PASSWORD "root password"
wait_for_file /vault/secrets/mongo.kf "replica set keyFile"

export MONGO_INITDB_ROOT_USERNAME="$(cat /vault/secrets/MONGO_INITDB_ROOT_USERNAME)"
export MONGO_INITDB_ROOT_PASSWORD="$(cat /vault/secrets/MONGO_INITDB_ROOT_PASSWORD)"

# ownership and mode of the keyFile are set by the Vault Agent template
# command; /vault/secrets is mounted read-only in this container, so a
# chown/chmod here would fail under set -e

exec /usr/local/bin/docker-entrypoint.sh "$@"
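For the root credentials alone, the official mongo image's `_FILE` variables could replace the export dance (a sketch; the wrapper would still be needed to wait for the keyFile, which has no `_FILE` equivalent):

    environment:
      MONGO_INITDB_ROOT_USERNAME_FILE: /vault/secrets/MONGO_INITDB_ROOT_USERNAME
      MONGO_INITDB_ROOT_PASSWORD_FILE: /vault/secrets/MONGO_INITDB_ROOT_PASSWORD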
ci/prod/compose/vault/agent.hcl  (new file, 50 lines)
@@ -0,0 +1,50 @@
# Vault Agent for the DB stack. AppRole creds are files on the host.
pid_file = "/tmp/vault-agent.pid"

auto_auth {
  method "approle" {
    mount_path = "auth/approle"
    config = {
      role_id_file_path   = "/vault/role_id"
      secret_id_file_path = "/vault/secret_id"
    }
  }
  sink "file" { config = { path = "/vault/token" } }
}

# The Vault address is taken from the VAULT_ADDR environment variable set by
# compose; agent config files do not support template expressions here.

# Mongo root credentials
template {
  source      = "/etc/vault/templates/mongo/user.ctmpl"
  destination = "/vault/secrets/MONGO_INITDB_ROOT_USERNAME"
}
template {
  source      = "/etc/vault/templates/mongo/pass.ctmpl"
  destination = "/vault/secrets/MONGO_INITDB_ROOT_PASSWORD"
}

# Replica set keyFile (strict perms)
template {
  source      = "/etc/vault/templates/mongo/keyfile.ctmpl"
  destination = "/vault/secrets/mongo.kf"
  command     = "sh -lc 'chown 999:999 /vault/secrets/mongo.kf && chmod 0400 /vault/secrets/mongo.kf'"
}

# PBM: backup user/pass + S3 creds env
template {
  source      = "/etc/vault/templates/backup/user.ctmpl"
  destination = "/etc/backup/.u"
}
template {
  source      = "/etc/vault/templates/backup/pass.ctmpl"
  destination = "/etc/backup/.p"
}
template {
  source      = "/etc/vault/templates/pbm/env.ctmpl"
  destination = "/etc/backup/pbm.env"
}
template {
  source      = "/etc/vault/templates/pbm/config.ctmpl"
  destination = "/etc/backup/pbm-config.yaml"
}
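A quick way to confirm the agent rendered everything before debugging the DB containers (a sketch; container name as in the compose file above):

    docker exec vault-agent-sendico sh -c 'ls -l /vault/secrets /etc/backup'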
ci/prod/compose/vault/templates/backup/pass.ctmpl  (new file, 3 lines)
@@ -0,0 +1,3 @@
{{ with secret "kv/data/ops/db/backup" -}}
{{ .Data.data.password }}
{{- end }}
ci/prod/compose/vault/templates/backup/user.ctmpl  (new file, 3 lines)
@@ -0,0 +1,3 @@
{{ with secret "kv/data/ops/db/backup" -}}
{{ .Data.data.user }}
{{- end }}
ci/prod/compose/vault/templates/mongo/keyfile.ctmpl  (new file, 3 lines)
@@ -0,0 +1,3 @@
{{ with secret "kv/data/sendico/db" -}}
{{ .Data.data.key }}
{{- end }}
ci/prod/compose/vault/templates/mongo/pass.ctmpl  (new file, 3 lines)
@@ -0,0 +1,3 @@
{{ with secret "kv/data/sendico/db" -}}
{{ .Data.data.password }}
{{- end }}
ci/prod/compose/vault/templates/mongo/user.ctmpl  (new file, 3 lines)
@@ -0,0 +1,3 @@
{{ with secret "kv/data/sendico/db" -}}
{{ .Data.data.user }}
{{- end }}
ci/prod/compose/vault/templates/pbm/config.ctmpl  (new file, 16 lines)
@@ -0,0 +1,16 @@
# Rendered by Vault Agent; contains no secrets.
storage:
  type: s3
  s3:
    endpointUrl: "{{ env "PBM_S3_ENDPOINT" }}"
    region: "{{ env "PBM_S3_REGION" }}"
    bucket: "{{ env "PBM_S3_BUCKET" }}"
    forcePathStyle: true

pitr:
  enabled: true
  oplogSpanMin: 10
  compression: "s2"

backup:
  compression: "s2"
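PBM stores its effective configuration in the cluster's control collections, so a rendered file like this is typically applied once via the CLI rather than read independently by every agent (a sketch; assumes PBM_MONGODB_URI is set as in the compose file):

    pbm config --file /etc/backup/pbm-config.yaml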
ci/prod/compose/vault/templates/pbm/env.ctmpl  (new file, 3 lines)
@@ -0,0 +1,3 @@
# Rendered by Vault Agent. Contains only secrets.
AWS_ACCESS_KEY_ID={{ with secret "kv/data/s3/backup" -}}{{ .Data.data.access_key_id }}{{- end }}
AWS_SECRET_ACCESS_KEY={{ with secret "kv/data/s3/backup" -}}{{ .Data.data.secret_access_key }}{{- end }}
ci/vlt  (new executable file, 58 lines)
@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Minimal Vault helper for CI steps (AppRole login + KV v2 reads).
# Requires: curl, sed. Uses VAULT_ADDR, VAULT_ROLE_ID, VAULT_SECRET_ID from env.
set -euo pipefail

: "${VAULT_ADDR:?missing VAULT_ADDR}"
VAULT_TOKEN_FILE="${VAULT_TOKEN_FILE:-.vault_token}"

log(){ printf '[vlt] %s\n' "$*" >&2; }

login() {
  : "${VAULT_ROLE_ID:?missing VAULT_ROLE_ID}"
  : "${VAULT_SECRET_ID:?missing VAULT_SECRET_ID}"
  log "login approle"
  resp="$(curl -sfS -X POST -H 'Content-Type: application/json' \
    -d "{\"role_id\":\"${VAULT_ROLE_ID}\",\"secret_id\":\"${VAULT_SECRET_ID}\"}" \
    "${VAULT_ADDR%/}/v1/auth/approle/login")"
  token="$(printf '%s' "$resp" | sed -n 's/.*"client_token"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p')"
  [ -n "$token" ] || { echo "login failed" >&2; exit 1; }
  printf '%s' "$token" > "$VAULT_TOKEN_FILE"
}

ensure_token() {
  if [ -s "$VAULT_TOKEN_FILE" ]; then
    VAULT_TOKEN="$(cat "$VAULT_TOKEN_FILE")"
  else
    login
    VAULT_TOKEN="$(cat "$VAULT_TOKEN_FILE")"
  fi
}

# kv_get <mount> <path> <field>  (KV v2)
kv_get() {
  mount="$1"; path="$2"; field="$3"
  ensure_token
  url="${VAULT_ADDR%/}/v1/${mount}/data/${path}"
  resp="$(curl -sfS -H "X-Vault-Token: ${VAULT_TOKEN}" "$url")"
  raw="$(printf '%s' "$resp" | sed -n "s/.*\"${field}\"[[:space:]]*:[[:space:]]*\"\([^\"]*\)\".*/\1/p")"
  [ -n "$raw" ] || { echo "field not found: ${mount}/${path}:${field}" >&2; exit 2; }
  printf '%s' "$raw" | sed -e 's/\\n/\n/g' -e 's/\\t/\t/g' -e 's/\\"/"/g' -e 's/\\\\/\\/g'
}

# kv_to_file <mount> <path> <field> <dest> [mode]
kv_to_file() {
  mount="$1"; path="$2"; field="$3"; dest="$4"; mode="${5:-600}"
  tmp="$(mktemp)"
  kv_get "$mount" "$path" "$field" > "$tmp"
  install -m "$mode" "$tmp" "$dest"
  rm -f "$tmp"
  log "wrote $dest"
}

case "${1:-}" in
  login)      shift; login "$@";;
  kv_get)     shift; kv_get "$@";;
  kv_to_file) shift; kv_to_file "$@";;
  *) echo "usage: vlt {login|kv_get|kv_to_file} ..." >&2; exit 64;;
esac
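Typical local use, outside CI (a sketch; the AppRole values are placeholders):

    export VAULT_ADDR=https://vault.sendico.io
    export VAULT_ROLE_ID=... VAULT_SECRET_ID=...
    ./ci/vlt kv_get kv sendico/registry user            # prints the field to stdout
    ./ci/vlt kv_to_file kv sendico/registry password /tmp/pass 600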
infra/woodpecker/docker-compose.yml  (new file, 121 lines)
@@ -0,0 +1,121 @@
networks:
  cicd:
    external: true

secrets:
  woodpecker_vault_role_id:
    external: true
  woodpecker_vault_secret_id:
    external: true

configs:
  woodpecker_vault_agent_hcl:
    file: ./vault/agent.hcl
  tpl_agent_secret:
    file: ./vault/templates/agent_secret.ctmpl
  tpl_gitea_client_id:
    file: ./vault/templates/gitea_client_id.ctmpl
  tpl_gitea_client_secret:
    file: ./vault/templates/gitea_client_secret.ctmpl
  tpl_pg_dsn:
    file: ./vault/templates/pg_dsn.ctmpl

volumes:
  vault_secrets:
    driver: local
    driver_opts:
      type: tmpfs
      device: tmpfs
      o: size=32m,uid=0,gid=0,mode=0750

services:
  vault-agent-woodpecker:
    image: hashicorp/vault:latest
    networks: [cicd]
    cap_add: ["IPC_LOCK"]
    environment:
      VAULT_ADDR: "http://vault:8200"  # or your HTTPS URL
    secrets:
      - source: woodpecker_vault_role_id
        target: /vault/secrets/role_id
      - source: woodpecker_vault_secret_id
        target: /vault/secrets/secret_id
    volumes:
      - vault_secrets:/vault/secrets:rw
    configs:
      - source: woodpecker_vault_agent_hcl
        target: /etc/vault/agent.hcl
      - source: tpl_agent_secret
        target: /etc/vault/templates/agent_secret.ctmpl
      - source: tpl_gitea_client_id
        target: /etc/vault/templates/gitea_client_id.ctmpl
      - source: tpl_gitea_client_secret
        target: /etc/vault/templates/gitea_client_secret.ctmpl
      - source: tpl_pg_dsn
        target: /etc/vault/templates/pg_dsn.ctmpl
    command: [ "sh", "-lc", "vault agent -config=/etc/vault/agent.hcl" ]
    healthcheck:
      test: ["CMD-SHELL", "test -s /vault/secrets/agent_secret -a -s /vault/secrets/gitea_client_id -a -s /vault/secrets/gitea_client_secret -a -s /vault/secrets/pg_dsn"]
      interval: 10s
      timeout: 3s
      retries: 30

  woodpecker-server:
    image: woodpeckerci/woodpecker-server:latest
    networks: [cicd]
    depends_on: [vault-agent-woodpecker]
    volumes:
      - vault_secrets:/vault/secrets:ro
    environment:
      WOODPECKER_HOST: "https://ci.sendico.io"
      WOODPECKER_OPEN: "false"

      # Gitea (your instance URL)
      WOODPECKER_GITEA: "true"
      WOODPECKER_GITEA_URL: "https://git.sendico.io"
      WOODPECKER_GITEA_CLIENT_FILE: "/vault/secrets/gitea_client_id"
      WOODPECKER_GITEA_SECRET_FILE: "/vault/secrets/gitea_client_secret"

      # Agent shared secret (lowercase file name, env var stays uppercase)
      WOODPECKER_AGENT_SECRET_FILE: "/vault/secrets/agent_secret"

      # Postgres DSN (rendered to a file by Vault Agent)
      WOODPECKER_DATABASE_DRIVER: "postgres"
      WOODPECKER_DATABASE_DATASOURCE_FILE: "/vault/secrets/pg_dsn"

      WOODPECKER_BACKEND_DOCKER_NETWORK: "cicd"
    deploy:
      labels:
        traefik.enable: "true"
        traefik.docker.network: "cicd"
        traefik.http.routers.woodpecker-server.rule: "Host(`ci.sendico.io`)"
        traefik.http.routers.woodpecker-server.entrypoints: "websecure"
        traefik.http.routers.woodpecker-server.tls: "true"
        traefik.http.routers.woodpecker-server.tls.certresolver: "letsencrypt"
        traefik.http.services.woodpecker-server.loadbalancer.server.port: "3000"
    healthcheck:
      test: ["CMD", "/bin/woodpecker-server", "ping"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s

  woodpecker-agent:
    image: woodpeckerci/woodpecker-agent:latest
    networks: [cicd]
    depends_on: [woodpecker-server, vault-agent-woodpecker]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - vault_secrets:/vault/secrets:ro
    environment:
      WOODPECKER_SERVER: "woodpecker-server:9000"  # gRPC endpoint on the overlay network
      WOODPECKER_AGENT_SECRET_FILE: "/vault/secrets/agent_secret"
      WOODPECKER_BACKEND: "docker"
      WOODPECKER_BACKEND_DOCKER_NETWORK: "cicd"
      WOODPECKER_MAX_WORKFLOWS: "2"
    healthcheck:
      test: ["CMD", "/bin/woodpecker-agent", "ping"]
      interval: 10s
      timeout: 3s
      retries: 10
      start_period: 20s
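The external secrets and deploy.labels imply Swarm mode, so bringing the CI stack up would look roughly like this (a sketch; the secret values are placeholders):

    docker network create --driver overlay --attachable cicd
    printf '%s' "$ROLE_ID"   | docker secret create woodpecker_vault_role_id -
    printf '%s' "$SECRET_ID" | docker secret create woodpecker_vault_secret_id -
    docker stack deploy -c infra/woodpecker/docker-compose.yml woodpecker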