@@ -17,36 +17,35 @@ steps:
     image: alpine:latest
     depends_on: [ version ]
     environment:
-      # Vault access for CI (AppRole for CI itself, NOT the app AppRole)
+      # CI's own AppRole creds for accessing Vault to fetch the SSH key (existing names)
       VAULT_ADDR: https://vault.sendico.io
-      VAULT_ROLE_ID: { from_secret: VAULT_APP_ROLE } # CI's AppRole role_id
-      VAULT_SECRET_ID: { from_secret: VAULT_SECRET_ID } # CI's AppRole secret_id
+      VAULT_ROLE_ID: { from_secret: VAULT_APP_ROLE }
+      VAULT_SECRET_ID: { from_secret: VAULT_SECRET_ID }
     commands:
       - set -euo pipefail
-      - apk add --no-cache curl bash coreutils sed python3 openssh-keygen
+      - apk add --no-cache bash coreutils openssh-keygen curl sed
       - mkdir -p secrets
-      # Fetch SSH private key for deploy (base64-encoded) and decode
+      # Retrieve SSH private key for deploy (existing helper)
      - ./ci/vlt kv_to_file kv ops/deploy/ssh_key private_b64 secrets/SSH_KEY.b64 600
       - base64 -d secrets/SSH_KEY.b64 > secrets/SSH_KEY
       - chmod 600 secrets/SSH_KEY
       - ssh-keygen -y -f secrets/SSH_KEY >/dev/null
-      # Fetch AppRole creds for Vault Agent (adjust the KV path if different)
-      - ./ci/vlt kv_to_file kv ops/vault/approle/sendico-db role_id secrets/ROLE_ID 600
-      - ./ci/vlt kv_to_file kv ops/vault/approle/sendico-db secret_id secrets/SECRET_ID 600

   - name: deploy
     image: alpine:latest
     depends_on: [ secrets ]
+    # Reuse the SAME Woodpecker secrets to pass AppRole to the Vault Agent at runtime
+    environment:
+      VAULT_ROLE_ID: { from_secret: VAULT_APP_ROLE }
+      VAULT_SECRET_ID: { from_secret: VAULT_SECRET_ID }
     commands:
       - set -euo pipefail
       - apk add --no-cache bash openssh-client rsync coreutils
       - mkdir -p /root/.ssh
       - install -m 600 secrets/SSH_KEY /root/.ssh/id_rsa
-      # Normalize CRLF if any, then export runtime env (Compose variables)
       - sed -i 's/\r$//' ./ci/prod/.env.runtime
       - set -a
       - . ./ci/prod/.env.runtime
       - . ./.env.version
       - set +a
-      # Run external deploy script (quiet by default; set DEBUG_DEPLOY=1 to debug)
       - bash ci/prod/scripts/deploy-db.sh

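For context, the `./ci/vlt kv_to_file` helper used above is not part of this diff. A minimal sketch of what such a helper could do, assuming it logs in with the CI AppRole and reads a KV v2 field over the Vault HTTP API (the function name and KV v2 layout are assumptions; jq is used for brevity and is not in the step's apk list):

    # Hypothetical kv_to_file-style function (NOT the actual ci/vlt implementation).
    # Assumes a KV v2 mount named "kv", so reads go to /v1/kv/data/<path>.
    kv_to_file() {
      local mount="$1" path="$2" field="$3" out="$4" mode="$5" token
      # AppRole login returns a client token for subsequent reads
      token="$(curl -fsS -X POST "$VAULT_ADDR/v1/auth/approle/login" \
        -d "{\"role_id\":\"$VAULT_ROLE_ID\",\"secret_id\":\"$VAULT_SECRET_ID\"}" \
        | jq -r '.auth.client_token')"
      umask 077
      # Extract the requested field and write it with the requested mode
      curl -fsS -H "X-Vault-Token: $token" "$VAULT_ADDR/v1/$mount/data/$path" \
        | jq -re --arg f "$field" '.data.data[$f]' > "$out"
      chmod "$mode" "$out"
    }
    # Usage mirroring the pipeline step:
    #   kv_to_file kv ops/deploy/ssh_key private_b64 secrets/SSH_KEY.b64 600
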
@@ -15,7 +15,7 @@ volumes:
       type: tmpfs
       device: tmpfs
       o: size=32m,uid=999,gid=999,mode=0750
-  # In-memory config for PBM (renders from templates, no host persistence)
+  # In-memory config for PBM (rendered from templates, no host persistence)
   pbm_cfg:
     driver: local
     driver_opts:

@@ -30,22 +30,24 @@ services:
     container_name: vault-agent-sendico
     restart: unless-stopped
     cap_add: ["IPC_LOCK"]
-    # Only static env here. AppRole creds are injected via CI at runtime.
+    # VAULT_ADDR is required by the agent and comes from ../env/.env.runtime
+    # VAULT_ROLE_ID / VAULT_SECRET_ID are passed only during `docker compose` from CI
     environment:
       VAULT_ADDR: ${VAULT_ADDR}
-      VAULT_ROLE_ID: ${VAULT_ROLE_ID} # provided only during `docker compose up`
-      VAULT_SECRET_ID: ${VAULT_SECRET_ID} # provided only during `docker compose up`
+      VAULT_ROLE_ID: ${VAULT_ROLE_ID}
+      VAULT_SECRET_ID: ${VAULT_SECRET_ID}
     volumes:
       - ./vault/agent.hcl:/etc/vault/agent.hcl:ro
       - ./vault/templates:/etc/vault/templates:ro
       - vault_secrets:/vault/secrets:rw
       - pbm_cfg:/etc/backup:rw
-    # Write AppRole creds to tmpfs, drop them from env, then exec agent
+    # Write AppRole creds into tmpfs, unset them from env, then exec the agent
+    # Note: use $${...} here to avoid compose-time interpolation
     command: >
       sh -lc 'set -euo pipefail; umask 077;
-      : "${VAULT_ROLE_ID:?}"; : "${VAULT_SECRET_ID:?}";
-      printf "%s" "$VAULT_ROLE_ID" > /vault/secrets/role_id;
-      printf "%s" "$VAULT_SECRET_ID" > /vault/secrets/secret_id;
+      : "$${VAULT_ADDR:?}"; : "$${VAULT_ROLE_ID:?}"; : "$${VAULT_SECRET_ID:?}";
+      printf "%s" "$${VAULT_ROLE_ID}" > /vault/secrets/role_id;
+      printf "%s" "$${VAULT_SECRET_ID}" > /vault/secrets/secret_id;
       unset VAULT_ROLE_ID VAULT_SECRET_ID;
       exec vault agent -config=/etc/vault/agent.hcl'
     healthcheck:

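A note on the `$${...}` escaping above: Compose interpolates `${VAR}` at config/up time from the environment in which `docker compose` runs, while `$$` produces a literal `$`, so the `$${...:?}` guards and the `printf` arguments are expanded by the container's shell at runtime against the values injected through `environment:`. A hedged way to sanity-check this without starting anything, assuming the file is `db.yml` as in the deploy script:

    # Render the file and confirm the $${...} references inside `command` are
    # NOT replaced by the dummy values; only the `environment:` entries should
    # be resolved. Other ${...} variables in db.yml may warn if unset.
    VAULT_ADDR=https://vault.sendico.io \
    VAULT_ROLE_ID=dummy-role VAULT_SECRET_ID=dummy-secret \
    docker compose -f db.yml config | grep -A3 'command:'
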
@@ -60,7 +62,9 @@ services:
     image: docker.io/library/mongo:latest
     container_name: sendico_db1
     restart: unless-stopped
-    depends_on: { vault-agent-sendico: { condition: service_healthy } }
+    depends_on:
+      vault-agent-sendico:
+        condition: service_healthy
     entrypoint: ["/usr/local/bin/mongo-entrypoint-wrapper.sh"]
     command: >
       mongod --replSet ${MONGO_REPLICA_SET} --bind_ip_all --auth

@@ -82,7 +86,9 @@ services:
     image: docker.io/library/mongo:latest
     container_name: sendico_db2
     restart: unless-stopped
-    depends_on: { vault-agent-sendico: { condition: service_healthy } }
+    depends_on:
+      vault-agent-sendico:
+        condition: service_healthy
     entrypoint: ["/usr/local/bin/mongo-entrypoint-wrapper.sh"]
     command: >
       mongod --replSet ${MONGO_REPLICA_SET} --bind_ip_all --auth

@@ -103,7 +109,9 @@ services:
     image: docker.io/library/mongo:latest
     container_name: sendico_db3
     restart: unless-stopped
-    depends_on: { vault-agent-sendico: { condition: service_healthy } }
+    depends_on:
+      vault-agent-sendico:
+        condition: service_healthy
     entrypoint: ["/usr/local/bin/mongo-entrypoint-wrapper.sh"]
     command: >
       mongod --replSet ${MONGO_REPLICA_SET} --bind_ip_all --auth

@@ -1,14 +1,17 @@
 #!/usr/bin/env bash
 set -euo pipefail
-# Enable verbose mode only when DEBUG_DEPLOY=1
+# Enable verbose logs only when DEBUG_DEPLOY=1
 [[ "${DEBUG_DEPLOY:-0}" = "1" ]] && set -x
 trap 'echo "[deploy-db] error at line $LINENO" >&2' ERR

-# Required env (exported by CI step from .env.runtime)
+# Required env exported by CI
 : "${REMOTE_BASE:?missing REMOTE_BASE}"
 : "${DB_DIR:?missing DB_DIR}"
 : "${SSH_USER:?missing SSH_USER}"
 : "${SSH_HOST:?missing SSH_HOST}"
+# Pass-through AppRole creds for Vault Agent (provided by Woodpecker secrets with existing names)
+: "${VAULT_ROLE_ID:?missing VAULT_ROLE_ID}"
+: "${VAULT_SECRET_ID:?missing VAULT_SECRET_ID}"

 REMOTE_DIR="${REMOTE_BASE%/}/${DB_DIR}"
 REMOTE_TARGET="${SSH_USER}@${SSH_HOST}"

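The variables checked above come from `ci/prod/.env.runtime`, which is not part of this diff. A hypothetical example of its shape, with placeholder values only (VAULT_ADDR matches the address used in the pipeline; everything else is illustrative):

    # Hypothetical ci/prod/.env.runtime -- placeholder values, not the real ones
    REMOTE_BASE=/opt/sendico
    DB_DIR=db
    SSH_USER=deploy
    SSH_HOST=db.example.internal
    VAULT_ADDR=https://vault.sendico.io
    MONGO_REPLICA_SET=rs0
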
@@ -29,19 +32,15 @@ fi
 RSYNC_FLAGS=(-az --delete)
 [[ "${DEBUG_DEPLOY:-0}" = "1" ]] && RSYNC_FLAGS=(-avz --delete)

-# AppRole creds for Vault Agent, fetched in the previous pipeline step
-VAULT_ROLE_ID="$(cat secrets/ROLE_ID)"
-VAULT_SECRET_ID="$(cat secrets/SECRET_ID)"
-
-# Create remote directory structure
+# Prepare remote filesystem
 ssh "${SSH_OPTS[@]}" "$REMOTE_TARGET" "mkdir -p ${REMOTE_DIR}/{compose,env}"

 # Sync compose bundle and runtime env to the remote host
 rsync "${RSYNC_FLAGS[@]}" -e "ssh ${SSH_OPTS[*]}" ci/prod/compose/ "$REMOTE_TARGET:${REMOTE_DIR}/compose/"
 rsync "${RSYNC_FLAGS[@]}" -e "ssh ${SSH_OPTS[*]}" ci/prod/.env.runtime "$REMOTE_TARGET:${REMOTE_DIR}/env/.env.runtime"

-# Deploy on remote: export AppRole creds ONLY to the compose commands.
-# The vault-agent container will read them, write to tmpfs, unset, and exec.
+# Deploy on remote: pass AppRole creds ONLY to compose invocations.
+# The vault-agent container writes them into tmpfs and unsets them internally.
 ssh "${SSH_OPTS[@]}" "$REMOTE_TARGET" \
   REMOTE_DIR="$REMOTE_DIR" \
   VAULT_ROLE_ID="$VAULT_ROLE_ID" \

@@ -51,7 +50,7 @@ set -euo pipefail
 cd "${REMOTE_DIR}/compose"
 set -a; . ../env/.env.runtime; set +a

-# Pull and start with ephemeral AppRole env present only for these invocations
+# Run with ephemeral AppRole env (scoped only to these commands)
 VAULT_ROLE_ID="${VAULT_ROLE_ID}" VAULT_SECRET_ID="${VAULT_SECRET_ID}" docker compose -f db.yml pull --quiet 2>/dev/null || \
 VAULT_ROLE_ID="${VAULT_ROLE_ID}" VAULT_SECRET_ID="${VAULT_SECRET_ID}" docker compose -f db.yml pull

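Because the wrapper shell in the vault-agent container unsets the AppRole variables before exec'ing the agent, they should not appear in the agent process's live environment; they do remain visible in the container's configured environment (e.g. `docker inspect`), since Compose resolves `environment:` at up time. A quick hedged check on the remote host:

    # Expect no output: the exec'd agent inherited an environment with the
    # AppRole variables already unset by the wrapper shell.
    docker exec vault-agent-sendico sh -c \
      'tr "\0" "\n" < /proc/1/environ | grep -E "^VAULT_(ROLE|SECRET)_ID="'

    # The credential files live on the tmpfs-backed vault_secrets volume,
    # created with mode 0600 thanks to `umask 077`.
    docker exec vault-agent-sendico ls -l /vault/secrets/
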