updated for infra

This commit is contained in:
Arseni
2026-03-10 20:40:20 +03:00
parent 9c2b3bf8bd
commit 840a7f85c8
13 changed files with 661 additions and 12 deletions

View File

@@ -1,6 +1,7 @@
import 'package:json_annotation/json_annotation.dart';
import 'package:pshared/data/dto/payment/operation.dart';
import 'package:pshared/data/dto/payment/intent/payment.dart';
import 'package:pshared/data/dto/payment/payment_quote.dart';
part 'payment.g.dart';
@@ -13,6 +14,7 @@ class PaymentDTO {
final String? state;
final String? failureCode;
final String? failureReason;
final PaymentIntentDTO? intent;
final List<PaymentOperationDTO> operations;
final PaymentQuoteDTO? lastQuote;
final Map<String, String>? metadata;
@@ -24,6 +26,7 @@ class PaymentDTO {
this.state,
this.failureCode,
this.failureReason,
this.intent,
this.operations = const <PaymentOperationDTO>[],
this.lastQuote,
this.metadata,

View File

@@ -1,4 +1,5 @@
import 'package:pshared/data/dto/payment/payment.dart';
import 'package:pshared/data/mapper/payment/intent/payment.dart';
import 'package:pshared/data/mapper/payment/operation.dart';
import 'package:pshared/data/mapper/payment/quote.dart';
import 'package:pshared/models/payment/payment.dart';
@@ -13,6 +14,7 @@ extension PaymentDTOMapper on PaymentDTO {
orchestrationState: paymentOrchestrationStateFromValue(state),
failureCode: failureCode,
failureReason: failureReason,
intent: intent?.toDomain(),
operations: operations.map((item) => item.toDomain()).toList(),
lastQuote: lastQuote?.toDomain(),
metadata: metadata,
@@ -27,6 +29,7 @@ extension PaymentMapper on Payment {
state: state ?? paymentOrchestrationStateToValue(orchestrationState),
failureCode: failureCode,
failureReason: failureReason,
intent: intent?.toDTO(),
operations: operations.map((item) => item.toDTO()).toList(),
lastQuote: lastQuote?.toDTO(),
metadata: metadata,

View File

@@ -76,6 +76,10 @@ services:
gitea:
image: gitea/gitea:latest
networks: [cicd]
ports:
- target: 22
published: 2222
mode: host
depends_on:
- gitea-db
- vault-agent-gitea

View File

@@ -0,0 +1,145 @@
secrets:
monitoring_vault_role_id:
external: true
monitoring_vault_secret_id:
external: true
networks:
cicd:
external: true
volumes:
loki_data:
grafana_data:
prometheus_data:
alertmanager_data:
alertmanager_config:
driver: local
driver_opts:
type: tmpfs
device: tmpfs
o: size=8m,uid=0,gid=0,mode=0755
vault_secrets:
driver: local
driver_opts:
type: tmpfs
device: tmpfs
o: size=32m,uid=472,gid=472,mode=0750
services:
vault-agent-monitoring:
image: hashicorp/vault:latest
networks: [cicd]
cap_add: ["IPC_LOCK"]
environment:
VAULT_ADDR: "http://vault:8200"
command: >
sh -lc 'vault agent -config=/etc/vault/agent.hcl'
secrets:
- source: monitoring_vault_role_id
target: /etc/vault/role_id
- source: monitoring_vault_secret_id
target: /etc/vault/secret_id
volumes:
- ./vault-agent/agent.hcl:/etc/vault/agent.hcl:ro
- ./vault-agent/templates:/etc/vault/templates:ro
- vault_secrets:/vault/secrets:rw
- alertmanager_config:/vault/alertmanager:rw
healthcheck:
test: ["CMD-SHELL", "test -s /vault/secrets/grafana.env"]
interval: 30s
timeout: 5s
retries: 3
deploy:
restart_policy:
condition: any
prometheus:
image: prom/prometheus:latest
networks: [cicd]
command:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --storage.tsdb.retention.time=30d
- --web.enable-lifecycle
volumes:
- ./prometheus/config.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9090/-/ready"]
interval: 30s
timeout: 5s
retries: 3
labels:
- "traefik.enable=true"
- "traefik.http.routers.prometheus.rule=Host(`prometheus.sendico.io`)"
- "traefik.http.routers.prometheus.entrypoints=websecure"
- "traefik.http.routers.prometheus.tls.certresolver=letsencrypt"
- "traefik.http.services.prometheus.loadbalancer.server.port=9090"
deploy:
restart_policy:
condition: any
alertmanager:
image: prom/alertmanager:latest
networks: [cicd]
command: >
sh -c 'while [ ! -s /vault/alertmanager/alertmanager.yml ]; do echo "⏳ waiting for alertmanager.yml"; sleep 2; done;
exec /bin/alertmanager --config.file=/vault/alertmanager/alertmanager.yml --storage.path=/alertmanager'
volumes:
- alertmanager_data:/alertmanager
- alertmanager_config:/vault/alertmanager:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:9093/-/ready"]
interval: 30s
timeout: 5s
retries: 3
labels:
- "traefik.enable=true"
- "traefik.http.routers.alertmanager.rule=Host(`alertmanager.sendico.io`)"
- "traefik.http.routers.alertmanager.entrypoints=websecure"
- "traefik.http.routers.alertmanager.tls.certresolver=letsencrypt"
- "traefik.http.services.alertmanager.loadbalancer.server.port=9093"
deploy:
restart_policy:
condition: any
loki:
image: grafana/loki:latest
networks: [cicd]
command: ["-config.file=/etc/loki/config.yml"]
volumes:
- ./loki/config.yml:/etc/loki/config.yml:ro
- loki_data:/loki
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:3100/ready"]
interval: 30s
timeout: 5s
retries: 5
deploy:
restart_policy:
condition: any
grafana:
image: grafana/grafana:latest
networks: [cicd]
command: >
sh -c 'while [ ! -s /vault/secrets/grafana.env ]; do echo "⏳ waiting for grafana.env"; sleep 2; done;
set -a; . /vault/secrets/grafana.env; set +a; exec /run.sh'
volumes:
- grafana_data:/var/lib/grafana
- vault_secrets:/vault/secrets:ro
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health"]
interval: 30s
timeout: 5s
retries: 5
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.rule=Host(`grafana.sendico.io`)"
- "traefik.http.routers.grafana.entrypoints=websecure"
- "traefik.http.routers.grafana.tls.certresolver=letsencrypt"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
deploy:
restart_policy:
condition: any

View File

@@ -0,0 +1,37 @@
# loki/config.yml — single-binary, filesystem-backed TSDB storage, 7-day retention
server:
http_listen_port: 3100
instance_addr: 127.0.0.1
common:
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
schema_config:
configs:
- from: "2025-01-01"
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
limits_config:
retention_period: 168h
max_query_lookback: 168h
allow_structured_metadata: true
compactor:
working_directory: /loki/compactor
compaction_interval: 5m
retention_enabled: true
delete_request_store: filesystem

View File

@@ -0,0 +1,22 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets: ['alertmanager:9093']
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:9090']
- job_name: loki
static_configs:
- targets: ['loki:3100']
# Uncomment if Grafana metrics are enabled:
# - job_name: grafana
# static_configs:
# - targets: ['grafana:3000']

30
infra/registry/config.yml Normal file
View File

@@ -0,0 +1,30 @@
version: 0.1
log:
level: info
storage:
s3:
accesskey: registry
secretkey: "88m]6uu:5^B>" # NOTE(review): plaintext secret committed to the repo — rotate this key and render it via vault-agent-registry into /vault/secrets (REGISTRY_STORAGE_S3_SECRETKEY), like the htpasswd file below
bucket: registry
region: us-east-1
regionendpoint: https://s3.sendico.io
secure: true
v4auth: true
forcepathstyle: true # required for MinIO path-style
delete:
enabled: true
http:
addr: :5000
auth:
htpasswd:
realm: "Registry Realm"
path: /vault/secrets/htpasswd
health:
storagedriver:
enabled: true
monitoring:
enabled: false

View File

@@ -0,0 +1,79 @@
configs:
registry_wait_sh:
file: ./registry-wait.sh
registry_config_yml:
file: ./config.yml
services:
vault-agent-registry:
image: hashicorp/vault:latest
command: >
sh -lc 'vault agent -config=/etc/vault/agent.hcl'
cap_add: ["IPC_LOCK"]
environment:
VAULT_ADDR: "http://vault:8200"
secrets:
- source: registry_vault_role_id
target: /vault/secrets/role_id
- source: registry_vault_secret_id
target: /vault/secrets/secret_id
volumes:
- ./vault:/etc/vault:ro
- vault-secrets:/vault/secrets:rw
networks: [cicd]
healthcheck:
test: ["CMD-SHELL", "test -s /vault/secrets/htpasswd -a -s /vault/secrets/env"]
interval: 10s
timeout: 3s
retries: 10
deploy:
placement:
constraints: [node.role == manager]
registry:
image: registry:latest
entrypoint: ["/usr/local/bin/registry-wait"]
command: ["serve", "/etc/registry/config.yml"]
configs:
- source: registry_wait_sh
target: /usr/local/bin/registry-wait
mode: 0755
- source: registry_config_yml
target: /etc/registry/config.yml
volumes:
- registry_data:/var/lib/registry
- vault-secrets:/vault/secrets:ro
environment:
OTEL_TRACES_EXPORTER: "none"
networks: [cicd]
deploy:
placement:
constraints: [node.role == manager]
labels:
- "traefik.enable=true"
- "traefik.docker.network=cicd"
- "traefik.http.services.registry.loadbalancer.server.port=5000"
- "traefik.http.routers.registry.rule=Host(`registry.sendico.io`)"
- "traefik.http.routers.registry.entrypoints=websecure"
- "traefik.http.routers.registry.tls=true"
- "traefik.http.routers.registry.tls.certresolver=letsencrypt"
networks:
cicd:
external: true
volumes:
vault-secrets:
driver: local
driver_opts:
type: tmpfs
device: tmpfs
o: size=16m,uid=1000,gid=1000,mode=0750
registry_data:
secrets:
registry_vault_role_id:
external: true
registry_vault_secret_id:
external: true

198
infra/s3/docker-compose.yml Normal file
View File

@@ -0,0 +1,198 @@
configs:
minio_wait_sh:
file: ./minio-wait.sh
services:
vault-agent-s3:
image: hashicorp/vault:latest
command: >
sh -lc 'vault agent -config=/etc/vault/agent.hcl'
cap_add: ["IPC_LOCK"]
environment:
VAULT_ADDR: "http://vault:8200"
secrets:
- source: s3_vault_role_id
target: /vault/secrets/role_id
- source: s3_vault_secret_id
target: /vault/secrets/secret_id
volumes:
- ./vault:/etc/vault:ro
- vault-secrets:/vault/secrets:rw
networks: [cicd]
healthcheck:
test: ["CMD-SHELL", "test -s /vault/secrets/MINIO_ROOT_USER -a -s /vault/secrets/MINIO_ROOT_PASSWORD"]
interval: 10s
timeout: 3s
retries: 10
deploy:
placement:
constraints: [node.role == manager]
minio1:
image: quay.io/minio/minio:latest
hostname: minio1
entrypoint: ["/usr/local/bin/minio-wait"]
command:
- server
- --console-address
- :9001
- http://minio1:9000/data
- http://minio2:9000/data
- http://minio3:9000/data
- http://minio4:9000/data
configs:
- source: minio_wait_sh
target: /usr/local/bin/minio-wait
mode: 0755
environment:
MINIO_ROOT_USER_FILE: /vault/secrets/MINIO_ROOT_USER
MINIO_ROOT_PASSWORD_FILE: /vault/secrets/MINIO_ROOT_PASSWORD
MINIO_SERVER_URL: https://s3.sendico.io
MINIO_BROWSER_REDIRECT_URL: https://minio.sendico.io
volumes:
- minio1_data:/data
- vault-secrets:/vault/secrets:ro
networks: [cicd]
deploy:
placement:
constraints: [node.role == manager]
labels:
- "traefik.enable=true"
- "traefik.docker.network=cicd"
# services (explicitly pin the ports)
- "traefik.http.services.s3-minio-api.loadbalancer.server.port=9000"
- "traefik.http.services.s3-minio-console.loadbalancer.server.port=9001"
# router for the API
- "traefik.http.routers.s3-minio-api.rule=Host(`s3.sendico.io`)"
- "traefik.http.routers.s3-minio-api.entrypoints=websecure"
- "traefik.http.routers.s3-minio-api.tls=true"
- "traefik.http.routers.s3-minio-api.tls.certresolver=letsencrypt"
- "traefik.http.routers.s3-minio-api.service=s3-minio-api"
# router for the Console
- "traefik.http.routers.s3-minio-console.rule=Host(`minio.sendico.io`)"
- "traefik.http.routers.s3-minio-console.entrypoints=websecure"
- "traefik.http.routers.s3-minio-console.tls=true"
- "traefik.http.routers.s3-minio-console.tls.certresolver=letsencrypt"
- "traefik.http.routers.s3-minio-console.service=s3-minio-console"
minio2:
image: quay.io/minio/minio:latest
hostname: minio2
entrypoint: ["/usr/local/bin/minio-wait"]
command:
- server
- --console-address
- :9001
- http://minio1:9000/data
- http://minio2:9000/data
- http://minio3:9000/data
- http://minio4:9000/data
configs:
- source: minio_wait_sh
target: /usr/local/bin/minio-wait
mode: 0755
environment:
MINIO_ROOT_USER_FILE: /vault/secrets/MINIO_ROOT_USER
MINIO_ROOT_PASSWORD_FILE: /vault/secrets/MINIO_ROOT_PASSWORD
MINIO_SERVER_URL: https://s3.sendico.io
MINIO_BROWSER_REDIRECT_URL: https://minio.sendico.io
volumes:
- minio2_data:/data
- vault-secrets:/vault/secrets:ro
networks: [cicd]
deploy:
placement:
constraints: [node.role == manager]
labels:
- "traefik.enable=false"
minio3:
image: quay.io/minio/minio:latest
hostname: minio3
entrypoint: ["/usr/local/bin/minio-wait"]
command:
- server
- --console-address
- :9001
- http://minio1:9000/data
- http://minio2:9000/data
- http://minio3:9000/data
- http://minio4:9000/data
configs:
- source: minio_wait_sh
target: /usr/local/bin/minio-wait
mode: 0755
environment:
MINIO_ROOT_USER_FILE: /vault/secrets/MINIO_ROOT_USER
MINIO_ROOT_PASSWORD_FILE: /vault/secrets/MINIO_ROOT_PASSWORD
MINIO_SERVER_URL: https://s3.sendico.io
MINIO_BROWSER_REDIRECT_URL: https://minio.sendico.io
volumes:
- minio3_data:/data
- vault-secrets:/vault/secrets:ro
networks:
- cicd
deploy:
placement:
constraints: [node.role == manager]
labels:
- "traefik.enable=false"
minio4:
image: quay.io/minio/minio:latest
hostname: minio4
entrypoint: ["/usr/local/bin/minio-wait"]
command:
- server
- --console-address
- :9001
- http://minio1:9000/data
- http://minio2:9000/data
- http://minio3:9000/data
- http://minio4:9000/data
configs:
- source: minio_wait_sh
target: /usr/local/bin/minio-wait
mode: 0755
environment:
MINIO_ROOT_USER_FILE: /vault/secrets/MINIO_ROOT_USER
MINIO_ROOT_PASSWORD_FILE: /vault/secrets/MINIO_ROOT_PASSWORD
MINIO_SERVER_URL: https://s3.sendico.io
MINIO_BROWSER_REDIRECT_URL: https://minio.sendico.io
volumes:
- minio4_data:/data
- vault-secrets:/vault/secrets:ro
networks:
- cicd
deploy:
placement:
constraints: [node.role == manager]
labels:
- "traefik.enable=false"
networks:
cicd:
external: true
volumes:
vault-secrets:
driver: local
driver_opts:
type: tmpfs
device: tmpfs
o: size=16m,uid=1000,gid=1000,mode=0750
minio1_data:
minio2_data:
minio3_data:
minio4_data:
secrets:
s3_vault_role_id:
external: true
s3_vault_secret_id:
external: true

47
infra/traefik/config.yml Normal file
View File

@@ -0,0 +1,47 @@
log:
level: INFO
format: json
accessLog: {}
entryPoints:
web:
address: ":80"
http:
redirections:
entryPoint:
to: websecure
scheme: https
websecure:
address: ":443"
http3: {}
http:
encodedCharacters:
allowEncodedSlash: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
network: cicd
watch: true
constraints: # NOTE(review): empty value — either remove this key or supply a label-constraint expression (e.g. "Label(`traefik.tier`,`public`)"); an empty scalar here is almost certainly unintended
swarm:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
network: cicd
watch: true
file:
filename: /etc/traefik/dynamic.yml
watch: true
certificatesResolvers:
letsencrypt:
acme:
email: si@sendico.io
storage: /sendico.json
httpChallenge:
entryPoint: web
api:
dashboard: true

View File

@@ -0,0 +1,43 @@
services:
traefik:
image: traefik:latest
command:
- "--configFile=/etc/traefik/traefik.yml"
ports:
- "80:80"
- "443:443"
networks:
- cicd
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./config.yml:/etc/traefik/traefik.yml:ro
- ./dynamic.yml:/etc/traefik/dynamic.yml:ro
- ./sendico.json:/sendico.json
- traefik_letsencrypt:/letsencrypt
labels:
- "traefik.enable=true"
- "traefik.docker.network=cicd"
- "traefik.http.routers.traefik.rule=Host(`traefik.sendico.io`)"
- "traefik.http.routers.traefik.entrypoints=websecure"
- "traefik.http.routers.traefik.tls.certresolver=letsencrypt"
- "traefik.http.routers.traefik.service=api@internal"
- "traefik.http.routers.traefik.middlewares=secure-headers@file,dashboard-auth@file"
mail-cert-proxy:
image: traefik/whoami
networks:
- cicd
deploy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.mail-cert.rule=Host(`mail.sendico.io`)"
- "traefik.http.routers.mail-cert.entrypoints=websecure"
- "traefik.http.routers.mail-cert.tls.certresolver=letsencrypt"
- "traefik.http.services.mail-cert.loadbalancer.server.port=80"
networks:
cicd:
external: true
volumes:
traefik_letsencrypt:

17
infra/traefik/dynamic.yml Normal file
View File

@@ -0,0 +1,17 @@
http:
middlewares:
secure-headers:
headers:
stsSeconds: 63072000
stsIncludeSubdomains: true
stsPreload: true
frameDeny: true
contentTypeNosniff: true
browserXssFilter: true
referrerPolicy: "strict-origin-when-cross-origin"
dashboard-auth:
basicAuth:
users:
- "admin:$2y$05$m22ds4RLIsR9UY3DdZHB8umL4FHXmLvc8ZUE/RrFvNKrDP0GMIyeS"

View File

@@ -1,4 +1,5 @@
networks:
# Overlay network used by your Swarm services (Traefik, Vault, etc.)
cicd:
external: true
@@ -21,6 +22,7 @@ configs:
file: ./vault/templates/pg_dsn.ctmpl
volumes:
# tmpfs volume for rendered secrets (read by server/agent)
vault_secrets:
driver: local
driver_opts:
@@ -29,12 +31,14 @@ volumes:
o: size=32m,uid=0,gid=0,mode=0750
services:
# Vault Agent sidecar to render secrets from Vault into files
vault-agent-woodpecker:
image: hashicorp/vault:latest
networks: [cicd]
cap_add: ["IPC_LOCK"]
environment:
VAULT_ADDR: "http://vault:8200" # or your HTTPS URL
# Use the actual Swarm service DNS name of Vault inside the overlay
VAULT_ADDR: "http://vault_vault:8200"
secrets:
- source: woodpecker_vault_role_id
target: /vault/secrets/role_id
@@ -53,15 +57,17 @@ services:
target: /etc/vault/templates/gitea_client_secret.ctmpl
- source: tpl_pg_dsn
target: /etc/vault/templates/pg_dsn.ctmpl
command: [ "sh", "-lc", "vault agent -config=/etc/vault/agent.hcl" ]
command: ["sh", "-lc", "vault agent -config=/etc/vault/agent.hcl"]
healthcheck:
test: ["CMD-SHELL", "test -s /vault/secrets/agent_secret -a -s /vault/secrets/gitea_client_id -a -s /vault/secrets/gitea_client_secret -a -s /vault/secrets/pg_dsn" ]
interval: 10s
timeout: 3s
retries: 30
# Woodpecker Server (HTTP UI on :8000, gRPC on :9000)
woodpecker-server:
image: woodpeckerci/woodpecker-server:latest
user: "0:0" # ensures read access to tmpfs secrets (mode 0750)
image: woodpeckerci/woodpecker-server:v3-alpine
networks: [cicd]
depends_on: [vault-agent-woodpecker]
volumes:
@@ -70,29 +76,37 @@ services:
WOODPECKER_HOST: "https://ci.sendico.io"
WOODPECKER_OPEN: "false"
# Gitea (now your URL)
# Gitea OAuth
WOODPECKER_GITEA: "true"
WOODPECKER_GITEA_URL: "https://git.sendico.io"
WOODPECKER_GITEA_CLIENT_FILE: "/vault/secrets/gitea_client_id"
WOODPECKER_GITEA_SECRET_FILE: "/vault/secrets/gitea_client_secret"
# Agent shared secret (lowercase file, env stays uppercase)
# Shared secret between server and agent
WOODPECKER_AGENT_SECRET_FILE: "/vault/secrets/agent_secret"
# Postgres (from Vault Agent rendered file)
# Postgres DSN from Vault Agent rendered file
WOODPECKER_DATABASE_DRIVER: "postgres"
WOODPECKER_DATABASE_DATASOURCE_FILE: "/vault/secrets/pg_dsn"
WOODPECKER_BACKEND_DOCKER_NETWORK: "cicd"
deploy:
labels:
traefik.enable: "true"
traefik.docker.network: "cicd"
traefik.http.routers.woodpecker-server.rule: "Host(`ci.sendico.io`)"
traefik.http.routers.woodpecker-server.entrypoints: "websecure"
traefik.http.routers.woodpecker-server.tls: "true"
traefik.http.routers.woodpecker-server.tls.certresolver: "letsencrypt"
traefik.http.services.woodpecker-server.loadbalancer.server.port: "3000"
traefik.http.routers.woodpecker-server.service: "woodpecker-server"
traefik.http.services.woodpecker-server.loadbalancer.server.port: "8000"
traefik.http.routers.woodpecker-grpc.rule: "Host(`woodpecker-grpc.sendico.io`)"
traefik.http.routers.woodpecker-grpc.entrypoints: "websecure"
traefik.http.routers.woodpecker-grpc.tls: "true"
traefik.http.routers.woodpecker-grpc.tls.certresolver: "letsencrypt"
traefik.http.routers.woodpecker-grpc.service: "woodpecker-grpc"
traefik.http.services.woodpecker-grpc.loadbalancer.server.port: "9000"
traefik.http.services.woodpecker-grpc.loadbalancer.server.scheme: "h2c"
healthcheck:
test: ["CMD", "/bin/woodpecker-server", "ping"]
interval: 10s
@@ -100,18 +114,25 @@ services:
retries: 10
start_period: 20s
# Woodpecker Agent (creates step containers)
woodpecker-agent:
image: woodpeckerci/woodpecker-agent:latest
user: "0:0" # ensures read access to tmpfs secrets (mode 0750)
image: woodpeckerci/woodpecker-agent:v3-alpine
networks: [cicd]
depends_on: [woodpecker-server, vault-agent-woodpecker]
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- vault_secrets:/vault/secrets:ro
environment:
WOODPECKER_SERVER: "woodpecker-server:9000" # gRPC in overlay
# gRPC connection to server (overlay DNS)
WOODPECKER_SERVER: "woodpecker-server:9000"
# Shared secret file
WOODPECKER_AGENT_SECRET_FILE: "/vault/secrets/agent_secret"
# Docker backend for steps
WOODPECKER_BACKEND: "docker"
WOODPECKER_BACKEND_DOCKER_NETWORK: "cicd"
# Attach all step containers to a stable bridge network (created outside the stack)
WOODPECKER_BACKEND_DOCKER_NETWORK: "wp-ci"
# Concurrency limit
WOODPECKER_MAX_WORKFLOWS: "2"
healthcheck:
test: ["CMD", "/bin/woodpecker-agent", "ping"]