---
# docker-compose-ha.yml
# High-availability deployment: an HAProxy load balancer in front of three
# identical proxy replicas, all backed by a shared PostgreSQL instance.
# YAML anchor for shared proxy service configuration.
# Merged into proxy-1/2/3 via `<<: *proxy-service`. NOTE: merge keys are
# shallow - a key declared explicitly on a node (e.g. `volumes`) replaces,
# not extends, a key of the same name defined here.
x-proxy-service: &proxy-service
  build:
    context: .
    dockerfile: Dockerfile
    # Host networking applies to the image BUILD only (e.g. to reach
    # host-local mirrors); runtime networking is unaffected.
    network: host
  environment:
    # Proxy configuration - container always listens on 8080 internally;
    # HAProxy is the externally published entry point.
    PROXY_ARGS: "--port 8080 "
    # Admin dashboard configuration
    ADMIN_PASSWORD: ${ADMIN_PASSWORD}
    # Server secret for API operations (must be hex string with even length)
    SERVER_SECRET: ${SERVER_SECRET}
    # Logging configuration (used by logback.xml)
    LOG_LEVEL: ${LOG_LEVEL:-INFO}
    # Database configuration - `postgres` resolves to the service defined
    # below on the shared network.
    DB_URL: jdbc:postgresql://postgres:5432/aggregator
    DB_USER: aggregator
    DB_PASSWORD: ${DB_PASSWORD}
    # HikariCP connection pool configuration (timeouts in milliseconds)
    HIKARI_MAX_POOL_SIZE: ${HIKARI_MAX_POOL_SIZE:-50}
    HIKARI_MIN_IDLE: ${HIKARI_MIN_IDLE:-10}
    HIKARI_CONNECTION_TIMEOUT: ${HIKARI_CONNECTION_TIMEOUT:-30000}
    HIKARI_IDLE_TIMEOUT: ${HIKARI_IDLE_TIMEOUT:-600000}
    HIKARI_MAX_LIFETIME: ${HIKARI_MAX_LIFETIME:-1800000}
    HIKARI_VALIDATION_TIMEOUT: ${HIKARI_VALIDATION_TIMEOUT:-5000}
    HIKARI_LEAK_DETECTION_THRESHOLD: ${HIKARI_LEAK_DETECTION_THRESHOLD:-60000}
    # Log configuration
    LOG_DIR: /var/log/aggregator
    # Shard configuration - optional URI to load shard config from (file:// or http://)
    # If set, overrides database config on startup. Maps to /etc/aggregator/config inside container.
    SHARD_CONFIG_URI: ${SHARD_CONFIG_URI:-}
  depends_on:
    # Wait for the database healthcheck, not merely container start.
    postgres:
      condition: service_healthy
  healthcheck:
    test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8080/health || exit 1"]
    interval: 10s
    timeout: 5s
    retries: 5
    # Grace period before failed checks count against the container.
    start_period: 30s
  networks:
    - aggregator-network
  extra_hosts:
    # Allow proxy to connect to host machine (for local aggregator)
    - "host.docker.internal:host-gateway"
  restart: unless-stopped
services:
  # PostgreSQL database shared by all proxy nodes.
  postgres:
    image: postgres:15-alpine
    container_name: aggregator-postgres
    environment:
      POSTGRES_DB: aggregator
      POSTGRES_USER: aggregator
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    # Not published on the host by default; uncomment to expose the
    # database for local debugging.
    # ports:
    #   - "${POSTGRES_PORT:-5432}:5432"
    volumes:
      # Persist database files across container recreation.
      - postgres-data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U aggregator"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - aggregator-network
    # Keep the database up like every other service in this HA stack.
    restart: unless-stopped
# HAProxy load balancer - distributes traffic across 3 proxy nodes
haproxy:
image: haproxy:2.8-alpine
container_name: aggregator-haproxy
environment:
HAPROXY_STATS_PASSWORD: ${HAPROXY_STATS_PASSWORD}
ports:
# Main proxy port
- "${PROXY_PORT:-8080}:8080"
# Stats/monitoring page
- "${HAPROXY_STATS_PORT:-8404}:8404"
volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
depends_on:
proxy-1:
condition: service_healthy
proxy-2:
condition: service_healthy
proxy-3:
condition: service_healthy
networks:
- aggregator-network
restart: unless-stopped
# Proxy node 1
proxy-1:
<<: *proxy-service
container_name: aggregator-proxy-1
volumes:
- ${SHARD_CONFIG_DIR:-./config}:/etc/aggregator/config:ro
- proxy-1-logs:/var/log/aggregator
# Proxy node 2
proxy-2:
<<: *proxy-service
container_name: aggregator-proxy-2
volumes:
- ${SHARD_CONFIG_DIR:-./config}:/etc/aggregator/config:ro
- proxy-2-logs:/var/log/aggregator
# Proxy node 3
proxy-3:
<<: *proxy-service
container_name: aggregator-proxy-3
volumes:
- ${SHARD_CONFIG_DIR:-./config}:/etc/aggregator/config:ro
- proxy-3-logs:/var/log/aggregator
# Named volumes - database storage plus one log volume per proxy node.
volumes:
  postgres-data:
  proxy-1-logs:
  proxy-2-logs:
  proxy-3-logs:
# Single bridge network shared by all services.
networks:
  aggregator-network:
    driver: bridge