version: "3.9"
# Bernstein cluster-mode deployment
#
# Roles:
#   bernstein-server        — task server + web dashboard (port 8052)
#   bernstein-orchestrator  — reads the backlog, spawns workers via HTTP
#   bernstein-worker        — claims and executes tasks (scale with --scale)
#   prometheus              — scrapes /metrics from bernstein-server (port 9090)
#   grafana                 — pre-built agent dashboards (port 3000, admin/admin)
#   postgres                — shared relational state (future: persistent task store)
#   redis                   — distributed locks + bulletin board (future)
#
# Usage:
#   docker compose up -d
#   docker compose up --scale bernstein-worker=4
#
# Dashboards:
#   Bernstein web UI → http://localhost:8052/dashboard
#   Prometheus       → http://localhost:9090
#   Grafana          → http://localhost:3000 (admin / admin)
#
# Required env vars (copy .env.example → .env):
#   ANTHROPIC_API_KEY, OPENAI_API_KEY, BERNSTEIN_AUTH_TOKEN
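#
# A minimal .env sketch (values below are placeholders, not real credentials;
# the variable names come from the environment blocks in this file):
#   ANTHROPIC_API_KEY=sk-ant-xxxx
#   OPENAI_API_KEY=sk-xxxx
#   BERNSTEIN_AUTH_TOKEN=replace-with-a-long-random-string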

x-bernstein-common: &bernstein-common
  image: bernstein:latest
  build:
    context: .
    dockerfile: Dockerfile
  environment:
    BERNSTEIN_SERVER_URL: http://bernstein-server:8052
    BERNSTEIN_AUTH_TOKEN: ${BERNSTEIN_AUTH_TOKEN:-changeme}
    BERNSTEIN_BIND_HOST: "0.0.0.0"
    BERNSTEIN_CLUSTER_ENABLED: "true"
    # LLM provider keys — set in .env
    ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
    OPENAI_API_KEY: ${OPENAI_API_KEY:-}
    GOOGLE_API_KEY: ${GOOGLE_API_KEY:-}
    OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
    TAVILY_API_KEY: ${TAVILY_API_KEY:-}
    # Future: database / cache URLs
    BERNSTEIN_DATABASE_URL: postgresql://bernstein:bernstein@postgres:5432/bernstein # NOSONAR — development-only default, not used in production
    BERNSTEIN_REDIS_URL: redis://redis:6379/0
  volumes:
    - sdd-data:/workspace/.sdd
    - project-data:/workspace/project
  depends_on:
    bernstein-server:
      condition: service_healthy
  restart: unless-stopped
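
# Services below inherit this block via the YAML merge key (<<: *bernstein-common)
# and can override individual fields. A hypothetical override (my-service and
# bernstein.some.module are illustrative names, not part of this repo):
#   my-service:
#     <<: *bernstein-common
#     entrypoint: ["python", "-m", "bernstein.some.module"]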

services:
  # ── Task server ────────────────────────────────────────────────────────────
  # Runs uvicorn directly so the container's PID 1 is the server (not a wrapper).
  # The BERNSTEIN_* env vars are read by the FastAPI app at module load time.
  bernstein-server:
    image: bernstein:latest
    build:
      context: .
      dockerfile: Dockerfile
    entrypoint: ["python", "-m", "uvicorn"]
    command:
      - "bernstein.core.server:app"
      - "--host"
      - "0.0.0.0"
      - "--port"
      - "8052"
    environment:
      BERNSTEIN_BIND_HOST: "0.0.0.0"
      BERNSTEIN_CLUSTER_ENABLED: "true"
      BERNSTEIN_AUTH_TOKEN: ${BERNSTEIN_AUTH_TOKEN:-changeme}
      BERNSTEIN_DATABASE_URL: postgresql://bernstein:bernstein@postgres:5432/bernstein # NOSONAR — development-only default, not used in production
      BERNSTEIN_REDIS_URL: redis://redis:6379/0
    ports:
      - "8052:8052"
    volumes:
      - sdd-data:/workspace/.sdd
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8052/health"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 15s
    restart: unless-stopped
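
  # Smoke test from the host once the server reports healthy (paths taken from
  # the healthcheck above and the header comment):
  #   curl -f http://localhost:8052/health
  #   # then browse to http://localhost:8052/dashboard for the web UI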

  # ── Orchestrator ───────────────────────────────────────────────────────────
  # Connects to bernstein-server via BERNSTEIN_SERVER_URL, claims tasks,
  # and spawns CLI agents. Does NOT start its own server.
  bernstein-orchestrator:
    <<: *bernstein-common
    entrypoint: ["python", "-m", "bernstein.core.orchestrator"]
    command: []
    depends_on:
      bernstein-server:
        condition: service_healthy

  # ── Worker nodes (scale with: docker compose up --scale bernstein-worker=N) ─
  # Each worker is an independent orchestrator instance that claims and executes
  # tasks from the shared task server.
  bernstein-worker:
    <<: *bernstein-common
    entrypoint: ["python", "-m", "bernstein.core.orchestrator"]
    command: []
    depends_on:
      bernstein-server:
        condition: service_healthy
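
  # Scale-out example (each replica is an identical container claiming tasks
  # from the shared server, as described above):
  #   docker compose up -d --scale bernstein-worker=4
  #   docker compose ps bernstein-worker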

  # ── Monitoring ─────────────────────────────────────────────────────────────
  # OTel Collector: receives OTLP from Bernstein agents, re-exports to Prometheus
  # and optionally Jaeger/Datadog/New Relic.
  # Profile: docker compose --profile otel up -d
  otel-collector:
    image: otel/opentelemetry-collector-contrib:0.100.0
    profiles: [otel]
    command: ["--config=/etc/otelcol/config.yaml"]
    volumes:
      - ./deploy/otel-collector/otel-collector-config.yaml:/etc/otelcol/config.yaml:ro
    ports:
      - "4317:4317"   # OTLP gRPC
      - "4318:4318"   # OTLP HTTP
      - "8888:8888"   # Collector internal metrics (Prometheus)
      - "8889:8889"   # Bernstein metrics re-exported by collector
      - "13133:13133" # Health check
      - "55679:55679" # zPages debug UI
    environment:
      JAEGER_ENDPOINT: ${JAEGER_ENDPOINT:-jaeger:4317}
      DATADOG_API_KEY: ${DATADOG_API_KEY:-}
      DD_SITE: ${DD_SITE:-datadoghq.com}
      NEWRELIC_API_KEY: ${NEWRELIC_API_KEY:-}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:13133/healthz"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 10s
    depends_on:
      bernstein-server:
        condition: service_healthy
    restart: unless-stopped
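
  # A minimal sketch of ./deploy/otel-collector/otel-collector-config.yaml,
  # assuming OTLP in on the ports mapped above and a Prometheus exporter on
  # 8889 (the repo's actual config may differ):
  #   receivers:
  #     otlp:
  #       protocols:
  #         grpc:
  #           endpoint: 0.0.0.0:4317
  #         http:
  #           endpoint: 0.0.0.0:4318
  #   exporters:
  #     prometheus:
  #       endpoint: 0.0.0.0:8889
  #   service:
  #     pipelines:
  #       metrics:
  #         receivers: [otlp]
  #         exporters: [prometheus]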

  # Jaeger all-in-one: receives traces from OTel Collector (profile: otel)
  jaeger:
    image: jaegertracing/all-in-one:1.57
    profiles: [otel]
    environment:
      COLLECTOR_OTLP_ENABLED: "true"
    ports:
      - "16686:16686" # Jaeger UI
      - "14317:4317"  # OTLP gRPC (host port 14317 to avoid conflict with otel-collector)
    restart: unless-stopped

  prometheus:
    image: prom/prometheus:v2.52.0
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--storage.tsdb.retention.time=7d"
      - "--web.enable-lifecycle"
    volumes:
      - ./deploy/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus-data:/prometheus
    ports:
      - "9090:9090"
    depends_on:
      bernstein-server:
        condition: service_healthy
    restart: unless-stopped
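
  # A minimal sketch of ./deploy/prometheus/prometheus.yml, assuming the
  # /metrics scrape target described in the header comment (the repo's actual
  # config may differ):
  #   scrape_configs:
  #     - job_name: bernstein
  #       static_configs:
  #         - targets: ["bernstein-server:8052"]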

  grafana:
    image: grafana/grafana:10.4.2
    environment:
      GF_SECURITY_ADMIN_USER: ${GRAFANA_ADMIN_USER:-admin}
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-admin}
      GF_AUTH_ANONYMOUS_ENABLED: "true"
      GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer
      GF_USERS_ALLOW_SIGN_UP: "false"
    volumes:
      - ./deploy/grafana/provisioning:/etc/grafana/provisioning:ro
      - ./deploy/grafana/dashboard.json:/var/lib/grafana/dashboards/bernstein.json:ro
      - ./deploy/grafana/dashboards:/var/lib/grafana/dashboards/extended:ro
      - grafana-data:/var/lib/grafana
    ports:
      - "3000:3000"
    depends_on:
      - prometheus
    restart: unless-stopped
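
  # A sketch of a datasource provisioning file under ./deploy/grafana/provisioning
  # (the path datasources/prometheus.yaml is an assumed filename; the schema is
  # Grafana's standard provisioning format):
  #   apiVersion: 1
  #   datasources:
  #     - name: Prometheus
  #       type: prometheus
  #       access: proxy
  #       url: http://prometheus:9090
  #       isDefault: true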

  # ── Infrastructure ─────────────────────────────────────────────────────────
  postgres:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: bernstein
      POSTGRES_PASSWORD: bernstein # NOSONAR — development-only default, not used in production
      POSTGRES_DB: bernstein
    volumes:
      - postgres-data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U bernstein"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped

  redis:
    image: redis:7-alpine
    command: ["redis-server", "--appendonly", "yes"]
    volumes:
      - redis-data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped

volumes:
  sdd-data:
    driver: local
  project-data:
    driver: local
  postgres-data:
    driver: local
  redis-data:
    driver: local
  prometheus-data:
    driver: local
  grafana-data:
    driver: local