-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathcompose.yaml
More file actions
142 lines (132 loc) · 4.16 KB
/
compose.yaml
File metadata and controls
142 lines (132 loc) · 4.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
# This file is used for running tests with an attached database
#
# Prerequisites:
# 1. Talos cluster must be running (created via: just test cluster-init)
# 2. Kubeconfig must be configured and point to the Talos cluster
#
# Quick start:
# just dev # Setup Talos cluster and start all services
# Single bridge network shared by db, s3, and s3-init (the api service
# bypasses it via network_mode: host).
networks:
  net:
    name: scaleodm
# Named volumes so database and object-store state survive container recreation.
volumes:
  db_data:
  garage_data:
services:
  # Main API under test. Runs `go run main.go` against source mounted from the
  # host, so code changes are picked up on container restart without a rebuild.
  api:
    image: "ghcr.io/hotosm/scaleodm:${TAG_OVERRIDE:-ci}"
    build:
      # Stops at the `build` stage of the Dockerfile (tooling image, not the
      # final runtime stage).
      target: build
    container_name: scaleodm
    depends_on:
      db:
        condition: service_healthy
      # Bucket/key provisioning must finish before the API starts.
      s3-init:
        condition: service_completed_successfully
    # Use host networking to access localhost Kubernetes API (Talos cluster)
    # This also allows direct access to DB and S3 via localhost ports
    network_mode: "host"
    env_file: .env
    environment:
      KUBECONFIG_PATH: /root/.kube/config
      # Defaults target the host-published ports of the db (31101) and
      # s3 (31102) services below; override via the environment to point at
      # an external stack.
      SCALEODM_DATABASE_URL: ${SCALEODM_DATABASE_URL:-postgres://odm:odm@localhost:31101/scaleodm?sslmode=disable}
      SCALEODM_S3_ENDPOINT: ${SCALEODM_S3_ENDPOINT:-http://localhost:31102}
      SCALEODM_S3_ACCESS_KEY: ${SCALEODM_S3_ACCESS_KEY:-odm}
      SCALEODM_S3_SECRET_KEY: ${SCALEODM_S3_SECRET_KEY:-somelongpassword}
    volumes:
      # Mount local files
      - ./go.mod:/code/go.mod:ro
      - ./go.sum:/code/go.sum:ro
      - ./main.go:/code/main.go:ro
      - ./main_test.go:/code/main_test.go:ro
      - ./app:/code/app:ro
      - ./testutil:/code/testutil:ro
      - ./e2e:/code/e2e:ro
      # Host kubeconfig for reaching the Talos cluster (matches
      # KUBECONFIG_PATH above). NOTE(review): not mounted :ro like the source
      # mounts — confirm whether the app rewrites it, otherwise consider :ro.
      - $HOME/.kube/config:/root/.kube/config
    entrypoint: go
    command: run main.go
    restart: "unless-stopped"
    healthcheck:
      # Presumably 31100 is the API's listen port (reachable on localhost due
      # to host networking) — verify against the app config.
      test: ["CMD", "curl", "-f", "http://localhost:31100/health"]
      interval: 5s
      retries: 10
      start_period: 20s
      timeout: 5s
db:
image: "docker.io/postgres:18-alpine"
container_name: scaleodm-db
environment:
- POSTGRES_USER=odm
- POSTGRES_PASSWORD=odm
- POSTGRES_DB=scaleodm
volumes:
- db_data:/var/lib/postgresql/data/
ports:
- "31101:5432"
networks:
- net
restart: "unless-stopped"
healthcheck:
test: pg_isready -U odm -d scaleodm
start_period: 5s
interval: 10s
timeout: 5s
retries: 3
s3:
image: docker.io/dxflrs/garage:v1.3.1
volumes:
- ./garage.toml:/etc/garage.toml:ro
- garage_data:/var/lib/garage
ports:
- "31102:3900"
networks:
- net
restart: unless-stopped
# https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/1354
# healthcheck:
# test: ["CMD-SHELL", "/garage node health-check"]
# start_period: 5s
# interval: 5s
# timeout: 5s
# retries: 10
  # One-shot initializer: waits for Garage, assigns a layout to the single
  # node, imports the S3 key, and creates/authorizes the test bucket.
  # Exits successfully once done (api waits on service_completed_successfully).
  s3-init:
    image: docker.io/alpine:3.23
    depends_on:
      # service_started (not healthy) because the s3 healthcheck is disabled;
      # the wait loop below handles readiness instead.
      s3:
        condition: service_started
    # Share the s3 container's network and PID namespaces so the script can
    # chroot into /proc/1/root and talk to Garage on 127.0.0.1.
    network_mode: service:s3
    pid: service:s3
    environment:
      GARAGE_ADMIN_TOKEN: garage-admin-token
      # Must match the credentials the api service uses (same defaults).
      SCALEODM_S3_ACCESS_KEY: ${SCALEODM_S3_ACCESS_KEY:-odm}
      SCALEODM_S3_SECRET_KEY: ${SCALEODM_S3_SECRET_KEY:-somelongpassword}
    restart: "on-failure:2"
    # NOTE: $$ is Compose escaping for a literal $ in the shell script below.
    entrypoint:
      - /bin/sh
      - -eu
      - -c
      - |
        # chroot into the s3 container's filesystem so the garage binary runs
        # with its own libs/linker, while the shared network namespace keeps
        # 127.0.0.1:3901 pointing at the live Garage server
        G="chroot /proc/1/root /garage -c /etc/garage.toml"
        # Wait for server
        for i in $$(seq 1 20); do
          if $$G node id -q 2>/dev/null; then break; fi
          echo "Waiting for Garage RPC... ($$i/20)"
          sleep 3
        done
        $$G node id -q || { echo "Garage RPC not ready"; exit 1; }
        # Init garage nodes
        if $$G status 2>&1 | grep -q 'NO ROLE ASSIGNED'; then
          NODE_ID=$$($$G node id -q | cut -c1-16)
          $$G layout assign "$$NODE_ID" -z local -c 1G
          $$G layout apply --version 1
        fi
        # Create S3 bucket
        $$G key import --yes -n scaleodm \
          "$${SCALEODM_S3_ACCESS_KEY}" \
          "$${SCALEODM_S3_SECRET_KEY}" || true
        $$G bucket create scaleodm-test || true
        $$G bucket allow scaleodm-test --key scaleodm --read --write --owner || true
        echo "Garage initialized."