# ============================================================
# Cube Pets Office Environment Configuration
# Copy this file to .env and fill in your values
# ============================================================
# Database connection
DB_HOST=localhost
DB_PORT=3306
DB_NAME=cube_pets_office
DB_USER=root
DB_PASSWORD=your_password_here
# Server
PORT=3001
NODE_ENV=development
# Primary LLM provider
LLM_API_KEY=your_api_key_here
LLM_BASE_URL=https://api.openai.com/v1
LLM_MODEL=gpt-5.4
LLM_WIRE_API=responses
LLM_REASONING_EFFORT=medium
LLM_TIMEOUT_MS=600000
LLM_MAX_CONCURRENT=9999
# Workflow context limits
WORKFLOW_CONTEXT_RECENT_LIMIT=12
WORKFLOW_CONTEXT_EARLIER_PREVIEW_LIMIT=6
WORKFLOW_CONTEXT_ENTRY_CHAR_LIMIT=320
WORKFLOW_CONTEXT_TOTAL_CHAR_LIMIT=6000
# Fallback LLM provider (used when the primary LLM is unavailable; see fallback chain note below)
FALLBACK_LLM_API_KEY=your_fallback_api_key_here
FALLBACK_LLM_BASE_URL=https://open.bigmodel.cn/api/paas/v4
FALLBACK_LLM_MODEL=glm-5-turbo
FALLBACK_LLM_WIRE_API=chat_completions
FALLBACK_LLM_TIMEOUT_MS=600000
FALLBACK_LLM_FORCE_MODEL=true
FALLBACK_LLM_STREAM=false
FALLBACK_LLM_CHAT_THINKING_TYPE=disabled
# Mission / tasks integration
MISSION_SMOKE_ENABLED=false
MISSION_SMOKE_SERVER_PORT=3101
MISSION_SMOKE_EXECUTOR_PORT=3131
MISSION_SMOKE_FEISHU_PORT=3141
MISSION_RESTART_SMOKE_PORT=3102
EXECUTOR_CALLBACK_SECRET=change_me_executor_secret
EXECUTOR_CALLBACK_MAX_SKEW_SECONDS=300
# Lobster executor
LOBSTER_EXECUTOR_BASE_URL=http://127.0.0.1:3031
LOBSTER_EXECUTOR_HOST=0.0.0.0
LOBSTER_EXECUTOR_PORT=3031
LOBSTER_EXECUTOR_DATA_ROOT=tmp/lobster-executor
LOBSTER_EXECUTOR_NAME=lobster-executor
LOBSTER_EXECUTION_MODE=real
LOBSTER_DEFAULT_IMAGE=node:20-slim
LOBSTER_MAX_CONCURRENT_JOBS=2
# DOCKER_HOST=tcp://127.0.0.1:2375
# DOCKER_TLS_VERIFY=1
# DOCKER_CERT_PATH=/path/to/certs
# ── Lobster Executor Security Sandbox ─────────────────────────────
# Security level preset: "strict" (default), "balanced", or "permissive"
LOBSTER_SECURITY_LEVEL=strict
# Container user UID (default: 65534 = nobody)
LOBSTER_CONTAINER_USER=65534
# Max memory per container (e.g. 512m, 1g)
LOBSTER_MAX_MEMORY=512m
# Max CPU cores per container (e.g. 1.0, 2.0)
LOBSTER_MAX_CPUS=1.0
# Max processes inside the container
LOBSTER_MAX_PIDS=256
# tmpfs /tmp size for read-only root filesystem mode
LOBSTER_TMPFS_SIZE=64m
# Comma-separated domain/IP whitelist for balanced network mode
LOBSTER_NETWORK_WHITELIST=
# Path to custom seccomp profile JSON (leave empty to use built-in)
# LOBSTER_SECCOMP_PROFILE=
# Feishu mission bridge
FEISHU_ENABLED=true
FEISHU_MODE=mock
FEISHU_BASE_TASK_URL=http://127.0.0.1:3001
FEISHU_PROGRESS_THROTTLE_PERCENT=15
FEISHU_RELAY_SECRET=change_me_feishu_relay_secret
FEISHU_RELAY_MAX_SKEW_SECONDS=300
FEISHU_RELAY_NONCE_TTL_SECONDS=300
FEISHU_WEBHOOK_VERIFICATION_TOKEN=
FEISHU_WEBHOOK_ENCRYPT_KEY=
FEISHU_WEBHOOK_MAX_SKEW_SECONDS=300
FEISHU_WEBHOOK_DEDUP_TTL_SECONDS=600
FEISHU_WEBHOOK_DEDUP_FILE=data/feishu/feishu-webhook-dedup.json
FEISHU_MESSAGE_FORMAT=card-live
FEISHU_FINAL_SUMMARY_MODE=both
FEISHU_DELIVERY_MAX_RETRIES=2
FEISHU_DELIVERY_RETRY_BASE_MS=300
FEISHU_DELIVERY_RETRY_MAX_MS=5000
FEISHU_APP_ID=
FEISHU_APP_SECRET=
FEISHU_TENANT_ACCESS_TOKEN=
FEISHU_API_BASE_URL=https://open.feishu.cn/open-apis
# ── Vision LLM Configuration ──────────────────────────────────────
# Optional: dedicated Vision LLM for image analysis.
# Fallback chain: VISION_LLM_* → FALLBACK_LLM_* → main LLM_*
# VISION_LLM_API_KEY=
# VISION_LLM_BASE_URL=
# VISION_LLM_MODEL=
# VISION_LLM_WIRE_API=chat_completions
# VISION_LLM_MAX_TOKENS=1000
# VISION_LLM_DETAIL=low
# VISION_LLM_TIMEOUT_MS=30000
# ── Voice (TTS / STT) Configuration ───────────────────────────────
# Optional: server-side TTS and STT services.
# When not configured, the frontend falls back to browser Web Speech APIs.
# TTS_API_URL=https://api.openai.com/v1/audio/speech
# TTS_API_KEY=
# TTS_MODEL=tts-1
# TTS_VOICE=alloy
# STT_API_URL=https://api.openai.com/v1/audio/transcriptions
# STT_API_KEY=
# STT_MODEL=whisper-1
# ── Agent Permission Model ─────────────────────────────────────────
# JWT signing secret for CapabilityToken (leave empty to auto-generate in dev)
PERMISSION_TOKEN_SECRET=
# Default token TTL in milliseconds (default: 7200000 = 2 hours)
PERMISSION_TOKEN_DEFAULT_TTL_MS=7200000
# LRU cache capacity for permission check results
PERMISSION_CACHE_SIZE=10000
# Cache TTL in milliseconds (default: 60000 = 1 minute)
PERMISSION_CACHE_TTL_MS=60000
# Enable permission audit logging
PERMISSION_AUDIT_ENABLED=true