# F5 NGINX Ingress Controller -- Open Source
# Chart: nginx-stable/nginx-ingress v2.1.0
#
# Install:
# helm repo add nginx-stable https://helm.nginx.com/stable
# helm upgrade --install nginx-ingress nginx-stable/nginx-ingress \
# --version 2.1.0 \
# -f nginx-ingress-values.yaml \
# --create-namespace -n ingress-nginx
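#
# After install, confirm the controller is running and owns the 'nginx' IngressClass
# (namespace and class name as configured in this file):
#   kubectl -n ingress-nginx get pods
#   kubectl get ingressclass nginx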
controller:
  # --- Image (OSS) ---
  nginxplus: false
  # Export upstream latency histograms on /metrics for Prometheus scraping.
  enableLatencyMetrics: true
  image:
    repository: nginx/nginx-ingress
    tag: "5.3.4"
  # --- IngressClass ---
  ingressClass:
    name: nginx
    create: true
    setAsDefaultIngress: true
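  # Example (illustrative only): an Ingress that targets this controller via the
  # class above. Host and service names are placeholders, not part of this chart.
  #   apiVersion: networking.k8s.io/v1
  #   kind: Ingress
  #   metadata:
  #     name: example-app
  #   spec:
  #     ingressClassName: nginx
  #     rules:
  #       - host: app.example.com
  #         http:
  #           paths:
  #             - path: /
  #               pathType: Prefix
  #               backend:
  #                 service:
  #                   name: example-app
  #                   port:
  #                     number: 80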
  # --- Replicas & HPA ---
  replicaCount: 1
  autoscaling:
    enabled: true
    minReplicas: 1
    maxReplicas: 4
    targetCPUUtilizationPercentage: 60
    targetMemoryUtilizationPercentage: 70
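  # Note (assumption): the HPA scales on CPU/memory utilization relative to the
  # resource requests below and requires metrics-server in the cluster.
  # A quick check after install:
  #   kubectl -n ingress-nginx get hpa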
  # --- Resources ---
  resources:
    requests:
      cpu: "1"
      memory: "2Gi"
    limits:
      cpu: "2"
      memory: "2Gi"
  # --- Service ---
  service:
    type: LoadBalancer
    externalTrafficPolicy: Local
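  # externalTrafficPolicy: Local preserves the client source IP and avoids the
  # extra node hop; the load balancer only routes to nodes running a controller pod.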
  # --- Snippets (REQUIRED for proxy header annotations) ---
  enableSnippets: true
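  # Example (illustrative only): with snippets enabled, an Ingress can inject proxy
  # headers via a snippet annotation. The annotation name assumes the nginx.org
  # annotation set of this controller; the header itself is just a placeholder.
  #   metadata:
  #     annotations:
  #       nginx.org/location-snippets: |
  #         proxy_set_header X-Request-Start "t=${msec}";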
  # --- Buffer auto-adjust ---
  directiveAutoAdjust: "true"
  # --- Global ConfigMap entries ---
  config:
    entries:
      worker-processes: "auto"
      worker-connections: "65535"
      worker-rlimit-nofile: "131072"
      client-max-body-size: "50m"
      client-body-buffer-size: "1m"
      keepalive-timeout: "75s"
      keepalive-requests: "10000"
      keepalive: "100"
      proxy-buffering: "True"
      proxy-buffers: "16 256k"
      proxy-buffer-size: "256k"
      proxy-busy-buffers-size: "512k"
      proxy-max-temp-file-size: "2048m"
      proxy-connect-timeout: "60s"
      proxy-read-timeout: "120s"
      proxy-send-timeout: "120s"
      server-names-hash-bucket-size: "256"
      server-names-hash-max-size: "2048"
      variables-hash-bucket-size: "512"
      variables-hash-max-size: "4096"
      map-hash-bucket-size: "512"
      map-hash-max-size: "4096"
      server-tokens: "False"
      http2: "True"
      # --- OpenTelemetry tracing (ngx_otel_module) ---
      # enable-opentelemetry activates the `otel_trace on;` directive in the server blocks.
      # Uses 'propagate' mode: honors inbound W3C trace context AND generates edge spans.
      # Exports spans to the Alloy OTLP endpoint via gRPC for Tempo ingestion.
      enable-opentelemetry: "true"
      otel-exporter-endpoint: "countly-observability-alloy-otlp.observability.svc.cluster.local:4317"
      otel-service-name: "edge-nginx"
      otel-trace-context: "propagate"
      otel-sampler: "AlwaysOn"
      otel-sampler-ratio: "1.0"
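      # Example (illustrative only): in 'propagate' mode an inbound W3C traceparent
      # header is continued rather than replaced. The IDs below are placeholders.
      #   curl -H 'traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01' https://app.example.com/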
      # --- Access log format ---
      # Uses $otel_trace_id and $otel_span_id from ngx_otel_module.
      # Format matches Alloy Loki regex: traceId=<32hex>.*spanId=<16hex>
      log-format: >-
        $remote_addr - $remote_user [$time_local] "$request"
        $status $body_bytes_sent "$http_referer" "$http_user_agent"
        $request_time $upstream_response_time
        request_id=$request_id traceId=$otel_trace_id spanId=$otel_span_id
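      # Sample line produced by this format (fabricated placeholder values;
      # emitted as a single line, wrapped here for readability):
      #   10.0.0.7 - - [01/Jan/2025:00:00:00 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/8.5.0"
      #   0.012 0.011 request_id=7f1c... traceId=4bf92f3577b34da6a3ce929d0e0e4736 spanId=00f067aa0ba902b7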
  telemetryReporting:
    enable: false
prometheus:
  create: true
  port: 9113
  service:
    create: true
  serviceMonitor:
    create: false
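# To spot-check the exporter (assumes the release name and namespace from the
# install command above; the deployment name may differ in other setups):
#   kubectl -n ingress-nginx port-forward deploy/nginx-ingress-controller 9113:9113
#   curl -s localhost:9113/metrics | head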