
Commit eb47c2d

test: add integration test suite with wiremock LLM mocking
Covers the Ollama health check, model verification, streaming, and error handling; OpenAI streaming and auth; and the sanitizer pipeline with JSON parsing, preamble stripping, and code-fenced output. Total: 101 tests.
1 parent e594bd0 commit eb47c2d

1 file changed

Lines changed: 318 additions & 0 deletions

File tree

tests/integration.rs

@@ -0,0 +1,318 @@
// SPDX-FileCopyrightText: 2026 Sephyi <me@sephy.io>
//
// SPDX-License-Identifier: GPL-3.0-only

//! Integration tests for LLM providers and sanitizer pipeline.
//!
//! Uses `wiremock` to mock HTTP endpoints so no real LLM servers are needed.
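//! Each test spins up its own `MockServer` on a random local port, so the
//! suite can run in parallel without cross-test interference.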

use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use wiremock::matchers::{method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};

use commitbee::config::{CommitFormat, Config, Provider};
use commitbee::error::Error;
use commitbee::services::llm::ollama::OllamaProvider;
use commitbee::services::llm::openai::OpenAiProvider;
use commitbee::services::sanitizer::CommitSanitizer;

// ─── Test helpers ────────────────────────────────────────────────────────────

fn ollama_config(server_url: &str) -> Config {
    Config {
        provider: Provider::Ollama,
        model: "qwen3:4b".into(),
        ollama_host: server_url.to_string(),
        timeout_secs: 5,
        temperature: 0.3,
        num_predict: 256,
        ..Config::default()
    }
}

fn openai_config(server_url: &str) -> Config {
    Config {
        provider: Provider::OpenAI,
        model: "gpt-4o-mini".into(),
        openai_base_url: Some(server_url.to_string()),
        api_key: Some("test-key".into()),
        timeout_secs: 5,
        temperature: 0.3,
        num_predict: 256,
        ..Config::default()
    }
}

fn default_format() -> CommitFormat {
    CommitFormat::default()
}

/// Drain the token receiver and return all collected tokens.
async fn drain_tokens(mut rx: mpsc::Receiver<String>) -> Vec<String> {
    let mut tokens = Vec::new();
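    // `recv()` returns `None` once every sender has been dropped, so the loop
    // ends when the provider finishes generating.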
    while let Some(tok) = rx.recv().await {
        tokens.push(tok);
    }
    tokens
}

// ─── Ollama health check ─────────────────────────────────────────────────────

#[tokio::test]
async fn ollama_health_check_success() {
    let server = MockServer::start().await;

    Mock::given(method("GET"))
        .and(path("/api/tags"))
        .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
            "models": [
                {"name": "qwen3:4b"},
                {"name": "llama3:8b"}
            ]
        })))
        .mount(&server)
        .await;

    let provider = OllamaProvider::new(&ollama_config(&server.uri()));
    let models = provider.health_check().await.unwrap();

    assert_eq!(models.len(), 2);
    assert!(models.contains(&"qwen3:4b".to_string()));
    assert!(models.contains(&"llama3:8b".to_string()));
}

#[tokio::test]
async fn ollama_health_check_connection_refused() {
    // Use a port that is almost certainly not listening
    let provider = OllamaProvider::new(&ollama_config("http://127.0.0.1:1"));
    let result = provider.health_check().await;

    assert!(result.is_err(), "expected error for connection refused");
    let err = result.unwrap_err();
    assert!(
        matches!(err, Error::OllamaNotRunning { .. }),
        "expected OllamaNotRunning, got: {err:?}"
    );
}

// ─── Ollama model verification ───────────────────────────────────────────────

#[tokio::test]
async fn ollama_model_not_found() {
    let server = MockServer::start().await;

    Mock::given(method("GET"))
        .and(path("/api/tags"))
        .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
            "models": [
                {"name": "llama3:8b"},
                {"name": "codellama:7b"}
            ]
        })))
        .mount(&server)
        .await;

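    // The configured model ("qwen3:4b") is deliberately absent from the mocked tag list.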
    let provider = OllamaProvider::new(&ollama_config(&server.uri()));
    let result = provider.verify_model().await;

    assert!(result.is_err(), "expected error when model is not found");
    let err = result.unwrap_err();
    match err {
        Error::ModelNotFound { model, available } => {
            assert_eq!(model, "qwen3:4b");
            assert!(available.contains(&"llama3:8b".to_string()));
            assert!(available.contains(&"codellama:7b".to_string()));
        }
        other => panic!("expected ModelNotFound, got: {other:?}"),
    }
}

// ─── Ollama streaming response ───────────────────────────────────────────────

#[tokio::test]
async fn ollama_streaming_response() {
    let server = MockServer::start().await;

    // NDJSON streaming: each line is a separate JSON object
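    // The final chunk ("done":true) still carries a "response" fragment, which is appended.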
    let body = [
        r#"{"response":"feat","done":false}"#,
        r#"{"response":"(scope","done":false}"#,
        r#"{"response":"): add","done":false}"#,
        r#"{"response":" feature","done":true}"#,
    ]
    .join("\n");

    Mock::given(method("POST"))
        .and(path("/api/generate"))
        .respond_with(ResponseTemplate::new(200).set_body_string(body))
        .mount(&server)
        .await;

    let provider = OllamaProvider::new(&ollama_config(&server.uri()));
    let (tx, rx) = mpsc::channel(32);
    let cancel = CancellationToken::new();

    let result = provider.generate("test prompt", tx, cancel).await.unwrap();

    assert_eq!(result, "feat(scope): add feature");

    // Verify tokens were streamed
    let tokens = drain_tokens(rx).await;
    assert!(
        !tokens.is_empty(),
        "expected streaming tokens to be received"
    );
}

// ─── Ollama server error ─────────────────────────────────────────────────────

#[tokio::test]
async fn ollama_server_error() {
    let server = MockServer::start().await;

    Mock::given(method("POST"))
        .and(path("/api/generate"))
        .respond_with(ResponseTemplate::new(500).set_body_string("Internal Server Error"))
        .mount(&server)
        .await;

    let provider = OllamaProvider::new(&ollama_config(&server.uri()));
    let (tx, _rx) = mpsc::channel(32);
    let cancel = CancellationToken::new();

    let result = provider.generate("test prompt", tx, cancel).await;

    assert!(result.is_err(), "expected error for 500 response");
    let err = result.unwrap_err();
    match err {
        Error::Provider { provider, message } => {
            assert_eq!(provider, "ollama");
            assert!(
                message.contains("500"),
                "expected message to contain status code 500, got: {message}"
            );
        }
        other => panic!("expected Provider error, got: {other:?}"),
    }
}

// ─── OpenAI streaming response ───────────────────────────────────────────────

#[tokio::test]
async fn openai_streaming_response() {
    let server = MockServer::start().await;

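    // SSE framing: each event is a "data:" line followed by a blank line,
    // and the stream is terminated by "data: [DONE]".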
    let body = [
        r#"data: {"choices":[{"delta":{"content":"feat"},"finish_reason":null}]}"#,
        "",
        r#"data: {"choices":[{"delta":{"content":": add test"},"finish_reason":"stop"}]}"#,
        "",
        "data: [DONE]",
        "",
    ]
    .join("\n");

    Mock::given(method("POST"))
        .and(path("/chat/completions"))
        .respond_with(ResponseTemplate::new(200).set_body_string(body))
        .mount(&server)
        .await;

    let provider = OpenAiProvider::new(&openai_config(&server.uri()));
    let (tx, rx) = mpsc::channel(32);
    let cancel = CancellationToken::new();

    let result = provider.generate("test prompt", tx, cancel).await.unwrap();

    assert_eq!(result, "feat: add test");

    let tokens = drain_tokens(rx).await;
    assert!(
        !tokens.is_empty(),
        "expected streaming tokens to be received"
    );
}

// ─── OpenAI unauthorized ─────────────────────────────────────────────────────

#[tokio::test]
async fn openai_unauthorized() {
    let server = MockServer::start().await;

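    // verify_connection is expected to probe GET /models and surface the API error message.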
    Mock::given(method("GET"))
        .and(path("/models"))
        .respond_with(
            ResponseTemplate::new(401)
                .set_body_json(serde_json::json!({"error": {"message": "invalid API key"}})),
        )
        .mount(&server)
        .await;

    let provider = OpenAiProvider::new(&openai_config(&server.uri()));
    let result = provider.verify_connection().await;

    assert!(result.is_err(), "expected error for 401 response");
    let err = result.unwrap_err();
    match err {
        Error::Provider { provider, message } => {
            assert_eq!(provider, "openai");
            assert!(
                message.contains("invalid API key"),
                "expected 'invalid API key' in message, got: {message}"
            );
        }
        other => panic!("expected Provider error, got: {other:?}"),
    }
}

// ─── Anthropic provider (base URL is hardcoded) ──────────────────────────────
// The AnthropicProvider uses a hardcoded `BASE_URL` const pointing to
// `https://api.anthropic.com/v1`, so we cannot redirect it to wiremock.
// Instead, we test the Anthropic SSE format via the sanitizer pipeline below.
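// (If the base URL were configurable, as `openai_base_url` is for OpenAI,
// the Anthropic provider could be pointed at wiremock too.)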

// ─── Sanitizer: full JSON pipeline ───────────────────────────────────────────

#[test]
fn sanitizer_integration_with_llm_json() {
    let raw = r#"{"type":"feat","scope":"auth","subject":"add login endpoint","body":"Implements POST /login with JWT."}"#;

    let result = CommitSanitizer::sanitize(raw, &default_format()).unwrap();

    assert_eq!(
        result,
        "feat(auth): add login endpoint\n\nImplements POST /login with JWT."
    );
}

// ─── Sanitizer: preamble stripping ───────────────────────────────────────────

#[test]
fn sanitizer_integration_with_llm_preamble() {
    // Simulate an LLM that emits a preamble before the actual commit message.
    // Uses "Suggested commit:" which is a single-pattern match (avoids the known
    // overlapping preamble bug documented in CLAUDE.md).
    let raw = "Suggested commit: feat(cli): add verbose flag";
    let result = CommitSanitizer::sanitize(raw, &default_format()).unwrap();

    assert_eq!(result, "feat(cli): add verbose flag");
}

// ─── Sanitizer: Anthropic-style output ───────────────────────────────────────
// Since we can't point AnthropicProvider at wiremock, we test that the
// sanitizer correctly handles the kind of output Anthropic models produce.

#[test]
fn sanitizer_integration_with_anthropic_style_output() {
    // Anthropic models sometimes wrap JSON in markdown code fences
    let raw = r#"```json
{"type":"fix","scope":"parser","subject":"resolve bug in token scanner","body":"Fixes off-by-one error when scanning multi-byte characters."}
```"#;

    let result = CommitSanitizer::sanitize(raw, &default_format()).unwrap();

    assert_eq!(
        result,
        "fix(parser): resolve bug in token scanner\n\nFixes off-by-one error when scanning multi-byte characters."
    );
}
