Commit dacbc03

fix: handle both output formats in Responses API parsing
Fixed "Failed to parse OpenAI Responses API response" error that was breaking agentic tools with Ollama, LM Studio, and other OpenAI-compatible providers.

Changes:
- Added #[serde(default)] to ResponseAPIResponse fields to handle missing fields
- Added support for both output_text (string) and output (array) response formats
- Added ResponseOutput struct to deserialize array elements
- Updated generate_chat to try output_text first, then fall back to output array

This ensures the parser handles response variations across different providers and matches the behavior of the official OpenAI provider implementation.
1 parent 5041b27 commit dacbc03
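
For context, the two payload shapes the parser must accept look roughly like the sketch below. The samples are illustrative only (invented values, usage omitted); the field names follow the structs in the diff further down, and real providers may include additional fields that the parser simply ignores.

// Illustrative payloads only; values are invented, field names match the patched structs.

// Flat shape: the completion arrives as a single output_text string.
const FLAT_SHAPE: &str = r#"{
    "id": "resp_1",
    "type": "response",
    "status": "completed",
    "output_text": "Hello from the model"
}"#;

// Array shape: the completion arrives as an output array of typed items.
const ARRAY_SHAPE: &str = r#"{
    "id": "resp_2",
    "type": "response",
    "output": [
        { "type": "message", "content": "Hello" },
        { "type": "message", "content": "from the model" }
    ]
}"#;

fn main() {
    println!("{FLAT_SHAPE}\n{ARRAY_SHAPE}");
}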

crates/codegraph-ai/src/openai_compatible_provider.rs

Lines changed: 27 additions & 2 deletions
@@ -36,7 +36,7 @@ impl Default for OpenAICompatibleConfig {
             max_retries: 3,
             api_key: None,
             provider_name: "openai-compatible".to_string(),
-            use_responses_api: true, // Default to new Responses API
+            use_responses_api: true, // All providers support Responses API
         }
     }
 }
@@ -294,6 +294,7 @@ impl OpenAICompatibleProvider {
             response_type: "response".to_string(),
             status: choice.finish_reason.clone(),
             output_text: choice.message.content.clone(),
+            output: Vec::new(), // Chat Completions uses output_text, not output array
             usage: chat_response.usage.map(|u| Usage {
                 prompt_tokens: u.prompt_tokens,
                 output_tokens: u.completion_tokens,
@@ -313,8 +314,20 @@ impl LLMProvider for OpenAICompatibleProvider {
     ) -> LLMResult<LLMResponse> {
         let response = self.send_request(messages, config).await?;
 
+        // Handle both old output_text field and new output array format
+        let content = if !response.output_text.is_empty() {
+            response.output_text
+        } else if !response.output.is_empty() {
+            response.output.iter()
+                .map(|o| o.content.as_str())
+                .collect::<Vec<_>>()
+                .join("\n")
+        } else {
+            String::new()
+        };
+
         Ok(LLMResponse {
-            content: response.output_text,
+            content,
             total_tokens: response.usage.as_ref().map(|u| u.total_tokens),
             prompt_tokens: response.usage.as_ref().map(|u| u.prompt_tokens),
             completion_tokens: response.usage.as_ref().map(|u| u.output_tokens),
@@ -446,12 +459,24 @@ struct ResponseAPIResponse {
     id: String,
     #[serde(rename = "type")]
     response_type: String,
+    #[serde(default)]
     status: Option<String>,
+    #[serde(default)]
     output_text: String,
     #[serde(default)]
+    output: Vec<ResponseOutput>,
+    #[serde(default)]
     usage: Option<Usage>,
 }
 
+#[derive(Debug, Deserialize)]
+struct ResponseOutput {
+    #[serde(rename = "type")]
+    output_type: String,
+    #[serde(default)]
+    content: String,
+}
+
 #[derive(Debug, Deserialize)]
 struct Usage {
     prompt_tokens: usize,
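
Taken together, the new structs and the fallback can be exercised in isolation. Below is a minimal, self-contained sketch: the struct definitions are copied from the diff (usage omitted for brevity; serde ignores unknown fields by default), while the extract_content helper and the sample payloads are illustrative stand-ins that mirror the fallback in generate_chat rather than code from the crate. Assumed dependencies: serde with the derive feature, and serde_json.

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ResponseAPIResponse {
    id: String,
    #[serde(rename = "type")]
    response_type: String,
    #[serde(default)]
    status: Option<String>,
    #[serde(default)]
    output_text: String,
    #[serde(default)]
    output: Vec<ResponseOutput>,
}

#[derive(Debug, Deserialize)]
struct ResponseOutput {
    #[serde(rename = "type")]
    output_type: String,
    #[serde(default)]
    content: String,
}

// Illustrative helper mirroring the fallback in generate_chat:
// prefer output_text, then join the output array, else empty.
fn extract_content(response: ResponseAPIResponse) -> String {
    if !response.output_text.is_empty() {
        response.output_text
    } else if !response.output.is_empty() {
        response.output.iter()
            .map(|o| o.content.as_str())
            .collect::<Vec<_>>()
            .join("\n")
    } else {
        String::new()
    }
}

fn main() {
    // Flat shape: `output` is absent and defaults to an empty Vec.
    let flat: ResponseAPIResponse = serde_json::from_str(
        r#"{ "id": "r1", "type": "response", "output_text": "hello" }"#,
    ).expect("flat shape should deserialize");
    assert_eq!(extract_content(flat), "hello");

    // Array shape: `output_text` is absent and defaults to "", so the array wins.
    let array: ResponseAPIResponse = serde_json::from_str(
        r#"{ "id": "r2", "type": "response",
             "output": [ { "type": "message", "content": "hello" },
                         { "type": "message", "content": "world" } ] }"#,
    ).expect("array shape should deserialize");
    assert_eq!(extract_content(array), "hello\nworld");

    println!("both shapes parsed and extracted");
}

Without the #[serde(default)] attributes, the second payload would fail to deserialize because output_text is missing, which is exactly the "Failed to parse OpenAI Responses API response" error this commit fixes.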
