@@ -33,7 +33,7 @@ impl Default for OpenAIConfig {
             api_key: std::env::var("OPENAI_API_KEY").unwrap_or_default(),
             base_url: OPENAI_API_BASE.to_string(),
             model: DEFAULT_MODEL.to_string(),
-            context_window: 128_000,
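+            // Raised to the 400K-token window used by the GPT-5 match arm below.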
+            context_window: 400_000,
             timeout_secs: 120,
             max_retries: 3,
             organization: std::env::var("OPENAI_ORG_ID").ok(),
@@ -72,10 +72,7 @@ impl OpenAIProvider {
     /// Check if this is a reasoning model
     fn is_reasoning_model(&self) -> bool {
         let model = self.config.model.to_lowercase();
-        model.contains("o1")
-            || model.contains("o3")
-            || model.contains("o4")
-            || model.starts_with("gpt-5")
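+        // Only GPT-5-family models are treated as reasoning models now.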
+        model.starts_with("gpt-5")
     }

     /// Send a request to OpenAI Responses API with retry logic
@@ -138,7 +135,7 @@ impl OpenAIProvider {
             input,
             instructions,
             max_completion_token: config.max_completion_token.or(config.max_tokens),
-            reasoning_effort: None,
+            reasoning: None,
             temperature: None,
             top_p: None,
             stop: config.stop.clone(),
@@ -150,7 +147,9 @@ impl OpenAIProvider {
             request.top_p = config.top_p;
         } else {
             // Add reasoning effort for reasoning models
-            request.reasoning_effort = config.reasoning_effort.clone();
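+            // The Responses API nests the effort value: {"reasoning": {"effort": "..."}}.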
+            request.reasoning = config.reasoning_effort.as_ref().map(|effort| Reasoning {
+                effort: effort.clone(),
+            });
         }

         let mut request_builder = self
@@ -197,8 +196,20 @@ impl LLMProvider for OpenAIProvider {
         let start = Instant::now();
         let response = self.send_request(messages, config).await?;

+        // Handle both old output_text field and new output array format
+        let content = if !response.output_text.is_empty() {
+            response.output_text
+        } else if !response.output.is_empty() {
+            response.output.iter()
+                .map(|o| o.content.as_str())
+                .collect::<Vec<_>>()
+                .join("\n")
+        } else {
+            String::new()
+        };
+
         Ok(LLMResponse {
-            content: response.output_text,
+            content,
             total_tokens: response.usage.as_ref().map(|u| u.total_tokens),
             prompt_tokens: response.usage.as_ref().map(|u| u.prompt_tokens),
             completion_tokens: response.usage.as_ref().map(|u| u.output_tokens),
@@ -232,17 +243,8 @@ impl LLMProvider for OpenAIProvider {
         // Characteristics vary by model
         let (max_tokens, rpm_limit, tpm_limit, supports_functions) =
             match self.config.model.as_str() {
-                // Reasoning models
-                m if m.contains("o1") => (200_000, Some(50), Some(30_000), false),
-                m if m.contains("o3") || m.contains("o4") => {
-                    (200_000, Some(50), Some(30_000), false)
-                }
-                m if m.starts_with("gpt-5") => (200_000, Some(50), Some(30_000), false),
-                // Standard models
-                "gpt-4o" => (128_000, Some(500), Some(30_000), true),
-                "gpt-4o-mini" => (128_000, Some(500), Some(200_000), true),
-                "gpt-4-turbo" => (128_000, Some(500), Some(30_000), true),
-                "gpt-4" => (8_192, Some(500), Some(10_000), true),
+                m if m.starts_with("gpt-5") => (400_000, Some(50), Some(30_000), true),
                 _ => (self.config.context_window, Some(500), Some(30_000), true),
             };

@@ -328,6 +330,11 @@ impl CodeIntelligenceProvider for OpenAIProvider {

 // OpenAI Responses API request/response types

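+/// Nested reasoning options for the Responses API request body.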
+#[derive(Debug, Serialize)]
+struct Reasoning {
+    effort: String,
+}
+
 #[derive(Debug, Serialize)]
 struct OpenAIRequest {
     model: String,
@@ -337,7 +344,7 @@ struct OpenAIRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     max_completion_token: Option<usize>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    reasoning_effort: Option<String>,
+    reasoning: Option<Reasoning>,
     #[serde(skip_serializing_if = "Option::is_none")]
     temperature: Option<f32>,
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -351,12 +358,24 @@ struct OpenAIResponse {
     id: String,
     #[serde(rename = "type")]
     response_type: String,
+    #[serde(default)]
     status: Option<String>,
+    #[serde(default)]
     output_text: String,
     #[serde(default)]
+    output: Vec<ResponseOutput>,
+    #[serde(default)]
     usage: Option<Usage>,
 }

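+/// One item of the Responses API output array; only its text content is used.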
+#[derive(Debug, Deserialize)]
+struct ResponseOutput {
+    #[serde(rename = "type")]
+    output_type: String,
+    #[serde(default)]
+    content: String,
+}
+
 #[derive(Debug, Deserialize)]
 struct Usage {
     prompt_tokens: usize,
@@ -389,7 +408,7 @@ mod tests {

     #[test]
     fn test_reasoning_model_detection() {
-        let models = vec!["o1-preview", "o3-mini", "o4-mini", "gpt-5"];
+        let models = vec!["gpt-5"];
         for model in models {
             let config = OpenAIConfig {
                 api_key: "test".to_string(),
@@ -404,22 +423,4 @@ mod tests {
             );
         }
     }
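+
+    // Illustrative sketch (assumes serde_json is available as a dev-dependency):
+    // the nested reasoning object serializes to the shape the Responses API expects.
+    #[test]
+    fn test_reasoning_serialization() {
+        let reasoning = Reasoning {
+            effort: "high".to_string(),
+        };
+        let json = serde_json::to_string(&reasoning).unwrap();
+        assert_eq!(json, r#"{"effort":"high"}"#);
+    }
+
+    // Sketch: the #[serde(default)] attributes let both old (output_text) and
+    // new (output array) payload shapes deserialize without error.
+    #[test]
+    fn test_output_array_deserialization() {
+        let json = r#"{"id":"resp_1","type":"response","output":[{"type":"message","content":"hi"}]}"#;
+        let response: OpenAIResponse = serde_json::from_str(json).unwrap();
+        assert!(response.output_text.is_empty());
+        assert_eq!(response.output[0].content, "hi");
+    }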
-
-    #[test]
-    fn test_standard_model_detection() {
-        let models = vec!["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"];
-        for model in models {
-            let config = OpenAIConfig {
-                api_key: "test".to_string(),
-                model: model.to_string(),
-                ..Default::default()
-            };
-            let provider = OpenAIProvider::new(config).unwrap();
-            assert!(
-                !provider.is_reasoning_model(),
-                "Model {} should NOT be detected as reasoning model",
-                model
-            );
-        }
-    }
 }