Skip to content

Commit e2cfce7

Browse files
committed
Chore: Fix warning across crates
1 parent d406c73 commit e2cfce7

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

41 files changed

+327
-242
lines changed

Cargo.lock

Lines changed: 13 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

clippy.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,6 @@ disallowed-methods = [
2222

2323
disallowed-types = [
2424
# Avoid raw pointers where safe abstractions exist
25-
"*const _",
26-
"*mut _",
25+
# Note: Wildcard patterns like "*const _" are not supported
26+
# Use specific types if needed, e.g., "*const std::ffi::c_void"
2727
]

crates/codegraph-ai/src/openai_compatible_provider.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ impl Default for OpenAICompatibleConfig {
3131
Self {
3232
base_url: "http://localhost:1234/v1".to_string(),
3333
model: "local-model".to_string(),
34-
context_window: 32_000,
34+
context_window: 256_000,
3535
timeout_secs: 120,
3636
max_retries: 3,
3737
api_key: None,
@@ -47,7 +47,7 @@ impl OpenAICompatibleConfig {
4747
Self {
4848
base_url: "http://localhost:1234/v1".to_string(),
4949
model,
50-
context_window: 32_000,
50+
context_window: 256_000,
5151
provider_name: "lmstudio".to_string(),
5252
use_responses_api: true,
5353
..Default::default()
@@ -59,7 +59,7 @@ impl OpenAICompatibleConfig {
5959
Self {
6060
base_url: "http://localhost:11434/v1".to_string(),
6161
model,
62-
context_window: 128_000,
62+
context_window: 256_000,
6363
provider_name: "ollama".to_string(),
6464
use_responses_api: true,
6565
..Default::default()

crates/codegraph-ai/src/openai_llm_provider.rs

Lines changed: 40 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ impl Default for OpenAIConfig {
3333
api_key: std::env::var("OPENAI_API_KEY").unwrap_or_default(),
3434
base_url: OPENAI_API_BASE.to_string(),
3535
model: DEFAULT_MODEL.to_string(),
36-
context_window: 128_000,
36+
context_window: 400000,
3737
timeout_secs: 120,
3838
max_retries: 3,
3939
organization: std::env::var("OPENAI_ORG_ID").ok(),
@@ -72,10 +72,7 @@ impl OpenAIProvider {
7272
/// Check if this is a reasoning model
7373
fn is_reasoning_model(&self) -> bool {
7474
let model = self.config.model.to_lowercase();
75-
model.contains("o1")
76-
|| model.contains("o3")
77-
|| model.contains("o4")
78-
|| model.starts_with("gpt-5")
75+
model.starts_with("gpt-5")
7976
}
8077

8178
/// Send a request to OpenAI Responses API with retry logic
@@ -138,7 +135,7 @@ impl OpenAIProvider {
138135
input,
139136
instructions,
140137
max_completion_token: config.max_completion_token.or(config.max_tokens),
141-
reasoning_effort: None,
138+
reasoning: None,
142139
temperature: None,
143140
top_p: None,
144141
stop: config.stop.clone(),
@@ -150,7 +147,9 @@ impl OpenAIProvider {
150147
request.top_p = config.top_p;
151148
} else {
152149
// Add reasoning effort for reasoning models
153-
request.reasoning_effort = config.reasoning_effort.clone();
150+
request.reasoning = config.reasoning_effort.as_ref().map(|effort| Reasoning {
151+
effort: effort.clone(),
152+
});
154153
}
155154

156155
let mut request_builder = self
@@ -197,8 +196,20 @@ impl LLMProvider for OpenAIProvider {
197196
let start = Instant::now();
198197
let response = self.send_request(messages, config).await?;
199198

199+
// Handle both old output_text field and new output array format
200+
let content = if !response.output_text.is_empty() {
201+
response.output_text
202+
} else if !response.output.is_empty() {
203+
response.output.iter()
204+
.map(|o| o.content.as_str())
205+
.collect::<Vec<_>>()
206+
.join("\n")
207+
} else {
208+
String::new()
209+
};
210+
200211
Ok(LLMResponse {
201-
content: response.output_text,
212+
content,
202213
total_tokens: response.usage.as_ref().map(|u| u.total_tokens),
203214
prompt_tokens: response.usage.as_ref().map(|u| u.prompt_tokens),
204215
completion_tokens: response.usage.as_ref().map(|u| u.output_tokens),
@@ -232,17 +243,8 @@ impl LLMProvider for OpenAIProvider {
232243
// Characteristics vary by model
233244
let (max_tokens, rpm_limit, tpm_limit, supports_functions) =
234245
match self.config.model.as_str() {
235-
// Reasoning models
236-
m if m.contains("o1") => (200_000, Some(50), Some(30_000), false),
237-
m if m.contains("o3") || m.contains("o4") => {
238-
(200_000, Some(50), Some(30_000), false)
239-
}
240-
m if m.starts_with("gpt-5") => (200_000, Some(50), Some(30_000), false),
241-
// Standard models
242-
"gpt-4o" => (128_000, Some(500), Some(30_000), true),
243-
"gpt-4o-mini" => (128_000, Some(500), Some(200_000), true),
244-
"gpt-4-turbo" => (128_000, Some(500), Some(30_000), true),
245-
"gpt-4" => (8_192, Some(500), Some(10_000), true),
246+
m if m.starts_with("gpt-5") => (400_000, Some(50), Some(30_000), true),
247+
246248
_ => (self.config.context_window, Some(500), Some(30_000), true),
247249
};
248250

@@ -328,6 +330,11 @@ impl CodeIntelligenceProvider for OpenAIProvider {
328330

329331
// OpenAI Responses API request/response types
330332

333+
#[derive(Debug, Serialize)]
334+
struct Reasoning {
335+
effort: String,
336+
}
337+
331338
#[derive(Debug, Serialize)]
332339
struct OpenAIRequest {
333340
model: String,
@@ -337,7 +344,7 @@ struct OpenAIRequest {
337344
#[serde(skip_serializing_if = "Option::is_none")]
338345
max_completion_token: Option<usize>,
339346
#[serde(skip_serializing_if = "Option::is_none")]
340-
reasoning_effort: Option<String>,
347+
reasoning: Option<Reasoning>,
341348
#[serde(skip_serializing_if = "Option::is_none")]
342349
temperature: Option<f32>,
343350
#[serde(skip_serializing_if = "Option::is_none")]
@@ -351,12 +358,24 @@ struct OpenAIResponse {
351358
id: String,
352359
#[serde(rename = "type")]
353360
response_type: String,
361+
#[serde(default)]
354362
status: Option<String>,
363+
#[serde(default)]
355364
output_text: String,
356365
#[serde(default)]
366+
output: Vec<ResponseOutput>,
367+
#[serde(default)]
357368
usage: Option<Usage>,
358369
}
359370

371+
#[derive(Debug, Deserialize)]
372+
struct ResponseOutput {
373+
#[serde(rename = "type")]
374+
output_type: String,
375+
#[serde(default)]
376+
content: String,
377+
}
378+
360379
#[derive(Debug, Deserialize)]
361380
struct Usage {
362381
prompt_tokens: usize,
@@ -389,7 +408,7 @@ mod tests {
389408

390409
#[test]
391410
fn test_reasoning_model_detection() {
392-
let models = vec!["o1-preview", "o3-mini", "o4-mini", "gpt-5"];
411+
let models = vec!["gpt-5"];
393412
for model in models {
394413
let config = OpenAIConfig {
395414
api_key: "test".to_string(),
@@ -404,22 +423,4 @@ mod tests {
404423
);
405424
}
406425
}
407-
408-
#[test]
409-
fn test_standard_model_detection() {
410-
let models = vec!["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"];
411-
for model in models {
412-
let config = OpenAIConfig {
413-
api_key: "test".to_string(),
414-
model: model.to_string(),
415-
..Default::default()
416-
};
417-
let provider = OpenAIProvider::new(config).unwrap();
418-
assert!(
419-
!provider.is_reasoning_model(),
420-
"Model {} should NOT be detected as reasoning model",
421-
model
422-
);
423-
}
424-
}
425426
}

crates/codegraph-core/src/buffer_pool.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ pub struct PooledBuffer {
8888
}
8989

9090
impl PooledBuffer {
91-
pub fn as_mut(&mut self) -> &mut Vec<u8> {
91+
pub fn buffer_mut(&mut self) -> &mut Vec<u8> {
9292
self.buf.as_mut().unwrap()
9393
}
9494

crates/codegraph-core/src/incremental/updater.rs

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -135,8 +135,8 @@ impl IncrementalCache {
135135
}
136136
items.sort_by_key(|(_f, t)| *t);
137137
let to_remove = items.len().saturating_sub(self.max_files);
138-
for i in 0..to_remove {
139-
self.invalidate_file(&items[i].0);
138+
for (file, _) in items.iter().take(to_remove) {
139+
self.invalidate_file(file);
140140
}
141141
}
142142
}
@@ -438,8 +438,6 @@ mod tests {
438438
use crate::traits::VectorStore;
439439
use crate::CodeGraphError;
440440
use async_trait::async_trait;
441-
use crossbeam_channel::{unbounded, Receiver, Sender};
442-
use tokio_test::block_on;
443441

444442
struct InMemoryParser {
445443
files: DashMap<String, Vec<CodeNode>>,

crates/codegraph-core/src/integration/graph_vector.rs

Lines changed: 13 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,18 @@ use dashmap::DashMap;
55
use memmap2::Mmap;
66
use std::collections::HashSet;
77
use std::fs::File;
8+
use std::future::Future;
89
use std::hash::{Hash, Hasher};
910
use std::path::Path;
11+
use std::pin::Pin;
1012
use std::sync::Arc;
1113
use tokio::sync::Mutex;
1214

15+
/// Type alias for async embedding function
16+
type EmbeddingFn = Arc<
17+
dyn Fn(CodeNode) -> Pin<Box<dyn Future<Output = Result<Vec<f32>>> + Send>> + Send + Sync,
18+
>;
19+
1320
/// Embedding service abstraction used by the integrator.
1421
///
1522
/// This lives in `core` to avoid a dependency cycle on the `codegraph-vector` crate.
@@ -76,9 +83,9 @@ impl EmbeddingService for HasherEmbeddingService {
7683
}
7784
let mut state = hash;
7885
let mut v = vec![0.0f32; self.dim];
79-
for i in 0..self.dim {
86+
for val in v.iter_mut().take(self.dim) {
8087
state = state.wrapping_mul(1103515245).wrapping_add(12345);
81-
v[i] = ((state as f32 / u32::MAX as f32) - 0.5) * 2.0;
88+
*val = ((state as f32 / u32::MAX as f32) - 0.5) * 2.0;
8289
}
8390
let norm: f32 = v.iter().map(|x| x * x).sum::<f32>().sqrt();
8491
if norm > 0.0 {
@@ -90,15 +97,10 @@ impl EmbeddingService for HasherEmbeddingService {
9097
}
9198
}
9299

93-
use std::future::Future;
94-
use std::pin::Pin;
95-
96100
/// Adapter to build an embedding service from an async function/closure.
97101
pub struct FnEmbeddingService {
98102
dim: usize,
99-
func: Arc<
100-
dyn Fn(CodeNode) -> Pin<Box<dyn Future<Output = Result<Vec<f32>>> + Send>> + Send + Sync,
101-
>,
103+
func: EmbeddingFn,
102104
}
103105

104106
impl FnEmbeddingService {
@@ -107,11 +109,7 @@ impl FnEmbeddingService {
107109
F: Fn(CodeNode) -> Fut + Send + Sync + 'static,
108110
Fut: Future<Output = Result<Vec<f32>>> + Send + 'static,
109111
{
110-
let func: Arc<
111-
dyn Fn(CodeNode) -> Pin<Box<dyn Future<Output = Result<Vec<f32>>> + Send>>
112-
+ Send
113-
+ Sync,
114-
> = Arc::new(move |n: CodeNode| {
112+
let func: EmbeddingFn = Arc::new(move |n: CodeNode| {
115113
let fut = f(n);
116114
Box::pin(fut)
117115
});
@@ -233,7 +231,7 @@ impl SnippetExtractor {
233231
/// Maintains a vector index synced with the code graph and provides semantic search returning graph nodes.
234232
pub struct GraphVectorIntegrator {
235233
graph: Arc<dyn GraphStore>,
236-
vector: Arc<Mutex<Box<dyn VectorStore>>>,
234+
vector: Arc<Mutex<Box<dyn VectorStore + Send>>>,
237235
embedder: Arc<dyn EmbeddingService>,
238236
extractor: SnippetExtractor,
239237
// Track node signatures for incremental updates
@@ -243,7 +241,7 @@ pub struct GraphVectorIntegrator {
243241
impl GraphVectorIntegrator {
244242
pub fn new(
245243
graph: Arc<dyn GraphStore>,
246-
vector: Box<dyn VectorStore>,
244+
vector: Box<dyn VectorStore + Send>,
247245
embedder: Arc<dyn EmbeddingService>,
248246
) -> Self {
249247
Self {
@@ -429,7 +427,6 @@ impl GraphVectorIntegrator {
429427
mod tests {
430428
use super::*;
431429
use crate::{Language, Location, Metadata, NodeType};
432-
use crossbeam_channel::{unbounded, Receiver, Sender};
433430
use std::collections::HashMap;
434431
use tokio_test::block_on;
435432

crates/codegraph-core/src/integration/parser_graph.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -641,8 +641,8 @@ async fn collect_source_files(dir: &str) -> Result<Vec<PathBuf>> {
641641
}
642642

643643
fn is_supported_source(path: &Path) -> bool {
644-
match path.extension().and_then(|s| s.to_str()) {
645-
Some("rs" | "ts" | "js" | "py" | "go" | "java" | "cpp" | "cc" | "cxx") => true,
646-
_ => false,
647-
}
644+
matches!(
645+
path.extension().and_then(|s| s.to_str()),
646+
Some("rs" | "ts" | "js" | "py" | "go" | "java" | "cpp" | "cc" | "cxx")
647+
)
648648
}

crates/codegraph-core/src/memory/arena.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -213,6 +213,10 @@ impl<T> ChunkArena<T> {
213213
self.len
214214
}
215215

216+
pub fn is_empty(&self) -> bool {
217+
self.len == 0
218+
}
219+
216220
/// Push an element, moving it into the arena.
217221
pub fn push(&mut self, value: T) {
218222
self.ensure_chunk();

crates/codegraph-core/src/memory/compact_map.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -32,27 +32,27 @@ impl<K: Eq + Hash, V> CompactHashMap<K, V> {
3232
pub fn clear(&mut self) {
3333
self.0.clear()
3434
}
35-
pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
35+
pub fn get<Q>(&self, k: &Q) -> Option<&V>
3636
where
37+
Q: ?Sized + std::hash::Hash + Eq,
3738
K: std::borrow::Borrow<Q>,
38-
Q: std::hash::Hash + Eq,
3939
{
4040
self.0.get(k)
4141
}
42-
pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
42+
pub fn get_mut<Q>(&mut self, k: &Q) -> Option<&mut V>
4343
where
44+
Q: ?Sized + std::hash::Hash + Eq,
4445
K: std::borrow::Borrow<Q>,
45-
Q: std::hash::Hash + Eq,
4646
{
4747
self.0.get_mut(k)
4848
}
4949
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
5050
self.0.insert(k, v)
5151
}
52-
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
52+
pub fn remove<Q>(&mut self, k: &Q) -> Option<V>
5353
where
54+
Q: ?Sized + std::hash::Hash + Eq,
5455
K: std::borrow::Borrow<Q>,
55-
Q: std::hash::Hash + Eq,
5656
{
5757
self.0.remove(k)
5858
}

0 commit comments

Comments
 (0)