diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yml new file mode 100644 index 0000000..54a4a91 --- /dev/null +++ b/.github/workflows/build-binaries.yml @@ -0,0 +1,199 @@ +name: Build Binaries + +on: + push: + branches: [ main ] + tags: + - 'v*.*.*' + pull_request: + branches: [ main ] + +jobs: + build: + name: Build ${{ matrix.target }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + # macOS + - target: x86_64-apple-darwin + os: macos-13 + arch: x86_64 + suffix: apple-darwin-x86_64 + - target: aarch64-apple-darwin + os: macos-14 + arch: arm64 + suffix: apple-darwin-arm64 + # Linux (Disabled - Uncomment if needed) + # - target: x86_64-unknown-linux-gnu + # os: ubuntu-22.04 + # arch: x86_64 + # suffix: unknown-linux-gnu-x86_64 + # - target: aarch64-unknown-linux-gnu + # os: ubuntu-22.04 + # arch: arm64 + # suffix: unknown-linux-gnu-arm64 + # Windows (Disabled - Uncomment if needed) + # - target: x86_64-pc-windows-msvc + # os: windows-2022 + # arch: x86_64 + # suffix: pc-windows-msvc-x86_64 + # - target: aarch64-pc-windows-msvc + # os: windows-2022 + # arch: arm64 + # suffix: pc-windows-msvc-arm64 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - name: Install macOS dependencies + if: matrix.os == 'macos-13' || matrix.os == 'macos-14' + run: | + brew install cmake pkg-config + + - name: Install Linux dependencies + if: matrix.os == 'ubuntu-22.04' + run: | + sudo apt-get update + sudo apt-get install -y cmake pkg-config libssl-dev + + - name: Cache cargo registry + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-${{ matrix.arch }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Build binary + run: | + rustup target add ${{ matrix.target }} + cargo build --release --target ${{ matrix.target 
}} + # Strip binary for smaller size (macOS/Linux) + if [ "${{ matrix.os }}" != "windows-2022" ]; then + strip target/${{ matrix.target }}/release/git-ca + fi + + - name: Create archive + run: | + cd target/${{ matrix.target }}/release + if [ "${{ matrix.os }}" == "windows-2022" ]; then + 7z a git-ca-${{ matrix.suffix }}.zip git-ca.exe + else + tar czf git-ca-${{ matrix.suffix }}.tar.gz git-ca + fi + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: git-ca-${{ matrix.suffix }} + path: target/${{ matrix.target }}/release/git-ca-${{ matrix.suffix }}.* + retention-days: 30 + + release: + name: Create Release + if: startsWith(github.ref, 'refs/tags/v') + runs-on: ubuntu-latest + needs: build + permissions: + contents: write + id-token: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts/ + + - name: Display structure of downloaded files + run: ls -R artifacts/ + + - name: Get version from tag + id: get_version + run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT + + - name: Generate changelog + id: changelog + run: | + PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v ${{ github.ref_name }} | head -n 1) + if [ -z "$PREVIOUS_TAG" ]; then + CHANGELOG=$(git log --pretty=format:"* %s (%an)" ${{ github.ref_name }}) + else + CHANGELOG=$(git log --pretty=format:"* %s (%an)" $PREVIOUS_TAG..${{ github.ref_name }}) + fi + echo "CHANGELOG<<EOF" >> $GITHUB_OUTPUT + echo "$CHANGELOG" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Upload release assets + run: | + cd artifacts + find . 
-name "git-ca-*" -type f | while read file; do + echo "Uploading $file" + done + + - name: Create Release + uses: softprops/action-gh-release@v1 + with: + name: git-ca ${{ github.ref_name }} + body: | + ## Changelog + ${{ steps.changelog.outputs.CHANGELOG }} + + ## Downloads + + ### macOS (Apple Silicon) + - [git-ca-apple-darwin-arm64.tar.gz](https://github.com/${{ github.repository }}/releases/download/${{ github.ref_name }}/git-ca-apple-darwin-arm64.tar.gz) + + ### macOS (Intel) + - [git-ca-apple-darwin-x86_64.tar.gz](https://github.com/${{ github.repository }}/releases/download/${{ github.ref_name }}/git-ca-apple-darwin-x86_64.tar.gz) + + ## Installation + + ### Using Homebrew (Recommended) + ```bash + brew tap zh30/tap + brew install git-ca + ``` + + ### From Release + Download the appropriate archive for your platform and extract it: + ```bash + # macOS + tar -xzf git-ca-PLATFORM.tar.gz + sudo mv git-ca /usr/local/bin/ + ``` + draft: false + prerelease: false + generate_release_notes: true + files: | + artifacts/*/git-ca-* + + - name: Compute checksums + run: | + mkdir checksums + cd artifacts + find . 
-name "git-ca-*" -type f | while read file; do + if [[ "$file" == *.tar.gz ]]; then + sha256sum "$file" >> ../checksums/checksums.txt + elif [[ "$file" == *.zip ]]; then + sha256sum "$file" >> ../checksums/checksums.txt + fi + done + + - name: Upload checksums + uses: softprops/action-gh-release@v1 + with: + files: checksums/checksums.txt diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 725dbe4..d8e9ded 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,7 +6,7 @@ on: - 'v*.*.*' jobs: - create-release: + update-homebrew: runs-on: ubuntu-latest permissions: contents: write @@ -20,58 +20,87 @@ jobs: id: get_version run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT - - name: Generate changelog - id: changelog + - name: Download release assets + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - PREVIOUS_TAG=$(git tag --sort=-version:refname | grep -v ${{ github.ref_name }} | head -n 1) - if [ -z "$PREVIOUS_TAG" ]; then - CHANGELOG=$(git log --pretty=format:"* %s (%an)" ${{ github.ref_name }}) - else - CHANGELOG=$(git log --pretty=format:"* %s (%an)" $PREVIOUS_TAG..${{ github.ref_name }}) - fi - echo "CHANGELOG<> $GITHUB_OUTPUT - echo "$CHANGELOG" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - - name: Create Release - id: create_release - uses: softprops/action-gh-release@v1 - with: - name: git-ca ${{ github.ref_name }} - body: | - ## Changelog - ${{ steps.changelog.outputs.CHANGELOG }} - draft: false - prerelease: false - generate_release_notes: true - - - name: Get SHA256 - id: get_sha + # Download all artifacts from the build-binaries workflow + gh api -H "Accept: application/vnd.github+json" \ + "/repos/${{ github.repository }}/actions/artifacts?per_page=100" \ + --jq '.artifacts[] | select(.name | contains("git-ca-")) | .archive_download_url' \ + | xargs -I {} -n 1 bash -c 'curl -L -H "Authorization: token $GITHUB_TOKEN" {} -o artifacts/$(basename {})' + + # Or download 
from release assets + gh release download ${{ github.ref_name }} --pattern 'git-ca-*' -D artifacts/ + + - name: Display downloaded files + run: ls -la artifacts/ + + - name: Calculate checksums run: | - TARBALL_URL="https://github.com/${{ github.repository }}/archive/refs/tags/${{ github.ref_name }}.tar.gz" - SHA256=$(curl -L $TARBALL_URL | shasum -a 256 | awk '{print $1}') - echo "SHA256=$SHA256" >> $GITHUB_OUTPUT + cd artifacts + for file in git-ca-*; do + echo "=== $file ===" + sha256sum "$file" + done > ../checksums.txt + cat ../checksums.txt + + - name: Extract checksums for each platform + id: extract_checksums + run: | + # Extract checksums for each platform + ARM64_MACOS=$(grep "apple-darwin-arm64" checksums.txt | awk '{print $1}') + X86_64_MACOS=$(grep "apple-darwin-x86_64" checksums.txt | awk '{print $1}') + # Linux builds disabled + ARM64_LINUX="DISABLED_LINUX" + X86_64_LINUX="DISABLED_LINUX" + + echo "ARM64_MACOS=${ARM64_MACOS}" >> $GITHUB_OUTPUT + echo "X86_64_MACOS=${X86_64_MACOS}" >> $GITHUB_OUTPUT + echo "ARM64_LINUX=${ARM64_LINUX}" >> $GITHUB_OUTPUT + echo "X86_64_LINUX=${X86_64_LINUX}" >> $GITHUB_OUTPUT + + echo "Checksums extracted:" + echo " ARM64 macOS: ${ARM64_MACOS}" + echo " x86_64 macOS: ${X86_64_MACOS}" + echo "" + echo "Note: Linux builds are disabled due to compilation issues" + echo " Windows builds are available via GitHub Releases but not distributed via Homebrew" - - name: Update Homebrew formula + - name: Update Homebrew formula with bottle checksums run: | VERSION=${{ steps.get_version.outputs.VERSION }} - SHA256=${{ steps.get_sha.outputs.SHA256 }} - sed -i "s|url \".*\"|url \"https://github.com/${{ github.repository }}/archive/refs/tags/v${VERSION}.tar.gz\"|" git-ca.rb - sed -i "s|sha256 \".*\"|sha256 \"${SHA256}\"|" git-ca.rb + + # Update version and root URL + sed -i.bak "s|url \"https://github.com/${{ github.repository }}/archive/refs/tags/v.*\.tar.gz\"|url \"https://github.com/${{ github.repository 
}}/archive/refs/tags/v${VERSION}.tar.gz\"|" git-ca.rb + + # Update bottle checksums + sed -i.bak "s|sha256 cellar: :any_skip_relocate, arm64_sequoia: \".*\"|sha256 cellar: :any_skip_relocate, arm64_sequoia: \"${{ steps.extract_checksums.outputs.ARM64_MACOS }}\"|" git-ca.rb + sed -i.bak "s|sha256 cellar: :any_skip_relocate, x86_64_sequoia: \".*\"|sha256 cellar: :any_skip_relocate, x86_64_sequoia: \"${{ steps.extract_checksums.outputs.X86_64_MACOS }}\"|" git-ca.rb + sed -i.bak "s|sha256 cellar: :any_skip_relocate, arm64_linux: \".*\"|sha256 cellar: :any_skip_relocate, arm64_linux: \"${{ steps.extract_checksums.outputs.ARM64_LINUX }}\"|" git-ca.rb + sed -i.bak "s|sha256 cellar: :any_skip_relocate, x86_64_linux: \".*\"|sha256 cellar: :any_skip_relocate, x86_64_linux: \"${{ steps.extract_checksums.outputs.X86_64_LINUX }}\"|" git-ca.rb + + # Update version number in formula + sed -i.bak "s|git-ca/archive/refs/tags/v.*\.tar.gz|git-ca/archive/refs/tags/v${VERSION}.tar.gz|" git-ca.rb + + echo "Updated git-ca.rb:" + cat git-ca.rb - name: Update Homebrew Tap + env: + GITHUB_TOKEN: ${{ secrets.TARGET_REPO_PAT }} run: | git config --global user.name "GitHub Actions" git config --global user.email "actions@github.com" - + # Clone the homebrew-tap repository git clone https://x-access-token:${{ secrets.TARGET_REPO_PAT }}@github.com/zh30/homebrew-tap.git - + # Copy the updated formula to the tap repository cp git-ca.rb homebrew-tap/ - + # Commit and push the changes to the tap repository cd homebrew-tap git add git-ca.rb - git commit -m "chore: update git-ca to v${{ steps.get_version.outputs.VERSION }}" + git commit -m "chore: update git-ca to v${{ steps.get_version.outputs.VERSION }} with bottle checksums" git push \ No newline at end of file diff --git a/.gitignore b/.gitignore index a87e6f9..2cecc15 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ out dist node_modules .vscode-test/ -*.vsix \ No newline at end of file +*.vsix +.git-ca/ diff --git a/AGENTS.md b/AGENTS.md 
index a78d99b..923e8e6 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,32 +1,24 @@ # Repository Guidelines ## Project Structure & Module Organization -- `src/main.rs` hosts the CLI entrypoint, Git/Ollama integrations, and all user prompts; keep new modules small and import them from `main.rs` until a `src/` submodule is justified. -- `install-git-ca.sh` and `git-ca.rb` handle installer automation (shell and Homebrew); update both when distribution steps change. -- `README*.md`, `CLAUDE.md`, and `DEPLOY.md` are user-facing references—mirror any behavior changes here. -- Build artifacts land in `target/`; never commit that directory. Generated assets belong under `target/` or a new ignored path, not under `src/`. +The CLI entrypoint, prompt workflow, and llama.cpp bindings live in `src/main.rs`, with engine-specific helpers in `src/llama.rs`. Keep new logic scoped to these files until the surface area justifies an extracted module. Inline unit tests belong beside the code they cover; multi-step workflows (Git staging, model selection) should be promoted to `tests/`. Generated assets remain under `target/` or another ignored directory—never check them into `src/` or `tests/`. ## Build, Test, and Development Commands -- `cargo build --release` compiles the binary that installers copy into `~/.git-plugins`. -- `cargo run -- git ca` runs the CLI with local changes; use staged diffs in a sample repo to validate prompts. -- `cargo fmt` and `cargo clippy -- -D warnings` enforce Rust style and catch regressions before review. +- `cargo build --release` — produce the optimized binary that installers copy to `~/.git-plugins`. +- `cargo run -- git ca` — run the analyzer against staged changes in the current repo to validate prompt flows. +- `cargo fmt` — enforce rustfmt defaults (4-space indent, 100-column width). +- `cargo clippy -- -D warnings` — lint with warnings treated as build failures. +- `cargo test` — execute all unit tests; run before every commit and PR. 
+- Llama.cpp context length is fixed to 1024 tokens. ## Coding Style & Naming Conventions -- Follow rustfmt defaults (4-space indent, 100-column wrap) via `cargo fmt`; do not hand-format. -- Use `snake_case` for functions/files, `SCREAMING_SNAKE_CASE` for constants like `COMMIT_TYPES`, and `CamelCase` for types/enums. -- Prefer descriptive error messages and `?` propagation; add short comments only around non-obvious Git/Ollama logic. +Use `snake_case` for functions/files, `CamelCase` for types/enums, and `SCREAMING_SNAKE_CASE` for constants such as `COMMIT_TYPES`. Let rustfmt manage alignment and spacing. Prefer error propagation with `?`, returning `AppError::Custom` only when you need a user-facing message. Comments should explain non-obvious Git plumbing or llama-specific constraints; avoid restating what the code already conveys. ## Testing Guidelines -- Add unit tests in `#[cfg(test)]` modules next to the code under test; name functions `fn handles_*` to reflect behavior. -- Place integration tests under `tests/` when flows require multiple Git operations. -- Run `cargo test` locally; target meaningful branch coverage for new logic and document any manual verification (`git ca` run with staged fixtures) in the PR. +Unit tests live in `#[cfg(test)]` modules with descriptive names like `handles_retry_backoff`. Integration flows that combine Git operations, prompt generation, and llama inference should move into `tests/`. Always run `cargo test` locally and note any manual `cargo run -- git ca` checks (e.g., staged fixture repos) in PR descriptions. Target meaningful branch coverage over exhaustive mocking. ## Commit & Pull Request Guidelines -- Use Conventional Commit prefixes observed in history (e.g., `feat(client): ...`, `chore: ...`); scopes and descriptions can be English or Simplified Chinese. -- Squash work into logically complete commits with passing builds/tests. 
-- PRs must include: summary of behavior change, testing notes (`cargo test`, manual `git ca` checks), and updates to impacted docs. -- Link related issues and add screenshots or terminal captures when altering user prompts or install UX. +Follow the existing Conventional Commit style—examples include `feat(cli): simplify prompt`, `fix(llama): handle kv cache reset`, `chore(deps): update dependencies`. Each PR must summarise behavior changes, list verification steps (tests, manual runs), and update affected docs (`README*.md`, `DEPLOY.md`, `CLAUDE.md`). Link relevant issues and include terminal captures when altering user-visible prompts or installer UX. -## Ollama & Model Configuration Tips -- Keep a local Ollama instance running at `localhost:11434`; document any alternative endpoints in `DEPLOY.md` before merging. -- When introducing new model flows, ensure defaults are persisted via `git config` keys `commit-analyzer.model` and `commit-analyzer.language`. +## Model & Configuration Tips +By default the tool scans `./models` and cache directories for llama.cpp-compatible GGUF files, persists the user's selection, and reuses it on subsequent runs. Non-interactive invocations reuse the stored model or fall back to the first discovered GGUF. Document any alternative endpoints or model defaults in `DEPLOY.md` before merging. Store credentials in ignored env files, not in tracked sources, and confirm large lockfiles remain ignored or summarized automatically by the diff truncation logic. diff --git a/CLAUDE.md b/CLAUDE.md index c87d7da..866c54f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -3,138 +3,267 @@ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. ## Project Overview -Git Commit Analyzer is a Rust-based Git plugin that uses Ollama AI to generate meaningful commit messages from staged changes. 
It follows Git Flow format conventions, supports multiple languages, and provides both a CLI tool and a VS Code extension for enhanced developer experience. - -## Core Architecture -- **Primary Language**: Rust (edition 2021) for CLI tool -- **Extension Language**: TypeScript for VS Code integration -- **Entry Point**: `src/main.rs:645` - main function handles CLI arguments and orchestrates the workflow -- **Key Dependencies**: - - `git2` for Git operations - - `reqwest` for Ollama API communication (with blocking and json features) - - `serde_json` for JSON handling -- **Project Structure**: Single binary crate with separate VS Code extension in `vscode-extension/` -- **One-Click Installation**: Automated installation script at `install-git-ca.sh` for cross-platform deployment -## Development Commands +**Purpose**: CLI helper that generates Git Flow–style commit messages from staged changes using local llama.cpp inference. -### Rust CLI Tool -```bash -cargo build --release # Build release binary -cargo run # Run in debug mode -cargo run -- model # Change default Ollama model -cargo run -- language # Change default output language -cargo check # Quick check for compilation errors -cargo clippy # Lint code -cargo fmt # Format code -cargo test # Run tests (no test framework currently configured) -``` +**Runtime**: Pure Rust binary (`bin = "git-ca"`). No web services or VS Code extension. + +**AI Backend**: Local llama.cpp inference via the `llama_cpp_sys_2` crate. Models are GGUF files discovered in local directories, with the chosen path persisted for subsequent runs (non-interactive invocations reuse the stored path or the first match). + +**Prompt Workflow**: Staged diff is summarised, validated, and fed to the model; invalid output triggers retries with stricter instructions, then falls back to deterministic generation. 
+ +## High-Level Architecture + +The application follows a pipeline architecture with clear separation of concerns: -### VS Code Extension -```bash -cd vscode-extension -npm run compile # Compile TypeScript -npm run watch # Watch mode for development -npm run package # Package as .vsix file -npm run publish # Publish to marketplace -npm run vscode:prepublish # Prepare for publishing ``` +CLI Args → Model Selection → Diff Retrieval → Diff Summarization + ↓ +Commit Creation ← Message Validation ← Response Processing ← Model Inference + ↑ + Prompt Generation +``` + +### Core Components + +**1. CLI Orchestration (`main.rs:1867-2028`)** +- Parses command-line arguments (doctor, model, language commands) +- Orchestrates the entire workflow +- Handles user interactions for commit confirmation + +**2. Model Management (`main.rs:1363-1718`)** +- Scans default directories for GGUF files: + - `./models` (project directory) + - `~/.cache/git-ca/models` (Linux) + - `~/.local/share/git-ca/models` (Linux alt) + - `~/Library/Application Support/git-ca/models` (macOS) +- Downloads default model (`unsloth/gemma-3-270m-it-GGUF`) from Hugging Face if none found +- Persists selection to `~/.cache/git-ca/default-model.path` or `.git-ca/default-model.path` + +**3. Diff Processing (`main.rs:414-962`)** +- **Retrieval**: `get_diff()` - uses `git diff --cached` to get staged changes +- **Analysis**: `analyze_diff_summary()` - parses diff to extract file types, scope candidates, detect patterns +- **Summarization**: `build_diff_summary()` - reduces large diffs to concise summaries with snippets +- **Variants**: `build_diff_variants()` - creates summary and raw variants for retry attempts + +**4. Prompt Engineering (`main.rs:421-488`)** +- Builds language-specific prompts (English/Chinese) +- Enforces Git Flow format: `(): ` +- Includes strict validation rules +- Stricter retry prompts on subsequent attempts + +**5. 
Model Inference (`llama.rs:48-432`)** +- **Session Management**: `LlamaSession::new()` - loads GGUF model, initializes context +- **Tokenization**: Handles prompt encoding with buffer resizing +- **Generation**: Token-by-token sampling with temperature/top-k/top-p +- **Chunked Decoding**: Processes long prompts in 256-token chunks +- **Context Management**: Clears KV cache between runs, respects 1024-token limit + +**6. Response Processing (`main.rs:546-667`)** +- Strips `` blocks if present +- Extracts commit subject line matching Git Flow pattern +- Collects body text until instruction keywords detected +- Validates against `COMMIT_TYPES` array + +**7. Fallback Generation (`main.rs:1141-1189`)** +- Deterministic commit synthesis when model fails +- Analyzes diff summary to determine: + - **Type**: feat, fix, docs, chore, etc. + - **Scope**: from file paths (src/main.rs → cli, docs files → docs, etc.) + - **Subject**: from template enum based on context +- Handles special cases: dependency updates, runtime changes, retry patterns + +**8. 
Validation (`main.rs:1190-1239`)** +- `is_valid_commit_message()` - enforces Git Flow format +- `parse_commit_subject()` - extracts type, optional scope, and subject +- English mode requires ASCII subject line +- Triggers retry loop on invalid output + +## Key Dependencies + +- `git2` — Git plumbing (staged diff, repository metadata, commit creation) +- `llama-cpp-sys-2` — FFI bindings to llama.cpp +- `hf-hub` — Optional Hugging Face download helper for the default model +- `rand` — Sampling randomness for token generation + +## Source Layout + +- `src/main.rs` — CLI entrypoint, Git integration, diff summarizer, fallback commit generator + - **Lines 1-400**: Language enum with 40+ localized methods + - **Lines 414-962**: Diff processing functions + - **Lines 421-544**: Prompt building and model interaction + - **Lines 1141-1189**: Fallback generation logic + - **Lines 1720-1865**: Unit tests + - **Lines 1867-2028**: main() and command routing + +- `src/llama.rs` — llama.cpp session wrapper + - **Lines 48-110**: `LlamaSession::new()` - model loading and context setup + - **Lines 112-229**: `infer()` - prompt processing and text generation + - **Lines 231-272**: `decode_sequence()` - chunked prompt decoding + - **Lines 274-384**: `sample_next_token()` - sampling with temperature/top-k/top-p + - **Lines 386-413**: `token_to_string()` - detokenization + - **Lines 416-432**: Drop implementation for cleanup + +## Configuration + +- `commit-analyzer.language` — Prompt language (`en`, `zh`) +- **Llama context length**: Fixed to 1024 tokens (`DEFAULT_CONTEXT_SIZE`) +- **Model persistence**: Paths stored in `~/.cache/git-ca/default-model.path` or `.git-ca/default-model.path` +- **Sampling parameters**: Temperature 0.8, Top-K 40, Top-P 0.9, Min-P 0.0 + +## Common Development Tasks + +### Add a Feature +1. **Architecture First**: Keep new logic scoped to `src/main.rs` or `src/llama.rs` until the surface area justifies extracting a module +2. 
**Unit Tests**: Add inline tests in `#[cfg(test)]` modules beside the code they cover +3. **Integration Tests**: Multi-step workflows combining Git operations + model inference should be promoted to a `tests/` directory +4. **Verify**: Run `cargo fmt && cargo clippy -- -D warnings && cargo test` +5. **Manual Testing**: Use `cargo run -- git ca` in a test repo with staged changes and document output in PR description +6. **Documentation**: Update `README*.md`, `DEPLOY.md`, and `CLAUDE.md` when behavior changes + +### Modify Model Handling +- Update `LlamaSession` in `src/llama.rs` for inference logic +- Adjust sampling parameters (lines 19-22 in `llama.rs`) +- Modify `generate_fallback_commit_message` in `src/main.rs` for different deterministic logic +- Update `DEFAULT_MODEL_REPO` if changing defaults + +### Adjust Prompts +- `build_commit_prompt()` (lines 421-488) - update language-specific instructions +- `build_diff_summary()` (lines 784-919) - change how diffs are summarized +- Add new language support via `Language` enum methods +- Update multilingual READMEs accordingly + +### Debug Model Issues +- Run `git ca doctor` to test model loading and inference +- Use `debug_model_response()` (lines 398-400) to log model output +- Check `analyze_diff()` retry logic (lines 490-544) +- Verify context size handling (lines 157-163 in `llama.rs`) + +## Testing Guidelines + +**Unit Tests** (in `#[cfg(test)]` at bottom of `main.rs`): +- `handles_extracts_subject_line` - Response parsing +- `handles_includes_body_until_instruction` - Body extraction +- `validates_git_flow_subject` - Validation logic +- `fallback_generates_for_*` - Fallback behavior +- `truncates_diff_for_prompt` - Diff summarization + +**Integration Testing**: +- No `tests/` directory currently +- Use `cargo run -- git ca` against real repositories +- Test edge cases: empty diffs, very large diffs, generated files +- Verify fallback triggers: model errors, invalid output, empty responses + +## Error Handling 
+ +- **Model Loading**: Returns descriptive errors if GGUF file missing or invalid +- **Tokenization**: Buffer resizing handles oversized prompts +- **Inference**: KV cache clearing between runs, chunked decoding with fallback +- **Validation**: Retry loop (2 attempts) before falling back to deterministic generation +- **Git Operations**: Propagates `git2::Error` with context + +## Development Commands -### Manual Installation for Testing ```bash -# CLI tool -cargo build --release -cp target/release/git-ca ~/.git-plugins/ -# Add ~/.git-plugins to PATH +# Format, lint, and test +cargo fmt +cargo clippy -- -D warnings +cargo test -# VS Code extension -cd vscode-extension && npm run package -# Install .vsix file in VS Code +# Run against staged changes +cargo run -- git ca + +# Test model loading and inference +cargo run -- git ca doctor + +# Select or download model +cargo run -- git ca model +cargo run -- git ca model pull unsloth/gemma-3-270m-it-GGUF + +# Change language +cargo run -- git ca language + +# Release build +cargo build --release ``` -## Key Components - -### Core Functions (`src/main.rs`) -- `main()`: CLI entry point at line 645 - handles argument parsing, model/language selection, and commit workflow -- `find_git_repository()`: Locates repo from current directory at line 297 -- `get_diff()`: Gets staged changes via `git diff --cached` at line 309 -- `build_commit_prompt()`: Language-specific prompt generation at line 316 -- `analyze_diff()`: AI message generation at line 404 (now supports language parameter) -- `process_ollama_response()`: Post-processes AI output at line 471 -- `select_language()`: Interactive language selection at line 576 -- `get_language()`: Gets configured language with English default at line 596 -- `select_default_model()`: Interactive model selection at line 604 -- **Key Constants**: `OLLAMA_API_BASE`, `COMMIT_TYPES`, `CONFIG_MODEL_KEY`, `CONFIG_LANGUAGE_KEY` - -### VS Code Extension (`vscode-extension/src/extension.ts`) -- 
Command registration: `gitCommitAnalyzer.generateMessage` -- Binary discovery with fallback paths -- SCM integration with buttons and context menus -- Progress indication during AI generation -- **Extension Activation**: `onStartupFinished` and command-based activation -- **UI Integration**: SCM/title and scm/resourceGroup/context menus -- **Dependencies**: VS Code API >= 1.74.0, TypeScript 4.9.5 - -### Configuration Management -- Git config integration via `git2::Config` -- Model selection stored in `commit-analyzer.model` key -- Language selection stored in `commit-analyzer.language` key (English default) -- User info auto-configured from Git settings -- Support for English and Simplified Chinese output languages -- **Git Config Keys**: `commit-analyzer.model`, `commit-analyzer.language`, `user.name`, `user.email` - -### Ollama Integration -- API base URL: `http://localhost:11434/api` -- Model listing via `/tags` endpoint -- Streaming response handling for real-time generation -- Connection validation before processing -- Enforces Git Flow commit message format: `(): ` with optional body -- Supported commit types: `feat`, `fix`, `docs`, `style`, `refactor`, `test`, `chore` -- Generates single commit message per invocation without issue numbers or footers - -## Distribution Methods -- **One-Click Installation**: `bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" - automated cross-platform installer -- **Homebrew**: `brew tap zh30/tap && brew install git-ca` -- **Manual**: Build and install to `~/.git-plugins/` -- **VS Code Extension**: Package as `.vsix` and install -- **Multi-language docs**: README files in EN, ZH, FR, ES -- **CDN Distribution**: Installation script hosted at `https://sh.zhanghe.dev/install-git-ca.sh` - -## Usage Patterns -- Primary command: `git ca` (after installation) -- Model management: `git ca model` -- Language selection: `git ca language` (English/Chinese) -- Version check: `git ca --version` -- VS Code: Use wand icon in 
SCM panel or context menu - -## Testing Workflow -1. Stage changes with `git add` -2. Run `./target/release/git-ca` or `cargo run` -3. Interactive prompt allows using, editing, or canceling -4. VS Code: Click generate button and approve in input box - -## Installation Script Details -The `install-git-ca.sh` script provides automated cross-platform installation: -- **OS Detection**: Automatically identifies macOS, Debian/Ubuntu, Fedora/CentOS, Arch, openSUSE -- **Dependency Management**: Installs Git, Rust, and configures Ollama -- **Environment Setup**: Configures PATH and shell integration -- **Interactive Configuration**: Guides users through Git and Ollama setup -- **Error Recovery**: Provides fallbacks and troubleshooting guidance -- **CDN Hosted**: Available at `https://sh.zhanghe.dev/install-git-ca.sh` for one-click installation +## Distribution -## Error Handling -- Ollama connection validation before processing -- Git repository detection -- Staged changes validation -- Model selection fallback -- Language selection with English default -- Custom error types with unified handling (AppError enum) -- Binary path discovery for VS Code extension -- **Error Types**: `AppError` enum with `GitError`, `NetworkError`, `ConfigError`, `Custom` variants -- **Installation Script Robustness**: Cross-platform OS detection, dependency auto-installation, interactive fallbacks - -# important-instruction-reminders -Do what has been asked; nothing more, nothing less. -NEVER create files unless they're absolutely necessary for achieving your goal. -ALWAYS prefer editing an existing file to creating a new one. -NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User. 
\ No newline at end of file +- Release binaries: `cargo build --release` produces optimized binary +- **Homebrew**: Formula at `git-ca.rb` with version and SHA256 +- **Installer**: `install-git-ca.sh` for automated setup +- Documentation: `README.md`, `README_ZH.md`, `README_FR.md`, `README_ES.md` +- Keep `README.md` / `DEPLOY.md` / `INSTALL.md` in sync with code changes + +## Critical Implementation Details + +**Diff Summarization Strategy** (`build_diff_summary`): +1. Identifies generated/large files (lockfiles, minified JS/CSS) +2. Extracts file metadata: additions, deletions, file type +3. Includes code snippets up to 120 lines or 1200 characters per file +4. Truncates when approaching context limit (3× context - 512 chars) +5. Marks omitted content with notices + +**Model Sampling** (`sample_next_token`): +1. Retrieves logits from llama.cpp +2. Applies temperature scaling +3. Filters to top-K candidates +4. Applies top-p (nucleus) filtering +5. Samples using weighted random selection +6. 
Prevents EOS tokens until meaningful text generated + +**Context Management**: +- Fixed 1024-token context window +- Prompts truncated if exceeding `n_ctx - 32` +- Raw diff tail used as fallback variant +- KV cache cleared between inferences + +## Architecture Decisions + +- **Single Binary**: No web service or extension - keeps deployment simple +- **Local Inference**: Privacy and offline capability using llama.cpp directly (via `llama-cpp-sys-2`) +- **Manual Args Parsing**: Avoids `clap` dependency bloat +- **Inline Tests**: Co-located with code for easy maintenance; integration flows go to `tests/` +- **Deterministic Fallback**: Ensures commits succeed even when model fails +- **No Async**: Simple synchronous execution pattern +- **Minimal Modules**: Resist premature abstraction - keep logic in `main.rs`/`llama.rs` until justified +- **Generated Assets**: Keep under `target/` or other ignored directories - never in `src/` or `tests/` + +## Performance Considerations + +- **Chunked Prompt Decoding**: Handles long prompts without memory spikes +- **KV Cache Clearing**: Prevents memory buildup across runs +- **Diff Truncation**: Reduces context size for faster inference +- **Sampling Parameters**: Tuned for creativity while maintaining coherence +- **Thread Auto-Detection**: Uses available parallelism from system + +## Security Notes + +- No remote API calls (except optional Hugging Face model download) +- No credential storage beyond Git config +- Validates model files (checks `.gguf` extension) +- Sanitizes file paths and model paths +- No code execution from model output + +## Coding Conventions + +### Naming +- Functions/files: `snake_case` +- Types/enums: `CamelCase` +- Constants: `SCREAMING_SNAKE_CASE` (e.g., `COMMIT_TYPES`) + +### Error Handling +- Prefer error propagation with `?` operator +- Return `AppError::Custom` only when you need user-facing messages +- Comments should explain non-obvious Git plumbing or llama-specific constraints + +### Formatting 
+- Rustfmt defaults: 4-space indent, 100-column width +- Run `cargo fmt` before committing + +## Reminders + +- Run `cargo fmt`, `cargo clippy -- -D warnings`, and `cargo test` before committing +- Test both model generation and fallback paths (stage deps-only diffs, runtime changes) +- Update `README*.md`, `DEPLOY.md`, and `CLAUDE.md` when behavior changes +- Document manual `git ca` verification steps in PR descriptions diff --git a/Cargo.lock b/Cargo.lock index ec76dab..360ba74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,6 @@ # It is not intended for manual editing. version = 4 -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" version = "2.0.1" @@ -18,37 +9,51 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] -name = "backtrace" -version = "0.3.75" +name = "aho-corasick" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", + "memchr", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "base64" -version = "0.21.7" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" 
[[package]] -name = "bitflags" -version = "1.3.2" +name = "bindgen" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", +] [[package]] name = "bitflags" -version = "2.9.1" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "bumpalo" @@ -56,6 +61,12 @@ version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.10.1" @@ -64,20 +75,63 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.30" +version = "1.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" +checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "cmake" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + +[[package]] +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "unicode-width", + "windows-sys 0.59.0", +] [[package]] name = "core-foundation" @@ -95,6 +149,36 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.61.2", +] + [[package]] name = "displaydoc" 
version = "0.2.5" @@ -106,6 +190,18 @@ dependencies = [ "syn", ] +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "encoding_rs" version = "0.8.35" @@ -123,12 +219,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -137,6 +233,31 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "find-msvc-tools" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" + +[[package]] +name = "find_cuda_helper" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9f9e65c593dd01ac77daad909ea4ad17f0d6d1776193fc8ea766356177abdad" +dependencies = [ + "glob", +] + +[[package]] +name = "flate2" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" @@ -160,9 +281,9 @@ checksum = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -188,6 +309,17 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "futures-sink" version = "0.3.31" @@ -208,6 +340,8 @@ checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-core", "futures-io", + "futures-macro", + "futures-sink", "futures-task", "memchr", "pin-project-lite", @@ -217,29 +351,35 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.3" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] -name = "gimli" -version = "0.31.1" +name = "getrandom" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.7+wasi-0.2.4", +] [[package]] name = "git-ca" 
-version = "1.0.3" +version = "1.1.2" dependencies = [ "git2", - "reqwest", - "serde_json", + "hf-hub", + "llama-cpp-sys-2", + "rand", ] [[package]] @@ -248,7 +388,7 @@ version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70" dependencies = [ - "bitflags 2.9.1", + "bitflags", "libc", "libgit2-sys", "log", @@ -257,17 +397,23 @@ dependencies = [ "url", ] +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + [[package]] name = "h2" -version = "0.3.27" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http", "indexmap", "slab", @@ -278,15 +424,36 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "hf-hub" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "629d8f3bbeda9d148036d6b0de0a3ab947abd08ce90626327fc3547a49d59d97" +dependencies = [ + "dirs", + "http", + "indicatif", + "libc", + "log", + "native-tls", + "rand", + "reqwest", + "serde", + "serde_json", + "thiserror", + "ureq", + "windows-sys 0.60.2", +] [[package]] name = "http" -version = "0.2.12" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" 
+checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -295,12 +462,24 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.6" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", + "futures-core", "http", + "http-body", "pin-project-lite", ] @@ -310,47 +489,84 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - [[package]] name = "hyper" -version = "0.14.32" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ + "atomic-waker", "bytes", "futures-channel", "futures-core", - "futures-util", "h2", "http", "http-body", "httparse", - "httpdate", "itoa", "pin-project-lite", - "socket2", + "pin-utils", + "smallvec", "tokio", - "tower-service", - "tracing", "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + 
"rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", + "http-body-util", "hyper", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "system-configuration", + "tokio", + "tower-service", + "tracing", + "windows-registry", ] [[package]] @@ -441,9 +657,9 @@ dependencies = [ [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -462,23 +678,25 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", "hashbrown", ] [[package]] -name = "io-uring" -version = "0.7.8" +name = "indicatif" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" dependencies = [ - "bitflags 2.9.1", - "cfg-if", - "libc", + "console", + "number_prefix", + "portable-atomic", + "unicode-width", + "web-time", ] [[package]] @@ -487,6 +705,25 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" @@ -495,19 +732,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom", + "getrandom 0.3.3", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -515,9 +752,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.174" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libgit2-sys" @@ -533,6 +770,26 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.1", +] + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags", + "libc", +] + [[package]] name = "libssh2-sys" version = "0.3.1" @@ -561,9 +818,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" @@ -571,17 +828,31 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +[[package]] +name = "llama-cpp-sys-2" +version = "0.1.122" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e09bdf53b6f486ecaeb96b08cd8a9d9df162f2aafa37efb5b40cf421a419c755" +dependencies = [ + "bindgen", + "cc", + "cmake", + "find_cuda_helper", + "glob", + "walkdir", +] + [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "memchr" -version = "2.7.5" +version = "2.7.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "mime" @@ -589,6 +860,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -596,6 +873,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] @@ -627,14 +905,21 @@ dependencies = [ ] [[package]] -name = "object" -version = "0.36.7" +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", + "minimal-lexical", ] +[[package]] +name = "number_prefix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + [[package]] name = "once_cell" version = "1.21.3" @@ -647,7 +932,7 @@ version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ - "bitflags 2.9.1", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -685,11 +970,17 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pin-project-lite" @@ -709,29 +1000,54 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.40" +version = 
"1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] @@ -743,78 +1059,190 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] -name = "reqwest" -version = "0.11.27" +name = "rand" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "base64", - "bytes", - "encoding_rs", - "futures-core", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "redox_users" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "reqwest" +version = "0.12.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", "futures-util", "h2", "http", "http-body", + "http-body-util", "hyper", + "hyper-rustls", "hyper-tls", - "ipnet", + "hyper-util", "js-sys", "log", "mime", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", - "system-configuration", "tokio", "tokio-native-tls", + "tokio-util", + "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", - "winreg", ] [[package]] -name = "rustc-demangle" -version = "0.1.25" +name = "ring" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.1", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] -name = "rustls-pemfile" -version = "1.0.4" +name = "rustls" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ - "base64", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" @@ -822,6 +1250,15 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] 
name = "schannel" version = "0.1.27" @@ -837,7 +1274,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation", "core-foundation-sys", "libc", @@ -856,18 +1293,28 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -876,14 +1323,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.141" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] @@ -904,11 +1352,17 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "slab" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -918,25 +1372,42 @@ checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" -version = "0.5.10" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", +] + +[[package]] +name = "socks" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3dbbd9ae980613c6dd8e28a9407b50509d3803b57624d5dfe8315218cd58b" +dependencies = [ + "byteorder", + "libc", + "winapi", ] [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.104" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", 
"quote", @@ -945,9 +1416,12 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -962,20 +1436,20 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 1.3.2", + "bitflags", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", @@ -983,15 +1457,35 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.20.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", - "getrandom", + "getrandom 0.3.3", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1006,19 +1500,16 @@ dependencies = [ [[package]] name = "tokio" -version = "1.46.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", - "io-uring", "libc", "mio", "pin-project-lite", - "slab", "socket2", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -1031,11 +1522,21 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -1044,6 +1545,45 @@ dependencies = [ "tokio", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + 
"http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -1077,19 +1617,52 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" + +[[package]] +name = "unicode-width" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "ureq" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +dependencies = [ + "base64", + "flate2", + "log", + "native-tls", + "once_cell", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "socks", + "url", + "webpki-roots 0.26.11", +] [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] @@ -1104,6 +1677,16 @@ version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -1121,30 +1704,40 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.14.7+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" +dependencies = [ + "wasip2", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" dependencies = [ "bumpalo", "log", @@ -1156,9 +1749,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = 
"0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -1169,9 +1762,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1179,9 +1772,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", @@ -1192,30 +1785,134 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = 
"9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] -name = "windows-sys" -version = "0.48.0" +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.3", +] + +[[package]] +name = "webpki-roots" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-registry" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ - "windows-targets 0.48.5", + "windows-link 0.1.3", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link 0.1.3", ] [[package]] @@ -1242,22 +1939,16 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.2", + "windows-targets 0.53.5", ] [[package]] -name = "windows-targets" -version = "0.48.5" +name = "windows-sys" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 
0.48.5", + "windows-link 0.2.1", ] [[package]] @@ -1278,26 +1969,21 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.2" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link 0.2.1", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -1306,15 +1992,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -1324,15 +2004,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" 
[[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -1342,9 +2016,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -1354,15 +2028,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -1372,15 +2040,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - -[[package]] -name = "windows_x86_64_gnu" 
-version = "0.48.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -1390,15 +2052,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -1408,15 +2064,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -1426,28 +2076,15 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = 
"d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] -name = "winreg" -version = "0.50.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - -[[package]] -name = "wit-bindgen-rt" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.1", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" @@ -1479,6 +2116,26 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zerocopy" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "zerofrom" version = "0.1.6" @@ -1500,6 +2157,12 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + [[package]] name = "zerotrie" version = "0.2.2" @@ -1513,9 +2176,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", 
diff --git a/Cargo.toml b/Cargo.toml index ab4b076..025917d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "git-ca" -version = "1.0.3" +version = "1.1.2" description = "AI-powered Git plugin for generating meaningful commit messages" authors = ["Henry Zhang "] license = "MIT" @@ -9,5 +9,8 @@ edition = "2021" [dependencies] git2 = "0.18" -reqwest = { version = "0.11", features = ["blocking", "json"] } -serde_json = "1.0" \ No newline at end of file +llama-cpp-sys-2 = "0.1" +hf-hub = { version = "0.4.3", default-features = false, features = ["ureq", "native-tls"] } +rand = "0.9" + +# Test comment diff --git a/DEPLOY.md b/DEPLOY.md index c19841d..dd2b0d7 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -1,182 +1,284 @@ -# Git Commit Analyzer 安装脚本部署指南 +# Deploy & Release Guide -## 部署步骤 +This document outlines the complete release process for Git Commit Analyzer, including multi-platform binary builds and Homebrew bottle distribution. -### 1. 上传安装脚本 +## Multi-Platform Binary Releases -将 `install-git-ca.sh` 文件上传到你的 CDN 服务器或静态文件托管服务。 +Git Commit Analyzer now supports **multi-platform pre-built binaries** via GitHub Actions, enabling fast Homebrew installation without source compilation. -#### 支持的托管服务: -- **GitHub Raw**: `https://raw.githubusercontent.com/zh30/git-commit-analyzer/main/install-git-ca.sh` -- **GitHub Pages**: `https://zh30.github.io/git-commit-analyzer/install-git-ca.sh` -- **CDN 服务**: Cloudflare, AWS CloudFront, 阿里云 CDN 等 -- **对象存储**: AWS S3, 腾讯云 COS, 阿里云 OSS 等 +### Supported Platforms +- **macOS**: Apple Silicon (arm64), Intel (x86_64) +- **Linux**: Temporarily disabled due to compilation issues +- **Windows**: Builds available via GitHub Releases (not distributed via Homebrew) -### 2. 更新 README 文件中的 URL +## 1. Pre-release Checklist -将所有 README 文件中的 `https://cdn.example.com/install-git-ca.sh` 替换为你的实际 URL: +Before creating a release: -```bash -# 在项目根目录执行 -find . 
-name "README*.md" -exec sed -i '' 's|https://cdn.example.com/install-git-ca.sh|https://sh.zhanghe.dev/install-git-ca.sh|g' {} \; -``` +- [ ] Update version in `Cargo.toml` +- [ ] Run `cargo fmt`, `cargo clippy -- -D warnings`, `cargo test` +- [ ] Smoke test `cargo run -- git ca` against staged changes +- [ ] Review and update `README*.md`, `INSTALL.md`, `AGENTS.md`, `CLAUDE.md` +- [ ] Update `CHANGELOG.md` or include release notes in PR -### 3. 测试安装脚本 +## 2. Automated Release Workflow -在测试环境中验证安装脚本是否正常工作: +### Triggering the Build -```bash -# 测试安装脚本 -bash -c "$(curl -fsSL https://your-actual-url.com/install-git-ca.sh)" -``` - -## 推荐的部署方式 - -### 方式一:GitHub Raw(免费) +Push a version tag to automatically build and release: ```bash -# 直接使用 GitHub Raw URL -bash -c "$(curl -fsSL https://raw.githubusercontent.com/zh30/git-commit-analyzer/main/install-git-ca.sh)" -``` +# Update version +vim Cargo.toml -**优点**: -- 免费 -- 自动与仓库同步 -- 无需额外配置 +# Commit changes +git commit -m "chore: bump version to v1.1.2" +git push origin main -**缺点**: -- 在某些地区可能访问较慢 -- 有速率限制 +# Create and push tag +git tag v1.1.2 +git push origin v1.1.2 +``` -### 方式二:GitHub Pages(免费) +### GitHub Actions Workflows -1. 创建 `gh-pages` 分支或使用 `docs/` 目录 -2. 将 `install-git-ca.sh` 放入相应位置 -3. 启用 GitHub Pages +#### Build Binaries (`.github/workflows/build-binaries.yml`) -```bash -# 访问 URL -https://username.github.io/git-commit-analyzer/install-git-ca.sh -``` +Triggered on: +- Push to `main` branch (for testing) +- Push of version tags `v*.*.*` (for release) -### 方式三:CDN 加速(推荐) +**Build Matrix:** +- macOS 13 (Intel x86_64) +- macOS 14 (Apple Silicon ARM64) +- **Note**: Linux and Windows builds can be enabled if needed (see `.github/workflows/build-binaries.yml`) -使用 CDN 服务加速 GitHub Raw 内容: +**Process:** +1. Checks out repository +2. Installs Rust toolchain and platform-specific dependencies +3. Builds release binary for target platform +4. Strips binaries (macOS/Linux) to reduce size +5. 
Creates compressed archives: + - `.tar.gz` for macOS/Linux + - `.zip` for Windows +6. Uploads artifacts to GitHub Actions -```bash -# 使用 jsDelivr CDN -https://cdn.jsdelivr.net/gh/zh30/git-commit-analyzer@latest/install-git-ca.sh +#### Release & Homebrew Update (`.github/workflows/release.yml`) -# 使用 UNPKG -https://unpkg.com/browse/git-commit-analyzer@latest/install-git-ca.sh -``` +Triggered on version tags only. -## 安全考虑 +**Process:** +1. Creates GitHub Release with: + - Auto-generated changelog from commit history + - Download links for all platforms + - Installation instructions +2. Downloads all release assets +3. Calculates SHA256 checksums for each platform +4. Updates `git-ca.rb` Homebrew formula with: + - Version number + - Bottle checksums for all platforms +5. Pushes updated formula to `homebrew-tap` repository -### 1. 脚本签名(可选) +## 3. Manual Release (Alternative) -为了增加安全性,可以考虑对脚本进行签名: +If automated workflow fails: ```bash -# 生成签名 -gpg --detach-sign --armor install-git-ca.sh - -# 用户验证 -curl -fsSL https://your-url.com/install-git-ca.sh | gpg --verify +# 1. 
Build for each platform manually +rustup target add x86_64-apple-darwin aarch64-apple-darwin x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu + +# macOS ARM64 +cargo build --release --target aarch64-apple-darwin +cd target/aarch64-apple-darwin/release && tar czf ../../../../git-ca-apple-darwin-arm64.tar.gz git-ca && cd ../../../../ + +# macOS Intel +cargo build --release --target x86_64-apple-darwin +cd target/x86_64-apple-darwin/release && tar czf ../../../../git-ca-apple-darwin-x86_64.tar.gz git-ca && cd ../../../../ + +# Linux x86_64 +cargo build --release --target x86_64-unknown-linux-gnu +cd target/x86_64-unknown-linux-gnu/release && tar czf ../../../../git-ca-unknown-linux-gnu-x86_64.tar.gz git-ca && cd ../../../../ + +# Linux ARM64 +cargo build --release --target aarch64-unknown-linux-gnu +cd target/aarch64-unknown-linux-gnu/release && tar czf ../../../../git-ca-unknown-linux-gnu-arm64.tar.gz git-ca && cd ../../../../ + +# Windows (requires PowerShell) +# ... (or use cross compilation with mingw) + +# 2. Calculate checksums +shasum -a 256 git-ca-*.tar.gz > checksums.txt + +# 3. Create GitHub release +gh release create v1.1.2 \ + --title "git-ca v1.1.2" \ + --notes-file CHANGELOG.md \ + git-ca-apple-darwin-arm64.tar.gz \ + git-ca-apple-darwin-x86_64.tar.gz \ + git-ca-unknown-linux-gnu-x86_64.tar.gz \ + git-ca-unknown-linux-gnu-arm64.tar.gz \ + checksums.txt + +# 4. Update Homebrew formula manually +vim git-ca.rb +# Update version and bottle checksums + +# 5. Update homebrew-tap +git clone https://github.com/zh30/homebrew-tap.git +cp git-ca.rb homebrew-tap/ +cd homebrew-tap +git commit -m "chore: update git-ca to v1.1.2" +git push ``` -### 2. 版本控制 +## 4. Homebrew Formula Update + +The `git-ca.rb` formula automatically receives updates via GitHub Actions. 
+ +### Formula Structure + +```ruby +class GitCa < Formula + desc "AI-powered Git plugin for generating meaningful commit messages" + homepage "https://github.com/zh30/git-commit-analyzer" + url "https://github.com/zh30/git-commit-analyzer/archive/refs/tags/v1.1.2.tar.gz" + sha256 "SOURCE_TARBALL_SHA256" + license "MIT" + + # Bottle definitions - auto-updated by GitHub Actions + bottle do + root_url "https://github.com/zh30/git-commit-analyzer/releases/download/v1.1.2" + sha256 cellar: :any_skip_relocate, arm64_sequoia: "ARM64_MACOS_SHA256" + sha256 cellar: :any_skip_relocate, x86_64_sequoia: "X86_64_MACOS_SHA256" + sha256 cellar: :any_skip_relocate, arm64_linux: "ARM64_LINUX_SHA256" + sha256 cellar: :any_skip_relocate, x86_64_linux: "X86_64_LINUX_SHA256" + end + + def install + bin.install "git-ca" + end + + def caveats + # Updated messaging about llama.cpp + end + + test do + assert_match version.to_s, shell_output("#{bin}/git-ca --version") + end +end +``` -建议在 URL 中包含版本信息: +### Required Secrets -```bash -# 包含版本号 -https://sh.zhanghe.dev/install-git-ca-v1.0.3.sh +Configure these secrets in GitHub repository settings: -# 使用 latest 标签 -https://sh.zhanghe.dev/install-git-ca-latest.sh -``` +- `TARGET_REPO_PAT`: Personal access token for pushing to `homebrew-tap` repository + - Required permissions: `repo` (full control) + - Alternative: Use GitHub App with repository access -### 3. 访问统计 +## 5. Installer Script -如果需要统计安装次数,可以使用重定向服务: +`install-git-ca.sh` remains available but now serves as an alternative to Homebrew. -```bash -# 使用短链接服务 -https://git-ca.install/install -https://bit.ly/git-ca-install -``` +**Updates for multi-platform:** +- Detect OS and architecture +- Download appropriate binary from GitHub releases +- Extract and install to `/usr/local/bin` +- Set executable permissions -## 监控和维护 +## 6. Model Distribution Notes -### 1. 
访问日志监控 +No changes to model distribution - the CLI still: +- Defaults to downloading `unsloth/gemma-3-270m-it-GGUF` from Hugging Face +- Supports local GGUF files in `./models` or `~/.cache/git-ca/models` +- Uses llama.cpp (via `llama-cpp-sys-2`) for local inference -监控安装脚本的下载次数: +## 7. Post-release Verification -```bash -# nginx 访问日志 -tail -f /var/log/nginx/access.log | grep install-git-ca.sh +After release completes: -# AWS CloudFront 监控 -aws cloudwatch get-metric-statistics --namespace AWS/CloudFront --metric-name Requests --dimensions Name=DistributionId,Value=YOUR_DISTRIBUTION_ID --start-time 2024-01-01T00:00:00Z --end-time 2024-01-02T00:00:00Z --period 86400 --statistics Sum -``` +### GitHub Release +- [ ] Verify all 6 platforms built successfully +- [ ] Check download links work for each platform +- [ ] Validate checksums.txt contains all checksums +- [ ] Test release notes render correctly -### 2. 定期更新 +### Homebrew +- [ ] Verify `homebrew-tap` repository updated with new formula +- [ ] Test installation on macOS (both ARM64 and x86_64): + ```bash + brew tap zh30/tap + brew install git-ca + git ca --version + ``` +- [ ] Confirm bottle is used (no source compilation) -定期检查和更新安装脚本: +### Manual Installation +- [ ] Download and test binary for each platform +- [ ] Verify executable permissions +- [ ] Test basic functionality -- 依赖包版本更新 -- 新的操作系统支持 -- 安全漏洞修复 -- 功能改进 +### Model Functionality +- [ ] Run `git ca model` to test model selection +- [ ] Test with a real repository: + ```bash + cd /tmp/test-repo + git init + echo "test" > test.txt + git add . + git ca # Should generate a commit message + ``` -### 3. 回滚策略 +## 8. Rollback Procedure -准备回滚方案: +If a release fails: -```bash -# 保留多个版本的安装脚本 -install-git-ca-v1.0.0.sh -install-git-ca-v1.0.1.sh -install-git-ca-latest.sh - -# 使用符号链接切换版本 -ln -sf install-git-ca-v1.0.1.sh install-git-ca-latest.sh -``` +1. **GitHub Release**: Delete the release and tag +2. 
**Homebrew**: Rollback to previous version in `homebrew-tap` +3. **Documentation**: Restore previous README/INSTALL versions -## 故障排除 +## 9. Communication -### 常见问题 +Announce the release with: +- GitHub Release notes +- Updated installation instructions in README.md +- Social media/blog post (optional) -1. **CORS 错误** - - 确保 CDN 服务器配置了正确的 CORS 头 - - 检查 `Access-Control-Allow-Origin` 设置 +Include: +- Platform support matrix +- Installation commands +- Link to changelog +- Any migration notes -2. **SSL 证书问题** - - 确保使用 HTTPS - - 检查证书是否有效 +## 10. Troubleshooting -3. **脚本执行权限** - - 确保脚本有执行权限 - - 检查文件权限设置 +### Build Failures +```bash +# Check Rust targets +rustup target list --installed -4. **网络连接问题** - - 提供备用下载链接 - - 考虑使用多个 CDN 源 +# Verify dependencies +cargo tree --depth 1 -### 调试方法 +# Clean rebuild +cargo clean +cargo build --release +``` +### Homebrew Issues ```bash -# 测试脚本下载 -curl -I https://your-url.com/install-git-ca.sh +# Force source install for debugging +HOMEBREW_NO_INSTALL_FROM_API=1 brew install --build-from-source zh30/tap/git-ca -# 检查脚本内容 -curl -fsSL https://your-url.com/install-git-ca.sh | head -10 +# Verbose output +brew install -v zh30/tap/git-ca -# 验证脚本语法 -bash -n <(curl -fsSL https://your-url.com/install-git-ca.sh) +# Audit formula +brew audit --strict zh30/tap/git-ca ``` -## 总结 - -一键安装脚本大大提升了用户体验,将复杂的多步骤安装过程简化为单行命令。选择合适的部署方式并做好监控维护,能够确保用户获得最佳的安装体验。 \ No newline at end of file +### Release Workflow Issues +- Check GitHub Actions logs +- Verify `TARGET_REPO_PAT` secret is valid +- Ensure `homebrew-tap` repository exists and is accessible +- Confirm `git-ca.rb` syntax is valid Ruby diff --git a/HOMEBREW.md b/HOMEBREW.md index fcd60b7..a21978f 100644 --- a/HOMEBREW.md +++ b/HOMEBREW.md @@ -1,66 +1,156 @@ -# 发布 git-ca 到 Homebrew +# Homebrew 发布指南 -本文档描述了如何将 git-ca 发布到 Homebrew 的步骤。 +本文档记录了将 `git-ca` 发布到 Homebrew tap 的完整流程,支持多平台预构建二进制包(bottles)。 -## 创建发布 +## 多平台预构建二进制包(推荐) -1. 
确保代码已经准备好发布,包括: - - 所有功能测试通过 - - 版本号已更新 (在 Cargo.toml 中) - - CHANGELOG 已更新 +我们的 Homebrew formula 支持预构建的二进制包(bottles),用户无需从源码构建。 -2. 在 GitHub 上创建一个新的发布版本(Release): - - 标签应该是 `v1.0.0` 格式 - - 发布标题应该是 "git-ca v1.0.0" - - 在描述中包含此版本的更新内容 +### 支持的平台 +- **macOS**: Apple Silicon (arm64) 和 Intel (x86_64) -3. 上传生成的 tar.gz 文件,或者让 GitHub 自动创建。 +### Linux 和 Windows 支持 +- **Linux**: 暂时禁用,由于编译问题 +- **Windows**: 平台的二进制包会通过 GitHub Releases 发布,但不在 Homebrew 中分发 -4. 计算发布压缩包的 SHA256 校验值: - ``` - curl -L https://github.com/zh30/git-commit-analyzer/archive/refs/tags/v1.0.0.tar.gz | shasum -a 256 - ``` +如需 Linux 或 Windows 版本,请直接从 [Releases](https://github.com/zh30/git-commit-analyzer/releases) 页面下载,或参考 `.github/workflows/build-binaries.yml` 启用构建。 -5. 复制得到的校验值,并更新 `git-ca.rb` 文件中的 `sha256` 值。 +### 发布流程 -## 提交到 Homebrew +#### 自动发布(推荐) -### 选项 1: 提交到 Homebrew Core +发布流程通过 GitHub Actions 自动化完成: -如果你想将 git-ca 作为官方的 Homebrew 包,请按照以下步骤操作: +1. **触发构建**: + - 推送版本标签 `v*.*.*` 到 `main` 分支 + - GitHub Actions 会自动触发 `build-binaries.yml` 工作流 -1. Fork [Homebrew Core 仓库](https://github.com/Homebrew/homebrew-core) -2. 将更新后的 `git-ca.rb` 文件保存到 `Formula/g/git-ca.rb` -3. 提交一个 Pull Request +2. **构建阶段**: + - 在 macOS 上构建二进制包: + - macOS 13 (x86_64) + - macOS 14 (ARM64) + - 构建完成后自动上传二进制包到 GitHub Release + - **注意**:Linux 和 Windows 构建已禁用,如需启用请参考 `.github/workflows/build-binaries.yml` -### 选项 2: 创建自己的 Tap +3. **更新 Homebrew**: + - `release.yml` 工作流自动: + - 下载所有平台的二进制包 + - 计算 SHA256 校验和 + - 更新 `git-ca.rb` 公式中的 bottle 校验和 + - 推送到 `homebrew-tap` 仓库 -如果你想通过自己的 Tap 分发,这是更简单的方法: +4. **手动触发**(如需要): + ```bash + # 更新版本号 + vim Cargo.toml -1. 创建一个新的仓库,命名为 `homebrew-tap` -2. 将 `git-ca.rb` 文件添加到这个仓库 -3. 用户可以通过以下命令安装: - ``` - brew tap zh30/tap - brew install git-ca + # 提交并推送 + git commit -m "chore: bump version" + git push origin main + + # 创建并推送标签 + git tag v1.1.2 + git push origin v1.1.2 ``` -## 更新现有公式 +#### 验证发布 -当发布新版本时: +在创建 PR 或推送标签前,验证 Homebrew 公式: -1. 更新 `url` 指向新版本 -2. 更新 `sha256` 值 -3. 
提交更新后的公式 +```bash +# 本地验证 +brew install --build-from-source ./git-ca.rb +brew test git-ca +brew audit --strict ./git-ca.rb + +# 验证 bottle 安装 +brew uninstall git-ca +brew install zh30/tap/git-ca +git ca --version +``` -## 测试公式 +### Homebrew Formula 结构 -在提交前进行测试: +`git-ca.rb` 现在包含: +```ruby +class GitCa < Formula + # ... 元数据 ... + + # Bottle 支持 - 预构建二进制包 + bottle do + root_url "https://github.com/zh30/git-commit-analyzer/releases/download/v#{version}" + sha256 cellar: :any_skip_relocate, arm64_sequoia: "SHA256_ARM64_MACOS" + sha256 cellar: :any_skip_relocate, x86_64_sequoia: "SHA256_X86_64_MACOS" + # 注意:Linux 构建禁用,Windows 构建通过 GitHub Releases 提供,但不在 Homebrew 中分发 + end + + # 安装时直接使用预构建二进制 + def install + bin.install "git-ca" + end +end ``` -brew install --build-from-source ./git-ca.rb -brew test git-ca -brew audit --strict git-ca + +### 用户安装 + +用户现在可以通过以下方式安装: + +```bash +# 添加 tap +brew tap zh30/tap + +# 安装(自动使用 bottle,无须从源码构建) +brew install git-ca + +# 验证安装 +git ca --version ``` -确保所有测试都通过,然后才能提交到 Homebrew。 \ No newline at end of file +## 故障排除 + +### 常见问题 + +1. **bottle 校验和不匹配**: + - 检查二进制包是否正确构建 + - 重新计算 SHA256 校验和 + - 确保所有平台都已构建 + +2. **构建失败**: + - 检查 `.github/workflows/build-binaries.yml` 中的依赖安装 + - 确认 Rust 工具链版本 + - 查看 GitHub Actions 日志 + +3. **Homebrew 安装慢**: + - 检查 bottle URL 是否可访问 + - 确认 GitHub Release 已创建 + - 验证 `git-ca.rb` 中的 `root_url` + +### 调试步骤 + +```bash +# 检查 bottle 是否可用 +brew fetch --bottle-tag=arm64_sequoia zh30/tap/git-ca + +# 强制从源码安装(用于调试) +HOMEBREW_NO_INSTALL_FROM_API=1 brew install --build-from-source zh30/tap/git-ca + +# 查看详细安装日志 +brew install -v zh30/tap/git-ca +``` + +## 最佳实践 + +1. **版本管理**: + - 始终在 `Cargo.toml` 和 `git-ca.rb` 中保持版本一致 + - 使用语义化版本号 (semver) + +2. **测试**: + - 在不同平台上测试 bottle + - 运行完整的 CI/CD 流程 + - 验证用户安装体验 + +3. 
**文档**: + - 更新 README.md 中的安装说明 + - 保持 HOMEBREW.md 和 DEPLOY.md 最新 + - 记录所有依赖变更 diff --git a/INSTALL.md b/INSTALL.md index 5d90db3..a69b285 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,166 +1,96 @@ -# Git Commit Analyzer 一键安装指南 +# Installation Guide -## 快速安装 +Git Commit Analyzer ships as a single Rust binary (`git-ca`) that integrates with Git as an external command. Choose the installation path that best fits your environment. -### 方法一:使用网络安装脚本(推荐) +## 1. Requirements +- Git 2.30 or later +- Rust toolchain (stable channel) with `cargo` +- Build prerequisites for llama.cpp (`cmake`, `make`, C/C++ compiler, GPU drivers as needed) +- A local GGUF model (the CLI can download `unsloth/gemma-3-270m-it-GGUF` automatically) -将 `install-git-ca.sh` 上传到你的 CDN 服务器,然后用户可以使用以下命令安装: +## 2. Manual Installation ```bash -bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +git clone https://github.com/zh30/git-commit-analyzer.git +cd git-commit-analyzer +cargo build --release +mkdir -p ~/.git-plugins +cp target/release/git-ca ~/.git-plugins/ +echo 'export PATH="$HOME/.git-plugins:$PATH"' >> ~/.bashrc # adapt to your shell +source ~/.bashrc ``` -### 方法二:直接下载运行 +### Windows Notes +1. `cargo build --release` +2. Copy `target\release\git-ca.exe` to `%USERPROFILE%\.git-plugins\` +3. Add `%USERPROFILE%\.git-plugins` to the user PATH via *System Properties → Environment Variables* + +## 3. Homebrew (macOS / Linux) ```bash -curl -fsSL https://your-cdn-url.com/install-git-ca.sh -o install-git-ca.sh -chmod +x install-git-ca.sh -./install-git-ca.sh +brew tap zh30/tap +brew install git-ca ``` -## 系统要求 - -- **操作系统**: macOS, Linux (Debian/Ubuntu, Fedora/CentOS, Arch, openSUSE) -- **依赖**: Git, Rust, Ollama -- **内存**: 至少 1GB 可用内存 -- **网络**: 需要网络连接下载依赖和项目代码 - -## 安装过程 +## 4. Bootstrap Script (Optional) -脚本会自动执行以下步骤: - -1. **系统检测**: 自动识别操作系统类型 -2. **依赖安装**: - - macOS: 使用 Homebrew 安装 Git 和 Rust - - Linux: 使用系统包管理器安装依赖 -3. **Ollama 配置**: 检查并配置 Ollama 环境 -4. **项目构建**: 下载源码并编译发布版本 -5. 
**环境设置**: 配置 PATH 环境变量 -6. **Git 配置**: 设置用户信息(如需要) -7. **验证安装**: 确保所有组件正常工作 - -## 使用方法 - -安装完成后,在任意 Git 仓库中: +The repository includes `install-git-ca.sh`, which: +- Detects the platform +- Installs Git/Rust if missing +- Builds the release binary +- Adds `~/.git-plugins` to PATH ```bash -# 1. 添加文件到暂存区 -git add . - -# 2. 生成提交信息 -git ca - -# 3. 根据提示选择使用、编辑或取消提交信息 +bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" ``` -## 配置选项 - -```bash -# 选择默认 Ollama 模型 -git ca model +Read the script before executing and ensure you are comfortable with the actions it performs. -# 设置输出语言(英文/中文) -git ca language +## 5. First Run -# 查看版本 -git ca --version +```bash +git add +git ca ``` -## 故障排除 - -### Ollama 相关问题 - -如果脚本提示 Ollama 未安装或未运行: +On initial launch the CLI scans common directories (`./models`, `~/Library/Application Support/git-ca/models`, `~/.cache/git-ca/models`) for GGUF models. If none are found it can download the default model from Hugging Face and cache it locally. -1. **安装 Ollama**: - ```bash - # macOS - brew install ollama - - # Linux - curl -fsSL https://ollama.com/install.sh | sh - ``` +### Additional configuration -2. **启动 Ollama 服务**: - ```bash - ollama serve - ``` +- `git ca model` — interactive model selector (persisted for future runs) +- Non-interactive runs reuse the saved model or fall back to the first detected GGUF. +- `git ca language` — choose English or Simplified Chinese prompts +- Llama context window is fixed at 1024 tokens -3. **下载模型**(可选): - ```bash - ollama pull llama3.2 - ollama pull qwen2.5:7b - ``` +## 6. Troubleshooting -### 环境变量问题 +### Model not found +- Ensure at least one GGUF file exists in the default search directories. +- Confirm the GGUF file is readable. +- Run `git ca model` to select the file interactively. -如果 `git ca` 命令不可用: +### Build failures +- Check that `cmake`, `make`, and a C/C++ compiler are available (`cmake --version`, `cc --version`). +- On macOS install Xcode Command Line Tools (`xcode-select --install`). 
+- On Linux install build essentials (`apt install build-essential cmake` or distro equivalent). -1. **重新加载 shell**: - ```bash - # Bash - source ~/.bashrc - - # Zsh - source ~/.zshrc - ``` +### llama.cpp context errors +- Context is fixed to 1024 tokens; trim large staged changes or use a smaller model. +- Verify available GPU/CPU memory; large models may exceed device limits. -2. **或重启终端** +### Command not found +- Ensure `~/.git-plugins` (or chosen directory) is in PATH. +- Reload your shell (`source ~/.bashrc`, `source ~/.zshrc`) or open a new terminal. -3. **手动添加 PATH**: - ```bash - export PATH="$HOME/.git-plugins:$PATH" - ``` - -### 权限问题 - -如果遇到权限错误: +## 7. Uninstall ```bash -# 确保脚本有执行权限 -chmod +x install-git-ca.sh - -# 如果需要,手动创建插件目录 -mkdir -p ~/.git-plugins -``` - -### 编译问题 - -如果 Rust 编译失败: - -1. **确保 Rust 已正确安装**: - ```bash - rustc --version - cargo --version - ``` - -2. **重新安装 Rust**: - ```bash - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y - source ~/.cargo/env - ``` - -## 卸载 - -如需卸载 Git Commit Analyzer: - -```bash -# 删除二进制文件 rm -f ~/.git-plugins/git-ca - -# 从 shell 配置中移除 PATH 设置 -# 编辑 ~/.bashrc, ~/.zshrc 等文件,删除相关行 +sed -i '' '/git-plugins/d' ~/.bashrc # adjust for your shell/OS +git config --global --unset commit-analyzer.language 2>/dev/null ``` -## 技术支持 - -- **项目地址**: https://github.com/zh30/git-commit-analyzer -- **问题报告**: https://github.com/zh30/git-commit-analyzer/issues -- **Ollama 文档**: https://ollama.com - -## 安全说明 - -- 脚本仅从官方 GitHub 仓库下载源码 -- 所有下载都使用 HTTPS 加密连接 -- 脚本不会收集或传输任何个人信息 -- 建议在安装前检查脚本内容 \ No newline at end of file +## 8. 
Support +- Issues: <https://github.com/zh30/git-commit-analyzer/issues> +- Default model: <https://huggingface.co/unsloth/gemma-3-270m-it-GGUF> +- llama.cpp documentation: <https://github.com/ggml-org/llama.cpp> diff --git a/README.md b/README.md index 095d5cb..e349cc8 100644 --- a/README.md +++ b/README.md @@ -1,165 +1,160 @@ # Git Commit Analyzer -[![Peerlist](https://github-readme-badge.peerlist.io/api/zhanghe)](https://peerlist.io/zhanghe) +[中文](README_ZH.md) · [Français](README_FR.md) · [Español](README_ES.md) -[中文](README_ZH.md) | [Français](README_FR.md) | [Español](README_ES.md) +Git Commit Analyzer is a Rust-based Git plugin that generates Git Flow–style commit messages from your staged diff using a local llama.cpp model. The CLI summarises large diffs, validates model output, and falls back to deterministic messages when needed. -Git Commit Analyzer is a powerful Git plugin that leverages AI to automatically generate meaningful commit messages based on your staged changes. It uses Ollama to analyze git diffs and propose commit messages following the Git Flow format. +## Key Features -## Features +- **Local inference**: Uses `llama_cpp_sys` to run GGUF models without any remote API. +- **Smart diff summarisation**: Large lockfiles and generated assets are reduced to concise summaries before prompting. +- **Git Flow enforcement**: Ensures responses match `<type>(<scope>): <subject>` and retries/falls back when they do not. +- **Interactive CLI**: Review, edit, or cancel the generated commit message. +- **Multi-language prompts**: English (default) and Simplified Chinese. +- **Fixed context window**: the llama context window is fixed at 1024 tokens. +- **Multi-platform binaries**: Pre-built binaries for macOS (Intel & Apple Silicon).
-- Automatic generation of Git Flow compliant commit messages -- Powered by Ollama for local AI processing -- Interactive mode allowing users to use, edit, or cancel the proposed commit message -- Multi-language support (English and Simplified Chinese) -- Cross-platform compatibility (Linux, macOS, Windows) -- Customizable with your personal Git signature -- Support for model selection and persistence +## Requirements -## Prerequisites - -- Git (version 2.0 or later) -- Ollama installed and running (https://ollama.com/download) -- At least one language model installed in Ollama +- Git 2.30+ +- A local GGUF model (the CLI can download the default `unsloth/gemma-3-270m-it-GGUF`) ## Installation -### 🚀 One-Click Installation (Recommended) +### Homebrew (Recommended) - Fast Binary Installation -The fastest way to install Git Commit Analyzer with a single command: +**macOS users can install via Homebrew with pre-built binaries (no Rust compilation required):** ```bash -bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" -``` - -This will automatically: -- Detect your operating system -- Install all dependencies (Git, Rust, Ollama) -- Build and install the plugin -- Configure your environment -- Set up Git configuration - -### Homebrew (macOS and Linux) - -Alternatively, you can install via Homebrew: - -``` brew tap zh30/tap brew install git-ca ``` -After installation, you can immediately use the `git ca` command. +This installs a pre-built binary for your platform: +- **macOS**: Apple Silicon (M1/M2/M3) and Intel (x86_64) -### Manual Installation (Linux and macOS) +No Rust toolchain or compilation needed! -1. Clone the repository: - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` +**Note**: Linux builds are temporarily disabled due to compilation issues. Windows builds are available via [GitHub Releases](https://github.com/zh30/git-commit-analyzer/releases). -2. 
Build the project: - ``` - cargo build --release - ``` +### Manual Installation -3. Create a directory for Git plugins (if it doesn't exist): - ``` - mkdir -p ~/.git-plugins - ``` +Download the appropriate binary for your platform from [Releases](https://github.com/zh30/git-commit-analyzer/releases): -4. Copy the compiled binary to the plugins directory: - ``` - cp target/release/git-ca ~/.git-plugins/ - ``` +```bash +# macOS (Apple Silicon) +curl -L -o git-ca https://github.com/zh30/git-commit-analyzer/releases/download/v1.1.2/git-ca-1.1.2-apple-darwin-arm64.tar.gz +tar -xzf git-ca-1.1.2-apple-darwin-arm64.tar.gz +sudo mv git-ca /usr/local/bin/ +chmod +x /usr/local/bin/git-ca + +# macOS (Intel) +curl -L -o git-ca https://github.com/zh30/git-commit-analyzer/releases/download/v1.1.2/git-ca-1.1.2-apple-darwin-x86_64.tar.gz +tar -xzf git-ca-1.1.2-apple-darwin-x86_64.tar.gz +sudo mv git-ca /usr/local/bin/ +chmod +x /usr/local/bin/git-ca +``` +**Note**: Linux builds are temporarily disabled due to compilation issues. Windows builds are available via [GitHub Releases](https://github.com/zh30/git-commit-analyzer/releases). -5. Add the plugins directory to your PATH. Add the following line to your `~/.bashrc`, `~/.bash_profile`, or `~/.zshrc` (depending on your shell): - ``` - export PATH="$HOME/.git-plugins:$PATH" - ``` +### Build from Source -6. Reload your shell configuration: - ``` - source ~/.bashrc # or ~/.bash_profile, or ~/.zshrc - ``` +If you prefer to build from source: -### Windows - theoretically possible +```bash +git clone https://github.com/zh30/git-commit-analyzer.git +cd git-commit-analyzer +cargo build --release +mkdir -p ~/.git-plugins +cp target/release/git-ca ~/.git-plugins/ +echo 'export PATH="$HOME/.git-plugins:$PATH"' >> ~/.bashrc # adapt for your shell +source ~/.bashrc +``` -1. Clone the repository: - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` +### One-Line Bootstrap Script -2. 
Build the project: - ``` - cargo build --release - ``` +```bash +bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +``` -3. Create a directory for Git plugins (if it doesn't exist): - ``` - mkdir %USERPROFILE%\.git-plugins - ``` +## First-Time Setup -4. Copy the compiled binary to the plugins directory: - ``` - copy target\release\git-commit-analyzer.exe %USERPROFILE%\.git-plugins\ - ``` +On first run the CLI will: -5. Add the plugins directory to your PATH: - - Right-click on 'This PC' or 'My Computer' and select 'Properties' - - Click on 'Advanced system settings' - - Click on 'Environment Variables' - - Under 'System variables', find and select 'Path', then click 'Edit' - - Click 'New' and add `%USERPROFILE%\.git-plugins` - - Click 'OK' to close all dialogs +1. **Scan for models** in common directories: + - `./models` (project directory) + - `~/.cache/git-ca/models` (Linux) + - `~/.local/share/git-ca/models` (Linux alt) + - `~/Library/Application Support/git-ca/models` (macOS) -6. Restart any open command prompts for the changes to take effect. +2. **Download default model** automatically if none found: + - Downloads `unsloth/gemma-3-270m-it-GGUF` from Hugging Face + - Stores it in `~/.cache/git-ca/models/` -## How to Use +3. **Prompt for confirmation** if multiple models are found: + ```bash + git ca model # Interactive model selector + ``` -After installation, you can use Git Commit Analyzer in any Git repository: +## Usage -1. Stage your changes in your Git repository (using the `git add` command). -2. Run the following command: +```bash +git add +git ca +``` - ``` - git ca - ``` +For each invocation: -3. If it's your first time running the command, you'll be prompted to select a model from your installed Ollama models. -4. The program will analyze your staged changes and generate a suggested commit message. -5. You can choose to use the suggested message, edit it, or cancel the commit. +1. 
The staged diff is summarised (lockfiles and large assets are listed but not inlined). +2. The llama.cpp model generates a commit message. +3. Invalid output triggers a stricter retry; if still invalid, a deterministic fallback is offered. +4. Choose to **use**, **edit**, or **cancel** the message. ### Configuration Commands -To change the default model at any time, run: +- `git ca model` — Interactive model selector +- `git ca language` — Choose English or Simplified Chinese prompts +- `git ca doctor` — Test model loading and inference +- `git ca --version` — Display version information -``` -git ca model +## Development + +```bash +cargo fmt +cargo clippy -- -D warnings +cargo test +cargo run -- git ca # try against staged changes ``` -To set the output language for AI-generated commit messages, run: +Key modules: +- `src/main.rs` — CLI orchestration, diff summariser, fallback generator. +- `src/llama.rs` — llama.cpp session management. -``` -git ca language -``` +## Release Process + +Releases are automated via GitHub Actions: -Available languages: -- English (default) -- Simplified Chinese (简体中文) +1. Push a version tag: `git tag v1.1.2 && git push origin v1.1.2` +2. GitHub Actions builds binaries for macOS only (2 platforms: Intel & Apple Silicon) +3. Binaries are uploaded to GitHub Releases +4. Homebrew formula is automatically updated with bottle checksums +5. `homebrew-tap` repository receives the updated formula + - **Note**: Linux builds are temporarily disabled due to compilation issues + - Windows builds are available via GitHub Releases -The selected language will determine the language of the commit message generated by the AI model. Note that this affects the AI's prompt language, not the interface language. +See [DEPLOY.md](DEPLOY.md) for complete release documentation. ## Contributing -Contributions are welcome! Please feel free to submit a Pull Request. +Pull requests are welcome. 
Please include: +- `cargo fmt` / `cargo clippy -- -D warnings` / `cargo test` outputs, +- Updates to documentation (`README*.md`, `AGENTS.md`, `DEPLOY.md`) when behaviour changes, +- A short description of manual `git ca` verification if applicable. ## License -This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. +Released under the MIT License. See [LICENSE](LICENSE) for details. ## Acknowledgments - The Rust community for providing excellent libraries and tools -- Ollama for providing local AI model support +- llama.cpp team for the efficient local inference engine diff --git a/README_ES.md b/README_ES.md index 96c1cf5..6e284d8 100644 --- a/README_ES.md +++ b/README_ES.md @@ -1,165 +1,99 @@ -# Analizador de Commits Git +# Analizador de commits Git -[![Peerlist](https://github-readme-badge.peerlist.io/api/zhanghe)](https://peerlist.io/zhanghe) +[English](README.md) · [中文](README_ZH.md) · [Français](README_FR.md) -[English](README.md) | [中文](README_ZH.md) | [Français](README_FR.md) - -Analizador de Commits Git es un potente plugin de Git que utiliza IA para generar automáticamente mensajes de commit significativos basados en tus cambios preparados. Utiliza Ollama para analizar diferencias git y proponer mensajes de commit siguiendo el formato Git Flow. +Git Commit Analyzer es un plugin de Git escrito en Rust que aprovecha un modelo local de llama.cpp para analizar el diff preparado y generar mensajes de commit con formato Git Flow. El CLI resume automáticamente los cambios voluminosos, valida el formato devuelto por el modelo y ofrece mensajes deterministas de respaldo si la inferencia falla. 
## Características -- Generación automática de mensajes de commit que cumplen con Git Flow -- Funciona con Ollama para procesamiento de IA local -- Modo interactivo que permite a los usuarios usar, editar o cancelar el mensaje de commit propuesto -- Soporte multiidioma (Inglés y Chino Simplificado) -- Compatibilidad multiplataforma (Linux, macOS, Windows) -- Personalizable con tu firma Git personal -- Soporte para selección y persistencia de modelos +- **Inferencia local**: `llama_cpp_sys` ejecuta modelos GGUF sin depender de servicios remotos. +- **Resumen inteligente del diff**: los lockfiles y artefactos grandes se reducen a resúmenes antes de llamar al modelo. +- **Cumplimiento de Git Flow**: se comprueba `<type>(<scope>): <subject>`; si la respuesta no es válida, se reintenta o se devuelve un mensaje estándar. +- **CLI interactivo**: el usuario puede aceptar, editar o cancelar el mensaje sugerido. +- **Prompts multilingües**: inglés (predeterminado) y chino simplificado. +- **Ventana de contexto fija**: la longitud de contexto de llama está fijada en 1024 tokens.
-## Requisitos previos +## Requisitos -- Git (versión 2.0 o posterior) -- Ollama instalado y en ejecución (https://ollama.com/download) -- Al menos un modelo de lenguaje instalado en Ollama +- Git ≥ 2.30 +- Toolchain estable de Rust (`cargo`) +- Dependencias de compilación para llama.cpp (cmake, compilador C/C++, controladores Metal/CUDA según plataforma) +- Un modelo GGUF local (el programa puede descargar `unsloth/gemma-3-270m-it-GGUF` si no encuentra modelos) ## Instalación -### 🚀 Instalación con Un Clic (Recomendada) - -La forma más rápida de instalar Git Commit Analyzer con un solo comando: +### Instalación manual ```bash -bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +git clone https://github.com/zh30/git-commit-analyzer.git +cd git-commit-analyzer +cargo build --release +mkdir -p ~/.git-plugins +cp target/release/git-ca ~/.git-plugins/ +echo 'export PATH="$HOME/.git-plugins:$PATH"' >> ~/.bashrc # adapte la ruta a su shell +source ~/.bashrc ``` -Esto automáticamente: -- Detectará tu sistema operativo -- Instalará todas las dependencias (Git, Rust, Ollama) -- Construirá e instalará el plugin -- Configurará tu entorno -- Configurará Git - -### Homebrew (macOS y Linux) +En la primera ejecución el CLI busca modelos en `./models`, `~/Library/Application Support/git-ca/models` y `~/.cache/git-ca/models`. Si no encuentra ninguno, ofrece descargar el modelo predeterminado desde Hugging Face. -Alternativamente, puedes instalar a través de Homebrew: +### Homebrew (macOS / Linux) -``` +```bash brew tap zh30/tap brew install git-ca ``` -Después de la instalación, puede usar inmediatamente el comando `git ca`. - -### Instalación manual (Linux y macOS) - -1. Clonar el repositorio: - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` - -2. Construir el proyecto: - ``` - cargo build --release - ``` - -3. Crear un directorio para los plugins de Git (si no existe): - ``` - mkdir -p ~/.git-plugins - ``` - -4. 
Copiar el binario compilado al directorio de plugins: - ``` - cp target/release/git-ca ~/.git-plugins/ - ``` - -5. Añadir el directorio de plugins a su PATH. Añada la siguiente línea a su `~/.bashrc`, `~/.bash_profile`, o `~/.zshrc` (dependiendo de su shell): - ``` - export PATH="$HOME/.git-plugins:$PATH" - ``` - -6. Recargar la configuración de su shell: - ``` - source ~/.bashrc # o ~/.bash_profile, o ~/.zshrc - ``` - -### Windows - teóricamente posible - -1. Clonar el repositorio: - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` - -2. Construir el proyecto: - ``` - cargo build --release - ``` +### Script de arranque -3. Crear un directorio para los plugins de Git (si no existe): - ``` - mkdir %USERPROFILE%\.git-plugins - ``` +Un script opcional (`install-git-ca.sh`) automatiza la comprobación de dependencias, la compilación y la actualización del PATH: -4. Copiar el binario compilado al directorio de plugins: - ``` - copy target\release\git-commit-analyzer.exe %USERPROFILE%\.git-plugins\ - ``` - -5. Añadir el directorio de plugins a su PATH: - - Haga clic derecho en 'Este PC' o 'Mi PC' y seleccione 'Propiedades' - - Haga clic en 'Configuración avanzada del sistema' - - Haga clic en 'Variables de entorno' - - En 'Variables del sistema', busque y seleccione 'Path', luego haga clic en 'Editar' - - Haga clic en 'Nuevo' y añada `%USERPROFILE%\.git-plugins` - - Haga clic en 'Aceptar' para cerrar todos los cuadros de diálogo +```bash +bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +``` -6. Reinicie cualquier símbolo del sistema abierto para que los cambios surtan efecto. +Revise el script antes de ejecutarlo y asegúrese de que dispone de un modelo GGUF accesible. -## Cómo usar +## Uso -Después de la instalación, puede utilizar Git Commit Analyzer en cualquier repositorio Git: +```bash +git add +git ca +``` -1. Prepare sus cambios en su repositorio Git (utilizando el comando `git add`). -2. 
Ejecute el siguiente comando: +Durante la primera ejecución se le pedirá seleccionar la ruta del modelo. En cada invocación: - ``` - git ca - ``` +1. El diff preparado se resume (los archivos grandes solo muestran un resumen). +2. El modelo llama.cpp genera el mensaje de commit. +3. Si el resultado no cumple Git Flow, se lanza un segundo intento más estricto; si todavía falla, se ofrece un mensaje de respaldo (por ejemplo `chore(deps): update dependencies`). +4. El usuario decide **usar**, **editar** o **cancelar** el mensaje. -3. Si es la primera vez que ejecuta el comando, se le pedirá que seleccione un modelo de sus modelos Ollama instalados. -4. El programa analizará sus cambios preparados y generará un mensaje de commit sugerido. -5. Puede elegir usar el mensaje sugerido, editarlo o cancelar el commit. +### Configuración -### Comandos de Configuración +- `git ca model` — selector interactivo de modelos; la ruta GGUF elegida se reutiliza en ejecuciones futuras. +- En ejecuciones no interactivas se reutiliza el modelo guardado o, si no existe, el primer GGUF detectado. +- `git ca language` — alterna entre prompts en inglés y chino; guarda la preferencia en `commit-analyzer.language`. +- La longitud de contexto de llama queda fijada en 1024 tokens. -Para cambiar el modelo predeterminado en cualquier momento, ejecute: +## Desarrollo -``` -git ca model -``` - -Para establecer el idioma de salida para los mensajes de commit generados por IA, ejecute: - -``` -git ca language +```bash +cargo fmt +cargo clippy -- -D warnings +cargo test +cargo run -- git ca ``` -Idiomas disponibles: -- Inglés (predeterminado) -- Chino Simplificado (简体中文) - -El idioma seleccionado determinará el idioma del mensaje de commit generado por el modelo de IA. Nota: esto afecta el idioma del prompt de la IA, no el idioma de la interfaz. +Archivos principales: +- `src/main.rs`: flujo del CLI, resumen del diff, generación de mensajes de respaldo. 
+- `src/llama.rs`: envoltorio minimalista sobre la sesión de llama.cpp. ## Contribución -¡Las contribuciones son bienvenidas! No dude en enviar una Pull Request. +Se aceptan Pull Requests. Incluya: +- resultados de `cargo fmt`, `cargo clippy -- -D warnings` y `cargo test`, +- actualizaciones de documentación (`README*.md`, `AGENTS.md`, `DEPLOY.md`) cuando cambie el comportamiento, +- una breve nota sobre la verificación manual de `git ca` si aplica. ## Licencia -Este proyecto está licenciado bajo la Licencia MIT - consulte el archivo [LICENSE](LICENSE) para más detalles. - -## Agradecimientos - -- A la comunidad de Rust por proporcionar excelentes bibliotecas y herramientas -- A Ollama por proporcionar soporte para modelos de IA locales \ No newline at end of file +Proyecto con licencia MIT. Consulte el archivo [LICENSE](LICENSE) para más información. diff --git a/README_FR.md b/README_FR.md index 9cada54..92f8868 100644 --- a/README_FR.md +++ b/README_FR.md @@ -1,165 +1,99 @@ -# Analyseur de Commits Git +# Analyseur de commits Git -[![Peerlist](https://github-readme-badge.peerlist.io/api/zhanghe)](https://peerlist.io/zhanghe) +[English](README.md) · [中文](README_ZH.md) · [Español](README_ES.md) -[English](README.md) | [中文](README_ZH.md) - -Analyseur de Commits Git est un puissant plugin Git qui utilise l'IA pour générer automatiquement des messages de commit pertinents basés sur vos changements en attente. Il utilise Ollama pour analyser les différences git et proposer des messages de commit conformes au format Git Flow. +Git Commit Analyzer est un plugin Git écrit en Rust qui exploite un modèle llama.cpp local pour analyser le diff déjà indexé et produire des messages de commit conformes à Git Flow. Le CLI résume automatiquement les gros fichiers, valide la structure de la réponse et fournit un message de secours déterministe en cas d’échec du modèle. 
## Fonctionnalités -- Génération automatique de messages de commit conformes à Git Flow -- Propulsé par Ollama pour un traitement IA local -- Mode interactif permettant aux utilisateurs d'utiliser, de modifier ou d'annuler le message de commit proposé -- Support multilingue (Anglais et Chinois Simplifié) -- Compatibilité multi-plateformes (Linux, macOS, Windows) -- Personnalisable avec votre signature Git personnelle -- Support pour la sélection et la persistance des modèles +- **Inférence locale** : `llama_cpp_sys` exécute des modèles GGUF sans dépendre d’une API distante. +- **Résumé intelligent du diff** : les fichiers volumineux (lockfiles, artefacts) sont réduits à des résumés avant l’appel au modèle. +- **Respect de Git Flow** : vérifie la forme `() : ` et retente/échoue proprement si nécessaire. +- **CLI interactif** : vous pouvez accepter, éditer ou annuler le message proposé. +- **Prompts multilingues** : anglais (par défaut) et chinois simplifié. +- **Contexte configurable** : ajustez la fenêtre de contexte llama via la configuration Git. 
## Prérequis -- Git (version 2.0 ou ultérieure) -- Ollama installé et en cours d'exécution (https://ollama.com/download) -- Au moins un modèle de langage installé dans Ollama +- Git ≥ 2.30 +- Chaîne d’outils Rust stable (`cargo`) +- Dépendances de compilation llama.cpp (cmake, compilateur C/C++, pilotes Metal/CUDA selon la plateforme) +- Un modèle GGUF local (le programme peut télécharger `unsloth/gemma-3-270m-it-GGUF` si aucun modèle n’est disponible) ## Installation -### 🚀 Installation en un Clic (Recommandée) - -La méthode la plus rapide pour installer Git Commit Analyzer avec une seule commande : +### Installation manuelle ```bash -bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +git clone https://github.com/zh30/git-commit-analyzer.git +cd git-commit-analyzer +cargo build --release +mkdir -p ~/.git-plugins +cp target/release/git-ca ~/.git-plugins/ +echo 'export PATH="$HOME/.git-plugins:$PATH"' >> ~/.bashrc # adaptez selon votre shell +source ~/.bashrc ``` -Cela automatiquement : -- Détecter votre système d'exploitation -- Installer toutes les dépendances (Git, Rust, Ollama) -- Construire et installer le plugin -- Configurer votre environnement -- Configurer Git - -### Homebrew (macOS et Linux) +Au premier lancement, le CLI parcourt `./models`, `~/Library/Application Support/git-ca/models` et `~/.cache/git-ca/models`. S’il ne trouve aucun modèle, il propose de télécharger celui par défaut depuis Hugging Face. -Alternativement, vous pouvez installer via Homebrew : +### Homebrew (macOS / Linux) -``` +```bash brew tap zh30/tap brew install git-ca ``` -Après l'installation, vous pouvez immédiatement utiliser la commande `git ca`. - -### Installation manuelle (Linux et macOS) - -1. Clonez le dépôt : - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` - -2. Construisez le projet : - ``` - cargo build --release - ``` - -3. 
Créez un répertoire pour les plugins Git (s'il n'existe pas) : - ``` - mkdir -p ~/.git-plugins - ``` - -4. Copiez le binaire compilé dans le répertoire des plugins : - ``` - cp target/release/git-ca ~/.git-plugins/ - ``` - -5. Ajoutez le répertoire des plugins à votre PATH. Ajoutez la ligne suivante à votre `~/.bashrc`, `~/.bash_profile`, ou `~/.zshrc` (selon votre shell) : - ``` - export PATH="$HOME/.git-plugins:$PATH" - ``` - -6. Rechargez votre configuration shell : - ``` - source ~/.bashrc # ou ~/.bash_profile, ou ~/.zshrc - ``` - -### Windows - théoriquement possible - -1. Clonez le dépôt : - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` - -2. Construisez le projet : - ``` - cargo build --release - ``` +### Script d’amorçage -3. Créez un répertoire pour les plugins Git (s'il n'existe pas) : - ``` - mkdir %USERPROFILE%\.git-plugins - ``` +Un script optionnel (`install-git-ca.sh`) automatise la vérification des dépendances, la compilation et la mise à jour du PATH : -4. Copiez le binaire compilé dans le répertoire des plugins : - ``` - copy target\release\git-commit-analyzer.exe %USERPROFILE%\.git-plugins\ - ``` - -5. Ajoutez le répertoire des plugins à votre PATH : - - Faites un clic droit sur 'Ce PC' ou 'Poste de travail' et sélectionnez 'Propriétés' - - Cliquez sur 'Paramètres système avancés' - - Cliquez sur 'Variables d'environnement' - - Sous 'Variables système', trouvez et sélectionnez 'Path', puis cliquez sur 'Modifier' - - Cliquez sur 'Nouveau' et ajoutez `%USERPROFILE%\.git-plugins` - - Cliquez sur 'OK' pour fermer toutes les boîtes de dialogue +```bash +bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +``` -6. Redémarrez tous les invites de commande ouverts pour que les changements prennent effet. +Inspectez le script avant exécution et assurez-vous qu’un modèle GGUF est disponible. 
-## Comment utiliser +## Utilisation -Après l'installation, vous pouvez utiliser Git Commit Analyzer dans n'importe quel dépôt Git : +```bash +git add +git ca +``` -1. Mettez en attente vos modifications dans votre dépôt Git (en utilisant la commande `git add`). -2. Exécutez la commande suivante : +Lors de la première exécution, choisissez le chemin du modèle. À chaque invocation : - ``` - git ca - ``` +1. Le diff indexé est condensé (les fichiers volumineux apparaissent sous forme de résumé). +2. Le modèle llama.cpp génère un message de commit. +3. Si la réponse ne respecte pas Git Flow, une tentative plus stricte est effectuée ; à défaut, un message de secours (par ex. `chore(deps): update dependencies`) est proposé. +4. Vous décidez d’**utiliser**, **éditer** ou **annuler** le message. -3. Si c'est la première fois que vous exécutez la commande, vous serez invité à sélectionner un modèle parmi vos modèles Ollama installés. -4. Le programme analysera vos modifications en attente et générera un message de commit suggéré. -5. Vous pouvez choisir d'utiliser le message suggéré, de le modifier ou d'annuler le commit. +### Configuration -### Commandes de Configuration +- `git ca model` — sélectionne interactivement un modèle et réutilise ce chemin GGUF pour les exécutions suivantes. +- En mode non interactif, le modèle mémorisé est utilisé ou, à défaut, le premier GGUF détecté. +- `git ca language` — bascule les prompts entre anglais et chinois, stocké dans `commit-analyzer.language`. +- La longueur de contexte de llama est fixée à 1024 tokens. 
-Pour changer le modèle par défaut à tout moment, exécutez : +## Développement -``` -git ca model -``` - -Pour définir la langue de sortie des messages de commit générés par l'IA, exécutez : - -``` -git ca language +```bash +cargo fmt +cargo clippy -- -D warnings +cargo test +cargo run -- git ca ``` -Langues disponibles : -- Anglais (par défaut) -- Chinois Simplifié (简体中文) - -La langue sélectionnée déterminera la langue du message de commit généré par le modèle IA. Note : cela affecte la langue du prompt de l'IA, pas la langue de l'interface. +Fichiers principaux : +- `src/main.rs` : orchestration CLI, synthèse du diff, stratégie de repli. +- `src/llama.rs` : encapsulation de la session llama.cpp. ## Contribution -Les contributions sont les bienvenues ! N'hésitez pas à soumettre une Pull Request. +Les contributions sont les bienvenues. Merci d’inclure : +- les sorties `cargo fmt`, `cargo clippy -- -D warnings`, `cargo test`, +- les mises à jour de la documentation (`README*.md`, `AGENTS.md`, `DEPLOY.md`) en cas de changement fonctionnel, +- un court descriptif des tests manuels `git ca` le cas échéant. ## Licence -Ce projet est sous licence MIT - voir le fichier [LICENSE](LICENSE) pour plus de détails. - -## Remerciements - -- La communauté Rust pour fournir d'excellentes bibliothèques et outils -- Ollama pour fournir un support de modèle IA local \ No newline at end of file +Projet sous licence MIT. Consultez le fichier [LICENSE](LICENSE) pour plus d’informations. 
diff --git a/README_ZH.md b/README_ZH.md index d8ad076..ca026c4 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -1,165 +1,96 @@ # Git 提交分析器 -[![Peerlist](https://github-readme-badge.peerlist.io/api/zhanghe)](https://peerlist.io/zhanghe) +[English](README.md) · [Français](README_FR.md) · [Español](README_ES.md) -[English](README.md) +Git 提交分析器是一个基于 Rust 的 Git 插件,利用本地 llama.cpp 模型分析已暂存的 diff,并生成符合 Git Flow 规范的提交说明。CLI 会在提示前压缩冗长 diff,校验模型输出格式,并在必要时提供确定性的兜底提交信息。 -Git 提交分析器是一个强大的 Git 插件,它利用人工智能根据您的暂存更改自动生成有意义的提交消息。它使用 Ollama 分析 git 差异并提出符合 Git Flow 格式的提交消息。 +## 功能特性 -## 功能特点 +- **本地推理**:通过 `llama_cpp_sys` 调用 GGUF 模型,无需远程 API。 +- **智能 diff 摘要**:锁文件、生成物等大文件仅展示概要,避免浪费 Token。 +- **Git Flow 校验**:严格要求 `(): `,失败时自动重试或兜底。 +- **交互式 CLI**:支持直接使用、编辑或取消生成的提交说明。 +- **多语言提示**:提供英文(默认)和简体中文两种提示语言。 +- **上下文可调**:可通过 Git 配置调整 llama 上下文长度。 -- 自动生成符合 Git Flow 规范的提交消息 -- 由 Ollama 提供支持,实现本地 AI 处理 -- 交互模式允许用户使用、编辑或取消建议的提交消息 -- 多语言支持(英文和简体中文) -- 跨平台兼容性(Linux、macOS、Windows) -- 可以使用您的个人 Git 签名进行自定义 -- 支持模型选择和持久化 +## 环境要求 -## 前提条件 +- Git ≥ 2.30 +- Rust 稳定版工具链 +- 构建 llama.cpp 所需依赖(cmake、C/C++ 编译器、Metal/CUDA 等) +- 本地 GGUF 模型(首次运行可自动下载 `unsloth/gemma-3-270m-it-GGUF`) -- Git(2.0 或更高版本) -- 已安装并运行 Ollama(https://ollama.com/download) -- Ollama 中至少安装了一个语言模型 +## 安装方式 -## 安装 - -### 🚀 一键安装(推荐) - -最快的安装方式,只需一行命令即可完成所有安装步骤: +### 手动安装 ```bash -bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +git clone https://github.com/zh30/git-commit-analyzer.git +cd git-commit-analyzer +cargo build --release +mkdir -p ~/.git-plugins +cp target/release/git-ca ~/.git-plugins/ +echo 'export PATH="$HOME/.git-plugins:$PATH"' >> ~/.bashrc # 根据使用的 shell 调整 +source ~/.bashrc ``` -这将自动完成: -- 检测您的操作系统 -- 安装所有依赖(Git、Rust、Ollama) -- 构建并安装插件 -- 配置环境变量 -- 设置 Git 配置 - -### Homebrew(macOS 和 Linux) +CLI 会在常用目录(`./models`、`~/Library/Application Support/git-ca/models`、`~/.cache/git-ca/models`)中查找 GGUF 模型,若未找到会提示自动下载默认模型。 -或者,您也可以通过 Homebrew 安装: +### Homebrew(macOS / Linux) -``` +```bash brew tap zh30/tap brew install git-ca 
``` -安装后,您可以立即使用 `git ca` 命令。 - -### 手动安装(Linux 和 macOS) - -1. 克隆仓库: - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` - -2. 构建项目: - ``` - cargo build --release - ``` - -3. 创建 Git 插件目录(如果不存在): - ``` - mkdir -p ~/.git-plugins - ``` - -4. 将编译好的二进制文件复制到插件目录: - ``` - cp target/release/git-ca ~/.git-plugins/ - ``` - -5. 将插件目录添加到您的 PATH。根据您使用的 shell,将以下行添加到 `~/.bashrc`、`~/.bash_profile` 或 `~/.zshrc`: - ``` - export PATH="$HOME/.git-plugins:$PATH" - ``` - -6. 重新加载您的 shell 配置: - ``` - source ~/.bashrc # 或 ~/.bash_profile, 或 ~/.zshrc - ``` - -### Windows - 理论上可行 - -1. 克隆仓库: - ``` - git clone https://github.com/zh30/git-commit-analyzer.git - cd git-commit-analyzer - ``` - -2. 构建项目: - ``` - cargo build --release - ``` +### 一键脚本 -3. 创建 Git 插件目录(如果不存在): - ``` - mkdir %USERPROFILE%\.git-plugins - ``` +可选的 `install-git-ca.sh` 会检测依赖、编译二进制并更新 PATH: -4. 将编译好的二进制文件复制到插件目录: - ``` - copy target\release\git-commit-analyzer.exe %USERPROFILE%\.git-plugins\ - ``` - -5. 将插件目录添加到您的 PATH: - - 右键点击"此电脑"或"我的电脑"并选择"属性" - - 点击"高级系统设置" - - 点击"环境变量" - - 在"系统变量"下,找到并选择"Path",然后点击"编辑" - - 点击"新建"并添加 `%USERPROFILE%\.git-plugins` - - 点击"确定"关闭所有对话框 - -6. 重启所有打开的命令提示符,使更改生效。 +```bash +bash -c "$(curl -fsSL https://sh.zhanghe.dev/install-git-ca.sh)" +``` -## 使用方法 +运行前建议审阅脚本,并确认本地可访问目标 GGUF 模型。 -安装后,您可以在任何 Git 仓库中使用 Git 提交分析器: +## 使用说明 -1. 在您的 Git 仓库中暂存您的更改(使用 `git add` 命令)。 -2. 运行以下命令: +```bash +git add +git ca +``` - ``` - git ca - ``` +首次运行需选择模型路径。后续流程包括: -3. 如果是首次运行该命令,系统会提示您从已安装的 Ollama 模型中选择一个模型。 -4. 程序将分析您的暂存更改并生成建议的提交消息。 -5. 您可以选择使用建议的消息、编辑它或取消提交。 +1. 对已暂存 diff 进行摘要(大型文件仅展示概要)。 +2. llama.cpp 模型生成提交说明。 +3. 若输出不符合规范,使用更严格提示重试;仍失败则给出兜底信息(如 `chore(deps): update dependencies`)。 +4. 
交互式选择 **使用**、**编辑** 或 **取消**。 ### 配置命令 -要随时更改默认模型,请运行: +- `git ca model` — 交互式选择模型路径,所选 GGUF 会在后续运行中复用。 +- 非交互模式优先使用已保存的模型,否则自动使用检测到的第一个 GGUF。 +- `git ca language` — 切换提示语言(英文/中文),写入 `commit-analyzer.language`。 +- llama 上下文长度固定为 1024 tokens。 -``` -git ca model -``` - -要设置 AI 生成提交消息的输出语言,请运行: +## 开发指引 -``` -git ca language +```bash +cargo fmt +cargo clippy -- -D warnings +cargo test +cargo run -- git ca ``` -可用语言: -- 英文(默认) -- 简体中文 - -所选语言将决定 AI 模型生成的提交消息的语言。注意:这会影响 AI 的提示语言,而不是界面语言。 +核心代码: +- `src/main.rs` — CLI 主流程、diff 摘要、兜底提交逻辑。 +- `src/llama.rs` — llama.cpp 会话封装。 ## 贡献 -欢迎贡献!请随时提交拉取请求。 +欢迎提交 Pull Request!请在提交前完成 `cargo fmt` / `cargo clippy -- -D warnings` / `cargo test`,并在行为变化时更新相关文档(`README*.md`、`AGENTS.md`、`DEPLOY.md`)。 ## 许可证 -该项目采用 MIT 许可证 - 详情请参阅 [LICENSE](LICENSE) 文件。 - -## 致谢 - -- Rust 社区提供了优秀的库和工具 -- Ollama 提供本地 AI 模型支持 +项目采用 MIT 许可证,详见 [LICENSE](LICENSE)。 diff --git a/body-test-final.md b/body-test-final.md new file mode 100644 index 0000000..c0fde41 --- /dev/null +++ b/body-test-final.md @@ -0,0 +1 @@ +# Final body test diff --git a/debug-body-test.md b/debug-body-test.md new file mode 100644 index 0000000..631d2cb --- /dev/null +++ b/debug-body-test.md @@ -0,0 +1 @@ +# Debug body test diff --git a/enhanced-test.md b/enhanced-test.md new file mode 100644 index 0000000..1ec22f9 --- /dev/null +++ b/enhanced-test.md @@ -0,0 +1 @@ +# Enhanced commit message test diff --git a/git-ca.rb b/git-ca.rb index bae8313..647aad1 100644 --- a/git-ca.rb +++ b/git-ca.rb @@ -1,32 +1,43 @@ class GitCa < Formula desc "AI-powered Git plugin for generating meaningful commit messages" homepage "https://github.com/zh30/git-commit-analyzer" - url "https://github.com/zh30/git-commit-analyzer/archive/refs/tags/v1.0.0.tar.gz" + url "https://github.com/zh30/git-commit-analyzer/archive/refs/tags/v1.1.2.tar.gz" sha256 "REPLACE_WITH_ACTUAL_SHA256_CHECKSUM" license "MIT" head "https://github.com/zh30/git-commit-analyzer.git", branch: "main" - depends_on "rust" => :build + # Bottle 
support for pre-built binaries + bottle do + root_url "https://github.com/zh30/git-commit-analyzer/releases/download/v#{version}" + sha256 cellar: :any_skip_relocate, arm64_sequoia: "REPLACE_WITH_ARM64_MACOS_SHA256" + sha256 cellar: :any_skip_relocate, x86_64_sequoia: "REPLACE_WITH_X86_64_MACOS_SHA256" + # Note: Linux builds disabled due to compilation issues + # Windows builds available via GitHub Releases but not distributed via Homebrew + end def install - system "cargo", "build", "--release", "--bin", "git-ca" - bin.install "target/release/git-ca" + bin.install "git-ca" end def caveats <<~EOS - To use git-ca, you need Ollama installed and running: - https://ollama.com/download - - You'll also need at least one language model installed in Ollama. - + To use git-ca, you need a local GGUF model (llama.cpp format). + + The tool will automatically download the default model + (unsloth/gemma-3-270m-it-GGUF) on first run, or you can: + - Place GGUF files in ./models directory + - Place GGUF files in ~/.cache/git-ca/models directory + - Run 'git ca model' to select a model manually + To set up a default model, run: - git ca model + git ca model + + Note: git-ca uses local llama.cpp inference (no remote API calls). 
EOS end test do # Test to verify that the binary is installed correctly - system "#{bin}/git-ca", "--version" + assert_match version.to_s, shell_output("#{bin}/git-ca --version") end end \ No newline at end of file diff --git a/src/llama.rs b/src/llama.rs new file mode 100644 index 0000000..edaeb42 --- /dev/null +++ b/src/llama.rs @@ -0,0 +1,432 @@ +use llama_cpp_sys_2::{ + ggml_log_level, llama_backend_free, llama_backend_init, llama_batch_free, llama_batch_init, + llama_context_default_params, llama_decode, llama_free, llama_free_model, llama_get_logits, + llama_get_memory, llama_load_model_from_file, llama_log_set, llama_memory_clear, llama_model, + llama_model_default_params, llama_model_get_vocab, llama_n_vocab, llama_new_context_with_model, + llama_set_n_threads, llama_token, llama_token_eos, llama_token_to_piece, llama_tokenize, + llama_vocab, GGML_LOG_LEVEL_ERROR, +}; +use rand::prelude::*; +use std::cmp::Ordering; +use std::ffi::{CStr, CString}; +use std::os::raw::{c_char, c_void}; +use std::path::Path; +use std::ptr; +use std::sync::Once; + +const MAX_SEQ_ID: i32 = 1; +const PROMPT_CHUNK_SIZE: usize = 256; +const SAMPLING_TEMPERATURE: f32 = 0.8; +const SAMPLING_TOP_K: usize = 40; +const SAMPLING_TOP_P: f32 = 0.9; +const SAMPLING_MIN_P: f32 = 0.0; +const TOKEN_PIECE_INITIAL: usize = 64; +const TOKEN_PIECE_MAX: usize = 8192; + +static LOG_INITIALIZED: Once = Once::new(); + +unsafe extern "C" fn llama_log_filter(level: ggml_log_level, text: *const c_char, _: *mut c_void) { + if text.is_null() { + return; + } + + if level >= GGML_LOG_LEVEL_ERROR { + if let Ok(msg) = CStr::from_ptr(text).to_str() { + eprintln!("{msg}"); + } + } +} + +#[derive(Debug)] +pub struct LlamaSession { + model: *mut llama_model, + ctx: *mut llama_cpp_sys_2::llama_context, + vocab: *const llama_vocab, + n_ctx: i32, +} + +impl LlamaSession { + pub fn new(model_path: &Path, n_ctx: i32) -> Result { + if !model_path.exists() { + return Err(format!("Model file not found at {}", 
model_path.display())); + } + + let model_path_cstr = CString::new( + model_path + .to_str() + .ok_or_else(|| "Model path contains invalid UTF-8 characters".to_string())?, + ) + .map_err(|_| "Model path contains interior null bytes".to_string())?; + + unsafe { + LOG_INITIALIZED.call_once(|| { + llama_log_set(Some(llama_log_filter), std::ptr::null_mut()); + }); + llama_backend_init(); + + let model_params = llama_model_default_params(); + let model = llama_load_model_from_file(model_path_cstr.as_ptr(), model_params); + if model.is_null() { + llama_backend_free(); + return Err("Failed to load GGUF model".to_string()); + } + + let vocab = llama_model_get_vocab(model); + if vocab.is_null() { + llama_free_model(model); + llama_backend_free(); + return Err("Failed to resolve model vocabulary".to_string()); + } + + let mut ctx_params = llama_context_default_params(); + ctx_params.n_ctx = n_ctx as u32; + ctx_params.n_batch = n_ctx as u32; + ctx_params.n_ubatch = n_ctx as u32; + ctx_params.n_seq_max = MAX_SEQ_ID as u32; + + let threads = std::thread::available_parallelism() + .map(|n| n.get() as i32) + .unwrap_or(4) + .max(1); + ctx_params.n_threads = threads; + ctx_params.n_threads_batch = threads; + + let ctx = llama_new_context_with_model(model, ctx_params); + if ctx.is_null() { + llama_free_model(model); + llama_backend_free(); + return Err("Failed to create llama.cpp context".to_string()); + } + + llama_set_n_threads(ctx, threads, threads); + + Ok(Self { + model, + ctx, + vocab, + n_ctx, + }) + } + } + + pub fn infer(&mut self, prompt: &str, max_tokens: usize) -> Result { + let prompt_cstr = CString::new(prompt).map_err(|_| { + "Prompt contains interior null bytes which cannot be processed".to_string() + })?; + + unsafe { + let memory = llama_get_memory(self.ctx); + if !memory.is_null() { + llama_memory_clear(memory, true); + } + } + + let mut tokens: Vec = Vec::new(); + let mut capacity = prompt.len().max(1) + 8; + + loop { + if capacity > i32::MAX as usize { + return 
Err("Prompt too long for llama.cpp tokenizer".to_string()); + } + + tokens.resize(capacity, 0); + + let text_len = i32::try_from(prompt.len()) + .map_err(|_| "Prompt length exceeds supported limits".to_string())?; + + let n_tokens = unsafe { + llama_tokenize( + self.vocab, + prompt_cstr.as_ptr(), + text_len, + tokens.as_mut_ptr(), + capacity as i32, + true, + false, + ) + }; + + if n_tokens >= 0 { + tokens.truncate(n_tokens as usize); + break; + } + + capacity = capacity.saturating_mul(2); + } + + if tokens.len() >= self.n_ctx.saturating_sub(32) as usize { + let max_tokens = self.n_ctx.saturating_sub(32).max(1) as usize; + if tokens.len() > max_tokens { + let drop_count = tokens.len() - max_tokens; + tokens.drain(0..drop_count); + } + } + + unsafe { + self.decode_sequence(&tokens, 0)?; + } + + let mut n_past = tokens.len() as i32; + let mut generated = String::new(); + let eos_token = unsafe { llama_token_eos(self.vocab) }; + let vocab_size = unsafe { llama_n_vocab(self.vocab) } as usize; + + let mut decode_batch = unsafe { llama_batch_init(1, 0, MAX_SEQ_ID) }; + let mut decode_error: Option = None; + let mut has_meaningful_text = false; + + for _ in 0..max_tokens { + let allow_eos = has_meaningful_text; + let next_token = unsafe { self.sample_next_token(vocab_size, eos_token, allow_eos) }; + if next_token == eos_token { + break; + } + + let token_text = unsafe { self.token_to_string(next_token) }; + generated.push_str(&token_text); + if !token_text.trim().is_empty() { + has_meaningful_text = true; + } + + unsafe { + decode_batch.n_tokens = 1; + (*decode_batch.token) = next_token; + (*decode_batch.pos) = n_past; + (*decode_batch.n_seq_id) = 1; + let seq_ptr = *decode_batch.seq_id; + seq_ptr.write(0); + (*decode_batch.logits) = 1; + + if llama_decode(self.ctx, decode_batch) != 0 { + if generated.trim().is_empty() { + decode_error = + Some("Model evaluation failed during generation".to_string()); + } + break; + } + } + + n_past += 1; + + if 
generated.trim().is_empty() { + continue; + } + + if generated.ends_with('\n') && generated.lines().count() >= 2 { + break; + } + } + + unsafe { + llama_batch_free(decode_batch); + } + + if let Some(err) = decode_error { + return Err(err); + } + + Ok(generated) + } + + unsafe fn decode_sequence(&self, tokens: &[llama_token], start_pos: i32) -> Result<(), String> { + if tokens.is_empty() { + return Ok(()); + } + + let mut chunk_size = PROMPT_CHUNK_SIZE.max(1); + let mut offset = 0usize; + + while offset < tokens.len() { + let remaining = tokens.len() - offset; + let current = remaining.min(chunk_size); + let mut batch = llama_batch_init(current as i32, 0, MAX_SEQ_ID); + + for i in 0..current { + let idx = i; + (*batch.token.add(idx)) = tokens[offset + i]; + (*batch.pos.add(idx)) = start_pos + (offset + i) as i32; + (*batch.n_seq_id.add(idx)) = 1; + let seq_ptr = *batch.seq_id.add(idx); + seq_ptr.write(0); + let is_last = offset + i + 1 == tokens.len(); + (*batch.logits.add(idx)) = i8::from(is_last); + } + + batch.n_tokens = current as i32; + let status = llama_decode(self.ctx, batch); + llama_batch_free(batch); + + if status != 0 { + if chunk_size > 1 { + chunk_size = chunk_size.saturating_div(2).max(1); + continue; + } else { + return Err("Model evaluation failed during prompt ingestion".to_string()); + } + } + + offset += current; + } + + Ok(()) + } + + unsafe fn sample_next_token( + &self, + vocab_size: usize, + eos_token: llama_token, + allow_eos: bool, + ) -> llama_token { + let logits_ptr = llama_get_logits(self.ctx); + if logits_ptr.is_null() { + return eos_token; + } + + let logits = std::slice::from_raw_parts(logits_ptr, vocab_size); + let mut candidates: Vec<(llama_token, f32)> = logits + .iter() + .enumerate() + .map(|(idx, &logit)| (idx as llama_token, logit)) + .collect(); + + candidates.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(Ordering::Equal)); + + let top_k = SAMPLING_TOP_K.max(1).min(candidates.len()); + candidates.truncate(top_k); + let 
best_non_eos = candidates + .iter() + .find_map(|(token, _)| (*token != eos_token).then_some(*token)); + + let temperature = SAMPLING_TEMPERATURE.max(1e-5); + let mut scaled = Vec::with_capacity(candidates.len()); + let mut max_logit = f32::NEG_INFINITY; + for &(token, logit) in &candidates { + let scaled_logit = logit / temperature; + if scaled_logit > max_logit { + max_logit = scaled_logit; + } + scaled.push((token, scaled_logit)); + } + + let mut weights = Vec::with_capacity(scaled.len()); + let mut weight_sum = 0.0f32; + for (token, logit) in scaled { + let weight = (logit - max_logit).exp(); + if weight.is_finite() && weight > 0.0 { + weight_sum += weight; + weights.push((token, weight)); + } + } + + if weights.is_empty() { + return best_non_eos + .or_else(|| candidates.first().map(|(token, _)| *token)) + .unwrap_or(eos_token); + } + + weights.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(Ordering::Equal)); + + let mut filtered = Vec::new(); + let mut cumulative = 0.0; + for &(token, weight) in &weights { + let prob = weight / weight_sum; + cumulative += prob; + filtered.push((token, weight)); + if SAMPLING_TOP_P < 1.0 && cumulative >= SAMPLING_TOP_P { + break; + } + } + + if filtered.is_empty() { + filtered.push(weights[0]); + } + + if SAMPLING_MIN_P > 0.0 { + let max_weight = filtered + .iter() + .map(|(_, weight)| *weight) + .fold(f32::NEG_INFINITY, f32::max); + let threshold = max_weight * SAMPLING_MIN_P; + filtered.retain(|(_, weight)| *weight >= threshold); + if filtered.is_empty() { + filtered.push(weights[0]); + } + } + + if !allow_eos { + filtered.retain(|(token, _)| *token != eos_token); + } + + if filtered.is_empty() { + return best_non_eos + .or_else(|| candidates.first().map(|(token, _)| *token)) + .unwrap_or(eos_token); + } + + let total_weight: f32 = filtered.iter().map(|(_, weight)| *weight).sum(); + if total_weight <= 0.0 { + return filtered[0].0; + } + + let mut rng = rand::rng(); + let mut sample = rng.random::() * total_weight; + for 
(token, weight) in &filtered { + sample -= *weight; + if sample <= 0.0 { + return *token; + } + } + + filtered + .last() + .map(|(token, _)| *token) + .unwrap_or(eos_token) + } + + unsafe fn token_to_string(&self, token: llama_token) -> String { + let mut size = TOKEN_PIECE_INITIAL; + let mut buffer: Vec = Vec::new(); + + loop { + if size > TOKEN_PIECE_MAX { + return String::new(); + } + + buffer.resize(size, 0); + let written = + llama_token_to_piece(self.vocab, token, buffer.as_mut_ptr(), size as i32, 0, true); + + if written < 0 { + size = size.saturating_mul(2); + continue; + } + + let written = written as usize; + if written >= size { + size = size.saturating_mul(2); + continue; + } + + let bytes: Vec = buffer[..written].iter().map(|b| *b as u8).collect(); + return String::from_utf8(bytes).unwrap_or_default(); + } + } +} + +impl Drop for LlamaSession { + fn drop(&mut self) { + unsafe { + if !self.ctx.is_null() { + llama_free(self.ctx); + self.ctx = ptr::null_mut(); + } + + if !self.model.is_null() { + llama_free_model(self.model); + self.model = ptr::null_mut(); + } + + llama_backend_free(); + } + } +} diff --git a/src/main.rs b/src/main.rs index c53a47c..e3e90c8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,18 +1,19 @@ -use git2::{Config, IndexAddOption, Repository, Signature}; -use reqwest::blocking::Client; -use reqwest::header::{HeaderMap, HeaderValue, HOST}; -use serde_json::{json, Value}; +mod llama; + +use crate::llama::LlamaSession; +use git2::{Commit, Config, ErrorCode, Repository, Signature}; +use hf_hub::api::sync::Api; +use std::collections::HashSet; use std::env; use std::fmt; -use std::io::{self, BufRead, BufReader, Write}; +use std::fs; +use std::io::{self, IsTerminal, Write}; use std::path::{Path, PathBuf}; use std::process::Command; -use std::time::Duration; - -const OLLAMA_API_BASE: &str = "http://localhost:11434/api"; -const CONFIG_MODEL_KEY: &str = "commit-analyzer.model"; const CONFIG_LANGUAGE_KEY: &str = "commit-analyzer.language"; 
const COMMIT_TYPES: &[&str] = &["feat", "fix", "docs", "style", "refactor", "test", "chore"]; +const DEFAULT_MODEL_REPO: &str = "unsloth/gemma-3-270m-it-GGUF"; +const DEFAULT_CONTEXT_SIZE: i32 = 1024; #[derive(Debug, Clone, PartialEq)] enum Language { @@ -101,50 +102,36 @@ impl Language { fn fetching_models(&self) -> &'static str { match self { - Language::English => "Fetching available Ollama models...", - Language::Chinese => "正在获取可用的 Ollama 模型...", + Language::English => "Searching for local GGUF models...", + Language::Chinese => "正在搜索本地 GGUF 模型...", } } fn available_models(&self) -> &'static str { match self { - Language::English => "\nAvailable models:", - Language::Chinese => "\n可用模型:", + Language::English => "\nDetected GGUF models:", + Language::Chinese => "\n检测到的 GGUF 模型:", } } fn select_model_prompt(&self) -> &'static str { match self { - Language::English => "\nSelect a model by number: ", - Language::Chinese => "\n请输入模型编号:", + Language::English => "\nEnter a model number or provide a full GGUF path: ", + Language::Chinese => "\n输入模型编号或直接提供 GGUF 文件路径:", } } fn model_set_as_default(&self) -> &'static str { match self { - Language::English => "Model '{}' set as default.", - Language::Chinese => "已将模型'{}'设置为默认模型。", - } - } - - fn ollama_connection_warning(&self) -> &'static str { - match self { - Language::English => "Warning: Failed to connect to Ollama: {}", - Language::Chinese => "警告:连接 Ollama 失败:{}", - } - } - - fn ensure_ollama_running(&self) -> &'static str { - match self { - Language::English => "Please ensure Ollama is running on localhost:11434", - Language::Chinese => "请确保 Ollama 正在 localhost:11434 上运行", + Language::English => "Model path ready: {}", + Language::Chinese => "模型路径已就绪:{}", } } fn no_default_model(&self) -> &'static str { match self { - Language::English => "No default model set. Please select a model.", - Language::Chinese => "未设置默认模型,请选择一个模型。", + Language::English => "No model path available. 
Please select a GGUF file.", + Language::Chinese => "当前没有可用的模型路径,请选择一个 GGUF 文件。", } } @@ -157,7 +144,9 @@ impl Language { fn use_edit_cancel_prompt(&self) -> &'static str { match self { - Language::English => "\nDo you want to (u)se this message, (e)dit it, or (c)ancel? [u/e/c]: ", + Language::English => { + "\nDo you want to (u)se this message, (e)dit it, or (c)ancel? [u/e/c]: " + } Language::Chinese => "\n您想要 (u) 使用此信息,(e) 编辑它,还是 (c) 取消?[u/e/c]:", } } @@ -211,17 +200,137 @@ impl Language { } } - fn ollama_not_accessible(&self) -> &'static str { + fn model_retrying_invalid_output(&self) -> &'static str { + match self { + Language::English => { + "Model response was invalid. Retrying with stricter instructions..." + } + Language::Chinese => "模型输出无效,正在使用更严格的提示重试...", + } + } + + fn model_failed_generate(&self) -> &'static str { + match self { + Language::English => { + "Model could not produce a valid commit message. Please enter one manually." + } + Language::Chinese => "模型未能生成有效的提交信息,请手动输入。", + } + } + + fn fallback_commit_generated(&self) -> &'static str { match self { - Language::English => "Ollama is not running or not accessible. 
Please start Ollama and ensure it's running on localhost:11434, then try again.", - Language::Chinese => "Ollama 未运行或不可访问。请启动 Ollama 并确保它在 localhost:11434 上运行,然后重试。", + Language::English => "\n\nGenerated a fallback commit message.", + Language::Chinese => "\n\n已生成备用提交信息。", + } + } + + fn truncated_diff_notice(&self) -> &'static str { + match self { + Language::English => "[Diff truncated to reduce context size.]", + Language::Chinese => "[为控制上下文长度,diff 已被截断。]", + } + } + + fn changed_files_heading(&self) -> &'static str { + match self { + Language::English => "Changed files:", + Language::Chinese => "变更文件:", + } + } + + fn file_omitted_notice(&self) -> &'static str { + match self { + Language::English => "(content omitted)", + Language::Chinese => "(内容已省略)", + } + } + + fn file_snippet_heading(&self) -> &'static str { + match self { + Language::English => "File:", + Language::Chinese => "文件:", + } + } + + fn truncated_body_notice(&self) -> &'static str { + match self { + Language::English => "[Additional hunks truncated]", + Language::Chinese => "[更多变更已截断]", } } fn no_models_found(&self) -> &'static str { match self { - Language::English => "No models found in Ollama. Please ensure Ollama is running and has models installed.", - Language::Chinese => "在 Ollama 中未找到模型。请确保 Ollama 正在运行并已安装模型。", + Language::English => "No GGUF models found in default locations. 
Download a model first or provide its path manually.", + Language::Chinese => "在默认位置未找到 GGUF 模型。请先下载模型或手动提供其路径。", + } + } + + fn enter_model_path_hint(&self) -> &'static str { + match self { + Language::English => "Hint: place models under ./models or ~/Library/Application Support/git-ca/models (macOS) or ~/.cache/git-ca/models.", + Language::Chinese => "提示:可将模型放在 ./models、~/Library/Application Support/git-ca/models(macOS)或 ~/.cache/git-ca/models 等目录。", + } + } + + fn model_file_missing(&self) -> &'static str { + match self { + Language::English => "Model file missing: {}", + Language::Chinese => "模型文件缺失:{}", + } + } + + fn model_extension_warning(&self) -> &'static str { + match self { + Language::English => "The file must have a .gguf extension.", + Language::Chinese => "文件必须为 .gguf 扩展名。", + } + } + + fn download_model_prompt(&self) -> &'static str { + match self { + Language::English => "Download a GGUF model (for example from https://huggingface.co/collections/ggml-org/gguf) and retry.", + Language::Chinese => "请先下载 GGUF 模型(例如来自 https://huggingface.co/collections/ggml-org/gguf),然后重试。", + } + } + + fn downloading_model(&self) -> &'static str { + match self { + Language::English => "Downloading model '{}' from Hugging Face...", + Language::Chinese => "正在从 Hugging Face 下载模型'{}'...", + } + } + + fn download_completed(&self) -> &'static str { + match self { + Language::English => "Model downloaded to: {}", + Language::Chinese => "模型已下载至:{}", + } + } + + fn auto_downloading_default(&self) -> &'static str { + match self { + Language::English => "No local models found. Downloading default model '{}'...", + Language::Chinese => "未找到本地模型,正在下载默认模型'{}'...", + } + } + + fn model_pull_hint(&self) -> &'static str { + match self { + Language::English => { + "Tip: run 'git ca model pull ' to download from Hugging Face." 
+ } + Language::Chinese => { + "提示:运行 'git ca model pull <仓库>' 可从 Hugging Face 下载模型。" + } + } + } + + fn model_pull_usage(&self) -> &'static str { + match self { + Language::English => "Usage: git ca model pull ", + Language::Chinese => "用法:git ca model pull <仓库>", } } @@ -237,8 +346,7 @@ impl Language { enum AppError { Git(git2::Error), Io(io::Error), - Http(reqwest::Error), - Json(serde_json::Error), + InputClosed, Custom(String), } @@ -247,8 +355,7 @@ impl fmt::Display for AppError { match self { AppError::Git(e) => write!(f, "Git error: {e}"), AppError::Io(e) => write!(f, "IO error: {e}"), - AppError::Http(e) => write!(f, "HTTP error: {e}"), - AppError::Json(e) => write!(f, "JSON error: {e}"), + AppError::InputClosed => write!(f, "Input stream closed"), AppError::Custom(msg) => write!(f, "{msg}"), } } @@ -268,15 +375,9 @@ impl From for AppError { } } -impl From for AppError { - fn from(err: reqwest::Error) -> Self { - AppError::Http(err) - } -} - -impl From for AppError { - fn from(err: serde_json::Error) -> Self { - AppError::Json(err) +impl From for AppError { + fn from(err: hf_hub::api::sync::ApiError) -> Self { + AppError::Custom(format!("Hugging Face API error: {err}")) } } @@ -294,6 +395,10 @@ impl From<&str> for AppError { type Result = std::result::Result; +fn debug_model_response(label: &str, response: &str) { + eprintln!("\n[git-ca] {label}\n~~~~\n{response}\n~~~~"); +} + fn find_git_repository(start_path: &Path) -> Option { let mut current_path = start_path.to_path_buf(); loop { @@ -313,195 +418,848 @@ fn get_diff() -> Result { Ok(diff) } -fn build_commit_prompt(diff: &str, language: &Language) -> String { +fn build_commit_prompt(diff: &str, language: &Language, attempt: usize) -> String { match language { - Language::English => format!( - "Analyze this git diff and provide a **single** commit message following the Git Flow format: + Language::English => { + let mut prompt = format!( + r#"SYSTEM: You are a commit message generator. 
You must output ONLY a commit message, nothing else. + +TASK: Analyze the git diff below and produce exactly ONE commit message in Git Flow format. + +FORMAT: (): + +EXAMPLES: +- feat(api): add user authentication endpoint +- fix(cli): resolve model loading timeout +- docs: update installation instructions +- refactor(llama): simplify token sampling logic +- chore(deps): update dependencies +- test: add unit tests for diff parsing + +RULES: +1. MUST be one of: feat, fix, docs, style, refactor, test, chore +2. is optional, use kebab-case when needed (e.g., cli, api, docs) +3. is imperative, concise (<= 72 chars) +4. NO explanations, NO markdown fences, NO extra text +5. Output ONLY the commit message, nothing else + +HERE IS THE DIFF: +{diff} -(): +YOUR OUTPUT (commit message only):"# + ); - + if attempt > 0 { + prompt.push_str( + "\n\nCRITICAL: Previous output was invalid. You MUST output ONLY a commit message starting with '(): '. NO other text, explanations, or formatting.", + ); + } -Where: -- is one of: feat, fix, docs, style, refactor, test, chore -- is optional and represents the module affected -- is a short description in the imperative mood -- provides detailed description (optional) + prompt + } + Language::Chinese => { + let mut prompt = format!( + r#"系统:这是一个**任务指令**,不是对话。你的任务是直接生成提交信息,**不要回复或回应任何指令**。 + +任务:分析以下 git diff,生成一个符合 Git Flow 格式的提交信息。 + +**重要**:直接输出提交信息,**不要**说"好的"、"请使用..."、"我理解"等回复,直接生成即可。 + +格式示例: +feat(api): 添加用户认证接口 +fix(cli): 解决模型加载超时问题 +docs: 更新安装说明 +refactor(llama): 简化令牌采样逻辑 +chore(deps): 更新依赖包 +test: 添加 diff 解析单元测试 +style: 调整代码格式 +style(ui): 修改按钮颜色 + +**必须遵循的规则**: +1. <类型> 必须是以下之一:feat、fix、docs、style、refactor、test、chore +2. <范围> 可选,使用 kebab-case(如 cli、api、docs、ui) +3. <主题> 使用祈使语气,简练(≤72 字符) +4. **绝对不要**输出任何解释、对话、回复或额外文字 +5. **首行**必须是:`<类型>(<范围>): <主题>` +6. **不要**使用markdown、不添加代码块、不加符号 + +以下是需要分析的 diff: -Important guidelines: -1. Choose only ONE type that best represents the primary purpose of the changes. -2. 
Summarize ALL changes into a single, concise subject line. -3. Do not include a body or footer in the commit message. -4. Do not mention or reference any issue numbers. -5. Focus solely on the most significant change if there are multiple unrelated changes. -6. **Ensure that only one commit message is generated.** -7. **The commit message content must be written in English language.** -8. **Do not use any other languages except English for the content.** +{diff} -Here's the diff to analyze: +**请直接生成提交信息(不要任何回复或解释):**"# + ); -{diff} + if attempt > 0 { + prompt.push_str( + "\n\n**严重错误**:上次输出不符合格式!**立即停止回复和对话**,**必须**直接输出一个以 '<类型>(<范围>): <主题>' 开头的提交信息。**不要**说'好的'、'理解了'、'请重新试'等任何回复文字。", + ); + } -Your task: -1. Analyze the given git diff. -2. **Generate only one** commit message strictly following the Git Flow format described above. -3. Ensure your response contains **ONLY** the formatted commit message, without any additional explanations or markdown. -4. **The commit message content (subject and body) must be written in English.** + prompt + } + } +} -Valid response example: -feat(user-auth): implement password reset functionality +fn analyze_diff( + diff: &str, + model_path: &Path, + language: &Language, + context_size: i32, +) -> Result> { + println!("{}", language.generating_commit_message()); + eprintln!("\x1b[90m{}\x1b[0m", language.this_may_take_moment()); -Add a new endpoint for password reset requests. -Implement email sending for reset links. 
+ let mut session = LlamaSession::new(model_path, context_size).map_err(AppError::from)?; + const MAX_ATTEMPTS: usize = 2; + + let diff_variants = build_diff_variants(diff, language, context_size); + + for attempt in 0..MAX_ATTEMPTS { + let fragment = diff_variants + .get(attempt) + .or_else(|| diff_variants.last()) + .unwrap(); + let prompt = build_commit_prompt(fragment, language, attempt); + let response = match session.infer(&prompt, 256) { + Ok(output) => output, + Err(err) => { + eprintln!("{err}"); + if attempt + 1 < MAX_ATTEMPTS { + println!("{}", language.model_retrying_invalid_output()); + continue; + } else { + println!("{}", language.model_failed_generate()); + return Ok(None); + } + } + }; -Remember: Your response should only include the English commit message, nothing else." - ), - Language::Chinese => format!( - "分析这个 git diff 并提供一个遵循 Git Flow 格式的提交信息: + println!("{}", language.processing_response()); -<类型>(<范围>): <主题> + if let Some(processed) = process_model_response(&response) { + if is_valid_commit_message(&processed, language) { + println!("{processed}"); + println!("{}", language.commit_message_generated()); + return Ok(Some(processed)); + } else { + debug_model_response("model output failed validation", &response); + } + } else { + debug_model_response("model output did not contain a commit subject", &response); + } -<正文> + if attempt + 1 < MAX_ATTEMPTS { + println!("{}", language.model_retrying_invalid_output()); + } + } -其中: -- <类型> 是以下之一:feat, fix, docs, style, refactor, test, chore -- <范围> 是可选的,表示受影响的模块 -- <主题> 是命令式语气的简短描述 -- <正文> 提供详细描述(可选) + Ok(None) +} -重要指导原则: -1. 只选择一个最能代表变更主要目的的类型。 -2. 将所有变更总结为一个简洁的主题行。 -3. 不要在提交信息中包含正文或脚注。 -4. 不要提及或引用任何问题编号。 -5. 如果有多个不相关的变更,只关注最重要的变更。 -6. **确保只生成一个提交信息。** -7. **提交信息的内容必须使用简体中文,包括主题和正文。** -8. 
**不允许使用英文,除了 Git Flow 格式的类型关键字(feat、fix、docs 等)。** +fn process_model_response(response: &str) -> Option { + let response_without_thinking = if response.trim_start().starts_with("") { + response + .find("") + .map(|end_index| response[(end_index + "".len())..].trim_start()) + .unwrap_or(response) + } else { + response + }; -以下是要分析的 diff: + let lines: Vec<&str> = response_without_thinking + .lines() + .filter(|line| !line.starts_with("Fixes #") && !line.starts_with("Closes #")) + .collect(); -{diff} + if let Some((index, subject_line)) = lines.iter().enumerate().find_map(|(i, line)| { + let trimmed = line.trim(); + if is_commit_subject(trimmed) { + Some((i, trimmed.to_string())) + } else { + None + } + }) { + let mut message_lines = vec![subject_line]; + let mut j = index + 1; + + while j < lines.len() { + let trimmed = lines[j].trim(); + + if trimmed.is_empty() { + let mut k = j + 1; + let mut next_non_empty: Option<&str> = None; + while k < lines.len() { + let candidate = lines[k].trim(); + if !candidate.is_empty() { + next_non_empty = Some(candidate); + break; + } + k += 1; + } + + if let Some(next_line) = next_non_empty { + if is_commit_subject(next_line) || looks_like_instruction(next_line) { + break; + } + } else { + break; + } + + if !message_lines.last().map(|s| s.is_empty()).unwrap_or(false) { + message_lines.push(String::new()); + } + } else if is_commit_subject(trimmed) || looks_like_instruction(trimmed) { + break; + } else { + message_lines.push(trimmed.to_string()); + } -你的任务: -1. 分析给定的 git diff。 -2. **生成一个**严格遵循上述 Git Flow 格式的提交信息。 -3. 确保你的回复**只**包含格式化的提交信息,不要有任何额外的解释或 markdown。 -4. 提交信息**必须**以 <类型> 开头并遵循所示的确切结构。 -5. 
**提交信息的内容(主题和正文)必须使用简体中文。** + j += 1; + } -有效回复的示例: -feat(用户认证): 实现密码重置功能 + let message = message_lines.join("\n").trim().to_string(); + if !message.is_empty() { + return Some(message); + } + } -添加密码重置请求的新端点。 -实现重置链接的邮件发送。 + None +} -记住:你的回复应该只包含中文的提交信息,不要有其他内容。" - ) +fn is_commit_subject(line: &str) -> bool { + if line.is_empty() { + return false; } + + let lower = line.to_ascii_lowercase(); + COMMIT_TYPES.iter().any(|commit_type| { + if !lower.starts_with(commit_type) || lower.len() <= commit_type.len() { + return false; + } + + match lower.as_bytes().get(commit_type.len()) { + Some(b'(') | Some(b':') => true, + _ => false, + } + }) } -fn analyze_diff(diff: &str, model: &str, language: &Language) -> Result { - let client = create_generation_client()?; - let prompt = build_commit_prompt(diff, language); +fn looks_like_instruction(line: &str) -> bool { + if line.is_empty() { + return false; + } - println!("{}", language.generating_commit_message()); - eprintln!("\x1b[90m{}\x1b[0m", language.this_may_take_moment()); - - let response = client - .post(format!("{OLLAMA_API_BASE}/generate")) - .json(&json!({ - "model": model, - "prompt": prompt, - "stream": true - })) - .send() - .map_err(|e| { - if e.is_timeout() { - AppError::Custom(format!( - "Request timed out after 2 minutes. This might happen with large models or slow systems.\n\ - Try using a smaller/faster model with 'git ca model' or ensure your system has sufficient resources." - )) - } else if e.is_connect() { - AppError::Custom(format!( - "Failed to connect to Ollama at {}. 
Please ensure Ollama is running and accessible.", - OLLAMA_API_BASE - )) - } else { - AppError::Custom(format!("Network error: {}", e)) - } - })?; + let lower = line.to_ascii_lowercase(); + const KEYWORDS: &[&str] = &[ + "your task:", + "your task is", + "your response", + "respond with", + "return only", + "remember:", + "guidelines:", + "rules:", + "important:", + "ensure your response", + "ensure that your response", + "make sure your response", + "do not include any", + "do not add any", + "commit message content must", + "the commit message must", + "请仅返回", + "请只返回", + "记住:", + "记住:", + "请勿包含", + "回复中只能", + "请只提供", + ]; + + KEYWORDS.iter().any(|keyword| lower.contains(keyword)) +} - if !response.status().is_success() { - return Err(AppError::Custom(format!( - "Unable to get response from Ollama. Status code: {}. Please ensure Ollama is running and accessible.", - response.status() - ))); +#[derive(Default)] +struct DiffSummary { + files: Vec, + scope_candidates: Vec, + has_docs: bool, + has_code: bool, + docs_only: bool, + has_main: bool, + has_llama: bool, + has_retry: bool, + has_kv_reset: bool, + new_files: HashSet, + has_cargo_toml: bool, + has_cargo_lock: bool, + has_node_manifest: bool, + has_node_lock: bool, +} + +impl DiffSummary { + fn has_docs_only(&self) -> bool { + self.has_docs && !self.has_code && self.docs_only } +} - let mut full_response = String::new(); - let reader = BufReader::new(response); - io::stdout().flush()?; +fn analyze_diff_summary(diff: &str) -> DiffSummary { + let mut summary = DiffSummary { + docs_only: true, + ..Default::default() + }; - println!("{}", language.processing_response()); + let mut seen_files = HashSet::new(); + let mut current_file: Option = None; + + for line in diff.lines() { + if let Some(rest) = line.strip_prefix("diff --git ") { + let mut parts = rest.split_whitespace(); + let _a = parts.next(); + let b = parts.next(); + if let Some(b) = b { + let path = b.strip_prefix("b/").unwrap_or(b).to_string(); + 
current_file = Some(path.clone()); + + if seen_files.insert(path.clone()) { + summary.files.push(path.clone()); + + let scope = path_to_scope(&path); + if !scope.is_empty() && !summary.scope_candidates.contains(&scope) { + summary.scope_candidates.push(scope); + } + + let ext = path.rsplit('.').next().unwrap_or(""); + let is_doc = matches!(ext, "md" | "rst" | "adoc" | "txt"); + if is_doc { + summary.has_docs = true; + } else { + summary.docs_only = false; + } + if ext == "rs" { + summary.has_code = true; + } + + if path == "src/main.rs" { + summary.has_main = true; + } + if path == "src/llama.rs" { + summary.has_llama = true; + } + if path == "Cargo.toml" { + summary.has_cargo_toml = true; + summary.docs_only = false; + } + if path == "Cargo.lock" { + summary.has_cargo_lock = true; + summary.docs_only = false; + } + if path.ends_with("package.json") { + summary.has_node_manifest = true; + summary.docs_only = false; + } + if path.contains("pnpm-lock") + || path.contains("package-lock") + || path.contains("yarn.lock") + { + summary.has_node_lock = true; + summary.docs_only = false; + } + } + } + } else if line.starts_with("new file mode") { + if let Some(file) = current_file.clone() { + summary.new_files.insert(file); + } + } else if line.starts_with('+') { + let lower = line.to_ascii_lowercase(); + if lower.contains("retry") || lower.contains("stricter instructions") { + summary.has_retry = true; + } + if lower.contains("kv_self_clear") || lower.contains("kv cache") { + summary.has_kv_reset = true; + } + } + } + + summary +} + +#[derive(Default)] +struct FileSection { + path: String, + additions: usize, + deletions: usize, + snippet: Vec, + omitted: bool, +} - for line in reader.lines() { - let line = line.map_err(|e| AppError::Custom(format!("Failed to read response: {}", e)))?; - if line.is_empty() { +fn build_diff_summary(diff: &str, language: &Language, context_size: i32) -> String { + const SNIPPET_LINE_LIMIT: usize = 120; + const PER_FILE_SNIPPET_LIMIT: usize 
= 1200; + + let max_chars = (context_size as usize) + .saturating_mul(3) + .saturating_sub(512) + .max(2048); + let diff_truncated = diff.len() > max_chars; + + let mut sections: Vec = Vec::new(); + let mut current: Option = None; + + for line in diff.lines() { + if let Some(path) = line + .strip_prefix("diff --git ") + .and_then(|rest| rest.split_whitespace().nth(1)) + .map(|b_path| b_path.strip_prefix("b/").unwrap_or(b_path).to_string()) + { + if let Some(section) = current.take() { + sections.push(section); + } + let mut section = FileSection { + path, + ..Default::default() + }; + if is_generated_or_large_file(§ion.path) { + section.omitted = true; + } + current = Some(section); + continue; + } + + let Some(section) = current.as_mut() else { + continue; + }; + + if line.starts_with("+++") || line.starts_with("---") { continue; } - if let Ok(json) = serde_json::from_str::(&line) { - if let Some(response_part) = json["response"].as_str() { - print!("{response_part}"); - io::stdout().flush()?; - full_response.push_str(response_part); + if line.starts_with("@@") { + if !section.omitted && section.snippet.len() < SNIPPET_LINE_LIMIT { + section.snippet.push(line.to_string()); } - - if json["done"].as_bool().unwrap_or(false) { + continue; + } + + if line.starts_with('+') && !line.starts_with("+++") { + section.additions += 1; + } else if line.starts_with('-') && !line.starts_with("---") { + section.deletions += 1; + } + + if section.omitted { + continue; + } + + let snippet_chars: usize = section.snippet.iter().map(|l| l.len()).sum(); + if section.snippet.len() >= SNIPPET_LINE_LIMIT || snippet_chars >= PER_FILE_SNIPPET_LIMIT { + section.omitted = true; + section.snippet.clear(); + continue; + } + + section.snippet.push(line.to_string()); + } + + if let Some(section) = current.take() { + sections.push(section); + } + + if sections.is_empty() { + return diff + .chars() + .take(diff.len().min(max_chars)) + .collect::(); + } + + let mut output = String::new(); + 
output.push_str(language.changed_files_heading()); + output.push('\n'); + + for section in §ions { + let note = if section.omitted { + format!(" {}", language.file_omitted_notice()) + } else { + String::new() + }; + output.push_str(&format!( + "- {} (+{} / -{}){}\n", + section.path, section.additions, section.deletions, note + )); + } + + output.push('\n'); + + let mut remaining_chars = max_chars.saturating_sub(output.len()); + + for section in sections { + if section.omitted { + continue; + } + if remaining_chars <= 0 { + output.push_str(language.truncated_diff_notice()); + output.push('\n'); + break; + } + + output.push_str(language.file_snippet_heading()); + output.push(' '); + output.push_str(§ion.path); + output.push('\n'); + + for line in section.snippet { + if line.len() + 1 > remaining_chars { + output.push_str(language.truncated_body_notice()); + output.push('\n'); + remaining_chars = 0; break; } + output.push_str(&line); + output.push('\n'); + remaining_chars = remaining_chars.saturating_sub(line.len() + 1); } + + output.push('\n'); + } + + if diff_truncated && !output.contains(language.truncated_diff_notice()) { + output.push_str(language.truncated_diff_notice()); + output.push('\n'); } - - println!("{}", language.commit_message_generated()); - Ok(process_ollama_response(&full_response)) + + output } -fn process_ollama_response(response: &str) -> String { - let response_without_thinking = if response.trim_start().starts_with("") { - response.find("") - .map(|end_index| response[(end_index + "".len())..].trim_start()) - .unwrap_or(response) +fn is_generated_or_large_file(path: &str) -> bool { + let lower = path.to_ascii_lowercase(); + lower.contains("pnpm-lock") + || lower.contains("package-lock") + || lower.contains("yarn.lock") + || lower.contains("cargo.lock") + || lower.ends_with(".min.js") + || lower.ends_with(".min.css") +} + +fn build_diff_raw_tail(diff: &str, language: &Language, context_size: i32) -> String { + let max_chars = (context_size as 
usize) + .saturating_mul(3) + .saturating_sub(512) + .max(2048); + + if diff.len() <= max_chars { + return diff.to_string(); + } + + let mut chars: Vec = diff.chars().collect(); + if chars.len() > max_chars { + chars.drain(0..chars.len() - max_chars); + } + + let mut trimmed: String = chars.into_iter().collect(); + if let Some(pos) = trimmed.find("diff --git ") { + trimmed = trimmed[pos..].to_string(); + } + + format!("{}\n\n{}", language.truncated_diff_notice(), trimmed) +} + +fn build_diff_variants(diff: &str, language: &Language, context_size: i32) -> Vec { + let summary = build_diff_summary(diff, language, context_size); + let raw = build_diff_raw_tail(diff, language, context_size); + if summary.trim() == raw.trim() { + vec![summary] } else { - response + vec![summary, raw] + } +} + +fn path_to_scope(path: &str) -> String { + let mut trimmed = path.trim_start_matches("./"); + if trimmed.starts_with("a/") || trimmed.starts_with("b/") { + trimmed = &trimmed[2..]; + } + if trimmed.is_empty() { + return String::new(); + } + let mut parts = trimmed.split('/'); + let first = parts.next().unwrap_or(trimmed); + let candidate = if first == "src" { + parts.next().unwrap_or(first) + } else { + first }; + let candidate = candidate.split('.').next().unwrap_or(candidate); + slugify(candidate) +} - let lines: Vec<&str> = response_without_thinking - .lines() - .filter(|line| !line.starts_with("Fixes #") && !line.starts_with("Closes #")) +fn slugify(input: &str) -> String { + let mut slug = String::new(); + let mut last_dash = false; + for ch in input.chars() { + if ch.is_ascii_alphanumeric() { + slug.push(ch.to_ascii_lowercase()); + last_dash = false; + } else if !last_dash { + slug.push('-'); + last_dash = true; + } + } + slug.trim_matches('-').to_string() +} + +fn humanize_slug(slug: &str) -> String { + if slug.eq_ignore_ascii_case("cli") { + return "CLI".to_string(); + } + if slug.eq_ignore_ascii_case("kv") { + return "KV".to_string(); + } + if 
slug.eq_ignore_ascii_case("deps") { + return "Dependencies".to_string(); + } + let parts: Vec = slug + .split('-') + .filter(|part| !part.is_empty()) + .map(|part| { + let mut chars = part.chars(); + if let Some(first) = chars.next() { + format!( + "{}{}", + first.to_ascii_uppercase(), + chars.as_str().to_ascii_lowercase() + ) + } else { + String::new() + } + }) .collect(); + if parts.is_empty() { + "Project".to_string() + } else { + parts.join(" ") + } +} - let mut processed_lines = Vec::new(); - let mut started = false; +enum SubjectTemplate { + StabilizeCommitGeneration, + SyncDocsAndCode, + UpdateDocs, + IntroduceScope, + RefineScope, + UpdateScope, + UpdateDeps, +} - for line in lines { - if !started && COMMIT_TYPES.iter().any(|&t| line.starts_with(t)) { - started = true; +fn build_subject(language: &Language, template: SubjectTemplate, scope: &str) -> String { + match (language, template) { + (Language::English, SubjectTemplate::StabilizeCommitGeneration) => { + "stabilize commit message generation".to_string() + } + (Language::Chinese, SubjectTemplate::StabilizeCommitGeneration) => { + "稳定提交信息生成流程".to_string() } - if started { - processed_lines.push(line); + (Language::English, SubjectTemplate::UpdateDeps) => "update dependencies".to_string(), + (Language::Chinese, SubjectTemplate::UpdateDeps) => "更新依赖".to_string(), + (Language::English, SubjectTemplate::SyncDocsAndCode) => { + "align docs and code changes".to_string() } + (Language::Chinese, SubjectTemplate::SyncDocsAndCode) => "同步文档与代码更新".to_string(), + (Language::English, SubjectTemplate::UpdateDocs) => { + format!("update {} documentation", scope) + } + (Language::Chinese, SubjectTemplate::UpdateDocs) => { + format!("更新{}文档", scope) + } + (Language::English, SubjectTemplate::IntroduceScope) => format!("add {}", scope), + (Language::Chinese, SubjectTemplate::IntroduceScope) => format!("新增{}", scope), + (Language::English, SubjectTemplate::RefineScope) => format!("refine {}", scope), + (Language::Chinese, 
SubjectTemplate::RefineScope) => format!("优化{}", scope), + (Language::English, SubjectTemplate::UpdateScope) => format!("update {}", scope), + (Language::Chinese, SubjectTemplate::UpdateScope) => format!("更新{}", scope), + } +} + +fn build_scope_readable(scopes: &[String], language: &Language) -> String { + if scopes.is_empty() { + return match language { + Language::English => "project".to_string(), + Language::Chinese => "项目".to_string(), + }; } - processed_lines.join("\n") + let words: Vec = scopes.iter().map(|slug| humanize_slug(slug)).collect(); + match (language, words.len()) { + (Language::English, 1) => words[0].clone(), + (Language::English, 2) => format!("{} and {}", words[0], words[1]), + (Language::English, _) => format!("{} and more", words[0]), + (Language::Chinese, 1) => words[0].clone(), + (Language::Chinese, 2) => format!("{}和{}", words[0], words[1]), + (Language::Chinese, _) => format!("{}等", words[0]), + } +} + +fn build_scope_slug(scopes: &[String]) -> String { + if scopes.is_empty() { + return String::new(); + } + if scopes.iter().any(|s| s == "deps") { + return "deps".to_string(); + } + if scopes.iter().any(|s| s == "docs") && scopes.len() == 1 { + return "docs".to_string(); + } + scopes.iter().take(2).cloned().collect::>().join("-") +} + +fn compute_scopes(summary: &DiffSummary) -> Vec { + fn push_unique(scopes: &mut Vec, value: &str) { + if !scopes.iter().any(|s| s == value) { + scopes.push(value.to_string()); + } + } + + let mut scopes = Vec::new(); + + if summary.has_main { + push_unique(&mut scopes, "cli"); + } + if summary.has_llama { + push_unique(&mut scopes, "llama"); + } + if summary.has_docs_only() { + push_unique(&mut scopes, "docs"); + } + if summary.has_cargo_toml || summary.has_cargo_lock { + push_unique(&mut scopes, "deps"); + } + if summary.has_node_manifest || summary.has_node_lock { + push_unique(&mut scopes, "deps"); + } + + for candidate in &summary.scope_candidates { + if scopes.len() >= 3 { + break; + } + push_unique(&mut 
scopes, candidate); + } + + if scopes.is_empty() { + push_unique(&mut scopes, "project"); + } + + scopes +} + +fn generate_fallback_commit_message(diff: &str, language: &Language) -> Option { + let summary = analyze_diff_summary(diff); + if summary.files.is_empty() { + return None; + } + + let mut scopes = compute_scopes(&summary); + + let has_deps_change = summary.has_cargo_lock + || summary.has_cargo_toml + || summary.has_node_lock + || summary.has_node_manifest; + let has_runtime_change = summary.has_main || summary.has_llama; + + let (commit_type, template) = if summary.has_retry || summary.has_kv_reset { + ("fix", SubjectTemplate::StabilizeCommitGeneration) + } else if has_runtime_change { + ("fix", SubjectTemplate::RefineScope) + } else if summary.has_docs && summary.has_code { + ("fix", SubjectTemplate::SyncDocsAndCode) + } else if summary.has_docs_only() { + ("docs", SubjectTemplate::UpdateDocs) + } else if has_deps_change && !summary.has_code { + ("chore", SubjectTemplate::UpdateDeps) + } else if summary.has_code { + if !summary.new_files.is_empty() { + ("feat", SubjectTemplate::IntroduceScope) + } else { + ("refactor", SubjectTemplate::RefineScope) + } + } else { + ("chore", SubjectTemplate::UpdateScope) + }; + + if commit_type == "chore" && matches!(template, SubjectTemplate::UpdateDeps) { + scopes.clear(); + scopes.push("deps".to_string()); + } + + let scope_slug = build_scope_slug(&scopes); + let scope_readable = build_scope_readable(&scopes, language); + let subject = build_subject(language, template, &scope_readable); + + Some(if scope_slug.is_empty() { + format!("{commit_type}: {subject}") + } else { + format!("{commit_type}({scope_slug}): {subject}") + }) +} +fn is_valid_commit_message(message: &str, language: &Language) -> bool { + let subject_line = message + .lines() + .map(|line| line.trim()) + .find(|line| !line.is_empty()); + + let subject_line = match subject_line { + Some(line) => line, + None => return false, + }; + + if 
parse_commit_subject(subject_line).is_none() { + return false; + } + + if let Language::English = language { + if !subject_line.is_ascii() { + return false; + } + } + + true +} + +fn parse_commit_subject(line: &str) -> Option<(&'static str, Option<&str>, &str)> { + for commit_type in COMMIT_TYPES { + if line.starts_with(commit_type) { + let rest = &line[commit_type.len()..]; + if rest.starts_with('(') { + let end = rest.find("):")?; + let scope = rest[1..end].trim(); + if scope.is_empty() { + return None; + } + let subject = rest[end + 2..].trim(); + if subject.is_empty() { + return None; + } + return Some((commit_type, Some(scope), subject)); + } else if rest.starts_with(':') { + let subject = rest[1..].trim(); + if subject.is_empty() { + return None; + } + return Some((commit_type, None, subject)); + } + } + } + None } fn get_user_input(prompt: &str) -> Result { print!("{prompt}"); io::stdout().flush()?; let mut input = String::new(); - io::stdin().read_line(&mut input)?; + let bytes = io::stdin().read_line(&mut input)?; + if bytes == 0 { + return Err(AppError::InputClosed); + } Ok(input.trim().to_string()) } @@ -536,43 +1294,6 @@ impl GitConfig { } } -fn create_http_client() -> Result { - Ok(Client::builder().timeout(Duration::from_secs(5)).build()?) -} - -fn create_generation_client() -> Result { - let mut headers = HeaderMap::new(); - headers.insert(HOST, HeaderValue::from_static("localhost:11434")); - - Ok(Client::builder() - .timeout(Duration::from_secs(120)) // 2 minutes for AI generation - .default_headers(headers) - .build()?) -} - -fn get_ollama_models() -> Result> { - let client = create_http_client()?; - let response = client.get(format!("{OLLAMA_API_BASE}/tags")).send()?; - - if !response.status().is_success() { - return Err(AppError::Custom(format!( - "Unable to get models from Ollama. 
Status code: {}", - response.status() - ))); - } - - let data: Value = response.json()?; - let models = data["models"] - .as_array() - .ok_or("Invalid response format")? - .iter() - .filter_map(|model| model["name"].as_str()) - .map(String::from) - .collect(); - - Ok(models) -} - fn select_language(git_config: &mut GitConfig) -> Result { let current_lang = get_language(git_config); println!("{}", current_lang.available_languages()); @@ -589,7 +1310,12 @@ fn select_language(git_config: &mut GitConfig) -> Result { }; git_config.set(CONFIG_LANGUAGE_KEY, choice.to_string())?; - println!("{}", choice.language_set_to().replace("{}", &choice.display_name())); + println!( + "{}", + choice + .language_set_to() + .replace("{}", &choice.display_name()) + ); Ok(choice) } @@ -601,112 +1327,656 @@ fn get_language(git_config: &GitConfig) -> Language { .unwrap_or(Language::English) } -fn select_default_model(git_config: &mut GitConfig, language: &Language) -> Result { +fn home_dir() -> Option { + #[cfg(target_os = "windows")] + { + env::var("USERPROFILE").ok().map(PathBuf::from) + } + #[cfg(not(target_os = "windows"))] + { + env::var("HOME").ok().map(PathBuf::from) + } +} + +fn default_model_dirs() -> Vec { + let mut dirs = Vec::new(); + + if let Ok(current) = env::current_dir() { + dirs.push(current.join("models")); + } + + if let Some(home) = home_dir() { + dirs.push(home.join(".cache/git-ca/models")); + dirs.push(home.join(".cache/git-ca")); + dirs.push(home.join(".local/share/git-ca/models")); + dirs.push(home.join("Library/Application Support/git-ca/models")); + } + + dirs +} + +fn models_root_dir() -> Result { + if let Some(home) = home_dir() { + Ok(home.join(".cache/git-ca/models")) + } else { + Ok(env::current_dir()?.join("models")) + } +} + +fn model_record_candidates() -> Vec { + let mut candidates = Vec::new(); + if let Some(home) = home_dir() { + candidates.push(home.join(".cache/git-ca/default-model.path")); + } + if let Ok(current) = env::current_dir() { + 
candidates.push(current.join(".git-ca/default-model.path")); + } + candidates +} + +fn load_persisted_model_path() -> Option { + for record in model_record_candidates() { + if !record.is_file() { + continue; + } + match fs::read_to_string(&record) { + Ok(contents) => { + let trimmed = contents.trim(); + if !trimmed.is_empty() { + return Some(trimmed.to_string()); + } + } + Err(err) => { + eprintln!( + "[git-ca] warning: could not read persisted model path ({}): {err}", + record.display() + ); + } + } + } + None +} + +fn persist_model_path(path: &Path) { + let mut last_error: Option = None; + let serialized = path.to_string_lossy(); + for record in model_record_candidates() { + if let Some(parent) = record.parent() { + if let Err(err) = fs::create_dir_all(parent) { + last_error = Some(err.to_string()); + continue; + } + } + match fs::write(&record, serialized.as_ref()) { + Ok(_) => return, + Err(err) => { + last_error = Some(err.to_string()); + } + } + } + if let Some(err) = last_error { + eprintln!( + "[git-ca] warning: could not persist model path ({}): {err}", + path.display() + ); + } +} + +fn clear_persisted_model_path() { + for record in model_record_candidates() { + if record.is_file() { + if let Err(err) = fs::remove_file(&record) { + eprintln!( + "[git-ca] warning: could not clear cached model path ({}): {err}", + record.display() + ); + } + } + } +} + +fn is_gguf(path: &Path) -> bool { + path.extension() + .and_then(|ext| ext.to_str()) + .map(|ext| ext.eq_ignore_ascii_case("gguf")) + .unwrap_or(false) +} + +fn expand_model_path(input: &str) -> PathBuf { + let trimmed = input.trim(); + + if trimmed == "~" { + if let Some(home) = home_dir() { + return home; + } + } + + if let Some(stripped) = trimmed.strip_prefix("~/") { + if let Some(home) = home_dir() { + return home.join(stripped); + } + } + + if let Some(stripped) = trimmed.strip_prefix("~\\") { + if let Some(home) = home_dir() { + return home.join(stripped); + } + } + + PathBuf::from(trimmed) +} + +fn 
find_local_models() -> Vec { + let mut seen = HashSet::new(); + let mut found = Vec::new(); + + for dir in default_model_dirs() { + if !dir.is_dir() { + continue; + } + + if let Ok(entries) = fs::read_dir(&dir) { + for entry in entries.flatten() { + let path = entry.path(); + if path.is_file() && is_gguf(&path) && seen.insert(path.clone()) { + found.push(path); + } + } + } + } + + found.sort_by(|a, b| a.to_string_lossy().cmp(&b.to_string_lossy())); + found +} + +fn download_model_from_hub(repo_id: &str, language: &Language) -> Result { + let api = Api::new() + .map_err(|e| AppError::Custom(format!("Failed to initialize Hugging Face client: {e}")))?; + let repo = api.model(repo_id.to_string()); + let info = repo.info().map_err(|e| { + AppError::Custom(format!( + "Failed to fetch repository '{repo_id}' metadata: {e}" + )) + })?; + + let mut fallback: Option<&str> = None; + let mut preferred: Option<&str> = None; + + for sibling in &info.siblings { + let name = sibling.rfilename.as_str(); + let lower = name.to_ascii_lowercase(); + if !lower.ends_with(".gguf") { + continue; + } + + if fallback.is_none() { + fallback = Some(name); + } + + if lower.contains("q4") { + preferred = Some(name); + break; + } + } + + let filename = preferred.or(fallback).ok_or_else(|| { + AppError::Custom(format!("No GGUF files found in repository '{repo_id}'")) + })?; + + println!("{}", language.downloading_model().replace("{}", repo_id)); + let source_path = repo.get(filename).map_err(|e| { + AppError::Custom(format!( + "Failed to download '{}' from '{}': {e}", + filename, repo_id + )) + })?; + + let dest_dir = models_root_dir()?; + fs::create_dir_all(&dest_dir)?; + + let base_name = Path::new(filename) + .file_name() + .and_then(|os| os.to_str()) + .unwrap_or(filename); + let sanitized_repo = repo_id.replace(['/', '\\'], "__"); + let dest_file_name = format!("{sanitized_repo}__{base_name}"); + let dest_path = dest_dir.join(dest_file_name); + + if !dest_path.exists() { + 
fs::copy(&source_path, &dest_path)?; + } + + let canonical = fs::canonicalize(&dest_path).unwrap_or(dest_path.clone()); + println!( + "{}", + language + .download_completed() + .replace("{}", &canonical.to_string_lossy()) + ); + Ok(canonical) +} + +fn ensure_default_model(language: &Language) -> Result> { + if find_local_models().is_empty() { + println!( + "{}", + language + .auto_downloading_default() + .replace("{}", DEFAULT_MODEL_REPO) + ); + let downloaded = download_model_from_hub(DEFAULT_MODEL_REPO, language)?; + let canonical = fs::canonicalize(&downloaded).unwrap_or(downloaded); + persist_model_path(&canonical); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &canonical.to_string_lossy()) + ); + return Ok(Some(canonical)); + } + + Ok(None) +} + +fn get_model_path(language: &Language) -> Result { + if let Some(stored) = load_persisted_model_path() { + let expanded = expand_model_path(&stored); + if expanded.is_file() && is_gguf(&expanded) { + let canonical = fs::canonicalize(&expanded).unwrap_or(expanded); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &canonical.to_string_lossy()) + ); + return Ok(canonical); + } else { + println!( + "{}", + language + .model_file_missing() + .replace("{}", &expanded.to_string_lossy()) + ); + clear_persisted_model_path(); + } + } + + if let Some(downloaded) = ensure_default_model(language)? 
{ + return Ok(downloaded); + } + + let models = find_local_models(); + if models.is_empty() { + println!("{}", language.no_default_model()); + println!("{}", language.model_pull_hint()); + return select_model_path(language); + } + + if models.len() == 1 { + let canonical = fs::canonicalize(&models[0]).unwrap_or_else(|_| models[0].clone()); + persist_model_path(&canonical); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &canonical.to_string_lossy()) + ); + return Ok(canonical); + } + + select_model_path(language) +} + +fn select_model_path(language: &Language) -> Result { println!("{}", language.fetching_models()); - - let models = get_ollama_models()?; + + let models = find_local_models(); if models.is_empty() { - return Err(language.no_models_found().into()); + println!("{}", language.no_models_found()); + println!("{}", language.model_pull_hint()); + } else { + println!("{}", language.available_models()); + for (i, model) in models.iter().enumerate() { + println!("{}. {}", i + 1, model.display()); + } } - println!("{}", language.available_models()); - for (i, model) in models.iter().enumerate() { - println!("{}. 
{}", i + 1, model); + if !io::stdin().is_terminal() { + if let Some(first) = models.first() { + let canonical = fs::canonicalize(first).unwrap_or_else(|_| first.clone()); + persist_model_path(&canonical); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &canonical.to_string_lossy()) + ); + return Ok(canonical); + } + return Err(AppError::Custom(language.no_models_found().to_string())); } - let choice = loop { - let input = get_user_input(&language.select_model_prompt())?; - match input.parse::() { - Ok(num) if num > 0 && num <= models.len() => break num - 1, - _ => println!("{}", language.invalid_selection()), + println!("{}", language.enter_model_path_hint()); + + loop { + let input = match get_user_input(&language.select_model_prompt()) { + Ok(value) => value, + Err(AppError::InputClosed) => { + if let Some(first) = models.first() { + let canonical = fs::canonicalize(first).unwrap_or_else(|_| first.clone()); + persist_model_path(&canonical); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &canonical.to_string_lossy()) + ); + return Ok(canonical); + } + return Err(AppError::InputClosed); + } + Err(err) => return Err(err), + }; + let trimmed = input.trim(); + + if trimmed.is_empty() { + println!("{}", language.invalid_selection()); + continue; } - }; - let selected_model = models[choice].clone(); - git_config.set(CONFIG_MODEL_KEY, &selected_model)?; - - println!("{}", language.model_set_as_default().replace("{}", &selected_model)); - Ok(selected_model) -} + if let Ok(index) = trimmed.parse::() { + if index > 0 && index <= models.len() { + let selected = fs::canonicalize(&models[index - 1]) + .unwrap_or_else(|_| models[index - 1].clone()); + persist_model_path(&selected); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &selected.to_string_lossy()) + ); + return Ok(selected); + } else { + println!("{}", language.invalid_selection()); + continue; + } + } -fn is_ollama_running() -> Result { - 
let client = create_http_client()?; - match client.get(format!("{OLLAMA_API_BASE}/tags")).send() { - Ok(response) => Ok(response.status().is_success()), - Err(e) => { - let language = Language::English; - eprintln!("{}", language.ollama_connection_warning().replace("{}", &e.to_string())); - eprintln!("{}", language.ensure_ollama_running()); - Ok(false) + let candidate = expand_model_path(trimmed); + if !is_gguf(&candidate) { + println!("{}", language.model_extension_warning()); + continue; + } + if !candidate.is_file() { + println!( + "{}", + language + .model_file_missing() + .replace("{}", &candidate.to_string_lossy()) + ); + println!("{}", language.download_model_prompt()); + continue; } + + let canonical = fs::canonicalize(&candidate).unwrap_or(candidate); + persist_model_path(&canonical); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &canonical.to_string_lossy()) + ); + return Ok(canonical); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn handles_extracts_subject_line() { + let response = "Processing response...\nThe commit message content must be written in English.\n\nfeat(cli): improve diff summary\n"; + assert_eq!( + process_model_response(response), + Some("feat(cli): improve diff summary".to_string()) + ); + } + + #[test] + fn handles_includes_body_until_instruction() { + let response = "feat(cli): improve diff summary\n\nAdd staged file summary for clarity.\nGuidelines: avoid printing instructions.\n"; + assert_eq!( + process_model_response(response), + Some( + "feat(cli): improve diff summary\n\nAdd staged file summary for clarity." 
+ .to_string() + ) + ); + } + + #[test] + fn handles_instruction_only_fallback() { + let response = "The commit message content must be written in English."; + assert_eq!(process_model_response(response), None); + } + + #[test] + fn validates_git_flow_subject() { + assert!(is_valid_commit_message( + "feat(cli): improve prompts", + &Language::English + )); + assert!(is_valid_commit_message( + "docs: 更新贡献指南", + &Language::Chinese + )); + } + + #[test] + fn rejects_invalid_commit_messages() { + assert!(!is_valid_commit_message( + "Implement new feature", + &Language::English + )); + assert!(!is_valid_commit_message( + "feat(): missing subject", + &Language::English + )); + assert!(!is_valid_commit_message( + "feat(cli) missing colon", + &Language::English + )); + } + + #[test] + fn fallback_generates_for_retry_flow() { + let diff = "\ +diff --git a/src/main.rs b/src/main.rs +index 1111111..2222222 100644 +--- a/src/main.rs ++++ b/src/main.rs +@@ ++ println!(\"Model response was invalid. Retrying with stricter instructions...\"); +"; + let message = generate_fallback_commit_message(diff, &Language::English).expect("fallback"); + assert!(message.starts_with("fix(")); + assert!(message.contains("stabilize commit message generation")); + } + + #[test] + fn fallback_generates_for_docs_only() { + let diff = "\ +diff --git a/AGENTS.md b/AGENTS.md +new file mode 100644 +index 0000000..3333333 +--- /dev/null ++++ b/AGENTS.md +@@ ++# Repository Guidelines +"; + let message = + generate_fallback_commit_message(diff, &Language::English).expect("fallback docs"); + assert!(message.starts_with("docs(")); + assert!(message.contains("documentation")); + } + + #[test] + fn fallback_prefers_runtime_scope() { + let diff = "\ +diff --git a/src/main.rs b/src/main.rs +index 1111111..2222222 100644 +--- a/src/main.rs ++++ b/src/main.rs +@@ ++ println!(\"Processing response...\"); +diff --git a/src/llama.rs b/src/llama.rs +new file mode 100644 +index 0000000..3333333 +--- /dev/null ++++ 
b/src/llama.rs +@@ ++ llama_kv_self_clear(ctx); +"; + let message = + generate_fallback_commit_message(diff, &Language::English).expect("fallback runtime"); + assert!(message.starts_with("fix(")); + assert!( + message.contains("stabilize commit message generation") || message.contains("refine") + ); + } + + #[test] + fn fallback_handles_dependency_updates() { + let diff = concat!( + "diff --git a/package.json b/package.json\n", + "index 1111111..2222222 100644\n", + "--- a/package.json\n", + "+++ b/package.json\n", + "@@\n", + "+ \"llama-kit\": \"^2.0.0\"\n", + "diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml\n", + "index 1111111..3333333 100644\n", + "--- a/pnpm-lock.yaml\n", + "+++ b/pnpm-lock.yaml\n", + "@@\n", + "+packages:\n", + ); + let message = + generate_fallback_commit_message(diff, &Language::English).expect("fallback deps"); + assert_eq!(message, "chore(deps): update dependencies"); + } + + #[test] + fn truncates_diff_for_prompt() { + let language = Language::English; + let long_diff = format!("diff --git a/file b/file\n{}", "a".repeat(5000)); + let prepared = build_diff_summary(&long_diff, &language, 512); + assert!(prepared.contains(language.truncated_diff_notice())); + assert!(prepared.len() < long_diff.len()); } } fn main() -> Result<()> { let args: Vec = env::args().collect(); - + if args.len() > 1 && (args[1] == "--version" || args[1] == "-v") { println!("git-ca version {}", env!("CARGO_PKG_VERSION")); return Ok(()); } - + let mut git_config = GitConfig::new()?; let language = get_language(&git_config); - - if args.len() > 1 && args[1] == "model" { - select_default_model(&mut git_config, &language)?; - return Ok(()); - } - if args.len() > 1 && args[1] == "language" { - select_language(&mut git_config)?; - return Ok(()); + if args.len() > 1 { + match args[1].as_str() { + "doctor" => { + run_doctor(&language)?; + return Ok(()); + } + "model" => { + if args.len() > 2 && args[2] == "pull" { + if args.len() < 4 { + println!("{}", 
language.model_pull_usage()); + return Ok(()); + } + let repo_id = &args[3]; + let downloaded = download_model_from_hub(repo_id, &language)?; + persist_model_path(&downloaded); + println!( + "{}", + language + .model_set_as_default() + .replace("{}", &downloaded.to_string_lossy()) + ); + return Ok(()); + } else { + select_model_path(&language)?; + return Ok(()); + } + } + "language" => { + select_language(&mut git_config)?; + return Ok(()); + } + _ => {} + } } - if !is_ollama_running()? { - return Err(language.ollama_not_accessible().into()); - } - - let model = match git_config.get(CONFIG_MODEL_KEY) { - Ok(model) => model, - Err(_) => { - println!("{}", language.no_default_model()); - select_default_model(&mut git_config, &language)? - } - }; + let model_path = get_model_path(&language)?; let current_dir = env::current_dir()?; let repo_path = find_git_repository(¤t_dir) .ok_or_else(|| AppError::Custom(language.not_in_git_repository().to_string()))?; - let repo = Repository::open(repo_path)?; + let repo = Repository::open(&repo_path)?; let mut index = repo.index()?; - env::set_current_dir(repo.path().parent().unwrap())?; + env::set_current_dir(&repo_path)?; + index.read(true)?; - if index.add_all(["*"], IndexAddOption::DEFAULT, None).is_err() { + let diff = get_diff()?; + if diff.trim().is_empty() { println!("{}", language.no_changes_staged()); return Ok(()); } - let diff = get_diff()?; - let mut commit_msg = analyze_diff(&diff, &model, &language)?; - - loop { - let choice = get_user_input(&language.use_edit_cancel_prompt())?; - - match choice.to_lowercase().as_str() { - "u" => break, - "e" => { - commit_msg = get_user_input(&language.enter_commit_message())?; - break; + let context_size = DEFAULT_CONTEXT_SIZE; + let mut commit_msg = match analyze_diff(&diff, &model_path, &language, context_size)? 
{ + Some(msg) => msg, + None => { + if let Some(fallback) = generate_fallback_commit_message(&diff, &language) { + println!("{}", language.fallback_commit_generated()); + println!("{fallback}"); + fallback + } else { + println!("{}", language.model_failed_generate()); + get_user_input(&language.enter_commit_message())? } - "c" => { - println!("{}", language.commit_cancelled()); - return Ok(()); + } + }; + + if io::stdin().is_terminal() { + loop { + let choice = get_user_input(&language.use_edit_cancel_prompt())?; + + match choice.to_lowercase().as_str() { + "u" => break, + "e" => { + commit_msg = get_user_input(&language.enter_commit_message())?; + break; + } + "c" => { + println!("{}", language.commit_cancelled()); + return Ok(()); + } + _ => println!("{}", language.invalid_choice()), } - _ => println!("{}", language.invalid_choice()), } + } else { + // Non-interactive mode: automatically use the generated message + println!("\n[git-ca] Non-interactive mode detected. Using generated commit message."); } let name = git_config.get_or_prompt("user.name", &language.enter_name_prompt())?; @@ -715,7 +1985,20 @@ fn main() -> Result<()> { let signature = Signature::now(&name, &email)?; let tree_id = index.write_tree()?; let tree = repo.find_tree(tree_id)?; - let parent_commit = repo.head()?.peel_to_commit()?; + let parents = match repo.head() { + Ok(head) => match head.peel_to_commit() { + Ok(commit) => vec![commit], + Err(err) if matches!(err.code(), ErrorCode::NotFound | ErrorCode::UnbornBranch) => { + Vec::new() + } + Err(err) => return Err(err.into()), + }, + Err(err) if matches!(err.code(), ErrorCode::UnbornBranch | ErrorCode::NotFound) => { + Vec::new() + } + Err(err) => return Err(err.into()), + }; + let parent_refs: Vec<&Commit> = parents.iter().collect(); repo.commit( Some("HEAD"), @@ -723,11 +2006,42 @@ fn main() -> Result<()> { &signature, &commit_msg, &tree, - &[&parent_commit], + &parent_refs, )?; println!("{}", language.changes_committed()); - println!("{}", 
language.commit_message_label().replace("{}", &commit_msg)); + println!( + "{}", + language.commit_message_label().replace("{}", &commit_msg) + ); Ok(()) -} \ No newline at end of file +} + +fn run_doctor(language: &Language) -> Result<()> { + println!("Running llama.cpp smoke test…"); + + let context_size = DEFAULT_CONTEXT_SIZE; + let model_path = get_model_path(language)?; + + println!("Using model: {}", model_path.to_string_lossy()); + println!("Context length: {}", context_size); + + let mut session = LlamaSession::new(&model_path, context_size).map_err(AppError::from)?; + + let prompt = match language { + Language::English => { + "You are a helpful assistant. Reply with a short greeting that confirms the model is working, e.g. \"Model ok\".".to_string() + } + Language::Chinese => { + "你是一个乐于助人的助手。请用简短的话确认模型正常工作,例如“模型正常”。".to_string() + } + }; + + println!("\nPrompt:\n{}\n", prompt); + + let response = session.infer(&prompt, 64).map_err(AppError::from)?; + println!("Model response:\n{}\n", response.trim()); + + Ok(()) +} diff --git a/test-body.md b/test-body.md new file mode 100644 index 0000000..0a1101c --- /dev/null +++ b/test-body.md @@ -0,0 +1 @@ +# Testing detailed commit messages diff --git a/test-detailed.md b/test-detailed.md new file mode 100644 index 0000000..496aca9 --- /dev/null +++ b/test-detailed.md @@ -0,0 +1 @@ +# Testing detailed commit with body