From f637c5de956d55f802fa9935b33cbdf480776713 Mon Sep 17 00:00:00 2001
From: Junaid Aslam
Date: Mon, 29 Dec 2025 16:08:15 +0500
Subject: [PATCH] feat(examples): add Gemma 3 native MCP gateway for local tool-calling

---
 examples/gemma3-mcp-agent/README.md        |  61 +++++++++++
 examples/gemma3-mcp-agent/mcp_config.json  |  14 +++
 examples/gemma3-mcp-agent/requirements.txt |   4 +
 examples/gemma3-mcp-agent/server.py        | 112 +++++++++++++++++++++
 4 files changed, 191 insertions(+)
 create mode 100644 examples/gemma3-mcp-agent/README.md
 create mode 100644 examples/gemma3-mcp-agent/mcp_config.json
 create mode 100644 examples/gemma3-mcp-agent/requirements.txt
 create mode 100644 examples/gemma3-mcp-agent/server.py

diff --git a/examples/gemma3-mcp-agent/README.md b/examples/gemma3-mcp-agent/README.md
new file mode 100644
index 00000000..ffd4686a
--- /dev/null
+++ b/examples/gemma3-mcp-agent/README.md
@@ -0,0 +1,61 @@
+# Gemma 3 & FunctionGemma MCP Gateway
+
+This example implements a bridge between Google's **Gemma 3 / FunctionGemma** models and the **Model Context Protocol (MCP)**: a local MCP server that drives Gemma through Ollama and routes the model's native function calls to other MCP tools.
+
+## 🚀 Installation
+
+Ensure you have a modern Python environment (3.10+) and run:
+
+```bash
+# Clone the repository and navigate to the example
+cd gemma/examples/gemma3-mcp-agent/
+
+# Install the required MCP and communication libraries
+pip install -r requirements.txt
+```
+
+### Prerequisites
+1. **Ollama**: Download from [ollama.com](https://ollama.com).
+2. **Gemma 3 Model**: Run `ollama pull gemma3`.
+
+---
+
+## 🏗️ The "Gemma 3 Bridge"
+
+Rather than scraping free-form output with the ad-hoc regex parsers that commonly fail in homegrown integrations, the bridge relies on the official native tokens for reliable tool execution:
+
+* **Official Specification**: Aligns with the `FunctionGemma` standard, declaring tools in the `declaration:tool_name{schema}` format.
+* **Native Transitions**: Uses the official control tokens:
+  * `<start_function_call>` and `<end_function_call>`
+  * `<start_function_response>` and `<end_function_response>`
+* **Developer-Role Implementation**: Automatically injects the `developer` turn required to trigger Gemma 3's tool-use mode.
+* **Escape Handling**: Built-in support for the `<escape>` token, so JSON arguments stay valid even when they contain special characters.
+
+---
+
+## 🧪 Usage & Quick Start
+
+### 1. Using the MCP Inspector (Verification)
+To verify the bridge and inspect tool schemas without an IDE, use the MCP Inspector:
+
+```bash
+npx @modelcontextprotocol/inspector python server.py
+```
+* Once the inspector loads, you can view the `gemma_chat` tool.
+* Trigger a test call to `get_system_info` or `read_local_file` to see the native token encapsulation in action.
+
+### 2. Integration with Antigravity IDE
+1. Open **Antigravity Settings**.
+2. Navigate to **MCP Servers**.
+3. Import the `mcp_config.json` provided in this directory.
+   * *Note: Ensure the `args` path in `mcp_config.json` correctly points to `server.py` relative to your workspace root.*
+4. The IDE agent can now use Gemma 3 via the `gemma_chat` tool for local reasoning.
+
+### 3. Verification Test Case
+Ask the agent:
+> "Check my system OS and read the content of requirements.txt."
+
+This triggers a multi-turn reasoning loop (see the wire-format example below):
+1. The model emits `<start_function_call>call:get_system_info{}<end_function_call>`.
+2. The gateway runs the local check and returns the result inside a `<start_function_response>` block.
+3. The model emits a second call for `read_local_file`, then composes the final answer.
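+
+### 4. What a Round Trip Looks Like on the Wire
+
+For orientation, this is approximately the prompt the gateway assembles across one tool-use round trip. The token layout follows the format described above; the exact whitespace, the schema, and the model's final sentence are illustrative, not normative:
+
+```text
+<start_of_turn>developer
+You are a model that can do function calling with the following functions<start_function_declaration>
+declaration:get_system_info{"type": "object", "properties": {}}
+<end_function_declaration><end_of_turn>
+<start_of_turn>user
+Check my system OS.<end_of_turn>
+<start_of_turn>model
+<start_function_call>call:get_system_info{}<end_function_call><end_of_turn>
+<start_of_turn>user
+<start_function_response>OS: Linux 6.8.0, Arch: x86_64<end_function_response><end_of_turn>
+<start_of_turn>model
+Your system is running Linux 6.8.0 on x86_64.
+```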
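+
+### 5. Calling the Gateway from Python (Optional)
+
+To drive the gateway from a script instead of an IDE, the standard `mcp` Python client can spawn `server.py` over stdio. A minimal sketch (the prompt string is illustrative):
+
+```python
+import asyncio
+
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+
+
+async def main() -> None:
+    # Spawn server.py as a stdio MCP server and invoke the chat tool once.
+    params = StdioServerParameters(command="python", args=["server.py"])
+    async with stdio_client(params) as (read, write):
+        async with ClientSession(read, write) as session:
+            await session.initialize()
+            result = await session.call_tool(
+                "gemma_chat", {"prompt": "Check my system OS."}
+            )
+            print(result.content)
+
+
+asyncio.run(main())
+```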
diff --git a/examples/gemma3-mcp-agent/mcp_config.json b/examples/gemma3-mcp-agent/mcp_config.json
new file mode 100644
index 00000000..5804b938
--- /dev/null
+++ b/examples/gemma3-mcp-agent/mcp_config.json
@@ -0,0 +1,14 @@
+{
+  "mcpServers": {
+    "gemma-gateway": {
+      "command": "python",
+      "args": [
+        "./examples/gemma3-mcp-agent/server.py"
+      ],
+      "env": {
+        "OLLAMA_URL": "http://localhost:11434/api/generate",
+        "GEMMA_MODEL": "gemma3"
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/examples/gemma3-mcp-agent/requirements.txt b/examples/gemma3-mcp-agent/requirements.txt
new file mode 100644
index 00000000..1f0febb8
--- /dev/null
+++ b/examples/gemma3-mcp-agent/requirements.txt
@@ -0,0 +1,4 @@
+mcp
+fastmcp
+httpx
+pydantic
diff --git a/examples/gemma3-mcp-agent/server.py b/examples/gemma3-mcp-agent/server.py
new file mode 100644
index 00000000..13c62f6d
--- /dev/null
+++ b/examples/gemma3-mcp-agent/server.py
@@ -0,0 +1,112 @@
+import json
+import os
+import re
+
+import httpx
+from mcp.server.fastmcp import FastMCP
+
+mcp = FastMCP("Gemma-MCP-Gateway")
+
+# Configuration (overridable via the env block in mcp_config.json)
+OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://localhost:11434/api/generate")
+MODEL_NAME = os.environ.get("GEMMA_MODEL", "gemma3")
+
+# Official trigger phrase for Gemma 3 function calling
+GEMMA_SYSTEM_PROMPT = "You are a model that can do function calling with the following functions"
+
+
+def format_tools_for_gemma(tools):
+    """Format tool schemas using the official declaration tokens."""
+    definitions = [f"declaration:{t.name}{json.dumps(t.inputSchema)}" for t in tools]
+    return "<start_function_declaration>\n" + "\n".join(definitions) + "\n<end_function_declaration>"
+
+
+def parse_gemma_tool_call(text):
+    """Parse an official Gemma 3 tool call, handling <escape> tokens."""
+    # Strip <escape> tokens before parsing so the arguments decode as plain JSON
+    clean_text = text.replace("<escape>", "")
+
+    # Official native call pattern: call:tool_name{json_args}
+    # (non-greedy brace match; assumes argument objects are not nested)
+    call_regex = r"call:(\w+)(\{.*?\})"
+    match = re.search(call_regex, clean_text, re.DOTALL)
+
+    if match:
+        tool_name = match.group(1)
+        try:
+            return tool_name, json.loads(match.group(2)), clean_text
+        except json.JSONDecodeError:
+            return None, None, clean_text
+    return None, None, clean_text
+
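+# Illustration (token shapes assumed from the FunctionGemma format described in
+# the README): a completion such as
+#   <start_function_call>call:read_local_file{"path": "requirements.txt"}<end_function_call>
+# parses to ("read_local_file", {"path": "requirements.txt"}, <cleaned text>),
+# while prose containing no call: pattern parses to (None, None, <cleaned text>)
+# and is treated by gemma_chat below as the model's final answer.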
+
+@mcp.tool()
+async def gemma_chat(prompt: str, history: list | None = None) -> str:
+    """
+    A tool-augmented chat interface built on Gemma 3's official native tokens.
+    Handles developer-role activation and recursive tool execution.
+    """
+    all_tools = await mcp.list_tools()
+    available_tools = [t for t in all_tools if t.name != "gemma_chat"]
+
+    # Construct the developer turn (turn 1) - the official trigger for tool-use mode
+    tool_block = format_tools_for_gemma(available_tools)
+    full_prompt = f"<start_of_turn>developer\n{GEMMA_SYSTEM_PROMPT}{tool_block}<end_of_turn>\n"
+
+    # Append conversation history if provided
+    if history:
+        for turn in history:
+            full_prompt += f"<start_of_turn>{turn['role']}\n{turn['content']}<end_of_turn>\n"
+
+    # Add the final user prompt and open the model turn
+    full_prompt += f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
+
+    async with httpx.AsyncClient() as client:
+        current_prompt = full_prompt
+        # Allow up to 5 tool-call rounds to prevent infinite cycles
+        for _ in range(5):
+            response = await client.post(
+                OLLAMA_URL,
+                json={
+                    "model": MODEL_NAME,
+                    "prompt": current_prompt,
+                    "stream": False,
+                    "raw": True  # Required for precise control token handling
+                },
+                timeout=120.0
+            )
+
+            if response.status_code != 200:
+                return f"Error from Ollama ({response.status_code}): {response.text}"
+
+            output = response.json().get("response", "")
+            tool_name, tool_args, _ = parse_gemma_tool_call(output)
+
+            if tool_name:
+                try:
+                    tool_result = await mcp.call_tool(tool_name, tool_args)
+
+                    # Official native response format
+                    res_block = f"<start_function_response>{tool_result}<end_function_response>"
+
+                    # Append the result back to the prompt as a user-turn continuation
+                    current_prompt += output + f"<end_of_turn>\n<start_of_turn>user\n{res_block}<end_of_turn>\n<start_of_turn>model\n"
+                except Exception as e:
+                    err_block = f"<start_function_response>Error: {str(e)}<end_function_response>"
+                    current_prompt += output + f"<end_of_turn>\n<start_of_turn>user\n{err_block}<end_of_turn>\n<start_of_turn>model\n"
+            else:
+                # No tool call detected; return the final model output
+                return output
+
+    return "Max tool iterations reached."
+
+
+@mcp.tool()
+async def read_local_file(path: str) -> str:
+    """Reads a file from the local filesystem."""
+    try:
+        with open(path, 'r', encoding='utf-8') as f:
+            return f.read()
+    except Exception as e:
+        return f"Error reading file: {str(e)}"
+
+
+@mcp.tool()
+async def get_system_info() -> str:
+    """Get basic system information."""
+    import platform
+    return f"OS: {platform.system()} {platform.release()}, Arch: {platform.machine()}"
+
+
+if __name__ == "__main__":
+    # Single entry point: serve over stdio (FastMCP's default transport)
+    mcp.run()
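+
+# Note: each round trip in gemma_chat posts a JSON body of this shape to
+# Ollama's /api/generate endpoint (values illustrative):
+#   {"model": "gemma3", "prompt": "<start_of_turn>developer\n...", "stream": false, "raw": true}
+# "raw": true bypasses Ollama's built-in chat template, so the control tokens
+# assembled above reach the model verbatim.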