From 8961b02455f1a928be1e1efd1ddd6c5a8bab44a8 Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 3 Oct 2025 15:40:37 -0400 Subject: [PATCH 01/51] initial changes --- opto/optimizers/optoprime_v2.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index cc898bac..ce692bd2 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -37,6 +37,7 @@ class OptimizerPromptSymbolSet: instruction_section_title = "# Instruction" code_section_title = "# Code" documentation_section_title = "# Documentation" + context_section_title = "# Context" node_tag = "node" # nodes that are constants in the graph variable_tag = "variable" # nodes that can be changed @@ -141,6 +142,7 @@ def default_prompt_symbols(self) -> Dict[str, str]: "instruction": self.instruction_section_title, "code": self.code_section_title, "documentation": self.documentation_section_title, + "context": self.context_section_title } @@ -242,6 +244,7 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): instruction_section_title = "# Instruction" code_section_title = "# Code" documentation_section_title = "# Documentation" + context_section_title = "# Context" node_tag = "const" # nodes that are constants in the graph variable_tag = "var" # nodes that can be changed @@ -264,6 +267,7 @@ class ProblemInstance: others: str outputs: str feedback: str + context: str optimizer_prompt_symbol_set: OptimizerPromptSymbolSet @@ -292,6 +296,9 @@ class ProblemInstance: # Feedback {feedback} + + # Context + {context} """ ) @@ -305,6 +312,7 @@ def __repr__(self) -> str: outputs=self.outputs, others=self.others, feedback=self.feedback, + context=self.context ) @@ -359,6 +367,7 @@ class OptoPrimeV2(OptoPrime): - {others_section_title}: the intermediate values created through the code execution. - {outputs_section_title}: the result of the code output. - {feedback_section_title}: the feedback about the code's execution result. + - {context_section_title}: the context information that might be useful to solve the problem. In `{variables_section_title}`, `{inputs_section_title}`, `{outputs_section_title}`, and `{others_section_title}`, the format is: @@ -413,17 +422,22 @@ class OptoPrimeV2(OptoPrime): example_prompt = dedent( """ - Here are some feasible but not optimal solutions for the current problem instance. Consider this as a hint to help you understand the problem better. ================================ - {examples} - ================================ """ ) + context_prompt = dedent( + """ + Here is some additional **context** to solving this problem: + + {context} + """ + ) + final_prompt = dedent( """ What are your suggestions on variables {names}? 
@@ -476,6 +490,7 @@ def __init__( ) self.example_problem_summary.variables = {'a': (5, "a > 0")} self.example_problem_summary.inputs = {'b': (1, None), 'c': (5, None)} + self.example_problem_summary.context = "" self.example_problem = self.problem_instance(self.example_problem_summary) self.example_response = self.optimizer_prompt_symbol_set.example_output( @@ -656,6 +671,7 @@ def problem_instance(self, summary, mask=None): constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.others_section_title not in mask else "" ), feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", + context=summary.context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "", optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) From 949d8ff99c2714cd4a65944999a77dcd44b4fd11 Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 3 Oct 2025 15:54:11 -0400 Subject: [PATCH 02/51] make context optional --- opto/optimizers/optoprime_v2.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index ce692bd2..b56f4d56 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -267,7 +267,7 @@ class ProblemInstance: others: str outputs: str feedback: str - context: str + context: Optional[str] optimizer_prompt_symbol_set: OptimizerPromptSymbolSet @@ -296,14 +296,11 @@ class ProblemInstance: # Feedback {feedback} - - # Context - {context} """ ) def __repr__(self) -> str: - return self.problem_template.format( + optimization_query = self.problem_template.format( instruction=self.instruction, code=self.code, documentation=self.documentation, @@ -315,6 +312,18 @@ def __repr__(self) -> str: context=self.context ) + context_section = dedent(""" + + # Context + {context} + """) + + if self.context is not None and self.context.strip() != "": + context_section.format(context=self.context) + optimization_query += context_section + + return optimization_query + @dataclass class MemoryInstance: From e36dd7c578e8560056a1d8a81ac2b34e8388b05e Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 3 Oct 2025 17:00:44 -0400 Subject: [PATCH 03/51] finish adding image support to optoprime_v2 --- docs/tutorials/minibatch.ipynb | 22 +++++++------- opto/optimizers/optoprime_v2.py | 51 ++++++++++++++++++++++++++++----- opto/optimizers/utils.py | 17 +++++++++++ 3 files changed, 72 insertions(+), 18 deletions(-) diff --git a/docs/tutorials/minibatch.ipynb b/docs/tutorials/minibatch.ipynb index f752d866..e7cd4233 100644 --- a/docs/tutorials/minibatch.ipynb +++ b/docs/tutorials/minibatch.ipynb @@ -601,11 +601,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "[Step 1] \u001b[92mAverage test score: 1.0\u001b[0m\n", + "[Step 1] \u001B[92mAverage test score: 1.0\u001B[0m\n", "Epoch: 0. Iteration: 1\n", "[Step 1] Instantaneous train score: 1.0\n", "[Step 1] Average train score: 1.0\n", - "[Step 1] \u001b[91mParameter: str:20: You're a helpful agent\u001b[0m\n" + "[Step 1] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" ] }, { @@ -641,11 +641,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "[Step 2] \u001b[92mAverage test score: 1.0\u001b[0m\n", + "[Step 2] \u001B[92mAverage test score: 1.0\u001B[0m\n", "Epoch: 0. 
Iteration: 2\n", "[Step 2] Instantaneous train score: 1.0\n", "[Step 2] Average train score: 1.0\n", - "[Step 2] \u001b[91mParameter: str:20: You're a helpful agent\u001b[0m\n" + "[Step 2] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" ] }, { @@ -677,11 +677,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "[Step 3] \u001b[92mAverage test score: 1.0\u001b[0m\n", + "[Step 3] \u001B[92mAverage test score: 1.0\u001B[0m\n", "Epoch: 0. Iteration: 3\n", "[Step 3] Instantaneous train score: 1.0\n", "[Step 3] Average train score: 1.0\n", - "[Step 3] \u001b[91mParameter: str:20: You're a helpful agent\u001b[0m\n" + "[Step 3] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" ] }, { @@ -714,11 +714,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "[Step 4] \u001b[92mAverage test score: 1.0\u001b[0m\n", + "[Step 4] \u001B[92mAverage test score: 1.0\u001B[0m\n", "Epoch: 0. Iteration: 4\n", "[Step 4] Instantaneous train score: 1.0\n", "[Step 4] Average train score: 1.0\n", - "[Step 4] \u001b[91mParameter: str:20: You're a helpful agent\u001b[0m\n" + "[Step 4] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" ] }, { @@ -751,11 +751,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "[Step 5] \u001b[92mAverage test score: 1.0\u001b[0m\n", + "[Step 5] \u001B[92mAverage test score: 1.0\u001B[0m\n", "Epoch: 0. Iteration: 5\n", "[Step 5] Instantaneous train score: 1.0\n", "[Step 5] Average train score: 1.0\n", - "[Step 5] \u001b[91mParameter: str:20: You're a helpful agent\u001b[0m\n", + "[Step 5] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n", "FINISHED TRAINING\n" ] }, @@ -831,4 +831,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index b56f4d56..87a57446 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -3,7 +3,7 @@ from dataclasses import dataclass, asdict from opto.optimizers.optoprime import OptoPrime, FunctionFeedback from opto.trace.utils import dedent -from opto.optimizers.utils import truncate_expression, extract_xml_like_data +from opto.optimizers.utils import truncate_expression, extract_xml_like_data, encode_image_to_base64 from opto.trace.nodes import ParameterNode, Node, MessageNode from opto.trace.propagators import TraceGraph, GraphPropagator @@ -17,6 +17,11 @@ from typing import Dict, Any +@dataclass +class MultiModalPayload: + image_bytes: Optional[str] = None # base64 encoded image bytes + + class OptimizerPromptSymbolSet: """ By inheriting this class and pass into the optimizer. 
People can change the optimizer documentation @@ -327,7 +332,7 @@ def __repr__(self) -> str: @dataclass class MemoryInstance: - variables: Dict[str, Tuple[Any, str]] # name -> (data, constraint) + variables: Dict[str, Tuple[Any, str]] # name -> (data, constraint) feedback: str optimizer_prompt_symbol_set: OptimizerPromptSymbolSet @@ -472,11 +477,14 @@ def __init__( optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(), use_json_object_format=True, # whether to use json object format for the response when calling LLM truncate_expression=truncate_expression, + problem_context: Optional[str] = None, **kwargs, ): super().__init__(parameters, *args, propagator=propagator, **kwargs) self.truncate_expression = truncate_expression + self.problem_context = problem_context + self.multimodal_payload = MultiModalPayload() self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False self.ignore_extraction_error = ignore_extraction_error @@ -499,7 +507,6 @@ def __init__( ) self.example_problem_summary.variables = {'a': (5, "a > 0")} self.example_problem_summary.inputs = {'b': (1, None), 'c': (5, None)} - self.example_problem_summary.context = "" self.example_problem = self.problem_instance(self.example_problem_summary) self.example_response = self.optimizer_prompt_symbol_set.example_output( @@ -520,6 +527,23 @@ def __init__( self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols) self.initialize_prompt() + def add_image_context(self, image_path: str, context: str = ""): + if self.problem_context is None: + self.problem_context = "" + self.problem_context += f"{context}\n\n" + + # we load in the image and convert to base64 + data_url = encode_image_to_base64(image_path) + self.multimodal_payload.image_bytes = data_url + + self.initialize_prompt() + + def add_context(self, context: str): + if self.problem_context is None: + self.problem_context = "" + self.problem_context += f"{context}\n\n" + self.initialize_prompt() + def initialize_prompt(self): self.representation_prompt = self.representation_prompt.format( variable_expression_format=dedent(f""" @@ -540,7 +564,8 @@ def initialize_prompt(self): instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""), code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""), documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""), - others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", "") + others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""), + context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "") ) self.output_format_prompt = self.output_format_prompt_template.format( output_format=self.optimizer_prompt_symbol_set.output_format, @@ -553,7 +578,8 @@ def initialize_prompt(self): documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""), variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""), inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""), - others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", "") + others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""), + 
context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "") ) def repr_node_value(self, node_dict, node_tag="node", @@ -680,7 +706,7 @@ def problem_instance(self, summary, mask=None): constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.others_section_title not in mask else "" ), feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", - context=summary.context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "", + context=self.problem_context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "", optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) @@ -742,9 +768,20 @@ def call_llm( if verbose not in (False, "output"): print("Prompt\n", system_prompt + user_prompt) + user_message_content = [] + if self.multimodal_payload.image_bytes is not None: + user_message_content.append({ + "type": "image_url", + "image_url": { + "url": self.multimodal_payload.image_bytes + } + }) + + user_message_content.append({"type": "text", "text": user_prompt}) + messages = [ {"role": "system", "content": system_prompt}, - {"role": "user", "content": user_prompt}, + {"role": "user", "content": user_message_content}, ] response_format = {"type": "json_object"} if self.use_json_object_format else None diff --git a/opto/optimizers/utils.py b/opto/optimizers/utils.py index 13a5ad01..4fbec459 100644 --- a/opto/optimizers/utils.py +++ b/opto/optimizers/utils.py @@ -1,5 +1,8 @@ +import base64 +import mimetypes from typing import Dict, Any + def print_color(message, color=None, logger=None): colors = { "red": "\033[91m", @@ -134,3 +137,17 @@ def extract_xml_like_data(text: str, reasoning_tag: str = "reasoning", if var_name: # Only require name to be non-empty, value can be empty result['variables'][var_name] = var_value return result + + +def encode_image_to_base64(path: str) -> str: + # Read binary + with open(path, "rb") as f: + image_bytes = f.read() + # Guess MIME type from file extension + mime_type, _ = mimetypes.guess_type(path) + if mime_type is None: + # fallback + mime_type = "image/jpeg" + b64 = base64.b64encode(image_bytes).decode("utf-8") + data_url = f"data:{mime_type};base64,{b64}" + return data_url From 7dcb880644e909c5f4a7fcf9c210e95eadf6bc19 Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 3 Oct 2025 17:20:09 -0400 Subject: [PATCH 04/51] Finish updating OPRO to accept additional context --- opto/optimizers/opro_v2.py | 43 ++++++++++++++++++++++++++++----- opto/optimizers/optoprime_v2.py | 4 +-- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/opto/optimizers/opro_v2.py b/opto/optimizers/opro_v2.py index ff5c801d..19b33e58 100644 --- a/opto/optimizers/opro_v2.py +++ b/opto/optimizers/opro_v2.py @@ -1,7 +1,7 @@ import json from textwrap import dedent from dataclasses import dataclass, asdict -from typing import Dict +from typing import Dict, Optional from opto.optimizers.optoprime_v2 import OptoPrimeV2, OptimizerPromptSymbolSet @@ -15,8 +15,8 @@ class OPROPromptSymbolSet(OptimizerPromptSymbolSet): Attributes ---------- - problem_context_section_title : str - Title for the problem context section in prompts. + instruction_section_title : str + Title for the instruction section in prompts. variable_section_title : str Title for the variable/solution section in prompts. 
feedback_section_title : str @@ -49,9 +49,10 @@ class OPROPromptSymbolSet(OptimizerPromptSymbolSet): more focused set of symbols specifically for OPRO optimization. """ - problem_context_section_title = "# Problem Context" + instruction_section_title = "# Instruction" variable_section_title = "# Solution" feedback_section_title = "# Feedback" + context_section_title = "# Context" node_tag = "node" # nodes that are constants in the graph variable_tag = "solution" # nodes that can be changed @@ -72,6 +73,7 @@ def default_prompt_symbols(self) -> Dict[str, str]: "variables": self.variables_section_title, "feedback": self.feedback_section_title, "instruction": self.instruction_section_title, + "context": self.context_section_title } @dataclass @@ -89,6 +91,9 @@ class ProblemInstance: The current proposed solution that can be modified. feedback : str Feedback about the current solution. + context: str + Optional context information that might be useful to solve the problem. + optimizer_prompt_symbol_set : OPROPromptSymbolSet The symbol set used for formatting the problem. problem_template : str @@ -107,12 +112,13 @@ class ProblemInstance: instruction: str variables: str feedback: str + context: Optional[str] optimizer_prompt_symbol_set: OPROPromptSymbolSet problem_template = dedent( """ - # Problem Context + # Instruction {instruction} # Solution @@ -124,12 +130,24 @@ class ProblemInstance: ) def __repr__(self) -> str: - return self.problem_template.format( + optimization_query = self.problem_template.format( instruction=self.instruction, variables=self.variables, feedback=self.feedback, ) + context_section = dedent(""" + + # Context + {context} + """) + + if self.context is not None and self.context.strip() != "": + context_section.format(context=self.context) + optimization_query += context_section + + return optimization_query + class OPROv2(OptoPrimeV2): """OPRO (Optimization by PROmpting) optimizer version 2. @@ -197,6 +215,7 @@ class OPROv2(OptoPrimeV2): - {instruction_section_title}: the instruction which describes the things you need to do or the question you should answer. - {variables_section_title}: the proposed solution that you can change/tweak (trainable). - {feedback_section_title}: the feedback about the solution. + - {context_section_title}: the context information that might be useful to solve the problem. If `data_type` is `code`, it means `{value_tag}` is the source code of a python code, which may include docstring and definitions. """ @@ -229,6 +248,14 @@ class OPROv2(OptoPrimeV2): """ ) + context_prompt = dedent( + """ + Here is some additional **context** to solving this problem: + + {context} + """ + ) + final_prompt = dedent( """ What are your revised solutions on {names}? @@ -244,6 +271,7 @@ def __init__(self, *args, optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = None, include_example=False, # default example in OptoPrimeV2 does not work in OPRO memory_size=5, + problem_context: Optional[str] = None, **kwargs): """Initialize the OPROv2 optimizer. 
@@ -264,6 +292,7 @@ def __init__(self, *args, optimizer_prompt_symbol_set = optimizer_prompt_symbol_set or OPROPromptSymbolSet() super().__init__(*args, optimizer_prompt_symbol_set=optimizer_prompt_symbol_set, include_example=include_example, memory_size=memory_size, + problem_context=problem_context, **kwargs) def problem_instance(self, summary, mask=None): @@ -328,6 +357,7 @@ def initialize_prompt(self): variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""), feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""), instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""), + context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "") ) self.output_format_prompt = self.output_format_prompt_template.format( output_format=self.optimizer_prompt_symbol_set.output_format, @@ -336,4 +366,5 @@ def initialize_prompt(self): instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""), feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""), variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""), + context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "") ) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 87a57446..2486710b 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -156,7 +156,7 @@ class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet): expect_json = True - custom_output_format_instruction = """ + custom_output_format_instruction = dedent(""" {{ "reasoning": , "suggestion": {{ @@ -164,7 +164,7 @@ class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet): : , }} }} - """ + """) def example_output(self, reasoning, variables): """ From 8a31e8b3d487ac9e563fd13413c571e2020599d7 Mon Sep 17 00:00:00 2001 From: windweller Date: Sun, 5 Oct 2025 13:50:01 -0400 Subject: [PATCH 05/51] add context prompt into pickle save/load. Modify `test_priority_search`'s mock test to expect different kind of input --- opto/optimizers/optoprime_v2.py | 2 ++ tests/unit_tests/test_priority_search.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 2486710b..2eb0a862 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -811,6 +811,7 @@ def save(self, path: str): "prompt_symbols": self.prompt_symbols, "representation_prompt": self.representation_prompt, "output_format_prompt": self.output_format_prompt, + 'context_prompt': self.context_prompt }, f) def load(self, path: str): @@ -830,3 +831,4 @@ def load(self, path: str): self.prompt_symbols = state["prompt_symbols"] self.representation_prompt = state["representation_prompt"] self.output_format_prompt = state["output_format_prompt"] + self.context_prompt = state["context_prompt"] diff --git a/tests/unit_tests/test_priority_search.py b/tests/unit_tests/test_priority_search.py index 2ebda047..4a698fba 100644 --- a/tests/unit_tests/test_priority_search.py +++ b/tests/unit_tests/test_priority_search.py @@ -121,6 +121,15 @@ def _llm_callable(messages, **kwargs): A dummy LLM callable that simulates a response. 
""" problem = messages[1]['content'] + # in newer LLM API (LiteLLM, OpenAI client, etc.), the user message content is now a list of typed messages: + # [{'type': 'text', 'text': '...'}, {'type': 'image', 'image_url': '...'}] + # this expansion is necessary for multi-modal inputs + + if type(problem) is list: + for typed_message in problem: + if typed_message['type'] == 'text': + problem = typed_message['text'] + break # extract name from name = re.findall(r"", problem) From c271d9c468e5e5ac1f67d3196fcbb75d1ecc31c4 Mon Sep 17 00:00:00 2001 From: windweller Date: Sun, 5 Oct 2025 14:58:49 -0400 Subject: [PATCH 06/51] comment out the small-LLM test --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7889b69d..1fdcd036 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,6 +67,6 @@ jobs: run: pytest tests/unit_tests/ # 9) Run basic tests for each optimizer (some will fail due to the small LLM model chosen for free GitHub CI) - - name: Run optimizers test suite - run: pytest tests/llm_optimizers_tests/test_optimizer.py || true - continue-on-error: true +# - name: Run optimizers test suite +# run: pytest tests/llm_optimizers_tests/test_optimizer.py || true +# continue-on-error: true From 148d4bc7e6ac2560f414bc5687e2bf88d6e13d69 Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 6 Oct 2025 13:33:53 -0400 Subject: [PATCH 07/51] add multi-modal support for the LLM model as well --- opto/features/flows/compose.py | 17 ++++++--- opto/features/flows/types.py | 68 +++++++++++++++++++++++++++++++++- 2 files changed, 78 insertions(+), 7 deletions(-) diff --git a/opto/features/flows/compose.py b/opto/features/flows/compose.py index f075ac4b..0d05801a 100644 --- a/opto/features/flows/compose.py +++ b/opto/features/flows/compose.py @@ -1,6 +1,7 @@ import opto.trace as trace from typing import Union, get_type_hints, Any, Dict, List, Optional from opto.utils.llm import AbstractModel, LLM +from opto.features.flows.types import MultiModalPayload, QueryModel import contextvars """ @@ -178,7 +179,8 @@ def __init__(self, self.model_name = model_name if model_name else f"TracedLLM{len(current_llm_sessions)}" current_llm_sessions.append(1) # just a marker - def forward(self, user_query: str, chat_history_on: Optional[bool] = None) -> str: + def forward(self, user_query: str, chat_history_on: Optional[bool] = None, + payload: Optional[MultiModalPayload] = None) -> str: """This function takes user_query as input, and returns the response from the LLM, with the system prompt prepended. This method will always save chat history. @@ -187,17 +189,19 @@ def forward(self, user_query: str, chat_history_on: Optional[bool] = None) -> st If chat_history_on is True, the chat history will be included in the LLM input. Args: - user_query: The user query to send to the LLM + user_query: The user query to send to the LLM. 
Can be Returns: str: For direct pattern """ chat_history_on = self.chat_history_on if chat_history_on is None else chat_history_on + user_message = QueryModel(query=user_query, multimodal_payload=payload).query + messages = [{"role": "system", "content": self.system_prompt.data}] if chat_history_on: messages.extend(self.chat_history.get_messages()) - messages.append({"role": "user", "content": user_query}) + messages.append({"role": "user", "content": user_message}) response = self.llm(messages=messages) @@ -226,5 +230,8 @@ def call_llm(*args) -> str: return response_node - def chat(self, user_query: str) -> str: - return self.forward(user_query) + def chat(self, user_query: str, chat_history_on: Optional[bool] = None, + payload: Optional[MultiModalPayload] = None) -> str: + """Note that chat/forward always assumes it's a single turn of the conversation. History/context management will be accomplished + through other APIs""" + return self.forward(user_query, chat_history_on, payload) diff --git a/opto/features/flows/types.py b/opto/features/flows/types.py index 4196b926..e5589bed 100644 --- a/opto/features/flows/types.py +++ b/opto/features/flows/types.py @@ -1,10 +1,74 @@ """Types for opto flows.""" -from pydantic import BaseModel, Field, create_model, ConfigDict +from typing import List, Dict, Union +from pydantic import BaseModel, model_validator from typing import Any, Optional, Callable, Dict, Union, Type, List +from dataclasses import dataclass import re import json +from opto.optimizers.utils import encode_image_to_base64 + class TraceObject: def __str__(self): # Any subclass that inherits this will be friendly to the optimizer - raise NotImplementedError("Subclasses must implement __str__") \ No newline at end of file + raise NotImplementedError("Subclasses must implement __str__") + + +class MultiModalPayload(BaseModel): + image_bytes: Optional[str] = None # base64-encoded data URL + + @classmethod + def from_path(cls, path: str) -> "MultiModalPayload": + """Create a payload by loading an image from a local file path.""" + data_url = encode_image_to_base64(path) + return cls(image_bytes=data_url) + + def load_image(self, path: str) -> None: + """Mutate the current payload to include a new image.""" + self.image_bytes = encode_image_to_base64(path) + +class QueryModel(BaseModel): + # Expose "query" as already-normalized: always a List[Dict[str, Any]] + query: List[Dict[str, Any]] + multimodal_payload: Optional[MultiModalPayload] = None + + @model_validator(mode="before") + @classmethod + def normalize(cls, data: Any): + """ + Accepts: + { "query": "hello" } + { "query": "hello", "multimodal_payload": {"image_bytes": "..."} } + And always produces: + { "query": [ {text block}, maybe {image_url block} ], "multimodal_payload": ...} + """ + if not isinstance(data, dict): + raise TypeError("QueryModel input must be a dict") + + raw_query: str = data.get("query") + + # 1) Start with the text part + if isinstance(raw_query, str): + out: List[Dict[str, Any]] = [{"type": "text", "text": raw_query}] + else: + raise TypeError("`query` must be a string or a list of dicts") + + # 2) If we have an image, append an image block + payload = data.get("multimodal_payload") + image_bytes: Optional[str] = None + if payload is not None: + if isinstance(payload, dict): + image_bytes = payload.get("image_bytes") + else: + # Could be already-parsed MultiModalPayload + image_bytes = getattr(payload, "image_bytes", None) + + if image_bytes: + out = out + [{ + "type": "image_url", + "image_url": {"url": 
image_bytes} + }] + + # 3) Write back normalized fields + data["query"] = out + return data From b340d91c5940fed4935e5383dbe5b15572cf64ad Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 6 Oct 2025 14:32:32 -0400 Subject: [PATCH 08/51] update the image-context prompt on optimizer. Make LLM module better. --- opto/features/flows/compose.py | 15 ++++++++------- opto/optimizers/optoprime_v2.py | 4 ++++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/opto/features/flows/compose.py b/opto/features/flows/compose.py index 0d05801a..93ac91e1 100644 --- a/opto/features/flows/compose.py +++ b/opto/features/flows/compose.py @@ -158,6 +158,7 @@ def __init__(self, system_prompt: The system prompt to use for LLM calls. If None and the class has a docstring, the docstring will be used. llm: The LLM model to use for inference chat_history_on: if on, maintain chat history for multi-turn conversations + model_name: override the default name of the model """ if system_prompt is None: system_prompt = "You are a helpful assistant." @@ -176,11 +177,12 @@ def __init__(self, self.chat_history_on = chat_history_on current_llm_sessions = USED_TracedLLM.get() - self.model_name = model_name if model_name else f"TracedLLM{len(current_llm_sessions)}" + self.model_name = model_name if model_name else f"{self.__class__.__name__}{len(current_llm_sessions)}" current_llm_sessions.append(1) # just a marker - def forward(self, user_query: str, chat_history_on: Optional[bool] = None, - payload: Optional[MultiModalPayload] = None) -> str: + def forward(self, user_query: str, + payload: Optional[MultiModalPayload] = None, + chat_history_on: Optional[bool] = None) -> str: """This function takes user_query as input, and returns the response from the LLM, with the system prompt prepended. This method will always save chat history. @@ -205,7 +207,7 @@ def forward(self, user_query: str, chat_history_on: Optional[bool] = None, response = self.llm(messages=messages) - @trace.bundle(output_name="TracedLLM_response") + @trace.bundle(output_name=f"{self.model_name}_response") def call_llm(*args) -> str: """Call the LLM model. Args: @@ -230,8 +232,7 @@ def call_llm(*args) -> str: return response_node - def chat(self, user_query: str, chat_history_on: Optional[bool] = None, - payload: Optional[MultiModalPayload] = None) -> str: + def chat(self, user_query: str, payload: Optional[MultiModalPayload] = None, chat_history_on: Optional[bool] = None) -> str: """Note that chat/forward always assumes it's a single turn of the conversation. History/context management will be accomplished through other APIs""" - return self.forward(user_query, chat_history_on, payload) + return self.forward(user_query, payload, chat_history_on) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 2eb0a862..376f0a26 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -530,6 +530,10 @@ def __init__( def add_image_context(self, image_path: str, context: str = ""): if self.problem_context is None: self.problem_context = "" + + if context == "": + context = "The attached image is given to the workflow. You should use the image to help you understand the problem and provide better suggestions. You can refer to the image when providing your suggestions." 
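+            # Illustrative call (hypothetical path): optimizer.add_image_context("plots/run1.png")
+            # with no context string falls through to the default wording assigned above.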
+ self.problem_context += f"{context}\n\n" # we load in the image and convert to base64 From acae873b6aa9f245a5d3ffc589ab60b6f7ab41ef Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 6 Oct 2025 14:47:14 -0400 Subject: [PATCH 09/51] fix a bug on QueryModel not handling Node as input --- opto/features/flows/compose.py | 5 ++--- opto/features/flows/types.py | 9 ++++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/opto/features/flows/compose.py b/opto/features/flows/compose.py index 93ac91e1..3059a9fa 100644 --- a/opto/features/flows/compose.py +++ b/opto/features/flows/compose.py @@ -208,11 +208,10 @@ def forward(self, user_query: str, response = self.llm(messages=messages) @trace.bundle(output_name=f"{self.model_name}_response") - def call_llm(*args) -> str: + def call_llm(*messages) -> str: """Call the LLM model. Args: - All the conversation history so far, starting from system prompt, to alternating user/assistant messages, ending with the current user query. - + messages: All the conversation history so far, starting from system prompt, to alternating user/assistant messages, ending with the current user query. Returns: response from the LLM """ diff --git a/opto/features/flows/types.py b/opto/features/flows/types.py index e5589bed..33712944 100644 --- a/opto/features/flows/types.py +++ b/opto/features/flows/types.py @@ -6,7 +6,7 @@ import re import json from opto.optimizers.utils import encode_image_to_base64 - +from opto import trace class TraceObject: def __str__(self): @@ -45,13 +45,16 @@ def normalize(cls, data: Any): if not isinstance(data, dict): raise TypeError("QueryModel input must be a dict") - raw_query: str = data.get("query") + raw_query: Any = data.get("query") + if isinstance(raw_query, trace.Node): + assert isinstance(raw_query.data, (str, list)), "If using trace.Node, its data must be str" + raw_query = raw_query.data # 1) Start with the text part if isinstance(raw_query, str): out: List[Dict[str, Any]] = [{"type": "text", "text": raw_query}] else: - raise TypeError("`query` must be a string or a list of dicts") + raise TypeError("`query` must be a string") # 2) If we have an image, append an image block payload = data.get("multimodal_payload") From 1791b15f0b05162ad5f284c513043d99443704f5 Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 10 Nov 2025 17:11:09 -0500 Subject: [PATCH 10/51] add three types of image loading: from numpy array, from url, from local file. --- opto/features/flows/types.py | 66 ++++++++++++++++++++++++-- opto/optimizers/optoprime_v2.py | 82 +++++++++++++++++++++++++++------ opto/optimizers/utils.py | 60 +++++++++++++++++++++++- 3 files changed, 191 insertions(+), 17 deletions(-) diff --git a/opto/features/flows/types.py b/opto/features/flows/types.py index 33712944..b42bfc42 100644 --- a/opto/features/flows/types.py +++ b/opto/features/flows/types.py @@ -5,7 +5,7 @@ from dataclasses import dataclass import re import json -from opto.optimizers.utils import encode_image_to_base64 +from opto.optimizers.utils import encode_image_to_base64, encode_numpy_to_base64 from opto import trace class TraceObject: @@ -15,17 +15,77 @@ def __str__(self): class MultiModalPayload(BaseModel): - image_bytes: Optional[str] = None # base64-encoded data URL + """ + A payload for multimodal content, particularly images. + + Supports three types of image inputs: + 1. URL (string starting with 'http://' or 'https://') + 2. Local file path (string path to image file) + 3. 
Numpy array (RGB image array) + """ + image_bytes: Optional[str] = None # Can be URL or base64-encoded data URL @classmethod def from_path(cls, path: str) -> "MultiModalPayload": """Create a payload by loading an image from a local file path.""" data_url = encode_image_to_base64(path) return cls(image_bytes=data_url) + + @classmethod + def from_url(cls, url: str) -> "MultiModalPayload": + """Create a payload from an image URL.""" + return cls(image_bytes=url) + + @classmethod + def from_array(cls, array: Any, format: str = "PNG") -> "MultiModalPayload": + """Create a payload from a numpy array or array-like RGB image.""" + data_url = encode_numpy_to_base64(array, format=format) + return cls(image_bytes=data_url) def load_image(self, path: str) -> None: - """Mutate the current payload to include a new image.""" + """Mutate the current payload to include a new image from a file path.""" self.image_bytes = encode_image_to_base64(path) + + def set_image(self, image: Union[str, Any], format: str = "PNG") -> None: + """ + Set the image from various input formats. + + Args: + image: Can be: + - URL string (starting with 'http://' or 'https://') + - Local file path (string) + - Numpy array or array-like RGB image + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + """ + if isinstance(image, str): + # Check if it's a URL + if image.startswith('http://') or image.startswith('https://'): + # Direct URL - litellm supports this + self.image_bytes = image + else: + # Assume it's a local file path + self.image_bytes = encode_image_to_base64(image) + else: + # Assume it's a numpy array or array-like object + self.image_bytes = encode_numpy_to_base64(image, format=format) + + def get_content_block(self) -> Optional[Dict[str, Any]]: + """ + Get the content block for the image in litellm format. + + Returns: + Dict with format: {"type": "image_url", "image_url": {"url": ...}} + or None if no image data is set + """ + if self.image_bytes is None: + return None + + return { + "type": "image_url", + "image_url": { + "url": self.image_bytes + } + } class QueryModel(BaseModel): # Expose "query" as already-normalized: always a List[Dict[str, Any]] diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 376f0a26..9319dcee 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -3,7 +3,7 @@ from dataclasses import dataclass, asdict from opto.optimizers.optoprime import OptoPrime, FunctionFeedback from opto.trace.utils import dedent -from opto.optimizers.utils import truncate_expression, extract_xml_like_data, encode_image_to_base64 +from opto.optimizers.utils import truncate_expression, extract_xml_like_data, encode_image_to_base64, encode_numpy_to_base64 from opto.trace.nodes import ParameterNode, Node, MessageNode from opto.trace.propagators import TraceGraph, GraphPropagator @@ -19,7 +19,56 @@ @dataclass class MultiModalPayload: - image_bytes: Optional[str] = None # base64 encoded image bytes + """ + A payload for multimodal content, particularly images. + + Supports three types of image inputs: + 1. URL (string starting with 'http://' or 'https://') + 2. Local file path (string path to image file) + 3. Numpy array (RGB image array) + """ + image_data: Optional[str] = None # Can be URL or base64 data URL + + def set_image(self, image: Union[str, Any], format: str = "PNG") -> None: + """ + Set the image from various input formats. 
+ + Args: + image: Can be: + - URL string (starting with 'http://' or 'https://') + - Local file path (string) + - Numpy array or array-like RGB image + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + """ + if isinstance(image, str): + # Check if it's a URL + if image.startswith('http://') or image.startswith('https://'): + # Direct URL - litellm supports this + self.image_data = image + else: + # Assume it's a local file path + self.image_data = encode_image_to_base64(image) + else: + # Assume it's a numpy array or array-like object + self.image_data = encode_numpy_to_base64(image, format=format) + + def get_content_block(self) -> Optional[Dict[str, Any]]: + """ + Get the content block for the image in litellm format. + + Returns: + Dict with format: {"type": "image_url", "image_url": {"url": ...}} + or None if no image data is set + """ + if self.image_data is None: + return None + + return { + "type": "image_url", + "image_url": { + "url": self.image_data + } + } class OptimizerPromptSymbolSet: @@ -527,7 +576,18 @@ def __init__( self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols) self.initialize_prompt() - def add_image_context(self, image_path: str, context: str = ""): + def add_image_context(self, image: Union[str, Any], context: str = "", format: str = "PNG"): + """ + Add an image to the optimizer context. + + Args: + image: Can be: + - URL string (starting with 'http://' or 'https://') + - Local file path (string) + - Numpy array or array-like RGB image + context: Optional context text to describe the image. If empty, uses default. + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + """ if self.problem_context is None: self.problem_context = "" @@ -536,9 +596,8 @@ def add_image_context(self, image_path: str, context: str = ""): self.problem_context += f"{context}\n\n" - # we load in the image and convert to base64 - data_url = encode_image_to_base64(image_path) - self.multimodal_payload.image_bytes = data_url + # Set the image using the multimodal payload + self.multimodal_payload.set_image(image, format=format) self.initialize_prompt() @@ -773,13 +832,10 @@ def call_llm( print("Prompt\n", system_prompt + user_prompt) user_message_content = [] - if self.multimodal_payload.image_bytes is not None: - user_message_content.append({ - "type": "image_url", - "image_url": { - "url": self.multimodal_payload.image_bytes - } - }) + # Add image content block if available + image_block = self.multimodal_payload.get_content_block() + if image_block is not None: + user_message_content.append(image_block) user_message_content.append({"type": "text", "text": user_prompt}) diff --git a/opto/optimizers/utils.py b/opto/optimizers/utils.py index 4fbec459..6e4649bd 100644 --- a/opto/optimizers/utils.py +++ b/opto/optimizers/utils.py @@ -1,6 +1,12 @@ import base64 import mimetypes -from typing import Dict, Any +import io +from typing import Dict, Any, Union, Optional +try: + import numpy as np + NUMPY_AVAILABLE = True +except ImportError: + NUMPY_AVAILABLE = False def print_color(message, color=None, logger=None): @@ -140,6 +146,7 @@ def extract_xml_like_data(text: str, reasoning_tag: str = "reasoning", def encode_image_to_base64(path: str) -> str: + """Encode a local image file to base64 data URL.""" # Read binary with open(path, "rb") as f: image_bytes = f.read() @@ -151,3 +158,54 @@ def encode_image_to_base64(path: str) -> str: b64 = base64.b64encode(image_bytes).decode("utf-8") data_url = f"data:{mime_type};base64,{b64}" return data_url + + 
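+# Illustrative usage (assuming numpy is installed):
+#   encode_numpy_to_base64(np.zeros((64, 64, 3), dtype=np.uint8))
+# returns a "data:image/png;base64,..." string that can be placed in an
+# `image_url` content block for litellm-style multimodal messages.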
+def encode_numpy_to_base64(array, format: str = "PNG") -> str: + """ + Encode a numpy array to base64 data URL. + + Args: + array: numpy array representing an image (H, W, C) with values in [0, 255] or [0, 1] + format: Image format (PNG, JPEG, etc.) + + Returns: + Base64 encoded data URL string + """ + if not NUMPY_AVAILABLE: + raise ImportError("numpy is required to encode numpy arrays. Install it with: pip install numpy") + + try: + from PIL import Image + except ImportError: + raise ImportError("Pillow is required to encode numpy arrays. Install it with: pip install Pillow") + + # Convert to numpy array if not already + if not isinstance(array, np.ndarray): + array = np.array(array) + + # Normalize to [0, 255] if needed + if array.dtype == np.float32 or array.dtype == np.float64: + if array.max() <= 1.0: + array = (array * 255).astype(np.uint8) + else: + array = array.astype(np.uint8) + elif array.dtype != np.uint8: + array = array.astype(np.uint8) + + # Convert to PIL Image + image = Image.fromarray(array) + + # Save to bytes buffer + buffer = io.BytesIO() + image.save(buffer, format=format.upper()) + buffer.seek(0) + + # Encode to base64 + image_bytes = buffer.getvalue() + b64 = base64.b64encode(image_bytes).decode("utf-8") + + # Determine MIME type + mime_type = f"image/{format.lower()}" + data_url = f"data:{mime_type};base64,{b64}" + + return data_url From 51dfc4b63162ba235dee97971fe4264525a60998 Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:17:08 -0800 Subject: [PATCH 11/51] Update opto/optimizers/optoprime_v2.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/optimizers/optoprime_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 1f8e20d2..55af988a 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -373,7 +373,7 @@ def __repr__(self) -> str: """) if self.context is not None and self.context.strip() != "": - context_section.format(context=self.context) + context_section = context_section.format(context=self.context) optimization_query += context_section return optimization_query From 978fdf65596c659db40fdbb6efe95fd6ededd7a0 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 10 Nov 2025 22:17:32 +0000 Subject: [PATCH 12/51] Initial plan From 34f9747f6c6ffe17fd26999337557acb24615eec Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:19:10 -0800 Subject: [PATCH 13/51] Update opto/optimizers/optoprime_v2.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/optimizers/optoprime_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 55af988a..2f5f290d 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -871,7 +871,7 @@ def save(self, path: str): "prompt_symbols": self.prompt_symbols, "representation_prompt": self.representation_prompt, "output_format_prompt": self.output_format_prompt, - 'context_prompt': self.context_prompt + "context_prompt": self.context_prompt }, f) def load(self, path: str): From 0118979507d9051330a25763531eda3dc20a90d0 Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:19:28 -0800 Subject: [PATCH 14/51] Update opto/optimizers/optoprime_v2.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/optimizers/optoprime_v2.py | 65 
+++------------------------------ 1 file changed, 5 insertions(+), 60 deletions(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 2f5f290d..99b45525 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -229,66 +229,11 @@ def example_output(self, reasoning, variables): return json.dumps(output, indent=2) def output_response_extractor(self, response: str) -> Dict[str, Any]: - reasoning = "" - suggestion_tag = "suggestion" - - if "```" in response: - response = response.replace("```", "").strip() - - suggestion = {} - attempt_n = 0 - while attempt_n < 2: - try: - suggestion = json.loads(response)[suggestion_tag] - reasoning = json.loads(response)[self.reasoning_tag] - break - except json.JSONDecodeError: - # Remove things outside the brackets - response = re.findall(r"{.*}", response, re.DOTALL) - if len(response) > 0: - response = response[0] - attempt_n += 1 - except Exception: - attempt_n += 1 - - if not isinstance(suggestion, dict): - suggestion = {} - - if len(suggestion) == 0: - # we try to extract key/value separately and return it as a dictionary - pattern = rf'"{suggestion_tag}"\s*:\s*\{{(.*?)\}}' - suggestion_match = re.search(pattern, str(response), re.DOTALL) - if suggestion_match: - suggestion = {} - # Extract the entire content of the suggestion dictionary - suggestion_content = suggestion_match.group(1) - # Regex to extract each key-value pair; - # This scheme assumes double quotes but is robust to missing commas at the end of the line - pair_pattern = r'"([a-zA-Z0-9_]+)"\s*:\s*"(.*)"' - # Find all matches of key-value pairs - pairs = re.findall(pair_pattern, suggestion_content, re.DOTALL) - for key, value in pairs: - suggestion[key] = value - - if len(suggestion) == 0: - print(f"Cannot extract suggestion from LLM's response:") - print(response) - - # if the suggested value is a code, and the entire code body is empty (i.e., not even function signature is present) - # then we remove such suggestion - keys_to_remove = [] - for key, value in suggestion.items(): - if "__code" in key and value.strip() == "": - keys_to_remove.append(key) - for key in keys_to_remove: - del suggestion[key] - - extracted_data = {"reasoning": reasoning, - "variables": suggestion} - - return extracted_data - - + """ + Extracts reasoning and suggestion variables from the LLM response using OptoPrime's extraction logic. 
+ """ + # Use the centralized extraction logic from OptoPrime + return OptoPrime.extract_llm_suggestion(response) class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): variables_section_title = "# Variables" inputs_section_title = "# Inputs" From a32136e710c60d65b9705c668052cdb07b40dcd3 Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:19:38 -0800 Subject: [PATCH 15/51] Update opto/optimizers/opro_v2.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/optimizers/opro_v2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opto/optimizers/opro_v2.py b/opto/optimizers/opro_v2.py index 19b33e58..5e6e4b02 100644 --- a/opto/optimizers/opro_v2.py +++ b/opto/optimizers/opro_v2.py @@ -143,7 +143,7 @@ def __repr__(self) -> str: """) if self.context is not None and self.context.strip() != "": - context_section.format(context=self.context) + context_section = context_section.format(context=self.context) optimization_query += context_section return optimization_query From 52d5eb0711b141ec75aed659014f0ddd05833c39 Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:29:18 -0800 Subject: [PATCH 16/51] Update opto/features/flows/types.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/features/flows/types.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/opto/features/flows/types.py b/opto/features/flows/types.py index b42bfc42..957df332 100644 --- a/opto/features/flows/types.py +++ b/opto/features/flows/types.py @@ -113,8 +113,18 @@ def normalize(cls, data: Any): # 1) Start with the text part if isinstance(raw_query, str): out: List[Dict[str, Any]] = [{"type": "text", "text": raw_query}] + elif isinstance(raw_query, list): + # Normalize each element in the list + out = [] + for item in raw_query: + if isinstance(item, str): + out.append({"type": "text", "text": item}) + elif isinstance(item, dict): + out.append(item) + else: + raise TypeError("Elements of `query` list must be str or dict") else: - raise TypeError("`query` must be a string") + raise TypeError("`query` must be a string or list") # 2) If we have an image, append an image block payload = data.get("multimodal_payload") From d502eebe70a5259f8deae46e90dc2e4df387d72f Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:33:27 -0800 Subject: [PATCH 17/51] Update opto/optimizers/optoprime_v2.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/optimizers/optoprime_v2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 99b45525..4cf93c9b 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -233,7 +233,8 @@ def output_response_extractor(self, response: str) -> Dict[str, Any]: Extracts reasoning and suggestion variables from the LLM response using OptoPrime's extraction logic. 
""" # Use the centralized extraction logic from OptoPrime - return OptoPrime.extract_llm_suggestion(response) + optoprime_instance = OptoPrime() + return optoprime_instance.extract_llm_suggestion(response) class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): variables_section_title = "# Variables" inputs_section_title = "# Inputs" From e511a851877719cbab4012a55ba8a90e89a563c5 Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:33:40 -0800 Subject: [PATCH 18/51] Update opto/features/flows/compose.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/features/flows/compose.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opto/features/flows/compose.py b/opto/features/flows/compose.py index 3059a9fa..3604723a 100644 --- a/opto/features/flows/compose.py +++ b/opto/features/flows/compose.py @@ -191,7 +191,7 @@ def forward(self, user_query: str, If chat_history_on is True, the chat history will be included in the LLM input. Args: - user_query: The user query to send to the LLM. Can be + user_query: The user query to send to the LLM. This should be a string containing the user's input or question. Returns: str: For direct pattern From 2633ac70c7247663c677f32458ea3486c6d0440a Mon Sep 17 00:00:00 2001 From: Allen Nie Date: Mon, 10 Nov 2025 14:34:48 -0800 Subject: [PATCH 19/51] Update opto/optimizers/optoprime_v2.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- opto/optimizers/optoprime_v2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 4cf93c9b..9b39d474 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -308,8 +308,7 @@ def __repr__(self) -> str: inputs=self.inputs, outputs=self.outputs, others=self.others, - feedback=self.feedback, - context=self.context + feedback=self.feedback ) context_section = dedent(""" From bbe1b40adfbbac3f26322b960534c72ab43f1e7f Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 10 Nov 2025 17:36:01 -0500 Subject: [PATCH 20/51] partial commit --- opto/optimizers/optoprime_v2.py | 57 +-------------------------------- opto/optimizers/utils.py | 52 ++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 56 deletions(-) diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 99b45525..77d6c3e3 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -3,7 +3,7 @@ from dataclasses import dataclass, asdict from opto.optimizers.optoprime import OptoPrime, FunctionFeedback from opto.trace.utils import dedent -from opto.optimizers.utils import truncate_expression, extract_xml_like_data, encode_image_to_base64, encode_numpy_to_base64 +from opto.optimizers.utils import truncate_expression, extract_xml_like_data, MultiModalPayload from opto.trace.nodes import ParameterNode, Node, MessageNode from opto.trace.propagators import TraceGraph, GraphPropagator @@ -16,61 +16,6 @@ import re from typing import Dict, Any - -@dataclass -class MultiModalPayload: - """ - A payload for multimodal content, particularly images. - - Supports three types of image inputs: - 1. URL (string starting with 'http://' or 'https://') - 2. Local file path (string path to image file) - 3. Numpy array (RGB image array) - """ - image_data: Optional[str] = None # Can be URL or base64 data URL - - def set_image(self, image: Union[str, Any], format: str = "PNG") -> None: - """ - Set the image from various input formats. 
- - Args: - image: Can be: - - URL string (starting with 'http://' or 'https://') - - Local file path (string) - - Numpy array or array-like RGB image - format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG - """ - if isinstance(image, str): - # Check if it's a URL - if image.startswith('http://') or image.startswith('https://'): - # Direct URL - litellm supports this - self.image_data = image - else: - # Assume it's a local file path - self.image_data = encode_image_to_base64(image) - else: - # Assume it's a numpy array or array-like object - self.image_data = encode_numpy_to_base64(image, format=format) - - def get_content_block(self) -> Optional[Dict[str, Any]]: - """ - Get the content block for the image in litellm format. - - Returns: - Dict with format: {"type": "image_url", "image_url": {"url": ...}} - or None if no image data is set - """ - if self.image_data is None: - return None - - return { - "type": "image_url", - "image_url": { - "url": self.image_data - } - } - - class OptimizerPromptSymbolSet: """ By inheriting this class and pass into the optimizer. People can change the optimizer documentation diff --git a/opto/optimizers/utils.py b/opto/optimizers/utils.py index 6e4649bd..793333e9 100644 --- a/opto/optimizers/utils.py +++ b/opto/optimizers/utils.py @@ -145,6 +145,58 @@ def extract_xml_like_data(text: str, reasoning_tag: str = "reasoning", return result +class MultiModalPayload: + """ + A payload for multimodal content, particularly images. + + Supports three types of image inputs: + 1. URL (string starting with 'http://' or 'https://') + 2. Local file path (string path to image file) + 3. Numpy array (RGB image array) + """ + image_data: Optional[str] = None # Can be URL or base64 data URL + + def set_image(self, image: Union[str, Any], format: str = "PNG") -> None: + """ + Set the image from various input formats. + + Args: + image: Can be: + - URL string (starting with 'http://' or 'https://') + - Local file path (string) + - Numpy array or array-like RGB image + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + """ + if isinstance(image, str): + # Check if it's a URL + if image.startswith('http://') or image.startswith('https://'): + # Direct URL - litellm supports this + self.image_data = image + else: + # Assume it's a local file path + self.image_data = encode_image_to_base64(image) + else: + # Assume it's a numpy array or array-like object + self.image_data = encode_numpy_to_base64(image, format=format) + + def get_content_block(self) -> Optional[Dict[str, Any]]: + """ + Get the content block for the image in litellm format. + + Returns: + Dict with format: {"type": "image_url", "image_url": {"url": ...}} + or None if no image data is set + """ + if self.image_data is None: + return None + + return { + "type": "image_url", + "image_url": { + "url": self.image_data + } + } + def encode_image_to_base64(path: str) -> str: """Encode a local image file to base64 data URL.""" # Read binary From a4057ef63d4eef1584e2a5e930bf98b6a1fb96b8 Mon Sep 17 00:00:00 2001 From: windweller Date: Sat, 22 Nov 2025 11:38:18 -0500 Subject: [PATCH 21/51] add a `is_image` check to Node --- opto/trace/nodes.py | 110 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/opto/trace/nodes.py b/opto/trace/nodes.py index 0f4c85e5..fec170f5 100644 --- a/opto/trace/nodes.py +++ b/opto/trace/nodes.py @@ -361,6 +361,116 @@ def data(self): if len(current_used_nodes) > 0 and GRAPH.TRACE: # We're within trace_nodes context. 
             current_used_nodes[-1].add(self)
         return self.__getattribute__("_data")
+    
+    @property
+    def is_image(self) -> bool:
+        """Check if the node is an image node.
+
+        Returns:
+            bool: True if the node is an image node, False otherwise.
+
+        Notes:
+            Supports four types of image data:
+            1. Base64 encoded string (data URL format)
+            2. Numpy array (RGB image array)
+            3. PIL Image object
+            4. URL string pointing to an image (pattern-based check, no network request)
+
+            For URLs, this performs a fast pattern-based check only. For verification
+            with a network request, use the verify_data_is_image_url() method.
+        """
+        try:
+            from PIL import Image
+            if isinstance(self._data, Image.Image):
+                return True
+        except ImportError:
+            pass
+
+        # Check if it's a base64 data URL string
+        if isinstance(self._data, str) and self._data.startswith('data:image/'):
+            return True
+
+        # Check if it's a numpy array (RGB image)
+        try:
+            import numpy as np
+            if isinstance(self._data, np.ndarray):
+                # Check if it's a valid image array (2D or 3D with 3 or 4 channels)
+                if len(self._data.shape) == 2:  # Grayscale
+                    return True
+                elif len(self._data.shape) == 3 and self._data.shape[2] in [3, 4]:  # RGB or RGBA
+                    return True
+        except ImportError:
+            pass
+
+        # Check if it's an image URL (pattern-based, no network request)
+        if isinstance(self._data, str):
+            try:
+                from urllib.parse import urlparse
+                parsed = urlparse(self._data)
+                if parsed.scheme in ('http', 'https'):
+                    path = parsed.path.lower()
+                    # Common image extensions
+                    image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp',
+                                        '.svg', '.ico', '.tiff', '.tif', '.heic', '.heif')
+                    if any(path.endswith(ext) for ext in image_extensions):
+                        return True
+            except (ValueError, AttributeError):
+                pass
+
+        return False
+
+    def verify_data_is_image_url(self, timeout: float = 1.0) -> bool:
+        """Verify if the node's data is an image URL by checking Content-Type via HEAD request.
+
+        This method performs an actual network request to verify that a URL points to an image.
+        It should be used when you need definitive verification beyond pattern matching.
+
+        Args:
+            timeout: Maximum time in seconds to wait for the request. Default is 1.0.
+
+        Returns:
+            bool: True if the URL returns an image Content-Type, False otherwise.
+
+        Notes:
+            - This method only applies to http/https URLs
+            - Returns False for non-URL data or if the request fails
+            - Uses HEAD request to avoid downloading the full image
+            - Requires network connectivity
+
+        Example:
+            >>> node = Node("https://example.com/photo.jpg")
+            >>> node.is_image  # Fast pattern check: True
+            >>> node.verify_data_is_image_url()  # Network verification: True/False
+        """
+        if not isinstance(self._data, str):
+            return False
+
+        try:
+            from urllib.parse import urlparse
+            parsed = urlparse(self._data)
+
+            # Only verify http/https URLs
+            if parsed.scheme not in ('http', 'https'):
+                return False
+
+            # Perform HEAD request to check Content-Type
+            try:
+                import requests
+                response = requests.head(self._data, timeout=timeout, allow_redirects=True)
+                content_type = response.headers.get('content-type', '').lower()
+                return content_type.startswith('image/')
+            except ImportError:
+                warnings.warn(
+                    "requests library not available. Install with: pip install requests",
+                    ImportWarning
+                )
+                return False
+            except (requests.RequestException, Exception):
+                # Network errors, timeouts, invalid URLs, etc.
+ return False + + except (ValueError, AttributeError): + return False @property def parents(self): From f6ce6b7ae377499bdb0c265c792c80162ac4aee9 Mon Sep 17 00:00:00 2001 From: windweller Date: Sat, 22 Nov 2025 12:11:13 -0500 Subject: [PATCH 22/51] add parameter check to ensure 1 image can be parameter --- opto/optimizers/opro.py | 20 ++++++++++++++++++++ opto/optimizers/opro_v2.py | 25 +++++++++++++++++++++++++ opto/optimizers/optimizer.py | 19 +++++++++++++++++-- opto/optimizers/optoprime.py | 18 ++++++++++++++++++ opto/optimizers/optoprime_v2.py | 26 ++++++++++++++++++++++++++ 5 files changed, 106 insertions(+), 2 deletions(-) diff --git a/opto/optimizers/opro.py b/opto/optimizers/opro.py index 24f5a2cd..8c02ad2d 100644 --- a/opto/optimizers/opro.py +++ b/opto/optimizers/opro.py @@ -1,7 +1,9 @@ import json from textwrap import dedent +from typing import List from opto.optimizers.optoprime import OptoPrime +from opto.trace.nodes import ParameterNode class OPRO(OptoPrime): @@ -84,6 +86,24 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.buffer = [] + def parameter_check(self, parameters: List[ParameterNode]): + """Check if the parameters are valid. + This can be overloaded by subclasses to add more checks. + + Args: + parameters: List[ParameterNode] + The parameters to check. + + Raises: + AssertionError: If any parameter contains image data. + """ + # Ensure no parameters contain image data + for param in parameters: + assert not param.is_image, ( + f"Parameter '{param.name}' contains image data. " + f"OPROv1 optimizer does not support image parameters." + ) + def construct_prompt(self, summary, mask=None, *args, **kwargs): """Construct system and user prompts using historical examples. diff --git a/opto/optimizers/opro_v2.py b/opto/optimizers/opro_v2.py index 5e6e4b02..bd6f3db2 100644 --- a/opto/optimizers/opro_v2.py +++ b/opto/optimizers/opro_v2.py @@ -294,6 +294,31 @@ def __init__(self, *args, include_example=include_example, memory_size=memory_size, problem_context=problem_context, **kwargs) + + def parameter_check(self, parameters: List[ParameterNode]): + """Check if the parameters are valid. + This can be overloaded by subclasses to add more checks. + + Args: + parameters: List[ParameterNode] + The parameters to check. + + Raises: + AssertionError: If more than one parameter contains image data. + + Notes: + OPROv2 supports image parameters, but only one parameter can be + an image at a time since LLMs can only generate one image per inference. + """ + # Count image parameters + image_params = [param for param in parameters if param.is_image] + + if len(image_params) > 1: + param_names = ', '.join([f"'{p.name}'" for p in image_params]) + raise AssertionError( + f"OPROv2 supports at most one image parameter, but found {len(image_params)}: " + f"{param_names}. LLMs can only generate one image at a time." + ) def problem_instance(self, summary, mask=None): """Create a ProblemInstance from an optimization summary. 
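As a rough sketch of what these guards catch (an illustration only, not part of the patch: it assumes `node(data, trainable=True)` from `opto/trace/nodes.py` returns a trainable `ParameterNode`, and it relies on the pattern-based `is_image` check added above; the URLs are placeholders):

    from opto.trace.nodes import node

    params = [
        node("You're a helpful agent", trainable=True),
        node("https://example.com/photo.jpg", trainable=True),
        node("https://example.com/chart.png", trainable=True),
    ]

    # OPROv2 and OptoPrimeV2 accept at most one image parameter, because the
    # backing LLM can only emit a single image per inference.
    image_params = [p for p in params if p.is_image]
    assert len(image_params) == 2  # this parameter set would fail parameter_check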
diff --git a/opto/optimizers/optimizer.py b/opto/optimizers/optimizer.py index 79b37370..9882d986 100644 --- a/opto/optimizers/optimizer.py +++ b/opto/optimizers/optimizer.py @@ -69,9 +69,13 @@ class AbstractOptimizer: """ def __init__(self, parameters: List[ParameterNode], *args, **kwargs): - assert type(parameters) is list - assert all([isinstance(p, ParameterNode) for p in parameters]) + self.parameter_check(parameters) + # this is a guaranteed basic check, not possible to be overloaded by subclasses + assert type(parameters) is list, "Parameters must be a list." + assert all([isinstance(p, ParameterNode) for p in parameters]), "Parameters must be a list of ParameterNode instances." assert len(parameters) > 0, 'Parameters list is empty.' + for p in parameters: + assert p.trainable, "Parameter {} must be trainable.".format(p.name) self.parameters = parameters def step(self): @@ -87,6 +91,17 @@ def propagator(self): """Return a Propagator object that can be used to propagate feedback in backward.""" raise NotImplementedError + def parameter_check(self, parameters: List[ParameterNode]): + """Check if the parameters are valid. + This can be overloaded by subclasses to add more checks. + + Args: + parameters: List[ParameterNode] + The parameters to check. + """ + pass + + class Optimizer(AbstractOptimizer): """Base class for graph-based optimizers in the Trace framework. diff --git a/opto/optimizers/optoprime.py b/opto/optimizers/optoprime.py index 8727f743..67c2018c 100644 --- a/opto/optimizers/optoprime.py +++ b/opto/optimizers/optoprime.py @@ -525,6 +525,24 @@ def __init__( self.use_json_object_format = use_json_object_format self.highlight_variables = highlight_variables + def parameter_check(self, parameters: List[ParameterNode]): + """Check if the parameters are valid. + This can be overloaded by subclasses to add more checks. + + Args: + parameters: List[ParameterNode] + The parameters to check. + + Raises: + AssertionError: If any parameter contains image data. + """ + # Ensure no parameters contain image data + for param in parameters: + assert not param.is_image, ( + f"Parameter '{param.name}' contains image data. " + f"OptoPrimeV1 optimizer does not support image parameters." + ) + def default_propagator(self): """Return the default Propagator object of the optimizer.""" return GraphPropagator() diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py index 878f7cba..72b5b49b 100644 --- a/opto/optimizers/optoprime_v2.py +++ b/opto/optimizers/optoprime_v2.py @@ -180,6 +180,7 @@ def output_response_extractor(self, response: str) -> Dict[str, Any]: # Use the centralized extraction logic from OptoPrime optoprime_instance = OptoPrime() return optoprime_instance.extract_llm_suggestion(response) + class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): variables_section_title = "# Variables" inputs_section_title = "# Inputs" @@ -466,6 +467,31 @@ def __init__( self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols) self.initialize_prompt() + def parameter_check(self, parameters: List[ParameterNode]): + """Check if the parameters are valid. + This can be overloaded by subclasses to add more checks. + + Args: + parameters: List[ParameterNode] + The parameters to check. + + Raises: + AssertionError: If more than one parameter contains image data. + + Notes: + OptoPrimeV2 supports image parameters, but only one parameter can be + an image at a time since LLMs can only generate one image per inference. 
+        """
+        # Count image parameters
+        image_params = [param for param in parameters if param.is_image]
+
+        if len(image_params) > 1:
+            param_names = ', '.join([f"'{p.name}'" for p in image_params])
+            raise AssertionError(
+                f"OptoPrimeV2 supports at most one image parameter, but found {len(image_params)}: "
+                f"{param_names}. LLMs can only generate one image at a time."
+            )
+
     def add_image_context(self, image: Union[str, Any], context: str = "", format: str = "PNG"):
         """
         Add an image to the optimizer context.

From aa968075e696d59e1fb6847854ed555d223a79a Mon Sep 17 00:00:00 2001
From: windweller
Date: Sat, 22 Nov 2025 13:11:21 -0500
Subject: [PATCH 23/51] fix the import error

---
 opto/optimizers/opro_v2.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/opto/optimizers/opro_v2.py b/opto/optimizers/opro_v2.py
index bd6f3db2..ad0fd677 100644
--- a/opto/optimizers/opro_v2.py
+++ b/opto/optimizers/opro_v2.py
@@ -1,7 +1,8 @@
 import json
 from textwrap import dedent
 from dataclasses import dataclass, asdict
-from typing import Dict, Optional
+from typing import Dict, Optional, List
+from opto.trace.nodes import ParameterNode
 
 from opto.optimizers.optoprime_v2 import OptoPrimeV2, OptimizerPromptSymbolSet
 

From 172d00ff928bd5820074728b83e8c5156137253b Mon Sep 17 00:00:00 2001
From: windweller
Date: Wed, 26 Nov 2025 10:32:29 -0500
Subject: [PATCH 24/51] refactored/moved out image data check

---
 opto/optimizers/optoprime_v2.py |   4 +-
 opto/trace/nodes.py             | 218 +++++++++++++++++---------------
 2 files changed, 122 insertions(+), 100 deletions(-)

diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py
index 72b5b49b..6731c49b 100644
--- a/opto/optimizers/optoprime_v2.py
+++ b/opto/optimizers/optoprime_v2.py
@@ -605,7 +605,9 @@ def repr_node_value_compact(self, node_dict, node_tag="node",
         return "\n".join(temp_list)
 
     def construct_prompt(self, summary, mask=None, *args, **kwargs):
-        """Construct the system and user prompt."""
+        """Construct the system and user prompt.
+        Expanded to construct a list of content blocks (text plus optional image payloads).
+        """
         system_prompt = (
             self.representation_prompt + self.output_format_prompt
         )  # generic representation + output rule
diff --git a/opto/trace/nodes.py b/opto/trace/nodes.py
index fec170f5..327916bc 100644
--- a/opto/trace/nodes.py
+++ b/opto/trace/nodes.py
@@ -6,6 +6,10 @@
 import re
 import heapq
 import contextvars
+import requests
+from PIL import Image
+from io import BytesIO
+from urllib.parse import urlparse
 
 
 def node(data, name=None, trainable=False, description=None):
@@ -275,16 +279,118 @@ def __len__(self):
 
 GRAPH = Graph()  # This is a global registry of all the nodes.
 
-# USED_NODES = (
-#     list()
-# )  # A stack of sets. This is a global registry to track which nodes are read.
-
 USED_NODES = contextvars.ContextVar('USED_NODES', default=list())  # A stack of sets. This is a global registry to track which nodes are read.
 
 T = TypeVar("T")
 
 
+def verify_data_is_image_url(url: str, timeout: float = 1.0) -> bool:
+    """Verify that a URL points to an image by checking Content-Type via HEAD request.
+
+    This function performs an actual network request to verify that a URL points to an image.
+    It should be used when you need definitive verification beyond pattern matching.
+
+    Args:
+        url: The URL to verify.
+        timeout: Maximum time in seconds to wait for the request. Default is 1.0.
+
+    Returns:
+        bool: True if the URL returns an image Content-Type, False otherwise.
+
+    Notes:
+        - This function only applies to http/https URLs
+        - Returns False for non-URL data or if the request fails
+        - Uses HEAD request to avoid downloading the full image
+        - Requires network connectivity
+
+    Example:
+        >>> result = verify_data_is_image_url("https://example.com/photo.jpg")
+        >>> result  # Network verification: True/False
+    """
+    if not isinstance(url, str):
+        return False
+
+    try:
+        parsed = urlparse(url)
+
+        # Only verify http/https URLs
+        if parsed.scheme not in ('http', 'https'):
+            return False
+
+        # Perform HEAD request to check Content-Type
+        try:
+            response = requests.head(url, timeout=timeout, allow_redirects=True)
+            content_type = response.headers.get('content-type', '').lower()
+            return content_type.startswith('image/')
+        except ImportError:
+            warnings.warn(
+                "requests library not available. Install with: pip install requests",
+                ImportWarning
+            )
+            return False
+        except (requests.RequestException, Exception):
+            # Network errors, timeouts, invalid URLs, etc.
+            return False
+
+    except (ValueError, AttributeError):
+        return False
+
+
+def is_image(data) -> bool:
+    """Check whether the given data is image data.
+    This is the shared type check behind Node.is_image.
+
+    Returns:
+        bool: True if the data looks like image data, False otherwise.
+
+    Notes:
+        Supports four types of image data:
+        1. Base64 encoded string (data URL format)
+        2. PIL Image object
+        3. Raw image bytes
+        4. URL string pointing to an image (pattern-based check, no network request)
+
+        For URLs, this performs a fast pattern-based check only. For verification
+        with a network request, use verify_data_is_image_url().
+
+        If you have a numpy array, convert it to PIL Image first:
+            from PIL import Image
+            pil_image = Image.fromarray(numpy_array)
+    """
+    try:
+        if isinstance(data, Image.Image):
+            return True
+    except ImportError:
+        pass
+
+    # Check if it's a base64 data URL string
+    if isinstance(data, str) and data.startswith('data:image/'):
+        return True
+
+    # Check if it's raw image bytes
+    if isinstance(data, bytes):
+        try:
+            Image.open(BytesIO(data))
+            return True
+        except Exception:
+            pass
+
+    # Check if it's an image URL (pattern-based, no network request)
+    if isinstance(data, str):
+        try:
+            parsed = urlparse(data)
+            if parsed.scheme in ('http', 'https'):
+                path = parsed.path.lower()
+                # Common image extensions
+                image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp',
+                                    '.svg', '.ico', '.tiff', '.tif', '.heic', '.heif')
+                if any(path.endswith(ext) for ext in image_extensions):
+                    return True
+        except (ValueError, AttributeError):
+            pass
+
+    return False
 
 class AbstractNode(Generic[T]):
     """AbstractNode represents an abstract data node in a directed graph.
@@ -365,6 +471,7 @@ def data(self):
     @property
     def is_image(self) -> bool:
         """Check if the node is an image node.
+        Delegates to the shared module-level is_image() check.
 
         Returns:
             bool: True if the node is an image node, False otherwise.
@@ -372,105 +479,18 @@ def is_image(self) -> bool:
         Notes:
             Supports four types of image data:
             1. Base64 encoded string (data URL format)
-            2. Numpy array (RGB image array)
-            3. PIL Image object
+            2. PIL Image object
+            3. Raw image bytes
             4. URL string pointing to an image (pattern-based check, no network request)
-
+            
             For URLs, this performs a fast pattern-based check only. For verification
             with a network request, use the verify_data_is_image_url() method.
+ + If you have a numpy array, convert it to PIL Image first: + from PIL import Image + pil_image = Image.fromarray(numpy_array) """ - try: - from PIL import Image - if isinstance(self._data, Image.Image): - return True - except ImportError: - pass - - # Check if it's a base64 data URL string - if isinstance(self._data, str) and self._data.startswith('data:image/'): - return True - - # Check if it's a numpy array (RGB image) - try: - import numpy as np - if isinstance(self._data, np.ndarray): - # Check if it's a valid image array (2D or 3D with 3 or 4 channels) - if len(self._data.shape) == 2: # Grayscale - return True - elif len(self._data.shape) == 3 and self._data.shape[2] in [3, 4]: # RGB or RGBA - return True - except ImportError: - pass - - # Check if it's an image URL (pattern-based, no network request) - if isinstance(self._data, str): - try: - from urllib.parse import urlparse - parsed = urlparse(self._data) - if parsed.scheme in ('http', 'https'): - path = parsed.path.lower() - # Common image extensions - image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', - '.svg', '.ico', '.tiff', '.tif', '.heic', '.heif') - if any(path.endswith(ext) for ext in image_extensions): - return True - except (ValueError, AttributeError): - pass - - return False - - def verify_data_is_image_url(self, timeout: float = 1.0) -> bool: - """Verify if the node's data is an image URL by checking Content-Type via HEAD request. - - This method performs an actual network request to verify that a URL points to an image. - It should be used when you need definitive verification beyond pattern matching. - - Args: - timeout: Maximum time in seconds to wait for the request. Default is 1.0. - - Returns: - bool: True if the URL returns an image Content-Type, False otherwise. - - Notes: - - This method only applies to http/https URLs - - Returns False for non-URL data or if the request fails - - Uses HEAD request to avoid downloading the full image - - Requires network connectivity - - Example: - >>> node = Node("https://example.com/photo.jpg") - >>> node.is_image # Fast pattern check: True - >>> node.verify_image_url() # Network verification: True/False - """ - if not isinstance(self._data, str): - return False - - try: - from urllib.parse import urlparse - parsed = urlparse(self._data) - - # Only verify http/https URLs - if parsed.scheme not in ('http', 'https'): - return False - - # Perform HEAD request to check Content-Type - try: - import requests - response = requests.head(self._data, timeout=timeout, allow_redirects=True) - content_type = response.headers.get('content-type', '').lower() - return content_type.startswith('image/') - except ImportError: - warnings.warn( - "requests library not available. Install with: pip install requests", - ImportWarning - ) - return False - except (requests.RequestException, Exception): - # Network errors, timeouts, invalid URLs, etc. - return False - - except (ValueError, AttributeError): - return False + return is_image(self._data) @property def parents(self): From 40dfdab5729c89fcd9a28b068063a9e4c33f6ed0 Mon Sep 17 00:00:00 2001 From: windweller Date: Sun, 30 Nov 2025 20:57:32 -0800 Subject: [PATCH 25/51] fix a few optimizer test issues, update setup dependencies to include pillow. Add history manager. 
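
A minimal usage sketch of the new ConversationHistory manager (names follow
opto/optimizers/backbone.py added below; the actual LLM call is omitted and
the message strings are placeholders):

    from opto.optimizers.backbone import ConversationHistory, UserTurn, AssistantTurn

    history = ConversationHistory(system_prompt="You are an optimizer.")
    history.add_user_turn(UserTurn().add_text("Here is the problem instance."))
    history.add_assistant_turn(AssistantTurn().add_text("<reasoning>...</reasoning>"))
    history.add_user_turn(UserTurn().add_text("New feedback after applying the suggestion."))

    # Keep only the most recent completed round plus the pending user turn.
    messages = history.to_messages(n=1, truncate_strategy="from_start")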
--- opto/optimizers/backbone.py | 569 ++++++++++++ opto/optimizers/opro_v3.py | 403 +++++++++ opto/optimizers/optoprime_v3.py | 838 ++++++++++++++++++ opto/optimizers/utils.py | 106 ++- opto/trace/nodes.py | 2 + setup.py | 1 + .../llm_optimizers_tests/test_optoprime_v2.py | 20 +- .../llm_optimizers_tests/test_optoprime_v3.py | 177 ++++ 8 files changed, 2103 insertions(+), 13 deletions(-) create mode 100644 opto/optimizers/backbone.py create mode 100644 opto/optimizers/opro_v3.py create mode 100644 opto/optimizers/optoprime_v3.py create mode 100644 tests/llm_optimizers_tests/test_optoprime_v3.py diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py new file mode 100644 index 00000000..5d918f1e --- /dev/null +++ b/opto/optimizers/backbone.py @@ -0,0 +1,569 @@ +""" +Flexible conversation manager for multi-turn LLM conversations. +Uses LiteLLM unified format for all providers (OpenAI, Anthropic, Google, etc.). +""" + +from typing import List, Dict, Any, Optional, Literal, Union +from dataclasses import dataclass, field +import json +import base64 +from pathlib import Path +import warnings + + +@dataclass +class TextContent: + """Text content block""" + type: Literal["text"] = "text" + text: str = "" + + def to_dict(self) -> Dict[str, Any]: + return {"type": self.type, "text": self.text} + + +@dataclass +class ImageContent: + """Image content block - supports URLs or base64""" + type: Literal["image"] = "image" + image_url: Optional[str] = None + image_data: Optional[str] = None # base64 encoded + media_type: str = "image/jpeg" # image/jpeg, image/png, image/gif, image/webp + detail: Optional[str] = None # OpenAI: "auto", "low", "high" + + def to_dict(self) -> Dict[str, Any]: + if self.image_url: + return { + "type": self.type, + "image_url": self.image_url, + "media_type": self.media_type + } + else: + return { + "type": self.type, + "image_data": self.image_data, + "media_type": self.media_type + } + + @classmethod + def from_file(cls, filepath: str, media_type: Optional[str] = None): + """Load image from file""" + path = Path(filepath) + if not media_type: + ext_to_type = { + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.png': 'image/png', + '.gif': 'image/gif', + '.webp': 'image/webp' + } + media_type = ext_to_type.get(path.suffix.lower(), 'image/jpeg') + + with open(filepath, 'rb') as f: + image_data = base64.b64encode(f.read()).decode('utf-8') + + return cls(image_data=image_data, media_type=media_type) + + +@dataclass +class PDFContent: + """PDF content block""" + type: Literal["pdf"] = "pdf" + pdf_url: Optional[str] = None + pdf_data: Optional[str] = None # base64 encoded + filename: Optional[str] = None + + def to_dict(self) -> Dict[str, Any]: + if self.pdf_url: + return { + "type": "document", + "source": {"type": "url", "url": self.pdf_url}, + "filename": self.filename + } + else: + return { + "type": "document", + "source": { + "type": "base64", + "media_type": "application/pdf", + "data": self.pdf_data + }, + "filename": self.filename + } + + @classmethod + def from_file(cls, filepath: str): + """Load PDF from file""" + path = Path(filepath) + with open(filepath, 'rb') as f: + pdf_data = base64.b64encode(f.read()).decode('utf-8') + + return cls(pdf_data=pdf_data, filename=path.name) + + +@dataclass +class FileContent: + """Generic file content block (for code, data files, etc.)""" + file_data: str # Could be text content or base64 for binary + filename: str + type: Literal["file"] = "file" + mime_type: str = "text/plain" + is_binary: bool = False + + def 
to_dict(self) -> Dict[str, Any]: + return { + "type": self.type, + "filename": self.filename, + "mime_type": self.mime_type, + "file_data": self.file_data, + "is_binary": self.is_binary + } + + @classmethod + def from_file(cls, filepath: str, mime_type: Optional[str] = None): + """Load file from disk""" + path = Path(filepath) + + # Try to read as text first + try: + with open(filepath, 'r', encoding='utf-8') as f: + file_data = f.read() + is_binary = False + except UnicodeDecodeError: + # Fall back to binary + with open(filepath, 'rb') as f: + file_data = base64.b64encode(f.read()).decode('utf-8') + is_binary = True + + if not mime_type: + # Simple mime type detection + ext_to_type = { + '.py': 'text/x-python', + '.js': 'text/javascript', + '.json': 'application/json', + '.csv': 'text/csv', + '.txt': 'text/plain', + '.md': 'text/markdown', + '.html': 'text/html', + } + mime_type = ext_to_type.get(path.suffix.lower(), 'application/octet-stream') + + return cls( + file_data=file_data, + filename=path.name, + mime_type=mime_type, + is_binary=is_binary + ) + + +# Union type for all content types +ContentBlock = Union[TextContent, ImageContent, PDFContent, FileContent] + + +@dataclass +class ToolCall: + """Represents a tool call made by the LLM""" + id: str + type: str # "function", "web_search", etc. + name: Optional[str] = None # function name + arguments: Optional[Dict[str, Any]] = None # function arguments + + def to_dict(self) -> Dict[str, Any]: + result = {"id": self.id, "type": self.type} + if self.name: + result["name"] = self.name + if self.arguments: + result["arguments"] = self.arguments + return result + + +@dataclass +class ToolResult: + """Represents the result of a tool execution""" + tool_call_id: str + content: str # Result as string (can be JSON stringified) + is_error: bool = False + + def to_dict(self) -> Dict[str, Any]: + return { + "tool_call_id": self.tool_call_id, + "content": self.content, + "is_error": self.is_error + } + + +@dataclass +class ToolDefinition: + """Defines a tool that the LLM can use""" + type: str # "function", "web_search", "file_search", etc. 
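+    # "function" tools are expected to fill in name/description/parameters;
+    # hosted tool types (e.g. a provider-side web_search) may only need `type`
+    # plus provider-specific fields carried in `extra`.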
+ name: Optional[str] = None + description: Optional[str] = None + parameters: Optional[Dict[str, Any]] = None + strict: bool = False # OpenAI strict mode + # Provider-specific fields + extra: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + result = {"type": self.type} + if self.name: + result["name"] = self.name + if self.description: + result["description"] = self.description + if self.parameters: + result["parameters"] = self.parameters + if self.strict: + result["strict"] = self.strict + result.update(self.extra) + return result + + +@dataclass +class UserTurn: + """Represents a user message turn in the conversation""" + content: List[ContentBlock] = field(default_factory=list) + tools: List[ToolDefinition] = field(default_factory=list) + + # Provider-specific settings + temperature: Optional[float] = None + max_tokens: Optional[int] = None + top_p: Optional[float] = None + + # Metadata + timestamp: Optional[str] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + def add_text(self, text: str) -> 'UserTurn': + """Add text content""" + self.content.append(TextContent(text=text)) + return self + + def add_image(self, url: Optional[str] = None, data: Optional[str] = None, + media_type: str = "image/jpeg") -> 'UserTurn': + """Add image content""" + self.content.append(ImageContent( + image_url=url, + image_data=data, + media_type=media_type + )) + return self + + def add_image_file(self, filepath: str) -> 'UserTurn': + """Add image from file""" + self.content.append(ImageContent.from_file(filepath)) + return self + + def add_pdf(self, url: Optional[str] = None, data: Optional[str] = None) -> 'UserTurn': + """Add PDF content""" + self.content.append(PDFContent(pdf_url=url, pdf_data=data)) + return self + + def add_pdf_file(self, filepath: str) -> 'UserTurn': + """Add PDF from file""" + self.content.append(PDFContent.from_file(filepath)) + return self + + def add_file(self, filepath: str, mime_type: Optional[str] = None) -> 'UserTurn': + """Add file from disk""" + self.content.append(FileContent.from_file(filepath, mime_type)) + return self + + def add_tool(self, tool: ToolDefinition) -> 'UserTurn': + """Add a tool definition""" + self.tools.append(tool) + return self + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary format""" + return { + "role": "user", + "content": [c.to_dict() for c in self.content], + "tools": [t.to_dict() for t in self.tools] if self.tools else None, + "temperature": self.temperature, + "max_tokens": self.max_tokens, + "top_p": self.top_p, + "metadata": self.metadata + } + + def to_litellm_format(self) -> Dict[str, Any]: + """Convert to LiteLLM format (OpenAI-compatible, works with all providers)""" + content = [] + for block in self.content: + if isinstance(block, TextContent): + content.append({"type": "text", "text": block.text}) + elif isinstance(block, ImageContent): + if block.image_url: + img_dict = {"type": "image_url", "image_url": {"url": block.image_url}} + if block.detail: + img_dict["image_url"]["detail"] = block.detail + content.append(img_dict) + else: + data_url = f"data:{block.media_type};base64,{block.image_data}" + content.append({"type": "image_url", "image_url": {"url": data_url}}) + elif isinstance(block, PDFContent): + # LiteLLM supports PDFs for providers like Claude + # Use image_url type with PDF data URL for compatibility + if block.pdf_url: + warnings.warn("PDF URLs may not be supported by all providers through LiteLLM") + content.append({"type": "text", "text": 
f"[PDF: {block.pdf_url}]"}) + else: + # Encode as data URL for providers that support PDFs + data_url = f"data:application/pdf;base64,{block.pdf_data}" + content.append({"type": "image_url", "image_url": {"url": data_url}}) + elif isinstance(block, FileContent): + # For file content, add as text or data URL based on type + if block.is_binary: + data_url = f"data:{block.mime_type};base64,{block.file_data}" + content.append({"type": "text", "text": f"[File: {block.filename}]\n{data_url}"}) + else: + content.append({"type": "text", "text": f"[File: {block.filename}]\n{block.file_data}"}) + + return { + "role": "user", + "content": content + } + + +@dataclass +class AssistantTurn: + """Represents an assistant message turn in the conversation""" + content: List[ContentBlock] = field(default_factory=list) + + # Tool usage (Option B: Everything in AssistantTurn) + tool_calls: List[ToolCall] = field(default_factory=list) + tool_results: List[ToolResult] = field(default_factory=list) + + # Provider-specific features + reasoning: Optional[str] = None # OpenAI reasoning/thinking + finish_reason: Optional[str] = None # "stop", "length", "tool_calls", etc. + + # Token usage + prompt_tokens: Optional[int] = None + completion_tokens: Optional[int] = None + + # Metadata + model: Optional[str] = None + timestamp: Optional[str] = None + metadata: Dict[str, Any] = field(default_factory=dict) + + def add_text(self, text: str) -> 'AssistantTurn': + """Add text content""" + self.content.append(TextContent(text=text)) + return self + + def add_image(self, url: Optional[str] = None, data: Optional[str] = None, + media_type: str = "image/jpeg") -> 'AssistantTurn': + """Add image content (some models can generate images)""" + self.content.append(ImageContent( + image_url=url, + image_data=data, + media_type=media_type + )) + return self + + def add_tool_call(self, tool_call: ToolCall) -> 'AssistantTurn': + """Add a tool call""" + self.tool_calls.append(tool_call) + return self + + def add_tool_result(self, result: ToolResult) -> 'AssistantTurn': + """Add a tool result""" + self.tool_results.append(result) + return self + + def get_text(self) -> str: + """Get all text content concatenated""" + return " ".join( + block.text for block in self.content + if isinstance(block, TextContent) + ) + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary format""" + return { + "role": "assistant", + "content": [c.to_dict() for c in self.content], + "tool_calls": [tc.to_dict() for tc in self.tool_calls] if self.tool_calls else None, + "tool_results": [tr.to_dict() for tr in self.tool_results] if self.tool_results else None, + "reasoning": self.reasoning, + "finish_reason": self.finish_reason, + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "model": self.model, + "metadata": self.metadata + } + + def to_litellm_format(self) -> Dict[str, Any]: + """Convert to LiteLLM format (OpenAI-compatible, works with all providers)""" + result = {"role": "assistant"} + + if self.content: + # For multimodal or simple text response + text = self.get_text() + if text: + result["content"] = text + + if self.tool_calls: + result["tool_calls"] = [ + { + "id": tc.id, + "type": tc.type, + "function": { + "name": tc.name, + "arguments": json.dumps(tc.arguments) if tc.arguments else "{}" + } + } + for tc in self.tool_calls + ] + + return result + + +@dataclass +class ConversationHistory: + """Manages conversation history across multiple turns using LiteLLM unified format""" + turns: 
List[Union[UserTurn, AssistantTurn]] = field(default_factory=list) + system_prompt: Optional[str] = None + + def add_user_turn(self, turn: UserTurn) -> 'ConversationHistory': + """Add a user turn""" + self.turns.append(turn) + return self + + def add_assistant_turn(self, turn: AssistantTurn) -> 'ConversationHistory': + """Add an assistant turn""" + self.turns.append(turn) + return self + + def get_last_user_turn(self) -> Optional[UserTurn]: + """Get the most recent user turn""" + for turn in reversed(self.turns): + if isinstance(turn, UserTurn): + return turn + return None + + def get_last_assistant_turn(self) -> Optional[AssistantTurn]: + """Get the most recent assistant turn""" + for turn in reversed(self.turns): + if isinstance(turn, AssistantTurn): + return turn + return None + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary format""" + return { + "system_prompt": self.system_prompt, + "turns": [turn.to_dict() for turn in self.turns] + } + + def to_litellm_format( + self, + n: int = -1, + truncate_strategy: Literal["from_start", "from_end"] = "from_start" + ) -> List[Dict[str, Any]]: + """ + Convert to LiteLLM messages format (OpenAI-compatible, works with all providers) + + Args: + n: Number of historical rounds (user+assistant pairs) to include. + -1 means all history (default: -1). + The current (potentially incomplete) round is always included. + truncate_strategy: How to truncate when n is specified: + - "from_start": Remove oldest rounds, keep the most recent n rounds (default) + - "from_end": Remove newest rounds, keep the oldest n rounds + + Returns: + List of message dictionaries in LiteLLM format + """ + # Apply truncation to turns + if n == -1: + selected_turns = self.turns + else: + # n = number of historical rounds (pairs) + # Each round = 2 turns (user + assistant) + # Plus include current incomplete round (if last turn is user, +1) + has_incomplete_round = len(self.turns) > 0 and isinstance(self.turns[-1], UserTurn) + n_turns = n * 2 + (1 if has_incomplete_round else 0) + + if truncate_strategy == "from_start": + # Keep last n_turns (remove from start) + selected_turns = self.turns[-n_turns:] if n_turns > 0 else [] + elif truncate_strategy == "from_end": + # Keep first n_turns (remove from end) + selected_turns = self.turns[:n_turns] if n_turns > 0 else [] + else: + raise ValueError(f"Unknown truncate_strategy: {truncate_strategy}. Use 'from_start' or 'from_end'") + + messages = [] + + if self.system_prompt: + messages.append({"role": "system", "content": self.system_prompt}) + + for turn in selected_turns: + messages.append(turn.to_litellm_format()) + + # Add tool results as separate messages in LiteLLM/OpenAI format + if isinstance(turn, AssistantTurn) and turn.tool_results: + for result in turn.tool_results: + messages.append({ + "role": "tool", + "tool_call_id": result.tool_call_id, + "content": result.content + }) + + return messages + + def to_messages( + self, + n: int = -1, + truncate_strategy: Literal["from_start", "from_end"] = "from_start" + ) -> List[Dict[str, Any]]: + """ + Alias for to_litellm_format() for convenience + + Args: + n: Number of historical rounds (user+assistant pairs) to include. + -1 means all history (default: -1). + The current (potentially incomplete) round is always included. 
+ truncate_strategy: How to truncate when n is specified: + - "from_start": Remove oldest rounds, keep the most recent n rounds (default) + - "from_end": Remove newest rounds, keep the oldest n rounds + + Returns: + List of message dictionaries in LiteLLM format + """ + return self.to_litellm_format(n=n, truncate_strategy=truncate_strategy) + + def save_to_file(self, filepath: str): + """Save conversation history to JSON file""" + with open(filepath, 'w') as f: + json.dump(self.to_dict(), f, indent=2) + + @classmethod + def load_from_file(cls, filepath: str) -> 'ConversationHistory': + """Load conversation history from JSON file""" + with open(filepath, 'r') as f: + data = json.load(f) + + # This is a simplified loader - you'd want more robust deserialization + history = cls( + system_prompt=data.get('system_prompt') + ) + + # Note: Full deserialization would require reconstructing objects from dicts + # This is left as an exercise since it depends on your exact needs + + return history + + def clear(self): + """Clear all turns from history""" + self.turns.clear() + + def get_token_count_estimate(self) -> int: + """Rough estimate of token count (actual count requires tokenizer)""" + total = 0 + for turn in self.turns: + if isinstance(turn, (UserTurn, AssistantTurn)): + for block in turn.content: + if isinstance(block, TextContent): + # Very rough estimate: ~4 chars per token + total += len(block.text) // 4 + return total \ No newline at end of file diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py new file mode 100644 index 00000000..38f0f727 --- /dev/null +++ b/opto/optimizers/opro_v3.py @@ -0,0 +1,403 @@ +""" +Key difference to v2: +1. Use the new backbone conversation history manager +2. Support multimodal node (both trainable and non-trainable) +3. Break from the OptoPrime style template, support more customizable template from user, for brevity and streamlined usage. +""" + +import json +from textwrap import dedent +from dataclasses import dataclass, asdict +from typing import Dict, Optional, List +from opto.trace.nodes import ParameterNode + +from opto.optimizers.optoprime_v2 import OptoPrimeV2, OptimizerPromptSymbolSet + +# Not inheriting from optoprime_v2 because this should have a smaller set +class OPROPromptSymbolSet(OptimizerPromptSymbolSet): + """Prompt symbol set for OPRO optimizer. + + This class defines the tags and symbols used in the OPRO optimizer's prompts + and output parsing. It provides a structured way to format problems and parse + responses from the language model. + + Attributes + ---------- + instruction_section_title : str + Title for the instruction section in prompts. + variable_section_title : str + Title for the variable/solution section in prompts. + feedback_section_title : str + Title for the feedback section in prompts. + node_tag : str + Tag used to identify constant nodes in the computation graph. + variable_tag : str + Tag used to identify variable nodes that can be optimized. + value_tag : str + Tag used to wrap the value of a node. + constraint_tag : str + Tag used to wrap constraint expressions for nodes. + reasoning_tag : str + Tag used to wrap reasoning in the output. + improved_variable_tag : str + Tag used to wrap improved variable values in the output. + name_tag : str + Tag used to wrap variable names. + expect_json : bool + Whether to expect JSON output format (default: False). + + Methods + ------- + default_prompt_symbols + Returns default prompt symbols dictionary. 
+ + Notes + ----- + This class inherits from OptimizerPromptSymbolSet but defines a smaller, + more focused set of symbols specifically for OPRO optimization. + """ + + instruction_section_title = "# Instruction" + variable_section_title = "# Solution" + feedback_section_title = "# Feedback" + context_section_title = "# Context" + + node_tag = "node" # nodes that are constants in the graph + variable_tag = "solution" # nodes that can be changed + value_tag = "value" # inside node, we have value tag + constraint_tag = "constraint" # inside node, we have constraint tag + + # output format + # Note: we currently don't support extracting format's like "```code```" because we assume supplied tag is name-only, i.e., + reasoning_tag = "reasoning" + improved_variable_tag = "variable" + name_tag = "name" + + expect_json = False # this will stop `enforce_json` arguments passed to LLM calls + + @property + def default_prompt_symbols(self) -> Dict[str, str]: + return { + "variables": self.variables_section_title, + "feedback": self.feedback_section_title, + "instruction": self.instruction_section_title, + "context": self.context_section_title + } + +@dataclass +class ProblemInstance: + """Represents a problem instance for OPRO optimization. + + This dataclass encapsulates a complete problem instance including the + instruction, current variables/solution, and feedback received. + + Attributes + ---------- + instruction : str + The instruction describing what needs to be done or the question to answer. + variables : str + The current proposed solution that can be modified. + feedback : str + Feedback about the current solution. + context: str + Optional context information that might be useful to solve the problem. + + optimizer_prompt_symbol_set : OPROPromptSymbolSet + The symbol set used for formatting the problem. + problem_template : str + Template for formatting the problem instance as a string. + + Methods + ------- + __repr__() + Returns a formatted string representation of the problem instance. + + Notes + ----- + The problem instance is formatted using the problem_template which + organizes the instruction, variables, and feedback into a structured format. + """ + instruction: str + variables: str + feedback: str + context: Optional[str] + + optimizer_prompt_symbol_set: OPROPromptSymbolSet + + problem_template = dedent( + """ + # Instruction + {instruction} + + # Solution + {variables} + + # Feedback + {feedback} + """ + ) + + def __repr__(self) -> str: + optimization_query = self.problem_template.format( + instruction=self.instruction, + variables=self.variables, + feedback=self.feedback, + ) + + context_section = dedent(""" + + # Context + {context} + """) + + if self.context is not None and self.context.strip() != "": + context_section = context_section.format(context=self.context) + optimization_query += context_section + + return optimization_query + +class OPROv2(OptoPrimeV2): + """OPRO (Optimization by PROmpting) optimizer version 2. + + OPRO is an optimization algorithm that leverages large language models to + iteratively improve solutions based on feedback. It treats optimization as + a natural language problem where the LLM proposes improvements to variables + based on instruction and feedback. + + Parameters + ---------- + *args + Variable length argument list passed to parent class. + optimizer_prompt_symbol_set : OptimizerPromptSymbolSet, optional + The symbol set for formatting prompts and parsing outputs. + Defaults to OPROPromptSymbolSet(). 
+ include_example : bool, optional + Whether to include examples in the prompt. Default is False as + the default example in OptoPrimeV2 does not work well with OPRO. + memory_size : int, optional + Number of past optimization steps to remember. Default is 5. + **kwargs + Additional keyword arguments passed to parent class. + + Attributes + ---------- + representation_prompt : str + Template for explaining the problem representation to the LLM. + output_format_prompt_template : str + Template for specifying the expected output format. + user_prompt_template : str + Template for presenting the problem instance to the LLM. + final_prompt : str + Template for requesting the final revised solutions. + default_objective : str + Default objective when none is specified. + + Methods + ------- + problem_instance(summary, mask=None) + Creates a ProblemInstance from an optimization summary. + initialize_prompt() + Initializes and formats the prompt templates. + + Notes + ----- + OPRO differs from OptoPrime by focusing on simpler problem representations + and clearer feedback incorporation. It is particularly effective for + problems where the optimization can be expressed in natural language. + + See Also + -------- + OptoPrimeV2 : Parent class providing core optimization functionality. + OPROPromptSymbolSet : Symbol set used for formatting. + + Examples + -------- + >>> optimizer = OPROv2(memory_size=10) + >>> # Use optimizer to improve solutions based on feedback + """ + representation_prompt = dedent( + """ + You're tasked to change the proposed solution according to feedback. + + Specifically, a problem will be composed of the following parts: + - {instruction_section_title}: the instruction which describes the things you need to do or the question you should answer. + - {variables_section_title}: the proposed solution that you can change/tweak (trainable). + - {feedback_section_title}: the feedback about the solution. + - {context_section_title}: the context information that might be useful to solve the problem. + + If `data_type` is `code`, it means `{value_tag}` is the source code of a python code, which may include docstring and definitions. + """ + ) + + output_format_prompt_template = dedent( + """ + Output_format: Your output should be in the following XML/HTML format: + + ``` + {output_format} + ``` + + In <{reasoning_tag}>, explain the problem: 1. what the {instruction_section_title} means 2. what the {feedback_section_title} means to {variables_section_title} considering how {variables_section_title} follow {instruction_section_title}. 3. Reasoning about the suggested changes in {variables_section_title} (if needed) and the expected result. + + If you need to suggest a change in the values of {variables_section_title}, write down the suggested values in <{improved_variable_tag}>. Remember you can change only the values in {variables_section_title}, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature. + + If no changes are needed, just output TERMINATE. + """ + ) + + user_prompt_template = dedent( + """ + Now you see problem instance: + + ================================ + {problem_instance} + ================================ + + """ + ) + + context_prompt = dedent( + """ + Here is some additional **context** to solving this problem: + + {context} + """ + ) + + final_prompt = dedent( + """ + What are your revised solutions on {names}? 
+ + Your response: + """ + ) + + # Default Objective becomes instruction for the next block + default_objective = "Propose a new solution that will incorporate the feedback." + + def __init__(self, *args, + optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = None, + include_example=False, # default example in OptoPrimeV2 does not work in OPRO + memory_size=5, + problem_context: Optional[str] = None, + **kwargs): + """Initialize the OPROv2 optimizer. + + Parameters + ---------- + *args + Variable length argument list passed to parent class. + optimizer_prompt_symbol_set : OptimizerPromptSymbolSet, optional + The symbol set for formatting prompts and parsing outputs. + If None, uses OPROPromptSymbolSet(). + include_example : bool, optional + Whether to include examples in the prompt. Default is False. + memory_size : int, optional + Number of past optimization steps to remember. Default is 5. + **kwargs + Additional keyword arguments passed to parent class. + """ + optimizer_prompt_symbol_set = optimizer_prompt_symbol_set or OPROPromptSymbolSet() + super().__init__(*args, optimizer_prompt_symbol_set=optimizer_prompt_symbol_set, + include_example=include_example, memory_size=memory_size, + problem_context=problem_context, + **kwargs) + + def parameter_check(self, parameters: List[ParameterNode]): + """Check if the parameters are valid. + This can be overloaded by subclasses to add more checks. + + Args: + parameters: List[ParameterNode] + The parameters to check. + + Raises: + AssertionError: If more than one parameter contains image data. + + Notes: + OPROv2 supports image parameters, but only one parameter can be + an image at a time since LLMs can only generate one image per inference. + """ + # Count image parameters + image_params = [param for param in parameters if param.is_image] + + if len(image_params) > 1: + param_names = ', '.join([f"'{p.name}'" for p in image_params]) + raise AssertionError( + f"OPROv2 supports at most one image parameter, but found {len(image_params)}: " + f"{param_names}. LLMs can only generate one image at a time." + ) + + def problem_instance(self, summary, mask=None): + """Create a ProblemInstance from an optimization summary. + + Parameters + ---------- + summary : object + The optimization summary containing variables and feedback. + mask : list, optional + List of sections to mask/hide in the problem instance. + Can include "#Instruction", variable section title, or feedback section title. + + Returns + ------- + ProblemInstance + A formatted problem instance ready for presentation to the LLM. + + Notes + ----- + The mask parameter allows selective hiding of problem components, + useful for ablation studies or specific optimization strategies. + """ + mask = mask or [] + return ProblemInstance( + instruction=self.objective if "#Instruction" not in mask else "", + variables=( + self.repr_node_value_compact(summary.variables, node_tag=self.optimizer_prompt_symbol_set.variable_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) + if self.optimizer_prompt_symbol_set.variables_section_title not in mask + else "" + ), + feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", + optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set + ) + + def initialize_prompt(self): + """Initialize and format the prompt templates. 
+
+        This method formats the representation_prompt and output_format_prompt
+        templates with the appropriate symbols from the optimizer_prompt_symbol_set.
+        It prepares the prompts for use in optimization.
+
+        Notes
+        -----
+        This method should be called during initialization to ensure all
+        prompt templates are properly formatted with the correct tags and symbols.
+        """
+        self.representation_prompt = self.representation_prompt.format(
+            variable_expression_format=dedent(f"""
+            <{self.optimizer_prompt_symbol_set.variable_tag} name="variable_name" type="data_type">
+            <{self.optimizer_prompt_symbol_set.value_tag}>
+            value
+            </{self.optimizer_prompt_symbol_set.value_tag}>
+            <{self.optimizer_prompt_symbol_set.constraint_tag}>
+            constraint_expression
+            </{self.optimizer_prompt_symbol_set.constraint_tag}>
+            </{self.optimizer_prompt_symbol_set.variable_tag}>
+            """),
+            value_tag=self.optimizer_prompt_symbol_set.value_tag,
+            variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+            feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+            instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+            context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+        )
+        self.output_format_prompt = self.output_format_prompt_template.format(
+            output_format=self.optimizer_prompt_symbol_set.output_format,
+            reasoning_tag=self.optimizer_prompt_symbol_set.reasoning_tag,
+            improved_variable_tag=self.optimizer_prompt_symbol_set.improved_variable_tag,
+            instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+            feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+            variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+            context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+        )
diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py
new file mode 100644
index 00000000..a8c6570c
--- /dev/null
+++ b/opto/optimizers/optoprime_v3.py
@@ -0,0 +1,838 @@
+"""
+Key difference to v2:
+1. Use the new backbone conversation history manager
+2. Support multimodal node (both trainable and non-trainable)
+"""
+
+import json
+from typing import Any, List, Dict, Union, Tuple, Optional
+from dataclasses import dataclass, asdict
+from opto.optimizers.optoprime import OptoPrime, FunctionFeedback
+from opto.trace.utils import dedent
+from opto.optimizers.utils import truncate_expression, extract_xml_like_data, MultiModalPayload
+
+from opto.trace.nodes import ParameterNode, Node, MessageNode
+from opto.trace.propagators import TraceGraph, GraphPropagator
+from opto.trace.propagators.propagators import Propagator
+
+from opto.utils.llm import AbstractModel, LLM
+from opto.optimizers.buffers import FIFOBuffer
+from opto.optimizers.backbone import ConversationHistory, UserTurn, AssistantTurn
+import copy
+import pickle
+import re
+from typing import Dict, Any
+
+class OptimizerPromptSymbolSet:
+    """
+    By inheriting this class and passing it into the optimizer, users can customize the optimizer documentation.
+
+    This divides into three parts:
+    - Section titles: the title of each section in the prompt
+    - Node tags: the tags that capture the graph structure (only tag names are allowed to be changed)
+    - Output format: the format of the output of the optimizer
+    """
+
+    # Titles should be written as markdown titles (space between # and title)
+    # In text, we automatically remove space in the title, so it will become `#Title`
+    variables_section_title = "# Variables"
+    inputs_section_title = "# Inputs"
+    outputs_section_title = "# Outputs"
+    others_section_title = "# Others"
+    feedback_section_title = "# Feedback"
+    instruction_section_title = "# Instruction"
+    code_section_title = "# Code"
+    documentation_section_title = "# Documentation"
+    context_section_title = "# Context"
+
+    node_tag = "node"  # nodes that are constants in the graph
+    variable_tag = "variable"  # nodes that can be changed
+    value_tag = "value"  # inside node, we have value tag
+    constraint_tag = "constraint"  # inside node, we have constraint tag
+
+    # output format
+    # Note: we currently don't support extracting formats like "```code```" because we assume the supplied tag is name-only, i.e., <tag_name>
+    reasoning_tag = "reasoning"
+    improved_variable_tag = "variable"
+    name_tag = "name"
+
+    expect_json = False  # this will stop `enforce_json` arguments passed to LLM calls
+
+    # custom output format
+    # if this is not None, then the user needs to implement the following functions:
+    # - output_response_extractor
+    # - example_output
+    custom_output_format_instruction = None
+
+    @property
+    def output_format(self) -> str:
+        """
+        This function defines the input to:
+        ```
+        {output_format}
+        ```
+        in the self.output_format_prompt_template of OptoPrimeV3
+        """
+        if self.custom_output_format_instruction is None:
+            # we use a default XML-like format
+            return dedent(f"""
+            <{self.reasoning_tag}>
+            reasoning
+            </{self.reasoning_tag}>
+            <{self.improved_variable_tag}>
+            <{self.name_tag}>variable_name</{self.name_tag}>
+            <{self.value_tag}>
+            value
+            </{self.value_tag}>
+            </{self.improved_variable_tag}>
+            """)
+        else:
+            return self.custom_output_format_instruction.strip()
+
+    def example_output(self, reasoning, variables):
+        """
+        reasoning: str
+        variables: format {variable_name, value}
+        """
+        if self.custom_output_format_instruction is not None:
+            raise NotImplementedError
+        else:
+            # Build the output string in the same XML-like format as self.output_format
+            output = []
+            if reasoning != "":
+                output.append(f"<{self.reasoning_tag}>")
+                output.append(reasoning)
+                output.append(f"</{self.reasoning_tag}>")
+            for var_name, value in variables.items():
+                output.append(f"<{self.improved_variable_tag}>")
+                output.append(f"<{self.name_tag}>{var_name}</{self.name_tag}>")
+                output.append(f"<{self.value_tag}>")
+                output.append(str(value))
+                output.append(f"</{self.value_tag}>")
+                output.append(f"</{self.improved_variable_tag}>")
+            return "\n".join(output)
+
+    def output_response_extractor(self, response: str) -> Dict[str, Any]:
+        # the response here should just be plain text
+
+        if self.custom_output_format_instruction is None:
+            extracted_data = extract_xml_like_data(response,
+                                                   reasoning_tag=self.reasoning_tag,
+                                                   improved_variable_tag=self.improved_variable_tag,
+                                                   name_tag=self.name_tag,
+                                                   value_tag=self.value_tag)
+
+            # if the suggested value is code and the entire code body is empty (i.e., not even the function signature is present),
+            # then we remove that suggestion
+            keys_to_remove = []
+            for key, value in extracted_data['variables'].items():
+                if "__code" in key and value.strip() == "":
+                    keys_to_remove.append(key)
+
+            for key in keys_to_remove:
+                del extracted_data['variables'][key]
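+
+            # Only non-empty suggestions survive to this point; empty code
+            # bodies were treated as "no suggestion" and dropped above.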
+            return extracted_data
+        else:
+            raise NotImplementedError(
+                "If you supplied a custom output format prompt template, you need to implement your own response extractor")
+
+    @property
+    def default_prompt_symbols(self) -> Dict[str, str]:
+        return {
+            "variables": self.variables_section_title,
+            "inputs": self.inputs_section_title,
+            "outputs": self.outputs_section_title,
+            "others": self.others_section_title,
+            "feedback": self.feedback_section_title,
+            "instruction": self.instruction_section_title,
+            "code": self.code_section_title,
+            "documentation": self.documentation_section_title,
+            "context": self.context_section_title
+        }
+
+
+class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet):
+    """We enforce a JSON output format extraction"""
+
+    expect_json = True
+
+    custom_output_format_instruction = dedent("""
+    {{
+    "reasoning": <Your reasoning>,
+    "suggestion": {{
+        <variable_1>: <suggested_value_1>,
+        <variable_2>: <suggested_value_2>,
+    }}
+    }}
+    """)
+
+    def example_output(self, reasoning, variables):
+        """
+        reasoning: str
+        variables: format {variable_name, value}
+        """
+
+        # Build the output string in the same JSON format as described in custom_output_format_instruction
+        output = {
+            "reasoning": reasoning,
+            "suggestion": {var_name: value for var_name, value in variables.items()}
+        }
+        return json.dumps(output, indent=2)
+
+    def output_response_extractor(self, response: str) -> Dict[str, Any]:
+        """
+        Extracts reasoning and suggestion variables from the LLM response using OptoPrime's extraction logic.
+        """
+        # Use the centralized extraction logic from OptoPrime
+        optoprime_instance = OptoPrime()
+        return optoprime_instance.extract_llm_suggestion(response)
+
+class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet):
+    variables_section_title = "# Variables"
+    inputs_section_title = "# Inputs"
+    outputs_section_title = "# Outputs"
+    others_section_title = "# Others"
+    feedback_section_title = "# Feedback"
+    instruction_section_title = "# Instruction"
+    code_section_title = "# Code"
+    documentation_section_title = "# Documentation"
+    context_section_title = "# Context"
+
+    node_tag = "const"  # nodes that are constants in the graph
+    variable_tag = "var"  # nodes that can be changed
+    value_tag = "data"  # inside node, we have value tag
+    constraint_tag = "constraint"  # inside node, we have constraint tag
+
+    # output format
+    reasoning_tag = "reason"
+    improved_variable_tag = "var"
+    name_tag = "name"
+
+
+@dataclass
+class ProblemInstance:
+    instruction: str
+    code: str
+    documentation: str
+    variables: str
+    inputs: str
+    others: str
+    outputs: str
+    feedback: str
+    context: Optional[str]
+
+    optimizer_prompt_symbol_set: OptimizerPromptSymbolSet
+
+    problem_template = dedent(
+        """
+        # Instruction
+        {instruction}
+
+        # Code
+        {code}
+
+        # Documentation
+        {documentation}
+
+        # Variables
+        {variables}
+
+        # Inputs
+        {inputs}
+
+        # Others
+        {others}
+
+        # Outputs
+        {outputs}
+
+        # Feedback
+        {feedback}
+        """
+    )
+
+    def __repr__(self) -> str:
+        optimization_query = self.problem_template.format(
+            instruction=self.instruction,
+            code=self.code,
+            documentation=self.documentation,
+            variables=self.variables,
+            inputs=self.inputs,
+            outputs=self.outputs,
+            others=self.others,
+            feedback=self.feedback
+        )
+
+        context_section = dedent("""
+
+        # Context
+        {context}
+        """)
+
+        if self.context is not None and self.context.strip() != "":
+            context_section = context_section.format(context=self.context)
+            optimization_query += context_section
+
+        return optimization_query
+
+
+@dataclass
+class MemoryInstance:
+    variables: Dict[str, Tuple[Any, str]]  # name -> (data, constraint)
(data, constraint)
+    feedback: str
+    optimizer_prompt_symbol_set: OptimizerPromptSymbolSet
+
+    memory_example_template = dedent(
+        """<example{index}>
+        {variables}
+        <feedback>
+        {feedback}
+        </feedback>
+        </example>
+        """
+    )
+
+    def __init__(self, variables: Dict[str, Any], feedback: str, optimizer_prompt_symbol_set: OptimizerPromptSymbolSet,
+                 index: Optional[int] = None):
+        self.feedback = feedback
+        self.optimizer_prompt_symbol_set = optimizer_prompt_symbol_set
+        self.variables = variables
+        self.index = index
+
+    def __str__(self) -> str:
+        var_repr = ""
+        for k, v in self.variables.items():
+            var_repr += dedent(f"""
+            <{self.optimizer_prompt_symbol_set.improved_variable_tag}>
+            <{self.optimizer_prompt_symbol_set.name_tag}>{k}</{self.optimizer_prompt_symbol_set.name_tag}>
+            <{self.optimizer_prompt_symbol_set.value_tag}>
+            {v[0]}
+            </{self.optimizer_prompt_symbol_set.value_tag}>
+            </{self.optimizer_prompt_symbol_set.improved_variable_tag}>
+            """)
+
+        return self.memory_example_template.format(
+            variables=var_repr,
+            feedback=self.feedback,
+            index=" " + str(self.index) if self.index is not None else ""
+        )
+
+
+class OptoPrimeV3(OptoPrime):
+    # This is a generic representation prompt, which just explains how to read the problem.
+    representation_prompt = dedent(
+        """You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.
+
+        Specifically, a problem will be composed of the following parts:
+        - {instruction_section_title}: the instruction which describes the things you need to do or the question you should answer.
+        - {code_section_title}: the code defined in the problem.
+        - {documentation_section_title}: the documentation of each function used in #Code. The explanation might be incomplete and only contain a high-level description. You can use the values in #Others to help infer how those functions work.
+        - {variables_section_title}: the input variables that you can change/tweak (trainable).
+        - {inputs_section_title}: the values of fixed inputs to the code, which CANNOT be changed (fixed).
+        - {others_section_title}: the intermediate values created through the code execution.
+        - {outputs_section_title}: the result of the code output.
+        - {feedback_section_title}: the feedback about the code's execution result.
+        - {context_section_title}: the context information that might be useful to solve the problem.
+
+        In `{variables_section_title}`, `{inputs_section_title}`, `{outputs_section_title}`, and `{others_section_title}`, each entry is expressed in the following format:
+
+        {variable_expression_format}
+
+        If `data_type` is `code`, it means `{value_tag}` is the source code of a Python function, which may include a docstring and definitions."""
+    )
+
+    # Optimization
+    default_objective = "You need to change the `{value_tag}` of the variables in {variables_section_title} to improve the output in accordance with {feedback_section_title}."
+
+    output_format_prompt_template = dedent(
+        """
+        Output format: Your output should be in the following XML/HTML format:
+
+        ```
+        {output_format}
+        ```
+
+        In <{reasoning_tag}>, explain the problem: 1. what the {instruction_section_title} means 2. what the {feedback_section_title} on {outputs_section_title} means for {variables_section_title}, considering how {variables_section_title} are used in {code_section_title} and other values in {documentation_section_title}, {inputs_section_title}, {others_section_title}. 3. your reasoning about the suggested changes in {variables_section_title} (if needed) and the expected result.
+ + If you need to suggest a change in the values of {variables_section_title}, write down the suggested values in <{improved_variable_tag}>. Remember you can change only the values in {variables_section_title}, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature. + + If no changes are needed, just output TERMINATE. + """ + ) + + example_problem_template = dedent( + """ + Here is an example of problem instance and response: + + ================================ + {example_problem} + ================================ + + Your response: + {example_response} + """ + ) + + user_prompt_template = dedent( + """ + Now you see problem instance: + + ================================ + {problem_instance} + ================================ + + """ + ) + + example_prompt = dedent( + """ + Here are some feasible but not optimal solutions for the current problem instance. Consider this as a hint to help you understand the problem better. + + ================================ + {examples} + ================================ + """ + ) + + context_prompt = dedent( + """ + Here is some additional **context** to solving this problem: + + {context} + """ + ) + + final_prompt = dedent( + """ + What are your suggestions on variables {names}? + + Your response: + """ + ) + + def __init__( + self, + parameters: List[ParameterNode], + llm: AbstractModel = None, + *args, + propagator: Propagator = None, + objective: Union[None, str] = None, + ignore_extraction_error: bool = True, + # ignore the type conversion error when extracting updated values from LLM's suggestion + include_example=False, + memory_size=0, # Memory size to store the past feedback + max_tokens=8192, + log=True, + initial_var_char_limit=2000, + optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(), + use_json_object_format=True, # whether to use json object format for the response when calling LLM + truncate_expression=truncate_expression, + problem_context: Optional[str] = None, + **kwargs, + ): + super().__init__(parameters, *args, propagator=propagator, **kwargs) + + self.truncate_expression = truncate_expression + self.problem_context = problem_context + self.multimodal_payload = MultiModalPayload() + + self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False + self.ignore_extraction_error = ignore_extraction_error + self.llm = llm or LLM() + self.objective = objective or self.default_objective.format(value_tag=optimizer_prompt_symbol_set.value_tag, + variables_section_title=optimizer_prompt_symbol_set.variables_section_title, + feedback_section_title=optimizer_prompt_symbol_set.feedback_section_title) + self.initial_var_char_limit = initial_var_char_limit + self.optimizer_prompt_symbol_set = optimizer_prompt_symbol_set + + self.example_problem_summary = FunctionFeedback(graph=[(1, 'y = add(x=a,y=b)'), (2, "z = subtract(x=y, y=c)")], + documentation={'add': 'This is an add operator of x and y.', + 'subtract': "subtract y from x"}, + others={'y': (6, None)}, + roots={'a': (5, "a > 0"), + 'b': (1, None), + 'c': (5, None)}, + output={'z': (1, None)}, + user_feedback='The result of the code is not as expected. 
The result should be 10, but the code returns 1'
+                                                    )
+        self.example_problem_summary.variables = {'a': (5, "a > 0")}
+        self.example_problem_summary.inputs = {'b': (1, None), 'c': (5, None)}
+
+        self.example_problem = self.problem_instance(self.example_problem_summary)
+        self.example_response = self.optimizer_prompt_symbol_set.example_output(
+            reasoning="In this case, the desired response would be to change the value of input a to 14, as that would make the code return 10.",
+            variables={
+                'a': 14,
+            }
+        )
+
+        self.include_example = include_example
+        self.max_tokens = max_tokens
+        self.log = [] if log else None
+        self.summary_log = [] if log else None
+        self.memory = FIFOBuffer(memory_size)
+        self.conversation_history = ConversationHistory()
+        self.conversation_length = memory_size  # Number of conversation turns to keep
+
+        self.default_prompt_symbols = self.optimizer_prompt_symbol_set.default_prompt_symbols
+
+        self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols)
+        self.initialize_prompt()
+
+    def parameter_check(self, parameters: List[ParameterNode]):
+        """Check if the parameters are valid.
+        This can be overridden by subclasses to add more checks.
+
+        Args:
+            parameters: List[ParameterNode]
+                The parameters to check.
+
+        Raises:
+            AssertionError: If more than one parameter contains image data.
+
+        Notes:
+            OptoPrimeV3 supports image parameters, but only one parameter can be
+            an image at a time since LLMs can only generate one image per inference.
+        """
+        # Count image parameters
+        image_params = [param for param in parameters if param.is_image]
+
+        if len(image_params) > 1:
+            param_names = ', '.join([f"'{p.name}'" for p in image_params])
+            raise AssertionError(
+                f"OptoPrimeV3 supports at most one image parameter, but found {len(image_params)}: "
+                f"{param_names}. LLMs can only generate one image at a time."
+            )
+
+    def add_image_context(self, image: Union[str, Any], context: str = "", format: str = "PNG"):
+        """
+        Add an image to the optimizer context.
+
+        Args:
+            image: Can be:
+                - URL string (starting with 'http://' or 'https://')
+                - Local file path (string)
+                - Numpy array or array-like RGB image
+            context: Optional context text to describe the image. If empty, uses a default.
+            format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+        """
+        if self.problem_context is None:
+            self.problem_context = ""
+
+        if context == "":
+            context = "The attached image is given to the workflow. You should use the image to help you understand the problem and provide better suggestions. You can refer to the image when providing your suggestions."
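+
+        # The text context is accumulated on problem_context (rendered under the
+        # "# Context" section), while the image itself travels separately via the
+        # multimodal payload set below.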
+
+        self.problem_context += f"{context}\n\n"
+
+        # Set the image using the multimodal payload
+        self.multimodal_payload.set_image(image, format=format)
+
+        self.initialize_prompt()
+
+    def add_context(self, context: str):
+        if self.problem_context is None:
+            self.problem_context = ""
+        self.problem_context += f"{context}\n\n"
+        self.initialize_prompt()
+
+    def initialize_prompt(self):
+        self.representation_prompt = self.representation_prompt.format(
+            variable_expression_format=dedent(f"""
+            <{self.optimizer_prompt_symbol_set.variable_tag} name="variable_name" type="data_type">
+            <{self.optimizer_prompt_symbol_set.value_tag}>
+            value
+            </{self.optimizer_prompt_symbol_set.value_tag}>
+            <{self.optimizer_prompt_symbol_set.constraint_tag}>
+            constraint_expression
+            </{self.optimizer_prompt_symbol_set.constraint_tag}>
+            </{self.optimizer_prompt_symbol_set.variable_tag}>
+            """),
+            value_tag=self.optimizer_prompt_symbol_set.value_tag,
+            variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+            inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""),
+            outputs_section_title=self.optimizer_prompt_symbol_set.outputs_section_title.replace(" ", ""),
+            feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+            instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+            code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""),
+            documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
+            others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
+            context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+        )
+        self.output_format_prompt = self.output_format_prompt_template.format(
+            output_format=self.optimizer_prompt_symbol_set.output_format,
+            reasoning_tag=self.optimizer_prompt_symbol_set.reasoning_tag,
+            improved_variable_tag=self.optimizer_prompt_symbol_set.improved_variable_tag,
+            instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+            feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+            outputs_section_title=self.optimizer_prompt_symbol_set.outputs_section_title.replace(" ", ""),
+            code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""),
+            documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
+            variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+            inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""),
+            others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
+            context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+        )
+
+    def repr_node_value(self, node_dict, node_tag="node",
+                        value_tag="value", constraint_tag="constraint"):
+        temp_list = []
+        for k, v in node_dict.items():
+            if "__code" not in k:
+                if v[1] is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+                    constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+                    temp_list.append(
+                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{v[0]}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+                else:
+                    temp_list.append(
+                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{v[0]}\n</{value_tag}>\n</{node_tag}>\n")
+            else:
+                constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+                signature = v[1].replace("The code should start with:\n", "")
+                func_body = v[0].replace(signature, "")
+                temp_list.append(
+                    f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+        return "\n".join(temp_list)
+
+    def repr_node_value_compact(self, node_dict, node_tag="node",
+                                value_tag="value", constraint_tag="constraint"):
+        temp_list = []
+        for k, v in node_dict.items():
+            if "__code" not in k:
+                node_value = self.truncate_expression(v[0], self.initial_var_char_limit)
+                if v[1] is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+                    constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+                    temp_list.append(
+                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+                else:
+                    temp_list.append(
+                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n</{node_tag}>\n")
+            else:
+                constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+                # we only truncate the function body
+                signature = v[1].replace("The code should start with:\n", "")
+                func_body = v[0].replace(signature, "")
+                node_value = self.truncate_expression(func_body, self.initial_var_char_limit)
+                temp_list.append(
+                    f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+        return "\n".join(temp_list)
+
+    def construct_prompt(self, summary, mask=None, *args, **kwargs):
+        """Construct the system and user prompt.
+        Expanded to construct a list of content blocks.
+        """
+        system_prompt = (
+            self.representation_prompt + self.output_format_prompt
+        )  # generic representation + output rule
+        user_prompt = self.user_prompt_template.format(
+            problem_instance=str(self.problem_instance(summary, mask=mask))
+        )  # problem instance
+        if self.include_example:
+            user_prompt = (
+                self.example_problem_template.format(
+                    example_problem=self.example_problem,
+                    example_response=self.example_response,
+                )
+                + user_prompt
+            )
+
+        var_names = []
+        for k, v in summary.variables.items():
+            var_names.append(f"{k}")  # ({type(v[0]).__name__})
+        var_names = ", ".join(var_names)
+
+        user_prompt += self.final_prompt.format(names=var_names)
+
+        # Add examples
+        if len(self.memory) > 0:
+            formatted_final = self.final_prompt.format(names=var_names)
+            prefix = user_prompt.split(formatted_final)[0]
+            examples = []
+            index = 0
+            for variables, feedback in self.memory:
+                index += 1
+                examples.append(str(MemoryInstance(variables, feedback, self.optimizer_prompt_symbol_set, index=index)))
+
+            examples = "\n".join(examples)
+            user_prompt = (
+                prefix
+                + f"\nBelow are some variables and their feedbacks you received in the past.\n\n{examples}\n\n"
+                + formatted_final
+            )
+        self.memory.add((summary.variables, summary.user_feedback))
+
+        return system_prompt, user_prompt
+
+    def problem_instance(self, summary, mask=None):
+        mask = mask or []
+        return ProblemInstance(
+            instruction=self.objective if self.optimizer_prompt_symbol_set.instruction_section_title not in mask else "",
+            code=(
+                "\n".join([v for k, v in sorted(summary.graph)])
+                if self.optimizer_prompt_symbol_set.code_section_title not in mask
+                else ""
+            ),
+            documentation=(
+                "\n".join([f"[{k}] {v}" for k, v in summary.documentation.items()])
+                if self.optimizer_prompt_symbol_set.documentation_section_title not in mask
+                else ""
+            ),
+            variables=(
+                self.repr_node_value(summary.variables, node_tag=self.optimizer_prompt_symbol_set.variable_tag,
+                                     value_tag=self.optimizer_prompt_symbol_set.value_tag,
+                                     constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag)
+                if self.optimizer_prompt_symbol_set.variables_section_title not in
mask + else "" + ), + inputs=( + self.repr_node_value_compact(summary.inputs, node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.inputs_section_title not in mask else "" + ), + outputs=( + self.repr_node_value_compact(summary.output, node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.outputs_section_title not in mask else "" + ), + others=( + self.repr_node_value_compact(summary.others, node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.others_section_title not in mask else "" + ), + feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", + context=self.problem_context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "", + optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set + ) + + def _step( + self, verbose=False, mask=None, *args, **kwargs + ) -> Dict[ParameterNode, Any]: + assert isinstance(self.propagator, GraphPropagator) + summary = self.summarize() + system_prompt, user_prompt = self.construct_prompt(summary, mask=mask) + + response = self.call_llm( + system_prompt=system_prompt, + user_prompt=user_prompt, + verbose=verbose, + max_tokens=self.max_tokens, + ) + + if "TERMINATE" in response: + return {} + + suggestion = self.extract_llm_suggestion(response) + update_dict = self.construct_update_dict(suggestion['variables']) + # suggestion has two keys: reasoning, and variables + + if self.log is not None: + self.log.append( + { + "system_prompt": system_prompt, + "user_prompt": user_prompt, + "response": response, + } + ) + self.summary_log.append( + {"problem_instance": self.problem_instance(summary), "summary": summary} + ) + + return update_dict + + def extract_llm_suggestion(self, response: str): + """Extract the suggestion from the response.""" + + suggestion = self.optimizer_prompt_symbol_set.output_response_extractor(response) + + if len(suggestion) == 0: + if not self.ignore_extraction_error: + print("Cannot extract suggestion from LLM's response:") + print(response) + + return suggestion + + def call_llm( + self, + system_prompt: str, + user_prompt: str, + verbose: Union[bool, str] = False, + max_tokens: int = 4096, + ): + """Call the LLM with a prompt and return the response.""" + if verbose not in (False, "output"): + print("Prompt\n", system_prompt + user_prompt) + + # Update system prompt in conversation history + self.conversation_history.system_prompt = system_prompt + + # Create user turn with text and optional image content + user_turn = UserTurn() + + # Add image content if available (image_data is URL or base64 data URL) + if self.multimodal_payload.image_data is not None: + user_turn.add_image(url=self.multimodal_payload.image_data) + + user_turn.add_text(user_prompt) + self.conversation_history.add_user_turn(user_turn) + + # Get messages with conversation length control (truncate from start) + # conversation_length = n historical rounds (user+assistant pairs) to keep + # The current user turn is automatically included by to_messages() + messages = self.conversation_history.to_messages( + 
n=self.conversation_length if self.conversation_length > 0 else -1, + truncate_strategy="from_start" + ) + + response_format = {"type": "json_object"} if self.use_json_object_format else None + + response = self.llm(messages=messages, max_tokens=max_tokens, response_format=response_format) + + response_content = response.choices[0].message.content + + # Store assistant response in conversation history + assistant_turn = AssistantTurn() + assistant_turn.add_text(response_content) + self.conversation_history.add_assistant_turn(assistant_turn) + + if verbose: + print("LLM response:\n", response_content) + return response_content + + def save(self, path: str): + """Save the optimizer state to a file.""" + with open(path, 'wb') as f: + pickle.dump({ + "truncate_expression": self.truncate_expression, + "use_json_object_format": self.use_json_object_format, + "ignore_extraction_error": self.ignore_extraction_error, + "objective": self.objective, + "initial_var_char_limit": self.initial_var_char_limit, + "optimizer_prompt_symbol_set": self.optimizer_prompt_symbol_set, + "include_example": self.include_example, + "max_tokens": self.max_tokens, + "memory": self.memory, + "conversation_history": self.conversation_history, + "conversation_length": self.conversation_length, + "default_prompt_symbols": self.default_prompt_symbols, + "prompt_symbols": self.prompt_symbols, + "representation_prompt": self.representation_prompt, + "output_format_prompt": self.output_format_prompt, + "context_prompt": self.context_prompt + }, f) + + def load(self, path: str): + """Load the optimizer state from a file.""" + with open(path, 'rb') as f: + state = pickle.load(f) + self.truncate_expression = state["truncate_expression"] + self.use_json_object_format = state["use_json_object_format"] + self.ignore_extraction_error = state["ignore_extraction_error"] + self.objective = state["objective"] + self.initial_var_char_limit = state["initial_var_char_limit"] + self.optimizer_prompt_symbol_set = state["optimizer_prompt_symbol_set"] + self.include_example = state["include_example"] + self.max_tokens = state["max_tokens"] + self.memory = state["memory"] + self.conversation_history = state.get("conversation_history", ConversationHistory()) + self.conversation_length = state.get("conversation_length", 0) + self.default_prompt_symbols = state["default_prompt_symbols"] + self.prompt_symbols = state["prompt_symbols"] + self.representation_prompt = state["representation_prompt"] + self.output_format_prompt = state["output_format_prompt"] + self.context_prompt = state["context_prompt"] diff --git a/opto/optimizers/utils.py b/opto/optimizers/utils.py index 793333e9..401b996e 100644 --- a/opto/optimizers/utils.py +++ b/opto/optimizers/utils.py @@ -1,13 +1,14 @@ import base64 import mimetypes import io -from typing import Dict, Any, Union, Optional +from typing import Dict, Any, Union, Optional, List try: import numpy as np NUMPY_AVAILABLE = True except ImportError: NUMPY_AVAILABLE = False +import opto.trace as trace def print_color(message, color=None, logger=None): colors = { @@ -261,3 +262,106 @@ def encode_numpy_to_base64(array, format: str = "PNG") -> str: data_url = f"data:{mime_type};base64,{b64}" return data_url + +class ChatHistory: + def __init__(self, max_turn=50, auto_summary=False): + """Initialize chat history for multi-turn conversation. + + Args: + max_turn: Maximum number of conversation turns to keep in history. 
+ + auto_summary: Whether to automatically summarize old messages + """ + self.messages: List[Dict[str, Any]] = [] + self.max_len = max_turn * 2 + self.auto_summary = auto_summary + + def __len__(self): + return len(self.messages) + + def add(self, content: Union[trace.Node, str], role): + """Add a message to history with role validation. + + Args: + content: The content of the message + role: The role of the message ("user" or "assistant") + """ + if role not in ["user", "assistant"]: + raise ValueError(f"Invalid role '{role}'. Must be 'user' or 'assistant'.") + + # Check for alternating user/assistant pattern + if len(self.messages) > 0: + last_msg = self.messages[-1] + if last_msg["role"] == role: + print(f"Warning: Adding consecutive {role} messages. Consider alternating user/assistant messages.") + + self.messages.append({"role": role, "content": content}) + self._trim_history() + + def append(self, message: Dict[str, Any]): + """Append a message directly to history.""" + if "role" not in message or "content" not in message: + raise ValueError("Message must have 'role' and 'content' fields.") + self.add(message["content"], message["role"]) + + def __iter__(self): + return iter(self.messages) + + def get_messages(self) -> List[Dict[str, str]]: + messages = [] + for message in self.messages: + if isinstance(message['content'], trace.Node): + messages.append({"role": message["role"], "content": message["content"].data}) + else: + messages.append(message) + return messages + + def get_messages_as_node(self, llm_name="") -> List[trace.Node]: + node_list = [] + for message in self.messages: + # If user query is a node and has other computation attached, we can't rename it + if isinstance(message['content'], trace.Node): + node_list.append(message['content']) + else: + role = message["role"] + content = message["content"] + name = f"{llm_name}_{role}" if llm_name else f"{role}" + if role == 'user': + name += "_query" + elif role == 'assistant': + name += "_response" + node_list.append(trace.node(content, name=name)) + + return node_list + + def _trim_history(self): + """Trim history to max_len while preserving first user message.""" + if len(self.messages) <= self.max_len: + return + + # Find first user message index + first_user_idx = None + for i, msg in enumerate(self.messages): + if msg["role"] == "user": + first_user_idx = i + break + + # Keep first user message + protected_messages = [] + if first_user_idx is not None: + first_user_msg = self.messages[first_user_idx] + protected_messages.append(first_user_msg) + + # Calculate how many recent messages we can keep + remaining_slots = self.max_len - len(protected_messages) + if remaining_slots > 0: + # Get recent messages + recent_messages = self.messages[-remaining_slots:] + # Avoid duplicating first user message + if first_user_idx is not None: + first_user_msg = self.messages[first_user_idx] + recent_messages = [msg for msg in recent_messages if msg != first_user_msg] + + self.messages = protected_messages + recent_messages + else: + self.messages = protected_messages \ No newline at end of file diff --git a/opto/trace/nodes.py b/opto/trace/nodes.py index 327916bc..696c5339 100644 --- a/opto/trace/nodes.py +++ b/opto/trace/nodes.py @@ -291,6 +291,8 @@ def verify_data_is_image_url(url: str, timeout: float = 1.0) -> bool: This method performs an actual network request to verify that a URL points to an image. It should be used when you need definitive verification beyond pattern matching. 
+ The method should be called before we convert image to base64 string (e.g., optimization step) + Args: timeout: Maximum time in seconds to wait for the request. Default is 1.0. diff --git a/setup.py b/setup.py index dbd60be5..ade71ac2 100644 --- a/setup.py +++ b/setup.py @@ -14,6 +14,7 @@ "litellm==1.75.0", "black", "scikit-learn", + "pillow", "tensorboardX", "tensorboard" ] diff --git a/tests/llm_optimizers_tests/test_optoprime_v2.py b/tests/llm_optimizers_tests/test_optoprime_v2.py index b1032f28..ce29d92e 100644 --- a/tests/llm_optimizers_tests/test_optoprime_v2.py +++ b/tests/llm_optimizers_tests/test_optoprime_v2.py @@ -101,18 +101,18 @@ def multiply(num): assert function_repr in part2, "Expected function representation to be present in part2" def test_big_data_truncation(): - num_1 = node(1, trainable=True) + num_1 = node("**2", trainable=True) - list_1 = node([1, 2, 3, 4, 5, 6, 7, 8, 9, 20] * 10, trainable=True) + list_1 = node("12345691912338" * 10, trainable=False) - result = num_1 + list_1[30] + result = list_1 + num_1 - optimizer = OptoPrimeV2([num_1, list_1], use_json_object_format=False, + optimizer = OptoPrimeV2([num_1], use_json_object_format=False, ignore_extraction_error=False, include_example=True, initial_var_char_limit=10) optimizer.zero_feedback() - optimizer.backward(result, 'make this number bigger') + optimizer.backward(result, 'compute the expression') summary = optimizer.summarize() part1, part2 = optimizer.construct_prompt(summary) @@ -120,11 +120,7 @@ def test_big_data_truncation(): part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) - truncated_repr = """ - -[1, 2, 3, ...(skipped due to length limit) - -""" + truncated_repr = """1234569191...(skipped due to length limit)""" assert truncated_repr in part2, "Expected truncated list representation to be present in part2" @@ -177,5 +173,5 @@ def test_extraction_pipeline(): assert 'variables' in suggestion, "Expected 'variables' in suggestion" assert 'int0' in suggestion['variables'], "Expected 'int0' variable in suggestion" assert 'int1' in suggestion['variables'], "Expected 'int1' variable in suggestion" - assert suggestion['variables']['int0'] == 5, "Expected int0 to be incremented to 5" - assert suggestion['variables']['int1'] == 5, "Expected int1 to be incremented to 5" + assert suggestion['variables']['int0'] == '5', "Expected int0 to be incremented to 5" + assert suggestion['variables']['int1'] == '5', "Expected int1 to be incremented to 5" diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py new file mode 100644 index 00000000..dffdedfb --- /dev/null +++ b/tests/llm_optimizers_tests/test_optoprime_v3.py @@ -0,0 +1,177 @@ +import os +import pytest +from opto.trace import bundle, node, GRAPH +import opto.optimizers +import importlib +import inspect +import json +import pickle +from opto.utils.llm import LLM + +from opto import trace +from opto.trace import node, bundle +from opto.optimizers.optoprime_v3 import OptoPrimeV3, OptimizerPromptSymbolSet2 + +# You can override for temporarly testing a specific optimizer ALL_OPTIMIZERS = [TextGrad] # [OptoPrimeMulti] ALL_OPTIMIZERS = [OptoPrime] + +# Skip tests if no API credentials are available +SKIP_REASON = "No API credentials found" +HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get( + "OPENAI_API_KEY") +llm = LLM() + + +@pytest.fixture(autouse=True) +def 
clear_graph(): + """Reset the graph before each test""" + GRAPH.clear() + yield + GRAPH.clear() + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_response_extraction(): + pass + + +def test_tag_template_change(): + num_1 = node(1, trainable=True) + num_2 = node(2, trainable=True, description="<=5") + result = num_1 + num_2 + optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False, + ignore_extraction_error=False, + include_example=True, + optimizer_prompt_symbol_set=OptimizerPromptSymbolSet2()) + + optimizer.zero_feedback() + optimizer.backward(result, 'make this number bigger') + + summary = optimizer.summarize() + part1, part2 = optimizer.construct_prompt(summary) + + part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) + part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + + assert """""" in part1, "Expected tag to be present in part1" + assert """""" in part2, "Expected tag to be present in part2" + + print(part1) + print(part2) + + +@bundle() +def transform(num): + """Add number""" + return num + 1 + + +@bundle(trainable=True) +def multiply(num): + return num * 5 + + +def test_function_repr(): + num_1 = node(1, trainable=False) + + result = multiply(transform(num_1)) + optimizer = OptoPrimeV3([multiply.parameter], use_json_object_format=False, + ignore_extraction_error=False, + include_example=True) + + optimizer.zero_feedback() + optimizer.backward(result, 'make this number bigger') + + summary = optimizer.summarize() + part1, part2 = optimizer.construct_prompt(summary) + + part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) + part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + + function_repr = """ + +def multiply(num): + return num * 5 + + +The code should start with: +def multiply(num): + +""" + + assert function_repr in part2, "Expected function representation to be present in part2" + +def test_big_data_truncation(): + num_1 = node("**2", trainable=True) + + list_1 = node("12345691912338" * 10, trainable=False) + + result = list_1 + num_1 + + optimizer = OptoPrimeV3([num_1], use_json_object_format=False, + ignore_extraction_error=False, + include_example=True, initial_var_char_limit=10) + + optimizer.zero_feedback() + optimizer.backward(result, 'compute the expression') + + summary = optimizer.summarize() + part1, part2 = optimizer.construct_prompt(summary) + + part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) + part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + + truncated_repr = """1234569191...(skipped due to length limit)""" + + assert truncated_repr in part2, "Expected truncated list representation to be present in part2" + +def test_extraction_pipeline(): + num_1 = node(1, trainable=True) + num_2 = node(2, trainable=True, description="<=5") + result = num_1 + num_2 + optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False, + ignore_extraction_error=False, + include_example=True, + optimizer_prompt_symbol_set=OptimizerPromptSymbolSet2()) + + optimizer.zero_feedback() + optimizer.backward(result, 'make this number bigger') + + summary = optimizer.summarize() + part1, part2 = optimizer.construct_prompt(summary) + + part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) + part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + + messages = [ + {"role": "system", "content": part1}, + {"role": "user", "content": part2}, + ] + + # response = optimizer.llm(messages=messages) + # response = 
response.choices[0].message.content
+    response = """
+<reason>
+The instruction suggests that the output, `add0`, needs to be made bigger than it currently is (3). The code performs an addition of `int0` and `int1` to produce `add0`. To increase `add0`, we can increase the values of `int0` or `int1`, or both. Given that `int1` has a constraint of being less than or equal to 5, we can set `int0` to a higher value, since it has no explicit constraint. By adjusting `int0` to a higher value, the output can be made larger in accordance with the feedback.
+</reason>
+
+<var>
+<name>int0</name>
+<data>
+5
+</data>
+</var>
+
+<var>
+<name>int1</name>
+<data>
+5
+</data>
+</var>
+"""
+    reasoning = response
+    suggestion = optimizer.extract_llm_suggestion(response)
+
+    assert 'reasoning' in suggestion, "Expected 'reasoning' in suggestion"
+    assert 'variables' in suggestion, "Expected 'variables' in suggestion"
+    assert 'int0' in suggestion['variables'], "Expected 'int0' variable in suggestion"
+    assert 'int1' in suggestion['variables'], "Expected 'int1' variable in suggestion"
+    assert suggestion['variables']['int0'] == '5', "Expected int0 to be incremented to 5"
+    assert suggestion['variables']['int1'] == '5', "Expected int1 to be incremented to 5"

From 85edcdced25f2bf74906bcc59bb9c50ce18b300d Mon Sep 17 00:00:00 2001
From: windweller
Date: Sun, 30 Nov 2025 21:07:49 -0800
Subject: [PATCH 26/51] add protected rounds

---
 opto/optimizers/backbone.py | 47 ++++++++++++++++++++++++++++---------
 opto/optimizers/helix.py    |  0
 2 files changed, 36 insertions(+), 11 deletions(-)
 create mode 100644 opto/optimizers/helix.py

diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py
index 5d918f1e..110cd1f0 100644
--- a/opto/optimizers/backbone.py
+++ b/opto/optimizers/backbone.py
@@ -422,6 +422,7 @@ class ConversationHistory:
     """Manages conversation history across multiple turns using LiteLLM unified format"""
     turns: List[Union[UserTurn, AssistantTurn]] = field(default_factory=list)
     system_prompt: Optional[str] = None
+    protected_rounds: int = 0  # Initial rounds to never truncate (task definition)

     def add_user_turn(self, turn: UserTurn) -> 'ConversationHistory':
         """Add a user turn"""
@@ -451,13 +452,15 @@ def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary format"""
         return {
             "system_prompt": self.system_prompt,
+            "protected_rounds": self.protected_rounds,
             "turns": [turn.to_dict() for turn in self.turns]
         }

     def to_litellm_format(
             self,
             n: int = -1,
-            truncate_strategy: Literal["from_start", "from_end"] = "from_start"
+            truncate_strategy: Literal["from_start", "from_end"] = "from_start",
+            protected_rounds: Optional[int] = None
     ) -> List[Dict[str, Any]]:
         """
         Convert to LiteLLM messages format (OpenAI-compatible, works with all providers)
@@ -469,28 +472,46 @@ def to_litellm_format(
             truncate_strategy: How to truncate when n is specified:
                 - "from_start": Remove oldest rounds, keep the most recent n rounds (default)
                 - "from_end": Remove newest rounds, keep the oldest n rounds
+            protected_rounds: Number of initial rounds to never truncate (task definition).
+                If None, uses self.protected_rounds. These rounds count towards n, so
+                if n=5 and protected_rounds=1, you get 1 protected + 4 truncatable rounds.
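+
+        Example (a minimal sketch of the arithmetic): with 10 recorded rounds,
+        n=5, and protected_rounds=1, round 1 is always kept and the 4 most
+        recent rounds fill the remaining budget.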
Returns: List of message dictionaries in LiteLLM format """ + # Determine protected rounds + n_protected = protected_rounds if protected_rounds is not None else self.protected_rounds + protected_turns = n_protected * 2 # Each round = user + assistant + # Apply truncation to turns if n == -1: selected_turns = self.turns else: - # n = number of historical rounds (pairs) + # Protected rounds count towards N + # So if N=5 and protected_rounds=1, we keep 1 protected + 4 from truncatable + remaining_rounds = max(0, n - n_protected) + + # Split into protected and truncatable turns + protected_part = self.turns[:protected_turns] + truncatable_part = self.turns[protected_turns:] + + # remaining_rounds = number of rounds (pairs) from the truncatable part # Each round = 2 turns (user + assistant) # Plus include current incomplete round (if last turn is user, +1) - has_incomplete_round = len(self.turns) > 0 and isinstance(self.turns[-1], UserTurn) - n_turns = n * 2 + (1 if has_incomplete_round else 0) + has_incomplete_round = len(truncatable_part) > 0 and isinstance(truncatable_part[-1], UserTurn) + n_turns = remaining_rounds * 2 + (1 if has_incomplete_round else 0) if truncate_strategy == "from_start": - # Keep last n_turns (remove from start) - selected_turns = self.turns[-n_turns:] if n_turns > 0 else [] + # Keep last n_turns from truncatable part (remove from start) + truncated_part = truncatable_part[-n_turns:] if n_turns > 0 else [] elif truncate_strategy == "from_end": - # Keep first n_turns (remove from end) - selected_turns = self.turns[:n_turns] if n_turns > 0 else [] + # Keep first n_turns from truncatable part (remove from end) + truncated_part = truncatable_part[:n_turns] if n_turns > 0 else [] else: raise ValueError(f"Unknown truncate_strategy: {truncate_strategy}. Use 'from_start' or 'from_end'") + + # Combine protected + truncated + selected_turns = protected_part + truncated_part messages = [] @@ -514,7 +535,8 @@ def to_litellm_format( def to_messages( self, n: int = -1, - truncate_strategy: Literal["from_start", "from_end"] = "from_start" + truncate_strategy: Literal["from_start", "from_end"] = "from_start", + protected_rounds: Optional[int] = None ) -> List[Dict[str, Any]]: """ Alias for to_litellm_format() for convenience @@ -526,11 +548,13 @@ def to_messages( truncate_strategy: How to truncate when n is specified: - "from_start": Remove oldest rounds, keep the most recent n rounds (default) - "from_end": Remove newest rounds, keep the oldest n rounds + protected_rounds: Number of initial rounds to never truncate (task definition). + If None, uses self.protected_rounds. Counts towards n. 
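+
+        Example (a minimal sketch): to_messages(n=3, truncate_strategy="from_end")
+        keeps the protected rounds plus the oldest remaining rounds rather than
+        the most recent ones.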
Returns: List of message dictionaries in LiteLLM format """ - return self.to_litellm_format(n=n, truncate_strategy=truncate_strategy) + return self.to_litellm_format(n=n, truncate_strategy=truncate_strategy, protected_rounds=protected_rounds) def save_to_file(self, filepath: str): """Save conversation history to JSON file""" @@ -545,7 +569,8 @@ def load_from_file(cls, filepath: str) -> 'ConversationHistory': # This is a simplified loader - you'd want more robust deserialization history = cls( - system_prompt=data.get('system_prompt') + system_prompt=data.get('system_prompt'), + protected_rounds=data.get('protected_rounds', 0) ) # Note: Full deserialization would require reconstructing objects from dicts diff --git a/opto/optimizers/helix.py b/opto/optimizers/helix.py new file mode 100644 index 00000000..e69de29b From 82a590d7c8e0f8a449f62fe5e5d8cbc4c07f25ba Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 1 Dec 2025 13:02:43 -0800 Subject: [PATCH 27/51] add content block --- opto/features/inference/dspy_example.py | 112 ++++ opto/optimizers/backbone.py | 268 +++++++- opto/optimizers/optoprime_v3.py | 630 ++++++++++++++---- .../llm_optimizers_tests/test_optoprime_v3.py | 258 ++++++- tests/unit_tests/test_multi_modal.py | 6 + tests/unit_tests/test_optimizer_backbone.py | 416 ++++++++++++ 6 files changed, 1560 insertions(+), 130 deletions(-) create mode 100644 opto/features/inference/dspy_example.py create mode 100644 tests/unit_tests/test_multi_modal.py create mode 100644 tests/unit_tests/test_optimizer_backbone.py diff --git a/opto/features/inference/dspy_example.py b/opto/features/inference/dspy_example.py new file mode 100644 index 00000000..25a72ab5 --- /dev/null +++ b/opto/features/inference/dspy_example.py @@ -0,0 +1,112 @@ +import dspy +import heapq +from typing import List, Dict, Any, Optional, Tuple +from dataclasses import dataclass + +dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini")) + + +@dataclass +class HistoryNode: + """A node representing a conversation history with its associated score.""" + history: dspy.History + score: float + depth: int # Number of questions asked so far + last_question: str + last_answer: str + + def __lt__(self, other): + # For max heap behavior, negate the score (heapq is min heap by default) + return self.score > other.score + + def __repr__(self): + return f"HistoryNode(score={self.score}, depth={self.depth}, last_q='{self.last_answer[:30]}...')" + + +class PriorityQueue: + """Priority queue for storing conversation histories ranked by score.""" + + def __init__(self, max_size: Optional[int] = None): + self.heap: List[HistoryNode] = [] + self.max_size = max_size + + def pop(self) -> Optional[HistoryNode]: + """Remove and return the highest-scoring history node.""" + if self.heap: + return heapq.heappop(self.heap) + return None + + def peek(self) -> Optional[HistoryNode]: + """Return the highest-scoring history node without removing it.""" + return self.heap[0] if self.heap else None + + def is_empty(self) -> bool: + """Check if the priority queue is empty.""" + return len(self.heap) == 0 + + def size(self) -> int: + """Return the number of items in the queue.""" + return len(self.heap) + + def get_all_sorted(self) -> List[HistoryNode]: + """Return all nodes sorted by score (highest first) without modifying the queue.""" + return sorted(self.heap, reverse=True) + + def clear(self): + """Remove all items from the queue.""" + self.heap.clear() + + +class HistoryPriorityQueue(PriorityQueue): + + # We keep the signature specific functions 
here to change things! + def push(self, history: dspy.History, score: float, question: str, answer: str, depth: int = 0): + """Add a new history node to the priority queue.""" + node = HistoryNode( + history=history, + score=score, + depth=depth, + last_question=question, + last_answer=answer + ) + + heapq.heappush(self.heap, node) + + # Maintain max size if specified + if self.max_size and len(self.heap) > self.max_size: + # Remove the lowest scoring item (at the end after popping highest) + temp_items = [] + # Keep the best items + for _ in range(min(self.max_size, len(self.heap))): + if self.heap: + temp_items.append(heapq.heappop(self.heap)) + + self.heap = temp_items + # Re-heapify + heapq.heapify(self.heap) + +class MySignature(dspy.Signature): + question: str = dspy.InputField() + history: dspy.History = dspy.InputField() + answer: str = dspy.OutputField() + +predict = dspy.Predict(MySignature) +outputs = predict(question="What is the capital of France?") +history = dspy.History(messages=[{"question": "What is the capital of France?", **outputs}]) +outputs_with_history = predict(question="Are you sure?", history=history) + +""" +Idea 1: Greedy explorer -- always pop off the highest scoring node +Idea 2: Discounted sum of reward -- after each proposal, we update the full PATH that leads to the node (need additional structure) +""" + +class GreedyExplorer: + def __init__(self, exploration_budget: int = 20, max_queue_size: int = 100): + self.pq = HistoryPriorityQueue(max_size=max_queue_size) + self.exploration_budget = exploration_budget + self.explored_nodes = 0 + self.final_result = None + self.initial_history = dspy.History(messages=[]) + +if __name__ == '__main__': + pass \ No newline at end of file diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index 110cd1f0..4f0dd8da 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -10,20 +10,73 @@ from pathlib import Path import warnings +from abc import ABC, abstractmethod + @dataclass -class TextContent: +class ContentBlock(ABC): + """Abstract base class for all content blocks.""" + + @abstractmethod + def to_dict(self) -> Dict[str, Any]: + """Convert the content block to a dictionary representation. + + Returns: + Dict[str, Any]: Dictionary representation of the content block + """ + raise NotImplementedError("Subclasses must implement this method") + +@dataclass +class TextContent(ContentBlock): """Text content block""" type: Literal["text"] = "text" text: str = "" def to_dict(self) -> Dict[str, Any]: return {"type": self.type, "text": self.text} + + def __add__(self, other) -> 'TextContent': + """Concatenate text content with strings or other TextContent objects. + + Args: + other: String or TextContent to concatenate + + Returns: + TextContent: New TextContent with concatenated text + """ + if isinstance(other, str): + return TextContent(text=self.text + other) + elif isinstance(other, TextContent): + return TextContent(text=self.text + other.text) + else: + return NotImplemented + + def __radd__(self, other) -> 'TextContent': + """Right-side concatenation (when string is on the left). 
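+
+        Example (a minimal sketch): "prefix: " + TextContent(text="body")
+        returns TextContent(text="prefix: body").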
+ + Args: + other: String to concatenate + + Returns: + TextContent: New TextContent with concatenated text + """ + if isinstance(other, str): + return TextContent(text=other + self.text) + else: + return NotImplemented @dataclass -class ImageContent: - """Image content block - supports URLs or base64""" +class ImageContent(ContentBlock): + """Image content block - supports URLs, base64, file paths, and numpy arrays. + + Supports multiple ways to create an ImageContent: + 1. Direct instantiation with image_url or image_data + 2. from_file/from_path: Load from local file path + 3. from_url: Create from HTTP/HTTPS URL + 4. from_array: Create from numpy array or array-like RGB image + 5. from_value: Auto-detect and create from various formats + """ type: Literal["image"] = "image" image_url: Optional[str] = None image_data: Optional[str] = None # base64 encoded @@ -46,7 +99,7 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_file(cls, filepath: str, media_type: Optional[str] = None): - """Load image from file""" + """Load image from file path.""" path = Path(filepath) if not media_type: ext_to_type = { @@ -63,6 +116,207 @@ def from_file(cls, filepath: str, media_type: Optional[str] = None): return cls(image_data=image_data, media_type=media_type) + @classmethod + def from_path(cls, filepath: str, media_type: Optional[str] = None): + """Load image from file path. Alias for from_file.""" + return cls.from_file(filepath, media_type) + + @classmethod + def from_url(cls, url: str, media_type: str = "image/jpeg"): + """Create ImageContent from an HTTP/HTTPS URL. + + Args: + url: HTTP or HTTPS URL pointing to an image + media_type: MIME type of the image (default: image/jpeg) + """ + return cls(image_url=url, media_type=media_type) + + @classmethod + def from_array(cls, array: Any, format: str = "PNG"): + """Create ImageContent from a numpy array or array-like RGB image. + + Args: + array: numpy array representing an image (H, W, C) with values in [0, 255] or [0, 1] + format: Image format (PNG, JPEG, etc.). Default: PNG + + Returns: + ImageContent with base64-encoded image data + """ + try: + import numpy as np + except ImportError: + raise ImportError("numpy is required for from_array. Install with: pip install numpy") + + try: + from PIL import Image + except ImportError: + raise ImportError("Pillow is required for from_array. Install with: pip install Pillow") + + import io + + # Convert to numpy array if not already + if not isinstance(array, np.ndarray): + array = np.array(array) + + # Normalize to [0, 255] if needed + if array.dtype == np.float32 or array.dtype == np.float64: + if array.max() <= 1.0: + array = (array * 255).astype(np.uint8) + else: + array = array.astype(np.uint8) + elif array.dtype != np.uint8: + array = array.astype(np.uint8) + + # Convert to PIL Image and encode + image = Image.fromarray(array) + buffer = io.BytesIO() + image.save(buffer, format=format.upper()) + buffer.seek(0) + + image_data = base64.b64encode(buffer.getvalue()).decode('utf-8') + media_type = f"image/{format.lower()}" + + return cls(image_data=image_data, media_type=media_type) + + @classmethod + def from_pil(cls, image: Any, format: str = "PNG"): + """Create ImageContent from a PIL Image. + + Args: + image: PIL Image object + format: Image format (PNG, JPEG, etc.). 
Default: PNG + + Returns: + ImageContent with base64-encoded image data + """ + import io + + buffer = io.BytesIO() + img_format = image.format or format.upper() + image.save(buffer, format=img_format) + buffer.seek(0) + + image_data = base64.b64encode(buffer.getvalue()).decode('utf-8') + media_type = f"image/{img_format.lower()}" + + return cls(image_data=image_data, media_type=media_type) + + @classmethod + def from_bytes(cls, data: bytes, media_type: str = "image/jpeg"): + """Create ImageContent from raw image bytes. + + Args: + data: Raw image bytes + media_type: MIME type of the image (default: image/jpeg) + + Returns: + ImageContent with base64-encoded image data + """ + image_data = base64.b64encode(data).decode('utf-8') + return cls(image_data=image_data, media_type=media_type) + + @classmethod + def from_base64(cls, b64_data: str, media_type: str = "image/jpeg"): + """Create ImageContent from base64-encoded string. + + Args: + b64_data: Base64-encoded image data (without data URL prefix) + media_type: MIME type of the image (default: image/jpeg) + + Returns: + ImageContent with the provided base64 data + """ + return cls(image_data=b64_data, media_type=media_type) + + @classmethod + def from_data_url(cls, data_url: str): + """Create ImageContent from a data URL (data:image/...;base64,...). + + Args: + data_url: Data URL string in format data:image/;base64, + + Returns: + ImageContent with extracted base64 data and media type + """ + try: + header, b64_data = data_url.split(',', 1) + media_type = header.split(':')[1].split(';')[0] # e.g., "image/png" + return cls(image_data=b64_data, media_type=media_type) + except (ValueError, IndexError): + # Fallback: assume the whole thing is base64 data + return cls(image_data=data_url.split(',')[-1], media_type="image/jpeg") + + @classmethod + def from_value(cls, value: Any, format: str = "PNG"): + """Auto-detect format and create ImageContent from various input types. + + Args: + value: Can be: + - URL string (starting with 'http://' or 'https://') + - Data URL string (starting with 'data:image/') + - Local file path (string) + - Numpy array or array-like RGB image + - PIL Image object + - Raw bytes + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + + Returns: + ImageContent or None if the value cannot be converted + """ + # Handle string inputs + if isinstance(value, str): + # Data URL + if value.startswith('data:image/'): + return cls.from_data_url(value) + # HTTP/HTTPS URL + if value.startswith('http://') or value.startswith('https://'): + return cls.from_url(value) + # Assume it's a file path + if Path(value).exists(): + return cls.from_file(value) + return None + + # Handle bytes + if isinstance(value, bytes): + return cls.from_bytes(value) + + # Handle PIL Image + try: + from PIL import Image + if isinstance(value, Image.Image): + return cls.from_pil(value, format=format) + except ImportError: + pass + + # Handle numpy array or array-like + try: + import numpy as np + if isinstance(value, np.ndarray) or hasattr(value, '__array__'): + return cls.from_array(value, format=format) + except ImportError: + pass + + return None + + def set_image(self, image: Any, format: str = "PNG") -> None: + """Set the image from various input formats (mutates self). 
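+
+        Example (a minimal sketch; the URL and filename are placeholders):
+            img = ImageContent()
+            img.set_image("https://example.com/cat.png")   # URL is stored as image_url
+            img.set_image(open("cat.png", "rb").read())    # bytes are re-encoded to base64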
+ + Args: + image: Can be: + - URL string (starting with 'http://' or 'https://') + - Data URL string (starting with 'data:image/') + - Local file path (string) + - Numpy array or array-like RGB image + - PIL Image object + - Raw bytes + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + """ + result = ImageContent.from_value(image, format=format) + if result: + self.image_url = result.image_url + self.image_data = result.image_data + self.media_type = result.media_type + @dataclass class PDFContent: @@ -160,7 +414,7 @@ def from_file(cls, filepath: str, mime_type: Optional[str] = None): @dataclass -class ToolCall: +class ToolCall(ContentBlock): """Represents a tool call made by the LLM""" id: str type: str # "function", "web_search", etc. @@ -177,7 +431,7 @@ def to_dict(self) -> Dict[str, Any]: @dataclass -class ToolResult: +class ToolResult(ContentBlock): """Represents the result of a tool execution""" tool_call_id: str content: str # Result as string (can be JSON stringified) @@ -192,7 +446,7 @@ def to_dict(self) -> Dict[str, Any]: @dataclass -class ToolDefinition: +class ToolDefinition(ContentBlock): """Defines a tool that the LLM can use""" type: str # "function", "web_search", "file_search", etc. name: Optional[str] = None diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index a8c6570c..6647031f 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -6,23 +6,74 @@ import json from typing import Any, List, Dict, Union, Tuple, Optional -from dataclasses import dataclass, asdict +from dataclasses import dataclass, field, asdict from opto.optimizers.optoprime import OptoPrime, FunctionFeedback from opto.trace.utils import dedent from opto.optimizers.utils import truncate_expression, extract_xml_like_data, MultiModalPayload - -from opto.trace.nodes import ParameterNode, Node, MessageNode +from opto.trace.nodes import ParameterNode, Node, MessageNode, is_image from opto.trace.propagators import TraceGraph, GraphPropagator from opto.trace.propagators.propagators import Propagator from opto.utils.llm import AbstractModel, LLM from opto.optimizers.buffers import FIFOBuffer -from opto.optimizers.backbone import ConversationHistory, UserTurn, AssistantTurn +from opto.optimizers.backbone import ( + ConversationHistory, UserTurn, AssistantTurn, + ContentBlock, TextContent, ImageContent +) import copy import pickle import re from typing import Dict, Any + +def append_content_block(blocks: List[ContentBlock], block: ContentBlock) -> None: + """Append a content block to the list, merging consecutive TextContent blocks. + + If the last block in the list is a TextContent and the new block is also TextContent, + the text is appended to the existing block. Otherwise, the new block is added. + + Args: + blocks: The list of content blocks to append to (modified in place). + block: The content block to append. + """ + if isinstance(block, TextContent): + if blocks and isinstance(blocks[-1], TextContent): + # Merge with the previous TextContent block + blocks[-1] = TextContent(text=blocks[-1].text + block.text) + else: + blocks.append(block) + else: + # Non-text block (ImageContent, etc.) - just append + blocks.append(block) + + +def extend_content_blocks(blocks: List[ContentBlock], new_blocks: List[ContentBlock]) -> None: + """Extend content blocks list, merging consecutive TextContent blocks. + + Args: + blocks: The list of content blocks to extend (modified in place). + new_blocks: The list of content blocks to add. 
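+
+    Example (a minimal sketch; the URL is a placeholder):
+        blocks = [TextContent(text="# Variables")]
+        extend_content_blocks(blocks, [TextContent(text="\nx = 1"),
+                                       ImageContent(image_url="https://example.com/a.png")])
+        # blocks is now [TextContent("# Variables\nx = 1"), ImageContent(...)]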
+ """ + for block in new_blocks: + append_content_block(blocks, block) + + +def value_to_image_content(value: Any) -> Optional[ImageContent]: + """Convert a value to ImageContent if it's an image, otherwise return None. + + Uses is_image() from opto.trace.nodes for validation (stricter than ImageContent.from_value, + e.g., only accepts URLs with image extensions), then delegates to ImageContent.from_value(). + + Supports (via is_image detection): + - Base64 data URL strings (data:image/...) + - HTTP/HTTPS URLs pointing to images (pattern-based, must have image extension) + - PIL Image objects + - Raw image bytes + """ + if not is_image(value): + return None + return ImageContent.from_value(value) + class OptimizerPromptSymbolSet: """ By inheriting this class and pass into the optimizer. People can change the optimizer documentation @@ -212,13 +263,23 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): @dataclass class ProblemInstance: + """Problem instance that can contain both text and multimodal content. + + Each field can be either: + - A string (text-only content) + - A List[ContentBlock] (multimodal content with text and/or images) + + The class provides: + - __repr__: Returns text-only representation (backward compatible) + - to_content_blocks(): Returns List[ContentBlock] for multimodal prompts + """ instruction: str code: str documentation: str - variables: str - inputs: str - others: str - outputs: str + variables: Union[str, List[ContentBlock]] + inputs: Union[str, List[ContentBlock]] + others: Union[str, List[ContentBlock]] + outputs: Union[str, List[ContentBlock]] feedback: str context: Optional[str] @@ -252,15 +313,30 @@ class ProblemInstance: """ ) + @staticmethod + def _content_to_text(content: Union[str, List[ContentBlock]]) -> str: + """Convert content (str or List[ContentBlock]) to text representation.""" + if isinstance(content, str): + return content + # Extract text from content blocks, skip images + text_parts = [] + for block in content: + if isinstance(block, TextContent): + text_parts.append(block.text) + elif isinstance(block, ImageContent): + text_parts.append("[IMAGE]") + return "".join(text_parts) + def __repr__(self) -> str: + """Return text-only representation for backward compatibility.""" optimization_query = self.problem_template.format( instruction=self.instruction, code=self.code, documentation=self.documentation, - variables=self.variables, - inputs=self.inputs, - outputs=self.outputs, - others=self.others, + variables=self._content_to_text(self.variables), + inputs=self._content_to_text(self.inputs), + outputs=self._content_to_text(self.outputs), + others=self._content_to_text(self.others), feedback=self.feedback ) @@ -276,43 +352,76 @@ def __repr__(self) -> str: return optimization_query + def _ensure_content_blocks(self, content: Union[str, List[ContentBlock]]) -> List[ContentBlock]: + """Ensure content is a list of ContentBlocks.""" + if isinstance(content, str): + return [TextContent(text=content)] if content else [] + return content -@dataclass -class MemoryInstance: - variables: Dict[str, Tuple[Any, str]] # name -> (data, constraint) - feedback: str - optimizer_prompt_symbol_set: OptimizerPromptSymbolSet - - memory_example_template = dedent( - """{variables}{feedback} - """ - ) + def to_content_blocks(self) -> List[ContentBlock]: + """Convert the problem instance to a list of ContentBlocks. + + Consecutive TextContent blocks are merged into a single block for efficiency. + Images and other non-text blocks are kept separate. 
+ + Returns: + List[ContentBlock]: A list containing TextContent and ImageContent blocks + that represent the complete problem instance including any images + from variables, inputs, others, or outputs. + """ + blocks: List[ContentBlock] = [] + + # Header sections (always text) + header = dedent(f""" + # Instruction + {self.instruction} - def __init__(self, variables: Dict[str, Any], feedback: str, optimizer_prompt_symbol_set: OptimizerPromptSymbolSet, - index: Optional[int] = None): - self.feedback = feedback - self.optimizer_prompt_symbol_set = optimizer_prompt_symbol_set - self.variables = variables - self.index = index - - def __str__(self) -> str: - var_repr = "" - for k, v in self.variables.items(): - var_repr += dedent(f""" - <{self.optimizer_prompt_symbol_set.improved_variable_tag}> - <{self.optimizer_prompt_symbol_set.name_tag}>{k} - <{self.optimizer_prompt_symbol_set.value_tag}> - {v[0]} - - - """) + # Code + {self.code} - return self.memory_example_template.format( - variables=var_repr, - feedback=self.feedback, - index=" " + str(self.index) if self.index is not None else "" - ) + # Documentation + {self.documentation} + # Variables + """) + append_content_block(blocks, TextContent(text=header)) + + # Variables section (may contain images) + extend_content_blocks(blocks, self._ensure_content_blocks(self.variables)) + + # Inputs section + append_content_block(blocks, TextContent(text="\n\n# Inputs\n")) + extend_content_blocks(blocks, self._ensure_content_blocks(self.inputs)) + + # Others section + append_content_block(blocks, TextContent(text="\n\n# Others\n")) + extend_content_blocks(blocks, self._ensure_content_blocks(self.others)) + + # Outputs section + append_content_block(blocks, TextContent(text="\n\n# Outputs\n")) + extend_content_blocks(blocks, self._ensure_content_blocks(self.outputs)) + + # Feedback section + append_content_block(blocks, TextContent(text=f"\n\n# Feedback\n{self.feedback}")) + + # Context section (optional) + if self.context is not None and self.context.strip() != "": + append_content_block(blocks, TextContent(text=f"\n\n# Context\n{self.context}")) + + return blocks + + def has_images(self) -> bool: + """Check if this problem instance contains any images. + + Returns: + bool: True if any field contains ImageContent blocks. + """ + for field in [self.variables, self.inputs, self.others, self.outputs]: + if isinstance(field, list): + for block in field: + if isinstance(block, ImageContent): + return True + return False class OptoPrimeV3(OptoPrime): # This is generic representation prompt, which just explains how to read the problem. 
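A minimal usage sketch of the two methods above, assuming a populated
`instance` (illustrative only; `payload` is a made-up name):

    if instance.has_images():
        payload = instance.to_content_blocks()  # List[ContentBlock] for multimodal LLMs
    else:
        payload = str(instance)                 # plain-text prompt, as before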
@@ -571,17 +680,20 @@ def initialize_prompt(self):
         )
 
     def repr_node_value(self, node_dict, node_tag="node",
-                        value_tag="value", constraint_tag="constraint"):
+                        value_tag="value", constraint_tag="constraint") -> str:
+        """Returns text-only representation of node values (backward compatible)."""
         temp_list = []
         for k, v in node_dict.items():
             if "__code" not in k:
+                # For images, use placeholder text
+                value_repr = "[IMAGE]" if is_image(v[0]) else str(v[0])
                 if v[1] is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
                     constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
                     temp_list.append(
-                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{v[0]}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{value_repr}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
                 else:
                     temp_list.append(
-                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{v[0]}\n</{value_tag}>\n</{node_tag}>\n")
+                        f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{value_repr}\n</{value_tag}>\n</{node_tag}>\n")
             else:
                 constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
                 signature = v[1].replace("The code should start with:\n", "")
@@ -591,11 +703,16 @@ def repr_node_value(self, node_dict, node_tag="node",
         return "\n".join(temp_list)
 
     def repr_node_value_compact(self, node_dict, node_tag="node",
-                                value_tag="value", constraint_tag="constraint"):
+                                value_tag="value", constraint_tag="constraint") -> str:
+        """Returns text-only compact representation of node values (backward compatible)."""
         temp_list = []
         for k, v in node_dict.items():
             if "__code" not in k:
-                node_value = self.truncate_expression(v[0], self.initial_var_char_limit)
+                # For images, use placeholder text
+                if is_image(v[0]):
+                    node_value = "[IMAGE]"
+                else:
+                    node_value = self.truncate_expression(v[0], self.initial_var_char_limit)
                 if v[1] is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
                     constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
                     temp_list.append(
@@ -613,54 +730,284 @@ def repr_node_value_compact(self, node_dict, node_tag="node",
                         f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
         return "\n".join(temp_list)
 
-    def construct_prompt(self, summary, mask=None, *args, **kwargs):
+    def repr_node_value_as_content_blocks(self, node_dict, node_tag="node",
+                                          value_tag="value", constraint_tag="constraint") -> List[ContentBlock]:
+        """Returns a list of ContentBlocks representing node values, including images.
+
+        Consecutive TextContent blocks are merged for efficiency.
+        For image values, the text before and after the image are separate blocks.
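+
+        Expected shape for a single hypothetical image variable (XML text
+        abbreviated; exact tags follow the configured symbol set):
+
+            [TextContent(<opening node/value XML>),
+             ImageContent(...),
+             TextContent(<closing value/node XML>)]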
+        """
+        blocks: List[ContentBlock] = []
+
+        for k, v in node_dict.items():
+            value_data = v[0]
+            constraint = v[1]
+
+            if "__code" not in k:
+                # Check if this is an image
+                image_content = value_to_image_content(value_data)
+
+                if image_content is not None:
+                    # Image node: output XML structure, then image, then closing
+                    type_name = "image"
+                    constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else ""
+
+                    xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n"
+                    append_content_block(blocks, TextContent(text=xml_text))
+                    blocks.append(image_content)  # Image breaks the text flow
+
+                    closing_text = f"\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n" if constraint_expr else f"\n</{value_tag}>\n</{node_tag}>\n\n"
+                    append_content_block(blocks, TextContent(text=closing_text))
+                else:
+                    # Non-image node: text representation
+                    if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+                        constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+                        append_content_block(blocks, TextContent(
+                            text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+                        ))
+                    else:
+                        append_content_block(blocks, TextContent(
+                            text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n</{value_tag}>\n</{node_tag}>\n\n"
+                        ))
+            else:
+                # Code node (never an image)
+                constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+                signature = constraint.replace("The code should start with:\n", "")
+                func_body = value_data.replace(signature, "")
+                append_content_block(blocks, TextContent(
+                    text=f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+                ))
+
+        return blocks
+
+    def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node",
+                                                  value_tag="value", constraint_tag="constraint") -> List[ContentBlock]:
+        """Returns a list of ContentBlocks with compact representation, including images.
+
+        Consecutive TextContent blocks are merged for efficiency.
+        Non-image values are truncated. Images break the text flow.
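+
+        Illustrative call (names as used elsewhere in this class):
+
+            blocks = self.repr_node_value_compact_as_content_blocks(summary.inputs)
+            # long text values are truncated to `initial_var_char_limit`;
+            # each image value appears as its own ImageContent block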
+        """
+        blocks: List[ContentBlock] = []
+
+        for k, v in node_dict.items():
+            value_data = v[0]
+            constraint = v[1]
+
+            if "__code" not in k:
+                # Check if this is an image
+                image_content = value_to_image_content(value_data)
+
+                if image_content is not None:
+                    # Image node: output XML structure, then image, then closing
+                    type_name = "image"
+                    constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else ""
+
+                    xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n"
+                    append_content_block(blocks, TextContent(text=xml_text))
+                    blocks.append(image_content)  # Image breaks the text flow
+
+                    closing_text = f"\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n" if constraint_expr else f"\n</{value_tag}>\n</{node_tag}>\n\n"
+                    append_content_block(blocks, TextContent(text=closing_text))
+                else:
+                    # Non-image node: truncated text representation
+                    node_value = self.truncate_expression(value_data, self.initial_var_char_limit)
+                    if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+                        constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+                        append_content_block(blocks, TextContent(
+                            text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+                        ))
+                    else:
+                        append_content_block(blocks, TextContent(
+                            text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n</{node_tag}>\n\n"
+                        ))
+            else:
+                # Code node (never an image)
+                constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+                signature = constraint.replace("The code should start with:\n", "")
+                func_body = value_data.replace(signature, "")
+                node_value = self.truncate_expression(func_body, self.initial_var_char_limit)
+                append_content_block(blocks, TextContent(
+                    text=f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+                ))
+
+        return blocks
+
+    def construct_prompt(self, summary, mask=None, use_content_blocks=False, *args, **kwargs):
         """Construct the system and user prompt.
-        Expanded to construct a list of content blocks
+
+        Args:
+            summary: The FunctionFeedback summary containing graph information.
+            mask: List of section titles to exclude from the problem instance.
+            use_content_blocks: If True, return user_prompt as List[ContentBlock]
+                for multimodal support. If False, return text-only (backward compatible).
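+
+        Illustrative usage (sketch):
+
+            sys_p, user_p = self.construct_prompt(summary)
+            # text-only: user_p is a str
+            sys_p, user_p = self.construct_prompt(summary, use_content_blocks=True)
+            # multimodal: user_p is a List[ContentBlock]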
+ + Returns: + Tuple of (system_prompt: str, user_prompt: Union[str, List[ContentBlock]]) + - system_prompt is always a string + - user_prompt is either a string or List[ContentBlock] based on use_content_blocks """ system_prompt = ( self.representation_prompt + self.output_format_prompt ) # generic representation + output rule - user_prompt = self.user_prompt_template.format( - problem_instance=str(self.problem_instance(summary, mask=mask)) - ) # problem instance - if self.include_example: - user_prompt = ( - self.example_problem_template.format( - example_problem=self.example_problem, - example_response=self.example_response, - ) - + user_prompt - ) - - var_names = [] - for k, v in summary.variables.items(): - var_names.append(f"{k}") # ({type(v[0]).__name__}) - var_names = ", ".join(var_names) - - user_prompt += self.final_prompt.format(names=var_names) - - # Add examples - if len(self.memory) > 0: - formatted_final = self.final_prompt.format(names=var_names) - prefix = user_prompt.split(formatted_final)[0] - examples = [] - index = 0 - for variables, feedback in self.memory: - index += 1 - examples.append(str(MemoryInstance(variables, feedback, self.optimizer_prompt_symbol_set, index=index))) - - examples = "\n".join(examples) - user_prompt = ( - prefix - + f"\nBelow are some variables and their feedbacks you received in the past.\n\n{examples}\n\n" - + formatted_final - ) - self.memory.add((summary.variables, summary.user_feedback)) + + problem_inst = self.problem_instance(summary, mask=mask, use_content_blocks=use_content_blocks) + + if use_content_blocks: + # Build user prompt as a list of ContentBlocks + # Consecutive TextContent blocks are merged for efficiency + user_content_blocks: List[ContentBlock] = [] + + # Add example if included + if self.include_example: + example_text = self.example_problem_template.format( + example_problem=str(self.example_problem), # Example is always text + example_response=self.example_response, + ) + append_content_block(user_content_blocks, TextContent(text=example_text)) + + # Add problem instance header + append_content_block(user_content_blocks, TextContent(text=dedent(""" + Now you see problem instance: - return system_prompt, user_prompt + ================================ + """))) + + # Add problem instance content blocks (may contain images) + extend_content_blocks(user_content_blocks, problem_inst.to_content_blocks()) + + # Add footer and final prompt + var_names = ", ".join(k for k in summary.variables.keys()) + + append_content_block(user_content_blocks, TextContent(text=dedent(""" + ================================ - def problem_instance(self, summary, mask=None): + """))) + append_content_block(user_content_blocks, TextContent(text=self.final_prompt.format(names=var_names))) + + return system_prompt, user_content_blocks + else: + # Text-only user prompt (backward compatible) + user_prompt = self.user_prompt_template.format( + problem_instance=str(problem_inst) + ) + if self.include_example: + user_prompt = ( + self.example_problem_template.format( + example_problem=self.example_problem, + example_response=self.example_response, + ) + + user_prompt + ) + + # variables to optimize + var_names = [] + for k, v in summary.variables.items(): + var_names.append(f"{k}") + var_names = ", ".join(var_names) + + user_prompt += self.final_prompt.format(names=var_names) + + return system_prompt, user_prompt + + def problem_instance(self, summary, mask=None, use_content_blocks=False): + """Create a ProblemInstance from the summary. 
+ + Args: + summary: The FunctionFeedback summary containing graph information. + mask: List of section titles to exclude from the problem instance. + use_content_blocks: If True, use content blocks for multimodal sections + (variables, inputs, outputs, others). If False, use text-only. + + Returns: + ProblemInstance with either text-only or content block fields. + """ mask = mask or [] + + if use_content_blocks: + # Use content block representations for multimodal support + variables_content = ( + self.repr_node_value_as_content_blocks( + summary.variables, + node_tag=self.optimizer_prompt_symbol_set.variable_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.variables_section_title not in mask + else [] + ) + inputs_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.inputs, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.inputs_section_title not in mask + else [] + ) + outputs_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.output, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.outputs_section_title not in mask + else [] + ) + others_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.others, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.others_section_title not in mask + else [] + ) + else: + # Use text-only representations (backward compatible) + variables_content = ( + self.repr_node_value( + summary.variables, + node_tag=self.optimizer_prompt_symbol_set.variable_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.variables_section_title not in mask + else "" + ) + inputs_content = ( + self.repr_node_value_compact( + summary.inputs, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.inputs_section_title not in mask + else "" + ) + outputs_content = ( + self.repr_node_value_compact( + summary.output, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.outputs_section_title not in mask + else "" + ) + others_content = ( + self.repr_node_value_compact( + summary.others, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.others_section_title not in mask + else "" + ) + return ProblemInstance( instruction=self.objective if "#Instruction" not in mask else "", code=( @@ -673,39 +1020,49 @@ def problem_instance(self, summary, mask=None): if self.optimizer_prompt_symbol_set.documentation_section_title not in mask 
else "" ), - variables=( - self.repr_node_value(summary.variables, node_tag=self.optimizer_prompt_symbol_set.variable_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) - if self.optimizer_prompt_symbol_set.variables_section_title not in mask - else "" - ), - inputs=( - self.repr_node_value_compact(summary.inputs, node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.inputs_section_title not in mask else "" - ), - outputs=( - self.repr_node_value_compact(summary.output, node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.outputs_section_title not in mask else "" - ), - others=( - self.repr_node_value_compact(summary.others, node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.others_section_title not in mask else "" - ), + variables=variables_content, + inputs=inputs_content, + outputs=outputs_content, + others=others_content, feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", context=self.problem_context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "", optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) + def _has_images_in_summary(self, summary) -> bool: + """Check if any node values in the summary contain images.""" + for node_dict in [summary.variables, summary.inputs, summary.output, summary.others]: + if node_dict: + for k, v in node_dict.items(): + if is_image(v[0]): + return True + return False + def _step( - self, verbose=False, mask=None, *args, **kwargs + self, verbose=False, mask=None, use_content_blocks=None, *args, **kwargs ) -> Dict[ParameterNode, Any]: + """Execute one optimization step. + + Args: + verbose: If True, print prompts and responses. + mask: List of section titles to exclude from the problem instance. + use_content_blocks: If True, force use of content blocks for multimodal. + If False, force text-only. If None (default), auto-detect based on + whether the summary contains images. + + Returns: + Dictionary mapping parameters to their updated values. 
+ """ assert isinstance(self.propagator, GraphPropagator) summary = self.summarize() - system_prompt, user_prompt = self.construct_prompt(summary, mask=mask) + + # Auto-detect whether to use content blocks + if use_content_blocks is None: + use_content_blocks = self._has_images_in_summary(summary) or self.multimodal_payload.image_data is not None + + system_prompt, user_prompt = self.construct_prompt( + summary, mask=mask, use_content_blocks=use_content_blocks + ) response = self.call_llm( system_prompt=system_prompt, @@ -722,10 +1079,12 @@ def _step( # suggestion has two keys: reasoning, and variables if self.log is not None: + # For logging, always use text representation + log_user_prompt = user_prompt if isinstance(user_prompt, str) else str(self.problem_instance(summary)) self.log.append( { "system_prompt": system_prompt, - "user_prompt": user_prompt, + "user_prompt": log_user_prompt, "response": response, } ) @@ -750,25 +1109,52 @@ def extract_llm_suggestion(self, response: str): def call_llm( self, system_prompt: str, - user_prompt: str, + user_prompt: Union[str, List[ContentBlock]], verbose: Union[bool, str] = False, max_tokens: int = 4096, ): - """Call the LLM with a prompt and return the response.""" + """Call the LLM with a prompt and return the response. + + Args: + system_prompt: The system prompt (always a string). + user_prompt: The user prompt, either as a string or List[ContentBlock] + for multimodal content. + verbose: If True, print the prompt and response. If "output", only print response. + max_tokens: Maximum tokens in the response. + + Returns: + The LLM response content as a string. + """ if verbose not in (False, "output"): - print("Prompt\n", system_prompt + user_prompt) + if isinstance(user_prompt, str): + print("Prompt\n", system_prompt + user_prompt) + else: + # For content blocks, print text portions only + text_parts = [block.text for block in user_prompt if isinstance(block, TextContent)] + print("Prompt\n", system_prompt + "".join(text_parts) + " [+ images]") # Update system prompt in conversation history self.conversation_history.system_prompt = system_prompt - # Create user turn with text and optional image content + # Create user turn with content user_turn = UserTurn() - # Add image content if available (image_data is URL or base64 data URL) + # Add image content from multimodal_payload if available (legacy path) if self.multimodal_payload.image_data is not None: user_turn.add_image(url=self.multimodal_payload.image_data) - - user_turn.add_text(user_prompt) + + # Handle user_prompt based on type + if isinstance(user_prompt, str): + user_turn.add_text(user_prompt) + else: + # user_prompt is List[ContentBlock] + for block in user_prompt: + if isinstance(block, TextContent): + user_turn.content.append(block) + elif isinstance(block, ImageContent): + user_turn.content.append(block) + # Handle other content types if needed + self.conversation_history.add_user_turn(user_turn) # Get messages with conversation length control (truncate from start) diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py index dffdedfb..a8f785d5 100644 --- a/tests/llm_optimizers_tests/test_optoprime_v3.py +++ b/tests/llm_optimizers_tests/test_optoprime_v3.py @@ -10,7 +10,11 @@ from opto import trace from opto.trace import node, bundle -from opto.optimizers.optoprime_v3 import OptoPrimeV3, OptimizerPromptSymbolSet2 +from opto.optimizers.optoprime_v3 import ( + OptoPrimeV3, OptimizerPromptSymbolSet2, ProblemInstance, + 
    OptimizerPromptSymbolSet, value_to_image_content
+)
+from opto.optimizers.backbone import TextContent, ImageContent, ContentBlock
 
 # You can override for temporarily testing a specific optimizer
 ALL_OPTIMIZERS = [TextGrad] # [OptoPrimeMulti]
 ALL_OPTIMIZERS = [OptoPrime]
@@ -175,3 +179,255 @@ def test_extraction_pipeline():
     assert 'int1' in suggestion['variables'], "Expected 'int1' variable in suggestion"
     assert suggestion['variables']['int0'] == '5', "Expected int0 to be incremented to 5"
     assert suggestion['variables']['int1'] == '5', "Expected int1 to be incremented to 5"
+
+
+# ==================== Multimodal / Content Block Tests ====================
+
+def test_problem_instance_text_only():
+    """Test that ProblemInstance with text-only content works correctly."""
+    symbol_set = OptimizerPromptSymbolSet()
+
+    instance = ProblemInstance(
+        instruction="Test instruction",
+        code="y = add(x=a, y=b)",
+        documentation="[add] Adds two numbers",
+        variables="5",
+        inputs="3",
+        others="",
+        outputs="8",
+        feedback="Result should be 10",
+        context="Some context",
+        optimizer_prompt_symbol_set=symbol_set
+    )
+
+    # Test __repr__ returns string
+    text_repr = str(instance)
+    assert "Test instruction" in text_repr
+    assert "y = add(x=a, y=b)" in text_repr
+    assert "Result should be 10" in text_repr
+    assert "Some context" in text_repr
+
+    # Test to_content_blocks returns list
+    blocks = instance.to_content_blocks()
+    assert isinstance(blocks, list)
+    assert len(blocks) > 0
+    assert all(isinstance(b, (TextContent, ImageContent)) for b in blocks)
+
+    # Test has_images returns False for text-only
+    assert not instance.has_images()
+
+
+def test_problem_instance_with_content_blocks():
+    """Test ProblemInstance with List[ContentBlock] fields."""
+    symbol_set = OptimizerPromptSymbolSet()
+
+    # Create content blocks with an image
+    variables_blocks = [
+        TextContent(text=""),
+        ImageContent(image_url="https://example.com/test.jpg"),
+        TextContent(text="")
+    ]
+
+    instance = ProblemInstance(
+        instruction="Analyze the image",
+        code="result = analyze(img)",
+        documentation="[analyze] Analyzes an image",
+        variables=variables_blocks,  # List[ContentBlock]
+        inputs="",
+        others="",
+        outputs="cat",
+        feedback="Result should be 'dog'",
+        context=None,
+        optimizer_prompt_symbol_set=symbol_set
+    )
+
+    # Test __repr__ handles content blocks (should show [IMAGE] placeholder)
+    text_repr = str(instance)
+    assert "Analyze the image" in text_repr
+    assert "[IMAGE]" in text_repr
+
+    # Test to_content_blocks includes the image
+    blocks = instance.to_content_blocks()
+    assert isinstance(blocks, list)
+
+    # Find the ImageContent block
+    image_blocks = [b for b in blocks if isinstance(b, ImageContent)]
+    assert len(image_blocks) == 1
+    assert image_blocks[0].image_url == "https://example.com/test.jpg"
+
+    # Test has_images returns True
+    assert instance.has_images()
+
+
+def test_problem_instance_mixed_content():
+    """Test ProblemInstance with mixed text and image content in multiple fields."""
+    symbol_set = OptimizerPromptSymbolSet()
+
+    # Variables with image
+    variables_blocks = [
+        TextContent(text="Hello\n"),
+        TextContent(text=""),
+        ImageContent(image_data="base64data", media_type="image/png"),
+        TextContent(text="")
+    ]
+
+    # Inputs with image
+    inputs_blocks = [
+        TextContent(text=""),
+        ImageContent(image_url="https://example.com/ref.png"),
+        TextContent(text="")
+    ]
+
+    instance = ProblemInstance(
+        instruction="Compare images",
+        code="result = compare(img, reference)",
+        documentation="[compare] Compares two
images", + variables=variables_blocks, + inputs=inputs_blocks, + others=[], # Empty list + outputs="0.8", + feedback="Similarity should be higher", + context="Context text", + optimizer_prompt_symbol_set=symbol_set + ) + + # Test has_images + assert instance.has_images() + + # Test to_content_blocks + blocks = instance.to_content_blocks() + image_blocks = [b for b in blocks if isinstance(b, ImageContent)] + assert len(image_blocks) == 2 # One from variables, one from inputs + + +def test_value_to_image_content_url(): + """Test value_to_image_content with URL strings.""" + # Valid image URL + result = value_to_image_content("https://example.com/image.jpg") + assert result is not None + assert isinstance(result, ImageContent) + assert result.image_url == "https://example.com/image.jpg" + + # Non-image URL (no image extension) - is_image returns False for pattern check + result = value_to_image_content("https://example.com/page.html") + assert result is None + + # Non-URL string + result = value_to_image_content("just a regular string") + assert result is None + + +def test_value_to_image_content_base64(): + """Test value_to_image_content with base64 data URLs.""" + # Valid base64 data URL + data_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==" + result = value_to_image_content(data_url) + assert result is not None + assert isinstance(result, ImageContent) + assert result.image_data == "iVBORw0KGgoAAAANSUhEUg==" + assert result.media_type == "image/png" + + +def test_value_to_image_content_non_image(): + """Test value_to_image_content with non-image values.""" + # Integer + assert value_to_image_content(42) is None + + # List + assert value_to_image_content([1, 2, 3]) is None + + # Dict + assert value_to_image_content({"key": "value"}) is None + + # Regular string + assert value_to_image_content("hello world") is None + + +def test_construct_prompt_text_only(): + """Test construct_prompt with use_content_blocks=False (backward compatible).""" + num_1 = node(1, trainable=True) + num_2 = node(2, trainable=True) + result = num_1 + num_2 + + optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False) + optimizer.zero_feedback() + optimizer.backward(result, 'make this number bigger') + + summary = optimizer.summarize() + system_prompt, user_prompt = optimizer.construct_prompt(summary, use_content_blocks=False) + + # Both should be strings + assert isinstance(system_prompt, str) + assert isinstance(user_prompt, str) + assert "int0" in user_prompt or "int1" in user_prompt + + +def test_construct_prompt_with_content_blocks(): + """Test construct_prompt with use_content_blocks=True.""" + num_1 = node(1, trainable=True) + num_2 = node(2, trainable=True) + result = num_1 + num_2 + + optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False) + optimizer.zero_feedback() + optimizer.backward(result, 'make this number bigger') + + summary = optimizer.summarize() + system_prompt, user_prompt = optimizer.construct_prompt(summary, use_content_blocks=True) + + # system_prompt should be string, user_prompt should be List[ContentBlock] + assert isinstance(system_prompt, str) + assert isinstance(user_prompt, list) + assert all(isinstance(b, (TextContent, ImageContent)) for b in user_prompt) + + # Check that text content contains expected info + text_parts = [b.text for b in user_prompt if isinstance(b, TextContent)] + full_text = "".join(text_parts) + assert "int0" in full_text or "int1" in full_text + + +def test_repr_node_value_as_content_blocks(): + """Test 
repr_node_value_as_content_blocks method.""" + num_1 = node(1, trainable=True) + result = num_1 + 1 + + optimizer = OptoPrimeV3([num_1], use_json_object_format=False) + optimizer.zero_feedback() + optimizer.backward(result, 'test') + + # Test with non-image nodes + summary = optimizer.summarize() + blocks = optimizer.repr_node_value_as_content_blocks( + summary.variables, + node_tag=optimizer.optimizer_prompt_symbol_set.variable_tag, + value_tag=optimizer.optimizer_prompt_symbol_set.value_tag, + constraint_tag=optimizer.optimizer_prompt_symbol_set.constraint_tag + ) + + assert isinstance(blocks, list) + assert len(blocks) > 0 + assert all(isinstance(b, TextContent) for b in blocks) # No images in this case + + +def test_repr_node_value_compact_as_content_blocks(): + """Test repr_node_value_compact_as_content_blocks method.""" + long_string = "x" * 5000 # Long string that will be truncated + str_node = node(long_string, trainable=True) + result = str_node + "!" + + optimizer = OptoPrimeV3([str_node], use_json_object_format=False, initial_var_char_limit=100) + optimizer.zero_feedback() + optimizer.backward(result, 'test') + + summary = optimizer.summarize() + blocks = optimizer.repr_node_value_compact_as_content_blocks( + summary.inputs, + node_tag=optimizer.optimizer_prompt_symbol_set.node_tag, + value_tag=optimizer.optimizer_prompt_symbol_set.value_tag, + constraint_tag=optimizer.optimizer_prompt_symbol_set.constraint_tag + ) + + # Should be truncated + text_parts = [b.text for b in blocks if isinstance(b, TextContent)] + full_text = "".join(text_parts) + assert "skipped due to length limit" in full_text or len(full_text) < len(long_string) diff --git a/tests/unit_tests/test_multi_modal.py b/tests/unit_tests/test_multi_modal.py new file mode 100644 index 00000000..2ab72c75 --- /dev/null +++ b/tests/unit_tests/test_multi_modal.py @@ -0,0 +1,6 @@ +""" +We test +1). Image context API in the optimizer +2). 
Node as image, whether it will show up correctly in the optimizer +""" + diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py new file mode 100644 index 00000000..590a064a --- /dev/null +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -0,0 +1,416 @@ +""" +Comprehensive tests for optimizer backbone components (ConversationHistory, UserTurn, AssistantTurn) +Tests include: truncation strategies, multimodal content, and conversation management +""" +import pytest +import base64 +from opto.optimizers.backbone import ( + ConversationHistory, + UserTurn, + AssistantTurn, + TextContent, + ImageContent +) + + +# ============================================================================ +# Test Fixtures +# ============================================================================ + +def create_sample_conversation(): + """Create a sample conversation with multiple rounds""" + history = ConversationHistory(system_prompt="You are a helpful assistant.") + + # Round 1 + user1 = UserTurn().add_text("Hello, what's the weather?") + assistant1 = AssistantTurn().add_text("The weather is sunny today.") + history.add_user_turn(user1).add_assistant_turn(assistant1) + + # Round 2 + user2 = UserTurn().add_text("What about tomorrow?") + assistant2 = AssistantTurn().add_text("Tomorrow will be rainy.") + history.add_user_turn(user2).add_assistant_turn(assistant2) + + # Round 3 + user3 = UserTurn().add_text("Should I bring an umbrella?") + assistant3 = AssistantTurn().add_text("Yes, definitely bring an umbrella.") + history.add_user_turn(user3).add_assistant_turn(assistant3) + + # Round 4 + user4 = UserTurn().add_text("Thanks for the advice!") + assistant4 = AssistantTurn().add_text("You're welcome! Stay dry!") + history.add_user_turn(user4).add_assistant_turn(assistant4) + + return history + + +# ============================================================================ +# Truncation Tests +# ============================================================================ + +def test_default_all_history(): + """Test default behavior (n=-1) returns all history""" + history = create_sample_conversation() + + messages = history.to_messages() + + # Should have: system + 8 turns (4 user + 4 assistant) + assert len(messages) == 9 # 1 system + 8 messages + assert messages[0]["role"] == "system" + assert messages[0]["content"] == "You are a helpful assistant." 
+ assert messages[-1]["role"] == "assistant" + + +def test_truncate_from_start(): + """Test truncate_from_start strategy - keeps last N turns""" + history = create_sample_conversation() + + # Keep last 3 turns + messages = history.to_messages(n=3, truncate_strategy="from_start") + + # Should have: system + 3 turns + assert len(messages) == 4 # 1 system + 3 messages + assert messages[0]["role"] == "system" + + # Should have the last 3 turns + # Last 3 turns are: assistant3, user4, assistant4 + assert messages[1]["role"] == "assistant" + assert "umbrella" in messages[1]["content"] + assert messages[2]["role"] == "user" + assert "Thanks" in messages[2]["content"][0]["text"] + assert messages[3]["role"] == "assistant" + assert "welcome" in messages[3]["content"] + + +def test_truncate_from_end(): + """Test truncate_from_end strategy - keeps first N turns""" + history = create_sample_conversation() + + # Keep first 3 turns + messages = history.to_messages(n=3, truncate_strategy="from_end") + + # Should have: system + 3 turns + assert len(messages) == 4 # 1 system + 3 messages + assert messages[0]["role"] == "system" + + # Should have the first 3 turns + # First 3 turns are: user1, assistant1, user2 + assert messages[1]["role"] == "user" + assert "Hello" in messages[1]["content"][0]["text"] + assert messages[2]["role"] == "assistant" + assert "sunny" in messages[2]["content"] + assert messages[3]["role"] == "user" + assert "tomorrow" in messages[3]["content"][0]["text"] + + +def test_truncate_zero_turns(): + """Test truncating to 0 turns""" + history = create_sample_conversation() + + messages = history.to_messages(n=0, truncate_strategy="from_start") + + # Should only have system message + assert len(messages) == 1 + assert messages[0]["role"] == "system" + + +def test_truncate_more_than_available(): + """Test requesting more turns than available""" + history = create_sample_conversation() + + # Request 100 turns but only have 8 + messages = history.to_messages(n=100, truncate_strategy="from_start") + + # Should return all available + assert len(messages) == 9 # 1 system + 8 messages + + +def test_empty_conversation(): + """Test truncation on empty conversation""" + history = ConversationHistory(system_prompt="Test") + + messages = history.to_messages(n=5) + + assert len(messages) == 1 # Just system + assert messages[0]["role"] == "system" + + +def test_to_litellm_format_with_truncation(): + """Test to_litellm_format() also supports truncation""" + history = create_sample_conversation() + + messages = history.to_litellm_format(n=2, truncate_strategy="from_end") + + # Should have: system + 2 turns + assert len(messages) == 3 + assert messages[0]["role"] == "system" + assert messages[1]["role"] == "user" + assert messages[2]["role"] == "assistant" + + +def test_invalid_strategy(): + """Test that invalid strategy raises error""" + history = create_sample_conversation() + + with pytest.raises(ValueError, match="Unknown truncate_strategy"): + history.to_messages(n=2, truncate_strategy="invalid_strategy") + + +def test_negative_n_values(): + """Test that n=-1 returns all history""" + history = create_sample_conversation() + + # n=-1 should return all + messages_all = history.to_messages(n=-1) + assert len(messages_all) == 9 + + # Verify it's the same as not passing n at all + messages_default = history.to_messages() + assert len(messages_all) == len(messages_default) + + +# ============================================================================ +# Multimodal / Multi-Image Tests +# 
============================================================================ + +def test_user_turn_multiple_images(): + """Test that a user turn can have multiple images""" + history = ConversationHistory() + + # Create a user turn with text and multiple images (like the OpenAI example) + user_turn = (UserTurn() + .add_text("What are in these images? Is there any difference between them?") + .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg") + .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg")) + + history.add_user_turn(user_turn) + + # Convert to LiteLLM format + messages = history.to_litellm_format() + + # Should have 1 message + assert len(messages) == 1 + + user_msg = messages[0] + assert user_msg["role"] == "user" + + # Content should be a list with 3 items: 1 text + 2 images + assert len(user_msg["content"]) == 3 + + # Check first item is text + assert user_msg["content"][0]["type"] == "text" + assert user_msg["content"][0]["text"] == "What are in these images? Is there any difference between them?" + + # Check second item is first image + assert user_msg["content"][1]["type"] == "image_url" + assert user_msg["content"][1]["image_url"]["url"] == "https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg" + + # Check third item is second image + assert user_msg["content"][2]["type"] == "image_url" + assert user_msg["content"][2]["image_url"]["url"] == "https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg" + + +def test_assistant_turn_multiple_images(): + """Test that an assistant turn can also have multiple images (for models that generate images)""" + history = ConversationHistory() + + # Assistant turn with text and multiple images + assistant_turn = (AssistantTurn() + .add_text("Here are two generated images based on your request:") + .add_image(url="https://example.com/generated1.png") + .add_image(url="https://example.com/generated2.png")) + + history.add_assistant_turn(assistant_turn) + + # Convert to LiteLLM format + messages = history.to_litellm_format() + + assert len(messages) == 1 + assert messages[0]["role"] == "assistant" + + # Assistant should have text content + assert "Here are two generated images" in messages[0]["content"] + + +def test_mixed_content_types_in_turn(): + """Test mixing text, images, and other content types in a single turn""" + history = ConversationHistory() + + # Create a complex turn with multiple content types + user_turn = (UserTurn() + .add_text("Please analyze these images and this document:") + .add_image(url="https://example.com/chart1.png") + .add_image(url="https://example.com/chart2.png") + .add_text("What patterns do you see?")) + + history.add_user_turn(user_turn) + + messages = history.to_litellm_format() + + assert len(messages) == 1 + user_msg = messages[0] + + # Should have 4 content blocks: text, image, image, text + assert len(user_msg["content"]) == 4 + assert user_msg["content"][0]["type"] == "text" + assert user_msg["content"][1]["type"] == "image_url" + assert user_msg["content"][2]["type"] == "image_url" + assert user_msg["content"][3]["type"] == "text" + + +def test_multiple_images_with_base64(): + """Test multiple images using base64 encoding""" + history = ConversationHistory() + + # Create fake base64 image data + fake_image_data1 = base64.b64encode(b"fake image 1").decode('utf-8') + fake_image_data2 = 
base64.b64encode(b"fake image 2").decode('utf-8') + + user_turn = (UserTurn() + .add_text("Compare these two images:") + .add_image(data=fake_image_data1, media_type="image/png") + .add_image(data=fake_image_data2, media_type="image/jpeg")) + + history.add_user_turn(user_turn) + + messages = history.to_litellm_format() + + assert len(messages) == 1 + user_msg = messages[0] + + # Should have 3 content blocks + assert len(user_msg["content"]) == 3 + + # Check base64 data URLs are properly formatted + assert user_msg["content"][1]["type"] == "image_url" + assert user_msg["content"][1]["image_url"]["url"].startswith("data:image/png;base64,") + + assert user_msg["content"][2]["type"] == "image_url" + assert user_msg["content"][2]["image_url"]["url"].startswith("data:image/jpeg;base64,") + + +def test_conversation_with_multiple_multi_image_turns(): + """Test a full conversation where multiple turns each have multiple images""" + history = ConversationHistory(system_prompt="You are a helpful image analysis assistant.") + + # User turn 1: Multiple images + user1 = (UserTurn() + .add_text("What's the difference between these flowers?") + .add_image(url="https://example.com/rose.jpg") + .add_image(url="https://example.com/tulip.jpg")) + history.add_user_turn(user1) + + # Assistant response + assistant1 = AssistantTurn().add_text("The first is a rose with layered petals, the second is a tulip with a cup shape.") + history.add_assistant_turn(assistant1) + + # User turn 2: More images + user2 = (UserTurn() + .add_text("Now compare these landscapes:") + .add_image(url="https://example.com/mountain.jpg") + .add_image(url="https://example.com/beach.jpg") + .add_image(url="https://example.com/forest.jpg")) + history.add_user_turn(user2) + + messages = history.to_litellm_format() + + # Should have: system + user1 + assistant1 + user2 + assert len(messages) == 4 + + # Check user1 has 3 content blocks (1 text + 2 images) + assert len(messages[1]["content"]) == 3 + + # Check user2 has 4 content blocks (1 text + 3 images) + assert len(messages[3]["content"]) == 4 + + +# ============================================================================ +# Integration Tests - Truncation + Multimodal +# ============================================================================ + +def test_truncate_multimodal_conversation(): + """Test truncation works correctly with multimodal content""" + history = ConversationHistory(system_prompt="You are a vision assistant.") + + # Add several turns with images + for i in range(5): + user = (UserTurn() + .add_text(f"Analyze image {i}") + .add_image(url=f"https://example.com/image{i}.jpg")) + assistant = AssistantTurn().add_text(f"Analysis of image {i}") + history.add_user_turn(user).add_assistant_turn(assistant) + + # Truncate to last 2 turns + messages = history.to_messages(n=2, truncate_strategy="from_start") + + # Should have system + 2 turns + assert len(messages) == 3 + + # Check that multimodal content is preserved + assert len(messages[1]["content"]) == 2 # text + image + assert messages[1]["content"][1]["type"] == "image_url" + + +if __name__ == "__main__": + print("Running optimizer backbone tests...") + print("\n" + "="*80) + print("TRUNCATION TESTS") + print("="*80) + + test_default_all_history() + print("✓ Default all history") + + test_truncate_from_start() + print("✓ Truncate from start") + + test_truncate_from_end() + print("✓ Truncate from end") + + test_truncate_zero_turns() + print("✓ Truncate zero turns") + + test_truncate_more_than_available() + print("✓ 
Truncate more than available") + + test_empty_conversation() + print("✓ Empty conversation") + + test_to_litellm_format_with_truncation() + print("✓ LiteLLM format with truncation") + + test_invalid_strategy() + print("✓ Invalid strategy error handling") + + test_negative_n_values() + print("✓ Negative n values") + + print("\n" + "="*80) + print("MULTIMODAL TESTS") + print("="*80) + + test_user_turn_multiple_images() + print("✓ User turn with multiple images") + + test_assistant_turn_multiple_images() + print("✓ Assistant turn with multiple images") + + test_mixed_content_types_in_turn() + print("✓ Mixed content types in turn") + + test_multiple_images_with_base64() + print("✓ Multiple base64 images") + + test_conversation_with_multiple_multi_image_turns() + print("✓ Conversation with multiple multi-image turns") + + print("\n" + "="*80) + print("INTEGRATION TESTS") + print("="*80) + + test_truncate_multimodal_conversation() + print("✓ Truncate multimodal conversation") + + print("\n" + "="*80) + print("✅ All tests passed!") + print("="*80) + From f3668fcc548d1fffb19587c59a0d07dfe5e80e0a Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 2 Dec 2025 10:56:49 -0800 Subject: [PATCH 28/51] intermediate commit --- opto/optimizers/backbone.py | 120 ++++++++++++- opto/optimizers/opro_v3.py | 169 ++++++++++++++++-- opto/optimizers/optoprime_v3.py | 141 ++++++--------- .../llm_optimizers_tests/test_optoprime_v3.py | 94 +++++++++- 4 files changed, 417 insertions(+), 107 deletions(-) diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index 4f0dd8da..e8905e77 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -26,6 +26,121 @@ def to_dict(self) -> Dict[str, Any]: """ raise NotImplementedError("Subclasses must implement this method") +class ContentBlockList(list): + """List of content blocks with automatic type conversion. + + Supports automatic conversion from: + - str -> [TextContent(text=str)] + - TextContent -> [TextContent] + - ImageContent -> [ImageContent] + - List[ContentBlock] -> ContentBlockList + - None/empty -> [] + """ + + def __init__(self, content: Union[str, 'ContentBlock', List['ContentBlock'], None] = None): + """Initialize ContentBlockList with automatic type conversion. + + Args: + content: Can be a string (converted to TextContent), a single ContentBlock, + a list of ContentBlocks, or None (empty list). + """ + super().__init__() + if content is not None: + self.extend(self._normalize(content)) + + @staticmethod + def _normalize(content: Union[str, 'ContentBlock', List['ContentBlock'], None]) -> List['ContentBlock']: + """Normalize content to a list of ContentBlocks.""" + if content is None: + return [] + if isinstance(content, str): + return [TextContent(text=content)] if content else [] + if isinstance(content, list): + return content + # Single ContentBlock + return [content] + + @classmethod + def ensure(cls, content: Union[str, 'ContentBlock', List['ContentBlock'], None]) -> 'ContentBlockList': + """Ensure content is a ContentBlockList with automatic conversion. + + Args: + content: String, ContentBlock, list of ContentBlocks, or None + + Returns: + ContentBlockList with the content + """ + if isinstance(content, cls): + return content + return cls(content) + + def to_dict(self) -> Dict[str, Any]: + return {"type": "list", "blocks": [b.to_dict() for b in self]} + + def append(self, item: Union[str, 'ContentBlock']) -> 'ContentBlockList': + """Append a string or ContentBlock, merging consecutive text. 
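+
+        Example (illustrative):
+
+            lst = ContentBlockList()
+            lst.append("Hello, ").append("world")  # merged into one TextContent
+            lst.append(ImageContent(image_url="https://example.com/a.png"))
+            lst.append("!")                        # new TextContent after the image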
+ + Args: + item: String (auto-converted to TextContent) or ContentBlock. + If the last item is TextContent and item is also text, + they are merged into a single TextContent. + """ + if isinstance(item, str): + # String: merge with last TextContent or create new one + if self and isinstance(self[-1], TextContent): + self[-1] = TextContent(text=self[-1].text + item) + else: + super().append(TextContent(text=item)) + elif isinstance(item, TextContent): + # TextContent: merge with last TextContent or add + if self and isinstance(self[-1], TextContent): + self[-1] = TextContent(text=self[-1].text + item.text) + else: + super().append(item) + else: + # Other ContentBlock types (ImageContent, etc.): just add + super().append(item) + return self + + def extend(self, blocks: Union[str, 'ContentBlock', List['ContentBlock'], 'ContentBlockList', None]) -> 'ContentBlockList': + """Extend with blocks, merging consecutive TextContent. + + Args: + blocks: String, ContentBlock, list of ContentBlocks, or None. + Strings are auto-converted. Consecutive text is merged. + """ + normalized = self._normalize(blocks) + for block in normalized: + self.append(block) + return self + + def __add__(self, other) -> 'ContentBlockList': + """Concatenate content block lists with other content block lists or strings. + + Args: + other: ContentBlockList, List[ContentBlock], or string to concatenate + """ + if isinstance(other, (ContentBlockList, list)): + result = ContentBlockList(list(self)) + result.extend(other) + return result + elif isinstance(other, str): + result = ContentBlockList(list(self)) + result.append(TextContent(text=other)) + return result + else: + return NotImplemented + + def __radd__(self, other) -> 'ContentBlockList': + """Right-side concatenation (when string is on the left). + """ + if isinstance(other, str): + result = ContentBlockList([TextContent(text=other)]) + result.extend(self) + return result + else: + return NotImplemented + @dataclass class TextContent(ContentBlock): """Text content block""" @@ -409,8 +524,9 @@ def from_file(cls, filepath: str, mime_type: Optional[str] = None): ) -# Union type for all content types -ContentBlock = Union[TextContent, ImageContent, PDFContent, FileContent] +# Union type alias for common content types (for type hints) +# Note: ContentBlock remains the abstract base class for inheritance +ContentBlockUnion = Union[TextContent, ImageContent, PDFContent, FileContent] @dataclass diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py index 38f0f727..564f82a9 100644 --- a/opto/optimizers/opro_v3.py +++ b/opto/optimizers/opro_v3.py @@ -8,10 +8,13 @@ import json from textwrap import dedent from dataclasses import dataclass, asdict -from typing import Dict, Optional, List -from opto.trace.nodes import ParameterNode +from typing import Dict, Optional, List, Union, Any +from opto.trace.nodes import ParameterNode, is_image from opto.optimizers.optoprime_v2 import OptoPrimeV2, OptimizerPromptSymbolSet +from opto.optimizers.backbone import ( + ContentBlock, TextContent, ImageContent, ContentBlockList +) # Not inheriting from optoprime_v2 because this should have a smaller set class OPROPromptSymbolSet(OptimizerPromptSymbolSet): @@ -90,13 +93,15 @@ class ProblemInstance: This dataclass encapsulates a complete problem instance including the instruction, current variables/solution, and feedback received. + + Supports multimodal content - variables can contain images. 
Attributes ---------- instruction : str The instruction describing what needs to be done or the question to answer. - variables : str - The current proposed solution that can be modified. + variables : Union[str, List[ContentBlock]] + The current proposed solution that can be modified. Can contain images. feedback : str Feedback about the current solution. context: str @@ -111,6 +116,10 @@ class ProblemInstance: ------- __repr__() Returns a formatted string representation of the problem instance. + to_content_blocks() + Returns a ContentBlockList for multimodal prompts. + has_images() + Returns True if the problem instance contains images. Notes ----- @@ -118,7 +127,7 @@ class ProblemInstance: organizes the instruction, variables, and feedback into a structured format. """ instruction: str - variables: str + variables: Union[str, List[ContentBlock]] feedback: str context: Optional[str] @@ -137,10 +146,25 @@ class ProblemInstance: """ ) + @staticmethod + def _content_to_text(content: Union[str, List[ContentBlock]]) -> str: + """Convert content (str or List[ContentBlock]) to text representation.""" + if isinstance(content, str): + return content + # Extract text from content blocks, skip images + text_parts = [] + for block in content: + if isinstance(block, TextContent): + text_parts.append(block.text) + elif isinstance(block, ImageContent): + text_parts.append("[IMAGE]") + return "".join(text_parts) + def __repr__(self) -> str: + """Return text-only representation for backward compatibility.""" optimization_query = self.problem_template.format( instruction=self.instruction, - variables=self.variables, + variables=self._content_to_text(self.variables), feedback=self.feedback, ) @@ -156,6 +180,45 @@ def __repr__(self) -> str: return optimization_query + def to_content_blocks(self) -> ContentBlockList: + """Convert the problem instance to a list of ContentBlocks. + + Consecutive TextContent blocks are merged into a single block for efficiency. + Images and other non-text blocks are kept separate. + + Returns: + ContentBlockList: A list containing TextContent and ImageContent blocks + that represent the complete problem instance. + """ + blocks = ContentBlockList() + + # Instruction section + blocks.append(f"# Instruction\n{self.instruction}\n\n# Solution\n") + + # Variables/Solution section (may contain images) + blocks.extend(self.variables) + + # Feedback section + blocks.append(f"\n\n# Feedback\n{self.feedback}") + + # Context section (optional) + if self.context is not None and self.context.strip() != "": + blocks.append(f"\n\n# Context\n{self.context}") + + return blocks + + def has_images(self) -> bool: + """Check if this problem instance contains any images. + + Returns: + bool: True if variables field contains ImageContent blocks. + """ + if isinstance(self.variables, list): + for block in self.variables: + if isinstance(block, ImageContent): + return True + return False + class OPROv2(OptoPrimeV2): """OPRO (Optimization by PROmpting) optimizer version 2. @@ -328,7 +391,7 @@ def parameter_check(self, parameters: List[ParameterNode]): f"{param_names}. LLMs can only generate one image at a time." ) - def problem_instance(self, summary, mask=None): + def problem_instance(self, summary, mask=None, use_content_blocks=False): """Create a ProblemInstance from an optimization summary. Parameters @@ -338,6 +401,9 @@ def problem_instance(self, summary, mask=None): mask : list, optional List of sections to mask/hide in the problem instance. 
Can include "#Instruction", variable section title, or feedback section title. + use_content_blocks : bool, optional + If True, use content blocks for multimodal support (images). + If False, use text-only representation. Returns ------- @@ -350,18 +416,93 @@ def problem_instance(self, summary, mask=None): useful for ablation studies or specific optimization strategies. """ mask = mask or [] - return ProblemInstance( - instruction=self.objective if "#Instruction" not in mask else "", - variables=( - self.repr_node_value_compact(summary.variables, node_tag=self.optimizer_prompt_symbol_set.variable_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) + + if use_content_blocks: + # Use content block representation for multimodal support + variables_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.variables, + node_tag=self.optimizer_prompt_symbol_set.variable_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) + if self.optimizer_prompt_symbol_set.variables_section_title not in mask + else ContentBlockList() + ) + else: + # Use text-only representation (backward compatible) + variables_content = ( + self.repr_node_value_compact( + summary.variables, + node_tag=self.optimizer_prompt_symbol_set.variable_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag + ) if self.optimizer_prompt_symbol_set.variables_section_title not in mask else "" - ), + ) + + return ProblemInstance( + instruction=self.objective if "#Instruction" not in mask else "", + variables=variables_content, feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", + context=self.problem_context if hasattr(self, 'problem_context') else None, optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) + + def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", + value_tag="value", constraint_tag="constraint") -> ContentBlockList: + """Returns a ContentBlockList with compact representation, including images. + + Consecutive TextContent blocks are merged for efficiency. + Non-image values are truncated. Images break the text flow. 
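+
+        Result shape sketch for one hypothetical image variable:
+
+            ContentBlockList([TextContent(<opening node/value XML>),
+                              ImageContent(...),
+                              TextContent(<closing value/node XML>)])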
+        """
+        from opto.optimizers.optoprime_v3 import value_to_image_content
+
+        blocks = ContentBlockList()
+
+        for k, v in node_dict.items():
+            value_data = v[0]
+            constraint = v[1]
+
+            if "__code" not in k:
+                # Check if this is an image
+                image_content = value_to_image_content(value_data)
+
+                if image_content is not None:
+                    # Image node: output XML structure, then image, then closing
+                    type_name = "image"
+                    constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>\n" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else ""
+
+                    xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n"
+                    blocks.append(xml_text)
+                    blocks.append(image_content)  # Image breaks the text flow
+
+                    closing_text = f"\n</{value_tag}>\n{constraint_expr}</{node_tag}>\n\n" if constraint_expr else f"\n</{value_tag}>\n</{node_tag}>\n\n"
+                    blocks.append(closing_text)
+                else:
+                    # Non-image node: truncated text representation
+                    node_value = self.truncate_expression(value_data, self.initial_var_char_limit)
+                    if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+                        constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>\n"
+                        blocks.append(
+                            f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n{constraint_expr}</{node_tag}>\n\n"
+                        )
+                    else:
+                        blocks.append(
+                            f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n</{node_tag}>\n\n"
+                        )
+            else:
+                # Code node (never an image)
+                constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>\n"
+                signature = constraint.replace("The code should start with:\n", "")
+                func_body = value_data.replace(signature, "")
+                node_value = self.truncate_expression(func_body, self.initial_var_char_limit)
+                blocks.append(
+                    f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n</{value_tag}>\n{constraint_expr}</{node_tag}>\n\n"
+                )
+
+        return blocks

     def initialize_prompt(self):
         """Initialize and format the prompt templates.
diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py
index 6647031f..d6f1fbb8 100644
--- a/opto/optimizers/optoprime_v3.py
+++ b/opto/optimizers/optoprime_v3.py
@@ -18,7 +18,7 @@
 from opto.optimizers.buffers import FIFOBuffer
 from opto.optimizers.backbone import (
     ConversationHistory, UserTurn, AssistantTurn,
-    ContentBlock, TextContent, ImageContent
+    ContentBlock, TextContent, ImageContent, ContentBlockList
 )
 import copy
 import pickle
@@ -26,38 +26,6 @@
 from typing import Dict, Any
 
 
-def append_content_block(blocks: List[ContentBlock], block: ContentBlock) -> None:
-    """Append a content block to the list, merging consecutive TextContent blocks.
-
-    If the last block in the list is a TextContent and the new block is also TextContent,
-    the text is appended to the existing block. Otherwise, the new block is added.
-
-    Args:
-        blocks: The list of content blocks to append to (modified in place).
-        block: The content block to append.
-    """
-    if isinstance(block, TextContent):
-        if blocks and isinstance(blocks[-1], TextContent):
-            # Merge with the previous TextContent block
-            blocks[-1] = TextContent(text=blocks[-1].text + block.text)
-        else:
-            blocks.append(block)
-    else:
-        # Non-text block (ImageContent, etc.) - just append
-        blocks.append(block)
-
-
-def extend_content_blocks(blocks: List[ContentBlock], new_blocks: List[ContentBlock]) -> None:
-    """Extend content blocks list, merging consecutive TextContent blocks.
-
-    Args:
-        blocks: The list of content blocks to extend (modified in place).
-        new_blocks: The list of content blocks to add.
- """ - for block in new_blocks: - append_content_block(blocks, block) - - def value_to_image_content(value: Any) -> Optional[ImageContent]: """Convert a value to ImageContent if it's an image, otherwise return None. @@ -352,24 +320,18 @@ def __repr__(self) -> str: return optimization_query - def _ensure_content_blocks(self, content: Union[str, List[ContentBlock]]) -> List[ContentBlock]: - """Ensure content is a list of ContentBlocks.""" - if isinstance(content, str): - return [TextContent(text=content)] if content else [] - return content - - def to_content_blocks(self) -> List[ContentBlock]: + def to_content_blocks(self) -> ContentBlockList: """Convert the problem instance to a list of ContentBlocks. Consecutive TextContent blocks are merged into a single block for efficiency. Images and other non-text blocks are kept separate. Returns: - List[ContentBlock]: A list containing TextContent and ImageContent blocks + ContentBlockList: A list containing TextContent and ImageContent blocks that represent the complete problem instance including any images from variables, inputs, others, or outputs. """ - blocks: List[ContentBlock] = [] + blocks = ContentBlockList() # Header sections (always text) header = dedent(f""" @@ -384,29 +346,29 @@ def to_content_blocks(self) -> List[ContentBlock]: # Variables """) - append_content_block(blocks, TextContent(text=header)) + blocks.append(header) # Variables section (may contain images) - extend_content_blocks(blocks, self._ensure_content_blocks(self.variables)) + blocks.extend(self.variables) # Inputs section - append_content_block(blocks, TextContent(text="\n\n# Inputs\n")) - extend_content_blocks(blocks, self._ensure_content_blocks(self.inputs)) + blocks.append("\n\n# Inputs\n") + blocks.extend(self.inputs) # Others section - append_content_block(blocks, TextContent(text="\n\n# Others\n")) - extend_content_blocks(blocks, self._ensure_content_blocks(self.others)) + blocks.append("\n\n# Others\n") + blocks.extend(self.others) # Outputs section - append_content_block(blocks, TextContent(text="\n\n# Outputs\n")) - extend_content_blocks(blocks, self._ensure_content_blocks(self.outputs)) + blocks.append("\n\n# Outputs\n") + blocks.extend(self.outputs) # Feedback section - append_content_block(blocks, TextContent(text=f"\n\n# Feedback\n{self.feedback}")) + blocks.append(f"\n\n# Feedback\n{self.feedback}") # Context section (optional) if self.context is not None and self.context.strip() != "": - append_content_block(blocks, TextContent(text=f"\n\n# Context\n{self.context}")) + blocks.append(f"\n\n# Context\n{self.context}") return blocks @@ -731,13 +693,13 @@ def repr_node_value_compact(self, node_dict, node_tag="node", return "\n".join(temp_list) def repr_node_value_as_content_blocks(self, node_dict, node_tag="node", - value_tag="value", constraint_tag="constraint") -> List[ContentBlock]: - """Returns a list of ContentBlocks representing node values, including images. + value_tag="value", constraint_tag="constraint") -> ContentBlockList: + """Returns a ContentBlockList representing node values, including images. Consecutive TextContent blocks are merged for efficiency. For image values, the text before and after the image are separate blocks. 
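
        Example (hypothetical nodes -- the merge count is what matters here;
        the markup itself depends on the configured symbol set)::

            >>> blocks = opt.repr_node_value_as_content_blocks(
            ...     {"a": (1, None), "b": (2, None)})
            >>> len(blocks)  # two text-only nodes merge into one text block
            1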
""" - blocks: List[ContentBlock] = [] + blocks = ContentBlockList() for k, v in node_dict.items(): value_data = v[0] @@ -753,41 +715,41 @@ def repr_node_value_as_content_blocks(self, node_dict, node_tag="node", constraint_expr = f"<{constraint_tag}>\n{constraint}\n" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else "" xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n" - append_content_block(blocks, TextContent(text=xml_text)) + blocks.append(xml_text) blocks.append(image_content) # Image breaks the text flow closing_text = f"\n\n{constraint_expr}\n\n" if constraint_expr else f"\n\n\n\n" - append_content_block(blocks, TextContent(text=closing_text)) + blocks.append(closing_text) else: # Non-image node: text representation if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag: constraint_expr = f"<{constraint_tag}>\n{constraint}\n" - append_content_block(blocks, TextContent( - text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n\n{constraint_expr}\n\n\n" - )) + blocks.append( + f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n\n{constraint_expr}\n\n\n" + ) else: - append_content_block(blocks, TextContent( - text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n\n\n\n" - )) + blocks.append( + f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n\n\n\n" + ) else: # Code node (never an image) constraint_expr = f"<{constraint_tag}>\n{constraint}\n" signature = constraint.replace("The code should start with:\n", "") func_body = value_data.replace(signature, "") - append_content_block(blocks, TextContent( - text=f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n\n{constraint_expr}\n\n\n" - )) + blocks.append( + f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n\n{constraint_expr}\n\n\n" + ) return blocks def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", - value_tag="value", constraint_tag="constraint") -> List[ContentBlock]: - """Returns a list of ContentBlocks with compact representation, including images. + value_tag="value", constraint_tag="constraint") -> ContentBlockList: + """Returns a ContentBlockList with compact representation, including images. Consecutive TextContent blocks are merged for efficiency. Non-image values are truncated. Images break the text flow. 
""" - blocks: List[ContentBlock] = [] + blocks = ContentBlockList() for k, v in node_dict.items(): value_data = v[0] @@ -803,32 +765,32 @@ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", constraint_expr = f"<{constraint_tag}>\n{constraint}\n" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else "" xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n" - append_content_block(blocks, TextContent(text=xml_text)) + blocks.append(xml_text) blocks.append(image_content) # Image breaks the text flow closing_text = f"\n\n{constraint_expr}\n\n" if constraint_expr else f"\n\n\n\n" - append_content_block(blocks, TextContent(text=closing_text)) + blocks.append(closing_text) else: # Non-image node: truncated text representation node_value = self.truncate_expression(value_data, self.initial_var_char_limit) if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag: constraint_expr = f"<{constraint_tag}>\n{constraint}\n" - append_content_block(blocks, TextContent( - text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n\n{constraint_expr}\n\n\n" - )) + blocks.append( + f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n\n{constraint_expr}\n\n\n" + ) else: - append_content_block(blocks, TextContent( - text=f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n\n\n\n" - )) + blocks.append( + f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n\n\n\n" + ) else: # Code node (never an image) constraint_expr = f"<{constraint_tag}>\n{constraint}\n" signature = constraint.replace("The code should start with:\n", "") func_body = value_data.replace(signature, "") node_value = self.truncate_expression(func_body, self.initial_var_char_limit) - append_content_block(blocks, TextContent( - text=f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n\n{constraint_expr}\n\n\n" - )) + blocks.append( + f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n\n{constraint_expr}\n\n\n" + ) return blocks @@ -853,9 +815,8 @@ def construct_prompt(self, summary, mask=None, use_content_blocks=False, *args, problem_inst = self.problem_instance(summary, mask=mask, use_content_blocks=use_content_blocks) if use_content_blocks: - # Build user prompt as a list of ContentBlocks - # Consecutive TextContent blocks are merged for efficiency - user_content_blocks: List[ContentBlock] = [] + # Build user prompt as ContentBlockList (auto-merges consecutive text) + user_content_blocks = ContentBlockList() # Add example if included if self.include_example: @@ -863,26 +824,26 @@ def construct_prompt(self, summary, mask=None, use_content_blocks=False, *args, example_problem=str(self.example_problem), # Example is always text example_response=self.example_response, ) - append_content_block(user_content_blocks, TextContent(text=example_text)) + user_content_blocks.append(example_text) # Add problem instance header - append_content_block(user_content_blocks, TextContent(text=dedent(""" + user_content_blocks.append(dedent(""" Now you see problem instance: ================================ - """))) + """)) # Add problem instance content blocks (may contain images) - extend_content_blocks(user_content_blocks, problem_inst.to_content_blocks()) + user_content_blocks.extend(problem_inst.to_content_blocks()) # 
Add footer and final prompt var_names = ", ".join(k for k in summary.variables.keys()) - append_content_block(user_content_blocks, TextContent(text=dedent(""" + user_content_blocks.append(dedent(""" ================================ - """))) - append_content_block(user_content_blocks, TextContent(text=self.final_prompt.format(names=var_names))) + """)) + user_content_blocks.append(self.final_prompt.format(names=var_names)) return system_prompt, user_content_blocks else: diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py index a8f785d5..e403a873 100644 --- a/tests/llm_optimizers_tests/test_optoprime_v3.py +++ b/tests/llm_optimizers_tests/test_optoprime_v3.py @@ -14,7 +14,7 @@ OptoPrimeV3, OptimizerPromptSymbolSet2, ProblemInstance, OptimizerPromptSymbolSet, value_to_image_content ) -from opto.optimizers.backbone import TextContent, ImageContent, ContentBlock +from opto.optimizers.backbone import TextContent, ImageContent, ContentBlock, ContentBlockList # You can override for temporarly testing a specific optimizer ALL_OPTIMIZERS = [TextGrad] # [OptoPrimeMulti] ALL_OPTIMIZERS = [OptoPrime] @@ -431,3 +431,95 @@ def test_repr_node_value_compact_as_content_blocks(): text_parts = [b.text for b in blocks if isinstance(b, TextContent)] full_text = "".join(text_parts) assert "skipped due to length limit" in full_text or len(full_text) < len(long_string) + + +# ==================== Real LLM Call Tests ==================== + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_optimizer_step_real_llm_call(): + """Test a real optimization step with LLM call.""" + # Create a simple optimization problem + greeting = node("Hello", trainable=True, description="A greeting message") + + @bundle() + def make_sentence(word): + """Create a sentence from a word.""" + return f"{word}, how are you today?" 
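+    # Rough shape of the trace graph built above (node names are the usual
+    # defaults and are illustrative, not guaranteed):
+    #
+    #     greeting (str0, trainable) --make_sentence--> result
+    #
+    # backward() attaches the textual feedback to the output node and
+    # propagates it to str0, which step() then asks the LLM to rewrite.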
+ + result = make_sentence(greeting) + + # Create optimizer + optimizer = OptoPrimeV3( + [greeting], + use_json_object_format=False, + ignore_extraction_error=True, + include_example=False, + ) + + # Setup feedback + optimizer.zero_feedback() + optimizer.backward(result, "The greeting should be more formal and professional") + + # Execute optimization step - this makes a real LLM call + update_dict = optimizer.step(verbose=True) + + # Verify the optimizer produced a suggestion + print(f"Update dict: {update_dict}") + + # The LLM should have suggested a new value + # We don't assert specific content since LLM output varies + # but we verify the step completed without error + assert optimizer.log is not None + assert len(optimizer.log) > 0 + + # Check that the log contains the expected structure + last_log = optimizer.log[-1] + assert "system_prompt" in last_log + assert "user_prompt" in last_log + assert "response" in last_log + + print(f"LLM Response: {last_log['response'][:500]}...") + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_optimizer_step_with_content_blocks(): + """Test optimization step using content blocks (multimodal mode).""" + # Create trainable parameters + num_1 = node(5, trainable=True, description="A number to optimize") + num_2 = node(3, trainable=True, description="Another number") + + result = num_1 + num_2 + + # Create optimizer + optimizer = OptoPrimeV3( + [num_1, num_2], + use_json_object_format=False, + ignore_extraction_error=True, + include_example=False, + ) + + # Setup feedback + optimizer.zero_feedback() + optimizer.backward(result, "The sum should be exactly 100") + + # Test that construct_prompt works with content blocks + summary = optimizer.summarize() + system_prompt, user_prompt = optimizer.construct_prompt(summary, use_content_blocks=True) + + # Verify content blocks structure + from opto.optimizers.backbone import ContentBlockList + assert isinstance(user_prompt, ContentBlockList) + assert len(user_prompt) > 0 + + # Verify text is merged (should be fewer blocks than if not merged) + text_blocks = [b for b in user_prompt if isinstance(b, TextContent)] + print(f"Number of text blocks after merging: {len(text_blocks)}") + + # Execute the step (this makes a real LLM call) + update_dict = optimizer.step(verbose=True) + + print(f"Update dict: {update_dict}") + + # Verify the step completed + assert optimizer.log is not None + assert len(optimizer.log) > 0 From 6bace1cb0870051554b72cded4f0ff1002d7cab7 Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 2 Dec 2025 13:18:36 -0800 Subject: [PATCH 29/51] update actual call to ensure API correctness --- tests/unit_tests/test_multi_modal.py | 6 - tests/unit_tests/test_optimizer_backbone.py | 144 ++++++++++++++------ 2 files changed, 104 insertions(+), 46 deletions(-) delete mode 100644 tests/unit_tests/test_multi_modal.py diff --git a/tests/unit_tests/test_multi_modal.py b/tests/unit_tests/test_multi_modal.py deleted file mode 100644 index 2ab72c75..00000000 --- a/tests/unit_tests/test_multi_modal.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -We test -1). Image context API in the optimizer -2). 
Node as image, whether it will show up correctly in the optimizer -""" - diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index 590a064a..b50b1b58 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -2,6 +2,7 @@ Comprehensive tests for optimizer backbone components (ConversationHistory, UserTurn, AssistantTurn) Tests include: truncation strategies, multimodal content, and conversation management """ +import os import pytest import base64 from opto.optimizers.backbone import ( @@ -12,6 +13,11 @@ ImageContent ) +# Skip tests if no API credentials are available +SKIP_REASON = "No API credentials found" +HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get( + "OPENAI_API_KEY") + # ============================================================================ # Test Fixtures @@ -350,67 +356,125 @@ def test_truncate_multimodal_conversation(): assert len(messages[1]["content"]) == 2 # text + image assert messages[1]["content"][1]["type"] == "image_url" +# ============================================================================ +# Real LLM Call Tests with Images +# ============================================================================ -if __name__ == "__main__": - print("Running optimizer backbone tests...") +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_real_llm_call_with_multiple_images(): + """Test sending real images to GPT and getting a response. + + This test sends two flower images to GPT-4 Vision and asks it to compare them. + """ + from opto.utils.llm import LLM + + # Create conversation with images + history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.") + + # Create a user turn with text and two real flower images + user_turn = (UserTurn() + .add_text("What are in these images? Is there any difference between them? Please describe each image briefly.") + .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg") + .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg")) + + history.add_user_turn(user_turn) + + # Get messages in LiteLLM format + messages = history.to_litellm_format() + print("\n" + "="*80) - print("TRUNCATION TESTS") + print("REAL LLM CALL WITH MULTIPLE IMAGES") print("="*80) + print(f"\nSending {len(user_turn.content)} content blocks (1 text + 2 images)...") - test_default_all_history() - print("✓ Default all history") + # Make the LLM call + llm = LLM() + response = llm(messages=messages, max_tokens=500) - test_truncate_from_start() - print("✓ Truncate from start") + response_content = response.choices[0].message.content - test_truncate_from_end() - print("✓ Truncate from end") + print("\n📷 User Query:") + print(" What are in these images? 
Is there any difference between them?") + print("\n🤖 GPT Response:") + print("-" * 40) + print(response_content) + print("-" * 40) - test_truncate_zero_turns() - print("✓ Truncate zero turns") + # Store assistant response in history + assistant_turn = AssistantTurn().add_text(response_content) + history.add_assistant_turn(assistant_turn) - test_truncate_more_than_available() - print("✓ Truncate more than available") + # Verify we got a meaningful response + assert response_content is not None + assert len(response_content) > 50 # Should have some substantial content - test_empty_conversation() - print("✓ Empty conversation") + # The response should mention something about flowers/images + response_lower = response_content.lower() + assert any(word in response_lower for word in ["flower", "image", "picture", "rose", "pink", "red", "petal"]), \ + f"Response doesn't seem to describe the flower images: {response_content[:200]}..." - test_to_litellm_format_with_truncation() - print("✓ LiteLLM format with truncation") + print("\n✅ Successfully received and validated GPT response about the images!") + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_real_llm_multi_turn_with_images(): + """Test a multi-turn conversation with images. - test_invalid_strategy() - print("✓ Invalid strategy error handling") + First turn: Ask about images + Second turn: Follow-up question about the same images + """ + from opto.utils.llm import LLM - test_negative_n_values() - print("✓ Negative n values") + history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.") + llm = LLM() print("\n" + "="*80) - print("MULTIMODAL TESTS") + print("MULTI-TURN CONVERSATION WITH IMAGES") print("="*80) - test_user_turn_multiple_images() - print("✓ User turn with multiple images") + # Turn 1: Send images and ask about them + user_turn1 = (UserTurn() + .add_text("What type of flowers are shown in these images?") + .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg") + .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg")) - test_assistant_turn_multiple_images() - print("✓ Assistant turn with multiple images") + history.add_user_turn(user_turn1) + messages = history.to_litellm_format() - test_mixed_content_types_in_turn() - print("✓ Mixed content types in turn") + print("\n📷 Turn 1 - User:") + print(" What type of flowers are shown in these images? 
[+ 2 images]") - test_multiple_images_with_base64() - print("✓ Multiple base64 images") + response1 = llm(messages=messages, max_tokens=300) + response1_content = response1.choices[0].message.content - test_conversation_with_multiple_multi_image_turns() - print("✓ Conversation with multiple multi-image turns") + print("\n🤖 Turn 1 - Assistant:") + print(f" {response1_content[:200]}...") - print("\n" + "="*80) - print("INTEGRATION TESTS") - print("="*80) + history.add_assistant_turn(AssistantTurn().add_text(response1_content)) - test_truncate_multimodal_conversation() - print("✓ Truncate multimodal conversation") + # Turn 2: Follow-up question (no new images, but context from previous turn) + user_turn2 = UserTurn().add_text("Which of these flowers would be better for a romantic gift and why?") + history.add_user_turn(user_turn2) - print("\n" + "="*80) - print("✅ All tests passed!") - print("="*80) + messages = history.to_litellm_format() + + print("\n📷 Turn 2 - User:") + print(" Which of these flowers would be better for a romantic gift and why?") + + response2 = llm(messages=messages, max_tokens=300) + response2_content = response2.choices[0].message.content + + print("\n🤖 Turn 2 - Assistant:") + print(f" {response2_content[:200]}...") + + # Verify responses + assert response1_content is not None and len(response1_content) > 20 + assert response2_content is not None and len(response2_content) > 20 + + # Turn 2 should reference the context from turn 1 + response2_lower = response2_content.lower() + assert any(word in response2_lower for word in ["flower", "rose", "romantic", "gift", "love"]), \ + "Turn 2 response doesn't seem to reference the flower context" + + print("\n✅ Multi-turn conversation with images completed successfully!") From 79b9d5887787b638317749e4b89411781f47eb5f Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 2 Dec 2025 13:21:02 -0800 Subject: [PATCH 30/51] fix opro_v3 issues --- opto/optimizers/opro_v3.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py index 564f82a9..66591480 100644 --- a/opto/optimizers/opro_v3.py +++ b/opto/optimizers/opro_v3.py @@ -11,7 +11,7 @@ from typing import Dict, Optional, List, Union, Any from opto.trace.nodes import ParameterNode, is_image -from opto.optimizers.optoprime_v2 import OptoPrimeV2, OptimizerPromptSymbolSet +from opto.optimizers.optoprime_v3 import OptoPrimeV3, OptimizerPromptSymbolSet from opto.optimizers.backbone import ( ContentBlock, TextContent, ImageContent, ContentBlockList ) @@ -219,7 +219,7 @@ def has_images(self) -> bool: return True return False -class OPROv2(OptoPrimeV2): +class OPROv3(OptoPrimeV3): """OPRO (Optimization by PROmpting) optimizer version 2. 
OPRO is an optimization algorithm that leverages large language models to @@ -275,7 +275,7 @@ class OPROv2(OptoPrimeV2): Examples -------- - >>> optimizer = OPROv2(memory_size=10) + >>> optimizer = OPROv3(memory_size=10) >>> # Use optimizer to improve solutions based on feedback """ representation_prompt = dedent( From 3d3f8a1f9bb956c80736ba2c9455e730fb8056a0 Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 2 Dec 2025 13:22:33 -0800 Subject: [PATCH 31/51] update python dependency --- .github/workflows/ci.yml | 2 +- .github/workflows/python-app.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1fdcd036..157af79d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,7 +49,7 @@ jobs: # 6) Set up Python & install dependencies - uses: actions/setup-python@v5 - with: { python-version: "3.10" } + with: { python-version: "3.13" } - name: Install Python deps run: | pip install -e . diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 8074be85..a111e34f 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -19,10 +19,10 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python 3.10 + - name: Set up Python 3.13 uses: actions/setup-python@v3 with: - python-version: "3.10" + python-version: "3.13" - name: Install dependencies run: | python -m pip install --upgrade pip From 240f31954f516ef46d3156afc4dae15ce09894ce Mon Sep 17 00:00:00 2001 From: windweller Date: Wed, 3 Dec 2025 15:25:03 -0800 Subject: [PATCH 32/51] update the tests (it was broken before) --- opto/optimizers/helix.py | 0 opto/optimizers/optoprime_v3.py | 299 ++++++------------ .../llm_optimizers_tests/test_optoprime_v3.py | 122 ++++--- tests/unit_tests/test_optimizer_backbone.py | 61 ++-- 4 files changed, 189 insertions(+), 293 deletions(-) delete mode 100644 opto/optimizers/helix.py diff --git a/opto/optimizers/helix.py b/opto/optimizers/helix.py deleted file mode 100644 index e69de29b..00000000 diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index d6f1fbb8..d3276b0a 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -231,23 +231,22 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): @dataclass class ProblemInstance: - """Problem instance that can contain both text and multimodal content. + """Problem instance with multimodal content support. - Each field can be either: - - A string (text-only content) - - A List[ContentBlock] (multimodal content with text and/or images) + Uses ContentBlockList for variables, inputs, others, and outputs to support + both text and image content in a unified way. 
The class provides: - - __repr__: Returns text-only representation (backward compatible) - - to_content_blocks(): Returns List[ContentBlock] for multimodal prompts + - __repr__: Returns text-only representation for logging + - to_content_blocks(): Returns ContentBlockList for multimodal prompts """ instruction: str code: str documentation: str - variables: Union[str, List[ContentBlock]] - inputs: Union[str, List[ContentBlock]] - others: Union[str, List[ContentBlock]] - outputs: Union[str, List[ContentBlock]] + variables: ContentBlockList + inputs: ContentBlockList + others: ContentBlockList + outputs: ContentBlockList feedback: str context: Optional[str] @@ -282,11 +281,8 @@ class ProblemInstance: ) @staticmethod - def _content_to_text(content: Union[str, List[ContentBlock]]) -> str: - """Convert content (str or List[ContentBlock]) to text representation.""" - if isinstance(content, str): - return content - # Extract text from content blocks, skip images + def _content_to_text(content: ContentBlockList) -> str: + """Convert ContentBlockList to text representation.""" text_parts = [] for block in content: if isinstance(block, TextContent): @@ -794,180 +790,109 @@ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", return blocks - def construct_prompt(self, summary, mask=None, use_content_blocks=False, *args, **kwargs): + def construct_prompt(self, summary, mask=None, *args, **kwargs): """Construct the system and user prompt. Args: summary: The FunctionFeedback summary containing graph information. mask: List of section titles to exclude from the problem instance. - use_content_blocks: If True, return user_prompt as List[ContentBlock] - for multimodal support. If False, return text-only (backward compatible). Returns: - Tuple of (system_prompt: str, user_prompt: Union[str, List[ContentBlock]]) + Tuple of (system_prompt: str, user_prompt: ContentBlockList) - system_prompt is always a string - - user_prompt is either a string or List[ContentBlock] based on use_content_blocks + - user_prompt is a ContentBlockList for multimodal support """ system_prompt = ( self.representation_prompt + self.output_format_prompt ) # generic representation + output rule - problem_inst = self.problem_instance(summary, mask=mask, use_content_blocks=use_content_blocks) + problem_inst = self.problem_instance(summary, mask=mask) - if use_content_blocks: - # Build user prompt as ContentBlockList (auto-merges consecutive text) - user_content_blocks = ContentBlockList() - - # Add example if included - if self.include_example: - example_text = self.example_problem_template.format( - example_problem=str(self.example_problem), # Example is always text - example_response=self.example_response, - ) - user_content_blocks.append(example_text) - - # Add problem instance header - user_content_blocks.append(dedent(""" - Now you see problem instance: - - ================================ - """)) - - # Add problem instance content blocks (may contain images) - user_content_blocks.extend(problem_inst.to_content_blocks()) - - # Add footer and final prompt - var_names = ", ".join(k for k in summary.variables.keys()) - - user_content_blocks.append(dedent(""" - ================================ - - """)) - user_content_blocks.append(self.final_prompt.format(names=var_names)) - - return system_prompt, user_content_blocks - else: - # Text-only user prompt (backward compatible) - user_prompt = self.user_prompt_template.format( - problem_instance=str(problem_inst) + # Build user prompt as ContentBlockList 
(auto-merges consecutive text) + user_content_blocks = ContentBlockList() + + # Add example if included + if self.include_example: + example_text = self.example_problem_template.format( + example_problem=str(self.example_problem), # Example is always text + example_response=self.example_response, ) - if self.include_example: - user_prompt = ( - self.example_problem_template.format( - example_problem=self.example_problem, - example_response=self.example_response, - ) - + user_prompt - ) - - # variables to optimize - var_names = [] - for k, v in summary.variables.items(): - var_names.append(f"{k}") - var_names = ", ".join(var_names) + user_content_blocks.append(example_text) + + # Add problem instance header + user_content_blocks.append(dedent(""" + Now you see problem instance: - user_prompt += self.final_prompt.format(names=var_names) + ================================ + """)) + + # Add problem instance content blocks (may contain images) + user_content_blocks.extend(problem_inst.to_content_blocks()) + + # Add footer and final prompt + var_names = ", ".join(k for k in summary.variables.keys()) + + user_content_blocks.append(dedent(""" + ================================ - return system_prompt, user_prompt + """)) + user_content_blocks.append(self.final_prompt.format(names=var_names)) + + return system_prompt, user_content_blocks - def problem_instance(self, summary, mask=None, use_content_blocks=False): + def problem_instance(self, summary, mask=None): """Create a ProblemInstance from the summary. Args: summary: The FunctionFeedback summary containing graph information. mask: List of section titles to exclude from the problem instance. - use_content_blocks: If True, use content blocks for multimodal sections - (variables, inputs, outputs, others). If False, use text-only. Returns: - ProblemInstance with either text-only or content block fields. + ProblemInstance with content block fields for multimodal support. 
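+
+        Example (sketch; assumes feedback was already propagated via
+        optimizer.backward, as in the tests)::
+
+            >>> summary = optimizer.summarize()
+            >>> inst = optimizer.problem_instance(summary)
+            >>> isinstance(inst.variables, ContentBlockList)
+            True
+            >>> inst.has_images()  # True only when some node value is an image
+            False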
""" mask = mask or [] - if use_content_blocks: - # Use content block representations for multimodal support - variables_content = ( - self.repr_node_value_as_content_blocks( - summary.variables, - node_tag=self.optimizer_prompt_symbol_set.variable_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.variables_section_title not in mask - else [] - ) - inputs_content = ( - self.repr_node_value_compact_as_content_blocks( - summary.inputs, - node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.inputs_section_title not in mask - else [] - ) - outputs_content = ( - self.repr_node_value_compact_as_content_blocks( - summary.output, - node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.outputs_section_title not in mask - else [] - ) - others_content = ( - self.repr_node_value_compact_as_content_blocks( - summary.others, - node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.others_section_title not in mask - else [] - ) - else: - # Use text-only representations (backward compatible) - variables_content = ( - self.repr_node_value( - summary.variables, - node_tag=self.optimizer_prompt_symbol_set.variable_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.variables_section_title not in mask - else "" + # Use content block representations for multimodal support + variables_content = ( + self.repr_node_value_as_content_blocks( + summary.variables, + node_tag=self.optimizer_prompt_symbol_set.variable_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag ) - inputs_content = ( - self.repr_node_value_compact( - summary.inputs, - node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.inputs_section_title not in mask - else "" + if self.optimizer_prompt_symbol_set.variables_section_title not in mask + else ContentBlockList() + ) + inputs_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.inputs, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag ) - outputs_content = ( - self.repr_node_value_compact( - summary.output, - node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.outputs_section_title not in mask - else "" + if self.optimizer_prompt_symbol_set.inputs_section_title not in mask + else ContentBlockList() + ) + outputs_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.output, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + 
value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag ) - others_content = ( - self.repr_node_value_compact( - summary.others, - node_tag=self.optimizer_prompt_symbol_set.node_tag, - value_tag=self.optimizer_prompt_symbol_set.value_tag, - constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag - ) - if self.optimizer_prompt_symbol_set.others_section_title not in mask - else "" + if self.optimizer_prompt_symbol_set.outputs_section_title not in mask + else ContentBlockList() + ) + others_content = ( + self.repr_node_value_compact_as_content_blocks( + summary.others, + node_tag=self.optimizer_prompt_symbol_set.node_tag, + value_tag=self.optimizer_prompt_symbol_set.value_tag, + constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag ) + if self.optimizer_prompt_symbol_set.others_section_title not in mask + else ContentBlockList() + ) return ProblemInstance( instruction=self.objective if "#Instruction" not in mask else "", @@ -990,26 +915,14 @@ def problem_instance(self, summary, mask=None, use_content_blocks=False): optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) - def _has_images_in_summary(self, summary) -> bool: - """Check if any node values in the summary contain images.""" - for node_dict in [summary.variables, summary.inputs, summary.output, summary.others]: - if node_dict: - for k, v in node_dict.items(): - if is_image(v[0]): - return True - return False - def _step( - self, verbose=False, mask=None, use_content_blocks=None, *args, **kwargs + self, verbose=False, mask=None, *args, **kwargs ) -> Dict[ParameterNode, Any]: """Execute one optimization step. Args: verbose: If True, print prompts and responses. mask: List of section titles to exclude from the problem instance. - use_content_blocks: If True, force use of content blocks for multimodal. - If False, force text-only. If None (default), auto-detect based on - whether the summary contains images. Returns: Dictionary mapping parameters to their updated values. @@ -1017,13 +930,7 @@ def _step( assert isinstance(self.propagator, GraphPropagator) summary = self.summarize() - # Auto-detect whether to use content blocks - if use_content_blocks is None: - use_content_blocks = self._has_images_in_summary(summary) or self.multimodal_payload.image_data is not None - - system_prompt, user_prompt = self.construct_prompt( - summary, mask=mask, use_content_blocks=use_content_blocks - ) + system_prompt, user_prompt = self.construct_prompt(summary, mask=mask) response = self.call_llm( system_prompt=system_prompt, @@ -1040,8 +947,8 @@ def _step( # suggestion has two keys: reasoning, and variables if self.log is not None: - # For logging, always use text representation - log_user_prompt = user_prompt if isinstance(user_prompt, str) else str(self.problem_instance(summary)) + # For logging, use text representation + log_user_prompt = str(self.problem_instance(summary)) self.log.append( { "system_prompt": system_prompt, @@ -1070,7 +977,7 @@ def extract_llm_suggestion(self, response: str): def call_llm( self, system_prompt: str, - user_prompt: Union[str, List[ContentBlock]], + user_prompt: ContentBlockList, verbose: Union[bool, str] = False, max_tokens: int = 4096, ): @@ -1078,8 +985,7 @@ def call_llm( Args: system_prompt: The system prompt (always a string). - user_prompt: The user prompt, either as a string or List[ContentBlock] - for multimodal content. + user_prompt: The user prompt as ContentBlockList for multimodal content. 
verbose: If True, print the prompt and response. If "output", only print response. max_tokens: Maximum tokens in the response. @@ -1087,12 +993,11 @@ def call_llm( The LLM response content as a string. """ if verbose not in (False, "output"): - if isinstance(user_prompt, str): - print("Prompt\n", system_prompt + user_prompt) - else: - # For content blocks, print text portions only - text_parts = [block.text for block in user_prompt if isinstance(block, TextContent)] - print("Prompt\n", system_prompt + "".join(text_parts) + " [+ images]") + # Print text portions, indicate if images present + text_parts = [block.text for block in user_prompt if isinstance(block, TextContent)] + has_images = any(isinstance(block, ImageContent) for block in user_prompt) + suffix = " [+ images]" if has_images else "" + print("Prompt\n", system_prompt + "".join(text_parts) + suffix) # Update system prompt in conversation history self.conversation_history.system_prompt = system_prompt @@ -1104,17 +1009,13 @@ def call_llm( if self.multimodal_payload.image_data is not None: user_turn.add_image(url=self.multimodal_payload.image_data) - # Handle user_prompt based on type - if isinstance(user_prompt, str): - user_turn.add_text(user_prompt) - else: - # user_prompt is List[ContentBlock] - for block in user_prompt: - if isinstance(block, TextContent): - user_turn.content.append(block) - elif isinstance(block, ImageContent): - user_turn.content.append(block) - # Handle other content types if needed + # Add content blocks from user_prompt + for block in user_prompt: + if isinstance(block, TextContent): + user_turn.content.append(block) + elif isinstance(block, ImageContent): + user_turn.content.append(block) + # Handle other content types if needed self.conversation_history.add_user_turn(user_turn) diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py index e403a873..0b9f5a3e 100644 --- a/tests/llm_optimizers_tests/test_optoprime_v3.py +++ b/tests/llm_optimizers_tests/test_optoprime_v3.py @@ -51,16 +51,20 @@ def test_tag_template_change(): optimizer.backward(result, 'make this number bigger') summary = optimizer.summarize() - part1, part2 = optimizer.construct_prompt(summary) + system_prompt, user_prompt = optimizer.construct_prompt(summary) - part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) - part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + # system_prompt is a string, user_prompt is a ContentBlockList + system_prompt = optimizer.replace_symbols(system_prompt, optimizer.prompt_symbols) + + # Convert ContentBlockList to text for symbol replacement + user_prompt_text = "".join(block.text for block in user_prompt if isinstance(block, TextContent)) + user_prompt_text = optimizer.replace_symbols(user_prompt_text, optimizer.prompt_symbols) - assert """""" in part1, "Expected tag to be present in part1" - assert """""" in part2, "Expected tag to be present in part2" + assert """""" in system_prompt, "Expected tag to be present in system_prompt" + assert """""" in user_prompt_text, "Expected tag to be present in user_prompt" - print(part1) - print(part2) + print(system_prompt) + print(user_prompt_text) @bundle() @@ -86,10 +90,12 @@ def test_function_repr(): optimizer.backward(result, 'make this number bigger') summary = optimizer.summarize() - part1, part2 = optimizer.construct_prompt(summary) + system_prompt, user_prompt = optimizer.construct_prompt(summary) - part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) - part2 = 
optimizer.replace_symbols(part2, optimizer.prompt_symbols) + system_prompt = optimizer.replace_symbols(system_prompt, optimizer.prompt_symbols) + # Convert ContentBlockList to text for symbol replacement + user_prompt_text = "".join(block.text for block in user_prompt if isinstance(block, TextContent)) + user_prompt_text = optimizer.replace_symbols(user_prompt_text, optimizer.prompt_symbols) function_repr = """ @@ -102,7 +108,7 @@ def multiply(num): """ - assert function_repr in part2, "Expected function representation to be present in part2" + assert function_repr in user_prompt_text, "Expected function representation to be present in user_prompt" def test_big_data_truncation(): num_1 = node("**2", trainable=True) @@ -119,14 +125,16 @@ def test_big_data_truncation(): optimizer.backward(result, 'compute the expression') summary = optimizer.summarize() - part1, part2 = optimizer.construct_prompt(summary) + system_prompt, user_prompt = optimizer.construct_prompt(summary) - part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) - part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + system_prompt = optimizer.replace_symbols(system_prompt, optimizer.prompt_symbols) + # Convert ContentBlockList to text for symbol replacement + user_prompt_text = "".join(block.text for block in user_prompt if isinstance(block, TextContent)) + user_prompt_text = optimizer.replace_symbols(user_prompt_text, optimizer.prompt_symbols) truncated_repr = """1234569191...(skipped due to length limit)""" - assert truncated_repr in part2, "Expected truncated list representation to be present in part2" + assert truncated_repr in user_prompt_text, "Expected truncated list representation to be present in user_prompt" def test_extraction_pipeline(): num_1 = node(1, trainable=True) @@ -141,18 +149,13 @@ def test_extraction_pipeline(): optimizer.backward(result, 'make this number bigger') summary = optimizer.summarize() - part1, part2 = optimizer.construct_prompt(summary) - - part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols) - part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols) + system_prompt, user_prompt = optimizer.construct_prompt(summary) - messages = [ - {"role": "system", "content": part1}, - {"role": "user", "content": part2}, - ] + # Verify construct_prompt returns expected types + assert isinstance(system_prompt, str) + assert isinstance(user_prompt, list) - # response = optimizer.llm(messages=messages) - # response = response.choices[0].message.content + # Test extraction from a mock response response = """ The instruction suggests that the output, `add0`, needs to be made bigger than it currently is (3). The code performs an addition of `int0` and `int1` to produce `add0`. To increase `add0`, we can increase the values of `int0` or `int1`, or both. Given that `int1` has a constraint of being less than or equal to 5, we can set `int0` to a higher value, since it has no explicit constraint. By adjusting `int0` to a higher value, the output can be made larger in accordance with the feedback. 
@@ -170,7 +173,6 @@ def test_extraction_pipeline(): 5 """ - reasoning = response suggestion = optimizer.extract_llm_suggestion(response) assert 'reasoning' in suggestion, "Expected 'reasoning' in suggestion" @@ -185,16 +187,17 @@ def test_extraction_pipeline(): def test_problem_instance_text_only(): """Test that ProblemInstance with text-only content works correctly.""" + from opto.optimizers.backbone import ContentBlockList symbol_set = OptimizerPromptSymbolSet() instance = ProblemInstance( instruction="Test instruction", code="y = add(x=a, y=b)", documentation="[add] Adds two numbers", - variables="5", - inputs="3", - others="", - outputs="8", + variables=ContentBlockList("5"), + inputs=ContentBlockList("3"), + others=ContentBlockList(), + outputs=ContentBlockList("8"), feedback="Result should be 10", context="Some context", optimizer_prompt_symbol_set=symbol_set @@ -218,24 +221,25 @@ def test_problem_instance_text_only(): def test_problem_instance_with_content_blocks(): - """Test ProblemInstance with List[ContentBlock] fields.""" + """Test ProblemInstance with ContentBlockList fields containing images.""" + from opto.optimizers.backbone import ContentBlockList symbol_set = OptimizerPromptSymbolSet() # Create content blocks with an image - variables_blocks = [ + variables_blocks = ContentBlockList([ TextContent(text=""), ImageContent(image_url="https://example.com/test.jpg"), TextContent(text="") - ] + ]) instance = ProblemInstance( instruction="Analyze the image", code="result = analyze(img)", documentation="[analyze] Analyzes an image", - variables=variables_blocks, # List[ContentBlock] - inputs="", - others="", - outputs="cat", + variables=variables_blocks, + inputs=ContentBlockList(), + others=ContentBlockList(), + outputs=ContentBlockList("cat"), feedback="Result should be 'dog'", context=None, optimizer_prompt_symbol_set=symbol_set @@ -261,22 +265,23 @@ def test_problem_instance_with_content_blocks(): def test_problem_instance_mixed_content(): """Test ProblemInstance with mixed text and image content in multiple fields.""" + from opto.optimizers.backbone import ContentBlockList symbol_set = OptimizerPromptSymbolSet() # Variables with image - variables_blocks = [ + variables_blocks = ContentBlockList([ TextContent(text="Hello\n"), TextContent(text=""), ImageContent(image_data="base64data", media_type="image/png"), TextContent(text="") - ] + ]) # Inputs with image - inputs_blocks = [ + inputs_blocks = ContentBlockList([ TextContent(text=""), ImageContent(image_url="https://example.com/ref.png"), TextContent(text="") - ] + ]) instance = ProblemInstance( instruction="Compare images", @@ -284,8 +289,8 @@ def test_problem_instance_mixed_content(): documentation="[compare] Compares two images", variables=variables_blocks, inputs=inputs_blocks, - others=[], # Empty list - outputs="0.8", + others=ContentBlockList(), + outputs=ContentBlockList("0.8"), feedback="Similarity should be higher", context="Context text", optimizer_prompt_symbol_set=symbol_set @@ -343,27 +348,8 @@ def test_value_to_image_content_non_image(): assert value_to_image_content("hello world") is None -def test_construct_prompt_text_only(): - """Test construct_prompt with use_content_blocks=False (backward compatible).""" - num_1 = node(1, trainable=True) - num_2 = node(2, trainable=True) - result = num_1 + num_2 - - optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False) - optimizer.zero_feedback() - optimizer.backward(result, 'make this number bigger') - - summary = optimizer.summarize() - system_prompt, 
user_prompt = optimizer.construct_prompt(summary, use_content_blocks=False) - - # Both should be strings - assert isinstance(system_prompt, str) - assert isinstance(user_prompt, str) - assert "int0" in user_prompt or "int1" in user_prompt - - -def test_construct_prompt_with_content_blocks(): - """Test construct_prompt with use_content_blocks=True.""" +def test_construct_prompt(): + """Test construct_prompt returns ContentBlockList for multimodal support.""" num_1 = node(1, trainable=True) num_2 = node(2, trainable=True) result = num_1 + num_2 @@ -373,9 +359,9 @@ def test_construct_prompt_with_content_blocks(): optimizer.backward(result, 'make this number bigger') summary = optimizer.summarize() - system_prompt, user_prompt = optimizer.construct_prompt(summary, use_content_blocks=True) + system_prompt, user_prompt = optimizer.construct_prompt(summary) - # system_prompt should be string, user_prompt should be List[ContentBlock] + # system_prompt should be string, user_prompt should be ContentBlockList assert isinstance(system_prompt, str) assert isinstance(user_prompt, list) assert all(isinstance(b, (TextContent, ImageContent)) for b in user_prompt) @@ -502,9 +488,9 @@ def test_optimizer_step_with_content_blocks(): optimizer.zero_feedback() optimizer.backward(result, "The sum should be exactly 100") - # Test that construct_prompt works with content blocks + # Test that construct_prompt returns ContentBlockList summary = optimizer.summarize() - system_prompt, user_prompt = optimizer.construct_prompt(summary, use_content_blocks=True) + system_prompt, user_prompt = optimizer.construct_prompt(summary) # Verify content blocks structure from opto.optimizers.backbone import ContentBlockList diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index b50b1b58..e41f9a49 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -68,45 +68,51 @@ def test_default_all_history(): def test_truncate_from_start(): - """Test truncate_from_start strategy - keeps last N turns""" + """Test truncate_from_start strategy - keeps last N rounds""" history = create_sample_conversation() - # Keep last 3 turns - messages = history.to_messages(n=3, truncate_strategy="from_start") + # Keep last 2 rounds (4 turns) + messages = history.to_messages(n=2, truncate_strategy="from_start") - # Should have: system + 3 turns - assert len(messages) == 4 # 1 system + 3 messages + # Should have: system + 2 rounds (4 turns) + assert len(messages) == 5 # 1 system + 4 messages assert messages[0]["role"] == "system" - # Should have the last 3 turns - # Last 3 turns are: assistant3, user4, assistant4 - assert messages[1]["role"] == "assistant" - assert "umbrella" in messages[1]["content"] - assert messages[2]["role"] == "user" - assert "Thanks" in messages[2]["content"][0]["text"] - assert messages[3]["role"] == "assistant" - assert "welcome" in messages[3]["content"] + # Should have the last 2 rounds (round 3 and round 4) + # Round 3: user3 (umbrella question), assistant3 (umbrella answer) + # Round 4: user4 (thanks), assistant4 (welcome) + assert messages[1]["role"] == "user" + assert "umbrella" in messages[1]["content"][0]["text"] + assert messages[2]["role"] == "assistant" + assert "umbrella" in messages[2]["content"] + assert messages[3]["role"] == "user" + assert "Thanks" in messages[3]["content"][0]["text"] + assert messages[4]["role"] == "assistant" + assert "welcome" in messages[4]["content"] def test_truncate_from_end(): - """Test 
truncate_from_end strategy - keeps first N turns""" + """Test truncate_from_end strategy - keeps first N rounds""" history = create_sample_conversation() - # Keep first 3 turns - messages = history.to_messages(n=3, truncate_strategy="from_end") + # Keep first 2 rounds (4 turns) + messages = history.to_messages(n=2, truncate_strategy="from_end") - # Should have: system + 3 turns - assert len(messages) == 4 # 1 system + 3 messages + # Should have: system + 2 rounds (4 turns) + assert len(messages) == 5 # 1 system + 4 messages assert messages[0]["role"] == "system" - # Should have the first 3 turns - # First 3 turns are: user1, assistant1, user2 + # Should have the first 2 rounds (round 1 and round 2) + # Round 1: user1 (weather), assistant1 (sunny) + # Round 2: user2 (tomorrow), assistant2 (rainy) assert messages[1]["role"] == "user" assert "Hello" in messages[1]["content"][0]["text"] assert messages[2]["role"] == "assistant" assert "sunny" in messages[2]["content"] assert messages[3]["role"] == "user" assert "tomorrow" in messages[3]["content"][0]["text"] + assert messages[4]["role"] == "assistant" + assert "rainy" in messages[4]["content"] def test_truncate_zero_turns(): @@ -145,13 +151,16 @@ def test_to_litellm_format_with_truncation(): """Test to_litellm_format() also supports truncation""" history = create_sample_conversation() + # n=2 means 2 rounds (4 turns), from_end keeps first 2 rounds messages = history.to_litellm_format(n=2, truncate_strategy="from_end") - # Should have: system + 2 turns - assert len(messages) == 3 + # Should have: system + 2 rounds (4 turns) + assert len(messages) == 5 assert messages[0]["role"] == "system" assert messages[1]["role"] == "user" assert messages[2]["role"] == "assistant" + assert messages[3]["role"] == "user" + assert messages[4]["role"] == "assistant" def test_invalid_strategy(): @@ -338,7 +347,7 @@ def test_truncate_multimodal_conversation(): """Test truncation works correctly with multimodal content""" history = ConversationHistory(system_prompt="You are a vision assistant.") - # Add several turns with images + # Add several turns with images (5 rounds = 10 turns) for i in range(5): user = (UserTurn() .add_text(f"Analyze image {i}") @@ -346,11 +355,11 @@ def test_truncate_multimodal_conversation(): assistant = AssistantTurn().add_text(f"Analysis of image {i}") history.add_user_turn(user).add_assistant_turn(assistant) - # Truncate to last 2 turns + # Truncate to last 2 rounds (4 turns) messages = history.to_messages(n=2, truncate_strategy="from_start") - # Should have system + 2 turns - assert len(messages) == 3 + # Should have system + 2 rounds (4 turns) + assert len(messages) == 5 # Check that multimodal content is preserved assert len(messages[1]["content"]) == 2 # text + image From a0777176128808d91fd298d01fa9ebe1cc459ea8 Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 16 Dec 2025 15:21:32 -0500 Subject: [PATCH 33/51] add new workflow tests conditions to avoid long build time on experimental --- .github/workflows/ci.yml | 22 +++++++++++----------- .github/workflows/unit-tests.yml | 30 ++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/unit-tests.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 157af79d..69c1330e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,13 +1,13 @@ -name: CI +name: Full CI (with Ollama) on: push: - branches: [ main, dev, experimental, ci-multi ] - pull_request: - branches: [ main, dev, 
experimental, ci-multi ] + branches: [ main ] + # Manual trigger for testing or major releases + workflow_dispatch: jobs: - test: + full-test: runs-on: ubuntu-latest timeout-minutes: 180 @@ -15,12 +15,12 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - # 1) Restore any cached Ollama data (~2 GB) + # 1) Restore any cached Ollama data - name: Restore Ollama cache uses: actions/cache@v4 with: path: ~/.ollama - key: qwen3-4b-gguf-v1 + key: qwen3-vl-2b-v1 # 2) Install Ollama - name: Install Ollama @@ -43,9 +43,9 @@ jobs: run: | sudo systemctl enable --now ollama - # 5) Pull the phi4-mini:3.8b model (uses cache if present) - - name: Pull phi4-mini:3.8b model - run: ollama pull phi4-mini:3.8b + # 5) Pull the qwen3-vl:2b model - supports multimodal/vision with OpenAI-compatible API + - name: Pull qwen3-vl:2b model (multimodal) + run: ollama pull qwen3-vl:2b # 6) Set up Python & install dependencies - uses: actions/setup-python@v5 @@ -60,7 +60,7 @@ jobs: run: | echo "OPENAI_API_KEY=ollama" >> $GITHUB_ENV echo "OPENAI_API_BASE=http://localhost:11434/v1" >> $GITHUB_ENV - echo "TRACE_LITELLM_MODEL=openai/phi4-mini:3.8b" >> $GITHUB_ENV + echo "TRACE_LITELLM_MODEL=openai/qwen3-vl:2b" >> $GITHUB_ENV # 8) Run all Trace unit tests - name: Run unit tests diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml new file mode 100644 index 00000000..43f4de3c --- /dev/null +++ b/.github/workflows/unit-tests.yml @@ -0,0 +1,30 @@ +name: Unit Tests + +on: + push: + branches: [ experimental, dev ] + pull_request: + branches: [ main, experimental, dev ] + +jobs: + unit-tests: + runs-on: ubuntu-latest + timeout-minutes: 15 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install Python dependencies + run: | + pip install -e . + pip install pytest numpy + + - name: Run unit tests + run: pytest tests/unit_tests/ -v + From e01ddc8831c9e2455493f4bfc65b55d0ae6589e7 Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 16 Dec 2025 17:35:09 -0500 Subject: [PATCH 34/51] partial refactoring. Reworked `add_context` to support interleaved image/text insertion. TODO: need to re-divide system prompt and user prompt because multi-modal content cannot go into system prompt. --- opto/optimizers/optoprime_v3.py | 447 ++++++++++++++++++++++++++++---- 1 file changed, 403 insertions(+), 44 deletions(-) diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index d3276b0a..2cd108db 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -25,6 +25,7 @@ import re from typing import Dict, Any +DEFAULT_IMAGE_PLACEHOLDER = "[IMAGE]" def value_to_image_content(value: Any) -> Optional[ImageContent]: """Convert a value to ImageContent if it's an image, otherwise return None. @@ -229,8 +230,66 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): name_tag = "name" +class MultiModalContent: + """Base class for multimodal content blocks. + + Provides common utilities for handling mixed text/image content. + Subclasses should implement to_content_blocks() and __repr__() for their specific structure. 
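
For orientation, value_to_image_content above is the single conversion point between raw
values and image blocks; a minimal sketch (the PNG path is hypothetical and must point at
a real image for the conversion to succeed):

    from opto.optimizers.optoprime_v3 import (
        value_to_image_content, DEFAULT_IMAGE_PLACEHOLDER
    )

    img = value_to_image_content("photo.png")   # ImageContent if the file is a real image
    txt = value_to_image_content("plain text")  # None: not detected as an image
    # In text-only renderings, image blocks print as DEFAULT_IMAGE_PLACEHOLDER ("[IMAGE]")
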
+ + The class provides: + - _content_to_text(): Static method to convert ContentBlockList to text + - to_content_blocks(): Returns ContentBlockList (must be implemented by subclasses) + - has_images(): Check if content contains images (uses to_content_blocks by default) + - __repr__: Text-only representation (must be implemented by subclasses) + """ + + @staticmethod + def _content_to_text(content: ContentBlockList) -> str: + """Convert ContentBlockList to text representation. + + Args: + content: A ContentBlockList containing text and/or image blocks. + + Returns: + str: Text representation where images are replaced with "[IMAGE]". + """ + text_parts = [] + for block in content: + if isinstance(block, TextContent): + text_parts.append(block.text) + elif isinstance(block, ImageContent): + text_parts.append(f"{DEFAULT_IMAGE_PLACEHOLDER}") + return "".join(text_parts) + + def to_content_blocks(self) -> ContentBlockList: + """Convert to a list of ContentBlocks for multimodal prompts. + + Returns: + ContentBlockList: A list containing TextContent and ImageContent blocks. + """ + raise NotImplementedError("Subclasses must implement to_content_blocks()") + + def has_images(self) -> bool: + """Check if this content contains any images. + + Default implementation iterates through to_content_blocks(). + Subclasses may override for more efficient implementations. + + Returns: + bool: True if any content contains ImageContent blocks. + """ + for block in self.to_content_blocks(): + if isinstance(block, ImageContent): + return True + return False + + def __repr__(self) -> str: + """Return text-only representation for logging and display.""" + raise NotImplementedError("Subclasses must implement __repr__()") + + @dataclass -class ProblemInstance: +class ProblemInstance(MultiModalContent): """Problem instance with multimodal content support. Uses ContentBlockList for variables, inputs, others, and outputs to support @@ -280,17 +339,6 @@ class ProblemInstance: """ ) - @staticmethod - def _content_to_text(content: ContentBlockList) -> str: - """Convert ContentBlockList to text representation.""" - text_parts = [] - for block in content: - if isinstance(block, TextContent): - text_parts.append(block.text) - elif isinstance(block, ImageContent): - text_parts.append("[IMAGE]") - return "".join(text_parts) - def __repr__(self) -> str: """Return text-only representation for backward compatibility.""" optimization_query = self.problem_template.format( @@ -371,17 +419,294 @@ def to_content_blocks(self) -> ContentBlockList: def has_images(self) -> bool: """Check if this problem instance contains any images. + Overrides base implementation for efficiency by checking + fields directly without building full content blocks. + Returns: bool: True if any field contains ImageContent blocks. """ - for field in [self.variables, self.inputs, self.others, self.outputs]: - if isinstance(field, list): - for block in field: + for content_field in [self.variables, self.inputs, self.others, self.outputs]: + if isinstance(content_field, list): + for block in content_field: if isinstance(block, ImageContent): return True return False -class OptoPrimeV3(OptoPrime): + +@dataclass +class Context(MultiModalContent): + """Provide context to the optimizer agent. + + A specialized mixed text/image block allowing users to insert context + in any format (text, images, or mixed content). This enables flexible + context injection for optimization tasks. 
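
A minimal subclass sketch of the MultiModalContent contract defined above (the class and
field names here are illustrative only; a later patch in this series folds these helpers
into ContentBlockList itself):

    from dataclasses import dataclass, field

    @dataclass
    class Note(MultiModalContent):
        blocks: ContentBlockList = field(default_factory=ContentBlockList)

        def to_content_blocks(self) -> ContentBlockList:
            return self.blocks

        def __repr__(self) -> str:
            # images render as "[IMAGE]" via the shared helper
            return self._content_to_text(self.blocks)

    note = Note()
    note.blocks.append("caption: ")                          # str auto-wraps to TextContent
    note.blocks.append(ImageContent.from_file("plot.png"))   # hypothetical file
    assert note.has_images()  # default implementation scans to_content_blocks()
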
+ + The context can be: + - Pure text: Context("Your text here") + - Image: Context(ImageContent.from_file("image.png")) + - Mixed: Context(ContentBlockList([TextContent("desc"), ImageContent(...)])) + + Examples: + # Text-only context + ctx = Context("Important background information") + + # Image context + ctx = Context(ImageContent.from_file("diagram.png")) + + # Mixed content + blocks = ContentBlockList() + blocks.append("Here's the relevant diagram:") + blocks.append(ImageContent.from_file("diagram.png")) + ctx = Context(blocks) + """ + content: ContentBlockList = field(default_factory=ContentBlockList) + + def __post_init__(self): + """Normalize content to ContentBlockList after dataclass initialization.""" + if isinstance(self.content, str): + self.content = ContentBlockList(self.content) + elif isinstance(self.content, ContentBlock) and not isinstance(self.content, ContentBlockList): + # Single ContentBlock (e.g., ImageContent) + self.content = ContentBlockList([self.content]) + elif isinstance(self.content, list) and not isinstance(self.content, ContentBlockList): + # Regular list of ContentBlocks + self.content = ContentBlockList(self.content) + elif self.content is None: + self.content = ContentBlockList() + # If already ContentBlockList, keep as-is + + def to_content_blocks(self) -> ContentBlockList: + """Convert the context to a list of ContentBlocks. + + Returns: + ContentBlockList: The content blocks representing this context. + """ + return self.content + + def has_images(self) -> bool: + """Check if this context contains any images. + + Returns: + bool: True if content contains ImageContent blocks. + """ + return any(isinstance(block, ImageContent) for block in self.content) + + def __repr__(self) -> str: + """Return text-only representation for logging. + + Images are represented as "[IMAGE]" placeholder. + + Returns: + str: Text representation of the context. + """ + return self._content_to_text(self.content) + + def __bool__(self) -> bool: + """Check if context has any content. + + Returns: + bool: True if context is non-empty. + """ + if not self.content: + return False + # Check if there's any actual content (not just empty text) + for block in self.content: + if isinstance(block, ImageContent): + return True + if isinstance(block, TextContent) and block.text.strip(): + return True + return False + + def __len__(self) -> int: + """Return the number of content blocks. + + Returns: + int: Number of content blocks. + """ + return len(self.content) + + def append(self, item: Union[str, ContentBlock]) -> 'Context': + """Append content to this context. + + Args: + item: String (auto-converted to TextContent) or ContentBlock. + + Returns: + Context: Self for method chaining. + """ + self.content.append(item) + return self + + def extend(self, items: Union[str, ContentBlock, List[ContentBlock], ContentBlockList]) -> 'Context': + """Extend context with additional content. + + Args: + items: Content to add (string, ContentBlock, or list of ContentBlocks). + + Returns: + Context: Self for method chaining. + """ + self.content.extend(items) + return self + + @classmethod + def from_text(cls, text: str) -> 'Context': + """Create a Context from plain text. + + Args: + text: The text content. + + Returns: + Context: A new Context instance with the text. + """ + return cls(content=ContentBlockList(text)) + + @classmethod + def from_image(cls, image: Union[str, ImageContent, Any], format: str = "PNG") -> 'Context': + """Create a Context from an image. 
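
Because of the __post_init__ normalization above, all of the following construct a valid
Context (the file name is hypothetical):

    c1 = Context("background text")                      # str -> ContentBlockList
    c2 = Context(ImageContent.from_file("diagram.png"))  # single block -> one-element list
    c3 = Context()                                       # empty; bool(c3) is False
    c1.append("\n\nmore detail").extend(c2.to_content_blocks())  # fluent chaining
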
+ + Args: + image: Can be: + - ImageContent instance + - URL string (http/https) + - File path string + - PIL Image object + - Numpy array + format: Image format for arrays (PNG, JPEG, etc.). Default: PNG + + Returns: + Context: A new Context instance with the image. + """ + if isinstance(image, ImageContent): + image_content = image + else: + image_content = ImageContent.from_value(image, format=format) + if image_content is None: + raise ValueError(f"Could not convert {type(image)} to ImageContent") + return cls(content=ContentBlockList([image_content])) + + +class ContextBuildUtils: + """Mixin providing utilities for building Context objects from various input formats.""" + + @staticmethod + def _build_context_from_args( + *args, images: Optional[List[Any]] = None, format: str = "PNG" + ) -> Context: + """Build a Context object from the provided arguments. + + Supports two patterns: + - Usage 1 (images=None): Variadic args with alternating text/images + - Usage 2 (images provided): Template string with [IMAGE] placeholders + + Args: + *args: Variable arguments (text strings and/or image sources) + images: Optional list of images for template mode + format: Image format for numpy arrays + + Returns: + Context: A Context object containing the multimodal content. + """ + if images is not None: + # Usage 2: Template mode with placeholders + return ContextBuildUtils._build_context_from_template(*args, images=images, format=format) + else: + # Usage 1: Variadic mode with alternating text/images + return ContextBuildUtils._build_context_from_variadic(*args, format=format) + + @staticmethod + def _build_context_from_variadic(*args, format: str = "PNG") -> Context: + """Build Context from variadic arguments (Usage 1). + + Each argument is either text (str) or an image source. + + Args: + *args: Alternating text and image sources + format: Image format for numpy arrays + + Returns: + Context: A Context object with the content. + """ + ctx = Context() + + for arg in args: + if isinstance(arg, str): + # Check if it could be an image URL or file path + image_content = ImageContent.from_value(arg, format=format) + if image_content is not None: + ctx.append(image_content) + else: + # It's just text + ctx.append(arg) + else: + # Try to convert to image + image_content = ImageContent.from_value(arg, format=format) + if image_content is not None: + ctx.append(image_content) + else: + # Fallback: convert to string + ctx.append(str(arg)) + + return ctx + + @staticmethod + def _build_context_from_template( + *args, images: List[Any], format: str = "PNG" + ) -> Context: + """Build Context from template with placeholders (Usage 2). + + The template string contains [IMAGE] placeholders that are replaced + by images from the images list. + + Args: + *args: Should be a single template string + images: List of image sources to insert at placeholders + format: Image format for numpy arrays + + Returns: + Context: A Context object with placeholders replaced by images. + + Raises: + ValueError: If number of placeholders doesn't match number of images + """ + if len(args) != 1 or not isinstance(args[0], str): + raise ValueError( + "Usage 2 requires exactly one template string as the first argument. " + f"Got {len(args)} arguments." 
+ ) + + template = args[0] + placeholder = DEFAULT_IMAGE_PLACEHOLDER + + # Count placeholders + placeholder_count = template.count(placeholder) + if placeholder_count != len(images): + raise ValueError( + f"Number of {placeholder} placeholders ({placeholder_count}) " + f"does not match number of images ({len(images)})" + ) + + ctx = Context() + + # Split template by placeholder and interleave with images + parts = template.split(placeholder) + + for i, part in enumerate(parts): + if part: # Add text part if non-empty + ctx.append(part) + + # Add image after each part except the last + if i < len(images): + image_content = ImageContent.from_value(images[i], format=format) + if image_content is None: + raise ValueError( + f"Could not convert image at index {i} to ImageContent: {type(images[i])}" + ) + ctx.append(image_content) + + return ctx + +class OptoPrimeV3(OptoPrime, ContextBuildUtils): # This is generic representation prompt, which just explains how to read the problem. representation_prompt = dedent( """You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result. @@ -491,13 +816,13 @@ def __init__( optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(), use_json_object_format=True, # whether to use json object format for the response when calling LLM truncate_expression=truncate_expression, - problem_context: Optional[str] = None, + problem_context: Optional[Context] = None, **kwargs, ): super().__init__(parameters, *args, propagator=propagator, **kwargs) self.truncate_expression = truncate_expression - self.problem_context = problem_context + self.problem_context: Optional[Context] = None self.multimodal_payload = MultiModalPayload() self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False @@ -568,35 +893,62 @@ def parameter_check(self, parameters: List[ParameterNode]): f"{param_names}. LLMs can only generate one image at a time." ) - def add_image_context(self, image: Union[str, Any], context: str = "", format: str = "PNG"): - """ - Add an image to the optimizer context. + def add_context(self, *args, images: Optional[List[Any]] = None, format: str = "PNG"): + """Add context to the optimizer, supporting both text and images. + + Two usage patterns are supported: + + **Usage 1: Variadic arguments (alternating text and images)** + + optimizer.add_context("text part 1", image_link, "text part 2", image_file) + + Each argument is either a string (text) or an image source. + + **Usage 2: Template with placeholders** + + optimizer.add_context( + "text part 1 [IMAGE] text part 2 [IMAGE]", + images=[image_link, image_file] + ) + + The text contains `[IMAGE]` placeholders that are replaced by images + from the `images` list in order. The number of placeholders must match + the number of images. Args: - image: Can be: - - URL string (starting with 'http://' or 'https://') - - Local file path (string) - - Numpy array or array-like RGB image - context: Optional context text to describe the image. If empty, uses default. + *args: Variable arguments. In Usage 1, alternating text and images. + In Usage 2, a single template string with placeholders. + images: Optional list of image sources for Usage 2. Each can be: + - URL string (http/https) + - Local file path + - PIL Image object + - Numpy array format: Image format for numpy arrays (PNG, JPEG, etc.). 
Default: PNG + + Raises: + ValueError: If using Usage 2 and the number of placeholders doesn't + match the number of images. + + Examples: + # Usage 1: Alternating text and images + optimizer.add_context("Here's the diagram:", "diagram.png", "And here's another:", "other.png") + + # Usage 2: Template with placeholders + optimizer.add_context("See [IMAGE] and compare with [IMAGE]", images=["a.png", "b.png"]) + + # Text-only context + optimizer.add_context("Important background information") """ + ctx = self._build_context_from_args(*args, images=images, format=format) + + # Store the context if self.problem_context is None: - self.problem_context = "" - - if context == "": - context = "The attached image is given to the workflow. You should use the image to help you understand the problem and provide better suggestions. You can refer to the image when providing your suggestions." - - self.problem_context += f"{context}\n\n" - - # Set the image using the multimodal payload - self.multimodal_payload.set_image(image, format=format) - - self.initialize_prompt() - - def add_context(self, context: str): - if self.problem_context is None: - self.problem_context = "" - self.problem_context += f"{context}\n\n" + self.problem_context = ctx + else: + # Append to existing context with a newline separator + self.problem_context.append("\n\n") + self.problem_context.extend(ctx.to_content_blocks()) + self.initialize_prompt() def initialize_prompt(self): @@ -792,7 +1144,14 @@ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", def construct_prompt(self, summary, mask=None, *args, **kwargs): """Construct the system and user prompt. - + + The prompt for the optimizer agent is rather complex. + There are prompts that are automatically constructed through the Trace frontend (aka the bundle/node API). + However, we also allow the user to provide additional context to the optimizer agent. + + We handle multimodal (MM) conversion implicitly for the automatic part (TraceGraph), + but we handle the user-provided context explicitly. + Args: summary: The FunctionFeedback summary containing graph information. mask: List of section titles to exclude from the problem instance. @@ -996,7 +1355,7 @@ def call_llm( # Print text portions, indicate if images present text_parts = [block.text for block in user_prompt if isinstance(block, TextContent)] has_images = any(isinstance(block, ImageContent) for block in user_prompt) - suffix = " [+ images]" if has_images else "" + suffix = f" [+ {DEFAULT_IMAGE_PLACEHOLDER}]" if has_images else "" print("Prompt\n", system_prompt + "".join(text_parts) + suffix) # Update system prompt in conversation history From a8a0c3a190293797c9e7e49ad098bf50f3af6acd Mon Sep 17 00:00:00 2001 From: windweller Date: Tue, 16 Dec 2025 23:18:09 -0500 Subject: [PATCH 35/51] continued refactoring... --- opto/optimizers/backbone.py | 90 ++++++- opto/optimizers/opro_v3.py | 19 +- opto/optimizers/optoprime_v3.py | 420 ++++++++++---------------------- 3 files changed, 217 insertions(+), 312 deletions(-) diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index e8905e77..96d135f6 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -3,21 +3,22 @@ Uses LiteLLM unified format for all providers (OpenAI, Anthropic, Google, etc.). 
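
Putting the two add_context conventions defined above side by side (optimizer construction
elided; both image paths are hypothetical):

    # Usage 1: variadic - strings that resolve to image files/URLs become images
    optimizer.add_context("Target layout:", "layout.png",
                          "Current render:", "render.png")

    # Usage 2: template - [IMAGE] placeholders consumed in order; counts must match
    optimizer.add_context("Compare [IMAGE] against [IMAGE].",
                          images=["layout.png", "render.png"])

    # Plain text context needs no images
    optimizer.add_context("The user prefers concise answers.")
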
""" -from typing import List, Dict, Any, Optional, Literal, Union +from typing import List, Dict, Any, Optional, Literal, Union, Iterable from dataclasses import dataclass, field import json import base64 from pathlib import Path import warnings -from abc import ABC, abstractmethod + +# Default placeholder for images that cannot be rendered as text +DEFAULT_IMAGE_PLACEHOLDER = "[IMAGE]" @dataclass -class ContentBlock(ABC): +class ContentBlock: """Abstract base class for all content blocks.""" - @abstractmethod def to_dict(self) -> Dict[str, Any]: """Convert the content block to a dictionary representation. @@ -141,6 +142,86 @@ def __radd__(self, other) -> 'ContentBlockList': else: return NotImplemented + # --- Multimodal utilities --- + + @staticmethod + def blocks_to_text(blocks: Iterable['ContentBlock'], + image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: + """Convert any iterable of ContentBlocks to text representation. + + This is a utility that can be used by composite classes containing + multiple ContentBlockLists. + + Args: + blocks: Iterable of ContentBlock objects + image_placeholder: Placeholder string for images (default: "[IMAGE]") + + Returns: + str: Text representation where images are replaced with placeholder. + """ + text_parts = [] + for block in blocks: + if isinstance(block, TextContent): + text_parts.append(block.text) + elif isinstance(block, ImageContent): + text_parts.append(image_placeholder) + return "".join(text_parts) + + def to_text(self, image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: + """Convert this list to text representation. + + Args: + image_placeholder: Placeholder string for images (default: "[IMAGE]") + + Returns: + str: Text representation where images are replaced with placeholder. + """ + return self.blocks_to_text(self, image_placeholder) + + def has_images(self) -> bool: + """Check if any image content exists in this list. + + Returns: + bool: True if any ImageContent block is present. + """ + return any(isinstance(block, ImageContent) for block in self) + + def __bool__(self) -> bool: + """Check if there's any actual content (not just empty text). + + Returns: + bool: True if content is non-empty (has images or non-whitespace text). + """ + for block in self: + if isinstance(block, ImageContent): + return True + if isinstance(block, TextContent) and block.text.strip(): + return True + return False + + def __repr__(self) -> str: + """Return text-only representation for logging. + + Images are represented as "[IMAGE]" placeholder. + + Returns: + str: Text representation of the content. + """ + return self.to_text() + + def to_content_blocks(self) -> 'ContentBlockList': + """Return self (for interface compatibility with composites). + + This allows ContentBlockList and classes that inherit from it + to be used interchangeably with composite classes that have + a to_content_blocks() method. + + Returns: + ContentBlockList: Self reference. 
+ """ + return self + + @dataclass class TextContent(ContentBlock): """Text content block""" @@ -585,7 +666,6 @@ def to_dict(self) -> Dict[str, Any]: result.update(self.extra) return result - @dataclass class UserTurn: """Represents a user message turn in the conversation""" diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py index 66591480..4b7af6a5 100644 --- a/opto/optimizers/opro_v3.py +++ b/opto/optimizers/opro_v3.py @@ -13,7 +13,8 @@ from opto.optimizers.optoprime_v3 import OptoPrimeV3, OptimizerPromptSymbolSet from opto.optimizers.backbone import ( - ContentBlock, TextContent, ImageContent, ContentBlockList + ContentBlock, TextContent, ImageContent, ContentBlockList, + DEFAULT_IMAGE_PLACEHOLDER ) # Not inheriting from optoprime_v2 because this should have a smaller set @@ -148,17 +149,15 @@ class ProblemInstance: @staticmethod def _content_to_text(content: Union[str, List[ContentBlock]]) -> str: - """Convert content (str or List[ContentBlock]) to text representation.""" + """Convert content (str or List[ContentBlock]) to text representation. + + Handles both string content and ContentBlockList/List[ContentBlock]. + Uses ContentBlockList.blocks_to_text for list content. + """ if isinstance(content, str): return content - # Extract text from content blocks, skip images - text_parts = [] - for block in content: - if isinstance(block, TextContent): - text_parts.append(block.text) - elif isinstance(block, ImageContent): - text_parts.append("[IMAGE]") - return "".join(text_parts) + # Use the shared utility from ContentBlockList + return ContentBlockList.blocks_to_text(content, DEFAULT_IMAGE_PLACEHOLDER) def __repr__(self) -> str: """Return text-only representation for backward compatibility.""" diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index 2cd108db..df7eeb68 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -18,15 +18,14 @@ from opto.optimizers.buffers import FIFOBuffer from opto.optimizers.backbone import ( ConversationHistory, UserTurn, AssistantTurn, - ContentBlock, TextContent, ImageContent, ContentBlockList + ContentBlock, TextContent, ImageContent, ContentBlockList, + DEFAULT_IMAGE_PLACEHOLDER ) import copy import pickle import re from typing import Dict, Any -DEFAULT_IMAGE_PLACEHOLDER = "[IMAGE]" - def value_to_image_content(value: Any) -> Optional[ImageContent]: """Convert a value to ImageContent if it's an image, otherwise return None. @@ -230,74 +229,20 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): name_tag = "name" -class MultiModalContent: - """Base class for multimodal content blocks. - - Provides common utilities for handling mixed text/image content. - Subclasses should implement to_content_blocks() and __repr__() for their specific structure. - - The class provides: - - _content_to_text(): Static method to convert ContentBlockList to text - - to_content_blocks(): Returns ContentBlockList (must be implemented by subclasses) - - has_images(): Check if content contains images (uses to_content_blocks by default) - - __repr__: Text-only representation (must be implemented by subclasses) - """ - - @staticmethod - def _content_to_text(content: ContentBlockList) -> str: - """Convert ContentBlockList to text representation. - - Args: - content: A ContentBlockList containing text and/or image blocks. - - Returns: - str: Text representation where images are replaced with "[IMAGE]". 
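
The ContentBlockList helpers above consolidate what the per-class utilities used to do; a
short sketch (the image path is hypothetical):

    from opto.optimizers.backbone import ContentBlockList, ImageContent

    blocks = ContentBlockList()
    blocks.append("before ")                            # str auto-wraps to TextContent
    blocks.append(ImageContent.from_file("chart.png"))
    blocks.append(" after")

    assert blocks.has_images()
    assert blocks.to_text() == "before [IMAGE] after"   # repr() yields the same string
    assert not ContentBlockList()                       # empty/whitespace-only is falsy
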
- """ - text_parts = [] - for block in content: - if isinstance(block, TextContent): - text_parts.append(block.text) - elif isinstance(block, ImageContent): - text_parts.append(f"{DEFAULT_IMAGE_PLACEHOLDER}") - return "".join(text_parts) - - def to_content_blocks(self) -> ContentBlockList: - """Convert to a list of ContentBlocks for multimodal prompts. - - Returns: - ContentBlockList: A list containing TextContent and ImageContent blocks. - """ - raise NotImplementedError("Subclasses must implement to_content_blocks()") - - def has_images(self) -> bool: - """Check if this content contains any images. - - Default implementation iterates through to_content_blocks(). - Subclasses may override for more efficient implementations. - - Returns: - bool: True if any content contains ImageContent blocks. - """ - for block in self.to_content_blocks(): - if isinstance(block, ImageContent): - return True - return False - - def __repr__(self) -> str: - """Return text-only representation for logging and display.""" - raise NotImplementedError("Subclasses must implement __repr__()") @dataclass -class ProblemInstance(MultiModalContent): +class ProblemInstance: """Problem instance with multimodal content support. - Uses ContentBlockList for variables, inputs, others, and outputs to support - both text and image content in a unified way. + A composite of multiple ContentBlockLists representing different parts + of a problem. Uses ContentBlockList for variables, inputs, others, and + outputs to support both text and image content in a unified way. The class provides: - __repr__: Returns text-only representation for logging - to_content_blocks(): Returns ContentBlockList for multimodal prompts + - has_images(): Check if any field contains images """ instruction: str code: str @@ -340,15 +285,18 @@ class ProblemInstance(MultiModalContent): ) def __repr__(self) -> str: - """Return text-only representation for backward compatibility.""" + """Return text-only representation for backward compatibility. + + Uses ContentBlockList.to_text() for fields that may contain images. + """ optimization_query = self.problem_template.format( instruction=self.instruction, code=self.code, documentation=self.documentation, - variables=self._content_to_text(self.variables), - inputs=self._content_to_text(self.inputs), - outputs=self._content_to_text(self.outputs), - others=self._content_to_text(self.others), + variables=self.variables.to_text(), + inputs=self.inputs.to_text(), + outputs=self.outputs.to_text(), + others=self.others.to_text(), feedback=self.feedback ) @@ -419,259 +367,154 @@ def to_content_blocks(self) -> ContentBlockList: def has_images(self) -> bool: """Check if this problem instance contains any images. - Overrides base implementation for efficiency by checking - fields directly without building full content blocks. + Efficiently checks each ContentBlockList field directly + without building full content blocks. Returns: bool: True if any field contains ImageContent blocks. """ - for content_field in [self.variables, self.inputs, self.others, self.outputs]: - if isinstance(content_field, list): - for block in content_field: - if isinstance(block, ImageContent): - return True - return False + return any( + field.has_images() + for field in [self.variables, self.inputs, self.others, self.outputs] + ) -@dataclass -class Context(MultiModalContent): - """Provide context to the optimizer agent. +class Context(ContentBlockList): + """Semantic wrapper providing context to the optimizer agent. 
- A specialized mixed text/image block allowing users to insert context - in any format (text, images, or mixed content). This enables flexible - context injection for optimization tasks. + Inherits all ContentBlockList functionality (append, extend, has_images, + to_text, __bool__, __repr__, etc.) with a flexible constructor that + supports multiple input patterns. - The context can be: - - Pure text: Context("Your text here") - - Image: Context(ImageContent.from_file("image.png")) - - Mixed: Context(ContentBlockList([TextContent("desc"), ImageContent(...)])) + Creation patterns: + - Variadic: Context("text", image, "more text") + - Template: Context("See [IMAGE] here", images=[img]) + - Empty: Context() Examples: # Text-only context ctx = Context("Important background information") - # Image context + # Image context ctx = Context(ImageContent.from_file("diagram.png")) - # Mixed content - blocks = ContentBlockList() - blocks.append("Here's the relevant diagram:") - blocks.append(ImageContent.from_file("diagram.png")) - ctx = Context(blocks) - """ - content: ContentBlockList = field(default_factory=ContentBlockList) - - def __post_init__(self): - """Normalize content to ContentBlockList after dataclass initialization.""" - if isinstance(self.content, str): - self.content = ContentBlockList(self.content) - elif isinstance(self.content, ContentBlock) and not isinstance(self.content, ContentBlockList): - # Single ContentBlock (e.g., ImageContent) - self.content = ContentBlockList([self.content]) - elif isinstance(self.content, list) and not isinstance(self.content, ContentBlockList): - # Regular list of ContentBlocks - self.content = ContentBlockList(self.content) - elif self.content is None: - self.content = ContentBlockList() - # If already ContentBlockList, keep as-is - - def to_content_blocks(self) -> ContentBlockList: - """Convert the context to a list of ContentBlocks. - - Returns: - ContentBlockList: The content blocks representing this context. - """ - return self.content - - def has_images(self) -> bool: - """Check if this context contains any images. - - Returns: - bool: True if content contains ImageContent blocks. - """ - return any(isinstance(block, ImageContent) for block in self.content) - - def __repr__(self) -> str: - """Return text-only representation for logging. + # Mixed content (variadic mode) + ctx = Context( + "Here's the diagram:", + "diagram.png", # auto-detected as image file + "And the analysis." + ) - Images are represented as "[IMAGE]" placeholder. + # Template mode with placeholders + ctx = Context( + "Compare [IMAGE] with [IMAGE]:", + images=[img1, img2] + ) - Returns: - str: Text representation of the context. - """ - return self._content_to_text(self.content) + # Manual building + ctx = Context() + ctx.append("Here's the relevant diagram:") + ctx.append(ImageContent.from_file("diagram.png")) + """ - def __bool__(self) -> bool: - """Check if context has any content. + def __init__( + self, + *args, + images: Optional[List[Any]] = None, + format: str = "PNG" + ): + """Initialize a Context from various input patterns. - Returns: - bool: True if context is non-empty. - """ - if not self.content: - return False - # Check if there's any actual content (not just empty text) - for block in self.content: - if isinstance(block, ImageContent): - return True - if isinstance(block, TextContent) and block.text.strip(): - return True - return False - - def __len__(self) -> int: - """Return the number of content blocks. 
+ Supports two usage modes: - Returns: - int: Number of content blocks. - """ - return len(self.content) - - def append(self, item: Union[str, ContentBlock]) -> 'Context': - """Append content to this context. + **Mode 1: Variadic (images=None)** + Pass any mix of text and image sources as arguments. + Strings are auto-detected as text or image paths/URLs. - Args: - item: String (auto-converted to TextContent) or ContentBlock. - - Returns: - Context: Self for method chaining. - """ - self.content.append(item) - return self - - def extend(self, items: Union[str, ContentBlock, List[ContentBlock], ContentBlockList]) -> 'Context': - """Extend context with additional content. + Context("Hello", some_image, "World") + Context("Check this:", "path/to/image.png") - Args: - items: Content to add (string, ContentBlock, or list of ContentBlocks). - - Returns: - Context: Self for method chaining. - """ - self.content.extend(items) - return self - - @classmethod - def from_text(cls, text: str) -> 'Context': - """Create a Context from plain text. + **Mode 2: Template (images provided)** + Pass a template string with [IMAGE] placeholders and a list of images. - Args: - text: The text content. - - Returns: - Context: A new Context instance with the text. - """ - return cls(content=ContentBlockList(text)) - - @classmethod - def from_image(cls, image: Union[str, ImageContent, Any], format: str = "PNG") -> 'Context': - """Create a Context from an image. + Context( + "Compare [IMAGE] with [IMAGE]", + images=[img1, img2] + ) Args: - image: Can be: - - ImageContent instance - - URL string (http/https) - - File path string - - PIL Image object - - Numpy array - format: Image format for arrays (PNG, JPEG, etc.). Default: PNG + *args: Variable arguments - text strings and/or image sources (Mode 1), + or a single template string (Mode 2) + images: Optional list of images for template mode. When provided, + expects exactly one template string in args. + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG - Returns: - Context: A new Context instance with the image. + Raises: + ValueError: In template mode, if placeholder count doesn't match image count, + or if args is not a single template string. """ - if isinstance(image, ImageContent): - image_content = image - else: - image_content = ImageContent.from_value(image, format=format) - if image_content is None: - raise ValueError(f"Could not convert {type(image)} to ImageContent") - return cls(content=ContentBlockList([image_content])) - - -class ContextBuildUtils: - """Mixin providing utilities for building Context objects from various input formats.""" - - @staticmethod - def _build_context_from_args( - *args, images: Optional[List[Any]] = None, format: str = "PNG" - ) -> Context: - """Build a Context object from the provided arguments. + # Initialize empty list first + super().__init__() - Supports two patterns: - - Usage 1 (images=None): Variadic args with alternating text/images - - Usage 2 (images provided): Template string with [IMAGE] placeholders - - Args: - *args: Variable arguments (text strings and/or image sources) - images: Optional list of images for template mode - format: Image format for numpy arrays - - Returns: - Context: A Context object containing the multimodal content. 
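
Both constructor modes above yield the same block sequence; a sketch assuming before.png
and after.png exist (otherwise variadic mode falls back to treating the strings as text,
and template mode raises ValueError):

    a = Context("Compare ", "before.png", " with ", "after.png")                      # variadic
    b = Context("Compare [IMAGE] with [IMAGE]", images=["before.png", "after.png"])   # template

    assert a.to_text() == b.to_text() == "Compare [IMAGE] with [IMAGE]"
    # Context("x [IMAGE] y", images=[])  # would raise: 1 placeholder vs 0 images
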
- """ + # Build content based on mode if images is not None: - # Usage 2: Template mode with placeholders - return ContextBuildUtils._build_context_from_template(*args, images=images, format=format) - else: - # Usage 1: Variadic mode with alternating text/images - return ContextBuildUtils._build_context_from_variadic(*args, format=format) + self._build_from_template(*args, images=images, format=format) + elif args: + self._build_from_variadic(*args, format=format) + # else: empty context - @staticmethod - def _build_context_from_variadic(*args, format: str = "PNG") -> Context: - """Build Context from variadic arguments (Usage 1). + def _build_from_variadic(self, *args, format: str = "PNG") -> None: + """Populate self from variadic arguments. Each argument is either text (str) or an image source. + Strings are auto-detected: if they look like image paths/URLs, + they're converted to ImageContent; otherwise treated as text. Args: *args: Alternating text and image sources format: Image format for numpy arrays - - Returns: - Context: A Context object with the content. """ - ctx = Context() - for arg in args: if isinstance(arg, str): # Check if it could be an image URL or file path image_content = ImageContent.from_value(arg, format=format) if image_content is not None: - ctx.append(image_content) + self.append(image_content) else: # It's just text - ctx.append(arg) + self.append(arg) else: # Try to convert to image image_content = ImageContent.from_value(arg, format=format) if image_content is not None: - ctx.append(image_content) + self.append(image_content) else: # Fallback: convert to string - ctx.append(str(arg)) - - return ctx + self.append(str(arg)) - @staticmethod - def _build_context_from_template( - *args, images: List[Any], format: str = "PNG" - ) -> Context: - """Build Context from template with placeholders (Usage 2). + def _build_from_template( + self, + *args, + images: List[Any], + format: str = "PNG" + ) -> None: + """Populate self from template with [IMAGE] placeholders. The template string contains [IMAGE] placeholders that are replaced - by images from the images list. + by images from the images list in order. Args: - *args: Should be a single template string + *args: Should be a single template string containing [IMAGE] placeholders images: List of image sources to insert at placeholders format: Image format for numpy arrays - Returns: - Context: A Context object with placeholders replaced by images. - Raises: - ValueError: If number of placeholders doesn't match number of images + ValueError: If args is not a single string, or if placeholder count + doesn't match the number of images. """ if len(args) != 1 or not isinstance(args[0], str): raise ValueError( - "Usage 2 requires exactly one template string as the first argument. " + "Template mode requires exactly one template string as the first argument. " f"Got {len(args)} arguments." 
) @@ -686,14 +529,12 @@ def _build_context_from_template( f"does not match number of images ({len(images)})" ) - ctx = Context() - # Split template by placeholder and interleave with images parts = template.split(placeholder) for i, part in enumerate(parts): if part: # Add text part if non-empty - ctx.append(part) + self.append(part) # Add image after each part except the last if i < len(images): @@ -702,11 +543,9 @@ def _build_context_from_template( raise ValueError( f"Could not convert image at index {i} to ImageContent: {type(images[i])}" ) - ctx.append(image_content) - - return ctx + self.append(image_content) -class OptoPrimeV3(OptoPrime, ContextBuildUtils): +class OptoPrimeV3(OptoPrime): # This is generic representation prompt, which just explains how to read the problem. representation_prompt = dedent( """You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result. @@ -720,7 +559,6 @@ class OptoPrimeV3(OptoPrime, ContextBuildUtils): - {others_section_title}: the intermediate values created through the code execution. - {outputs_section_title}: the result of the code output. - {feedback_section_title}: the feedback about the code's execution result. - - {context_section_title}: the context information that might be useful to solve the problem. In `{variables_section_title}`, `{inputs_section_title}`, `{outputs_section_title}`, and `{others_section_title}`, the format is: @@ -762,32 +600,24 @@ class OptoPrimeV3(OptoPrime, ContextBuildUtils): """ ) - user_prompt_template = dedent( + user_prompt_context_template = dedent( """ - Now you see problem instance: - + Now you see a new problem instance. Here is some context for this problem: + ================================ - {problem_instance} + {context} ================================ - """ ) - example_prompt = dedent( + user_prompt_template = dedent( """ - Here are some feasible but not optimal solutions for the current problem instance. Consider this as a hint to help you understand the problem better. + Now you see problem instance: ================================ - {examples} + {problem_instance} ================================ - """ - ) - context_prompt = dedent( - """ - Here is some additional **context** to solving this problem: - - {context} """ ) @@ -866,7 +696,7 @@ def __init__( self.default_prompt_symbols = self.optimizer_prompt_symbol_set.default_prompt_symbols self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols) - self.initialize_prompt() + self.initialize_instruct_prompt() def parameter_check(self, parameters: List[ParameterNode]): """Check if the parameters are valid. @@ -880,7 +710,7 @@ def parameter_check(self, parameters: List[ParameterNode]): AssertionError: If more than one parameter contains image data. Notes: - OptoPrimeV2 supports image parameters, but only one parameter can be + OptoPrimeV3 supports image parameters, but only one parameter can be an image at a time since LLMs can only generate one image per inference. """ # Count image parameters @@ -889,7 +719,7 @@ def parameter_check(self, parameters: List[ParameterNode]): if len(image_params) > 1: param_names = ', '.join([f"'{p.name}'" for p in image_params]) raise AssertionError( - f"OptoPrimeV2 supports at most one image parameter, but found {len(image_params)}: " + f"OptoPrimeV3 supports at most one image parameter, but found {len(image_params)}: " f"{param_names}. LLMs can only generate one image at a time." 
            )

@@ -939,7 +769,7 @@ def add_context(self, *args, images: Optional[List[Any]] = None, format: str = "PNG"):
            # Text-only context
            optimizer.add_context("Important background information")
        """
-        ctx = self._build_context_from_args(*args, images=images, format=format)
+        ctx = Context(*args, images=images, format=format)

        # Store the context
        if self.problem_context is None:
@@ -949,9 +779,7 @@ def add_context(self, *args, images: Optional[List[Any]] = None, format: str = "PNG"):
            self.problem_context = ctx
        else:
            # Append to existing context with a newline separator
            self.problem_context.append("\n\n")
            self.problem_context.extend(ctx.to_content_blocks())

-        self.initialize_prompt()
-
-    def initialize_prompt(self):
+    def initialize_instruct_prompt(self):
        self.representation_prompt = self.representation_prompt.format(
@@ -972,7 +800,6 @@ def initialize_instruct_prompt(self):
            code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""),
            documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
            others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
-            context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
        )
        self.output_format_prompt = self.output_format_prompt_template.format(
            output_format=self.optimizer_prompt_symbol_set.output_format,
@@ -986,7 +813,6 @@ def initialize_instruct_prompt(self):
            variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
            inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""),
            others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
-            context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
        )

    def repr_node_value(self, node_dict, node_tag="node",
@@ -1177,13 +1003,15 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs):
            example_response=self.example_response,
        )
        user_content_blocks.append(example_text)
+
+        # Add context here

        # Add problem instance header
        user_content_blocks.append(dedent("""
-        Now you see problem instance:
+            Now you see problem instance:

-        ================================
-        """))
+            ================================
+            """))

        # Add problem instance content blocks (may contain images)
        user_content_blocks.extend(problem_inst.to_content_blocks())
@@ -1192,9 +1020,9 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs):
        var_names = ", ".join(k for k in summary.variables.keys())

        user_content_blocks.append(dedent("""
-        ================================
+            ================================

-        """))
+            """))
        user_content_blocks.append(self.final_prompt.format(names=var_names))

        return system_prompt, user_content_blocks
@@ -1420,7 +1248,6 @@ def save(self, path: str):
                "prompt_symbols": self.prompt_symbols,
                "representation_prompt": self.representation_prompt,
                "output_format_prompt": self.output_format_prompt,
-                "context_prompt": self.context_prompt
            }, f)

    def load(self, path: str):
@@ -1442,4 +1269,3 @@ def load(self, path: str):
        self.prompt_symbols = state["prompt_symbols"]
        self.representation_prompt = state["representation_prompt"]
        self.output_format_prompt = state["output_format_prompt"]
-        self.context_prompt = state["context_prompt"]

From 7ff308d8c3537242c51496201eeb558adb2b3861 Mon Sep 17 00:00:00 2001
From: windweller
Date: Tue, 16 Dec 2025 23:35:56 -0500
Subject: [PATCH 36/51] Adding prompt template to accommodate multi-modal fill-in
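
PromptTemplate is a format()-style template that can splice multimodal content into
prompts. A rough usage sketch (exact semantics are in the class docstring below; `blocks`
stands for any ContentBlockList, possibly containing images):

    tmpl = PromptTemplate("Problem:\n{problem}\nSuggest a fix.")
    tmpl.format(problem="2 + 2 = 5")      # all-string values -> plain str
    tmpl.format(problem=blocks)           # multimodal value -> ContentBlockList, spliced in
    tmpl.format(problem="x", unused="y")  # extra kwargs are silently ignored
    tmpl.format()                         # missing placeholder: "{problem}" left as-is
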
--- opto/optimizers/README.md | 2 + opto/optimizers/backbone.py | 188 ++++++++++++++++++++++++++++++++ opto/optimizers/optoprime_v3.py | 14 +-- 3 files changed, 197 insertions(+), 7 deletions(-) create mode 100644 opto/optimizers/README.md diff --git a/opto/optimizers/README.md b/opto/optimizers/README.md new file mode 100644 index 00000000..b6c479c0 --- /dev/null +++ b/opto/optimizers/README.md @@ -0,0 +1,2 @@ +# Optimizers + diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index 96d135f6..37278740 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -222,6 +222,194 @@ def to_content_blocks(self) -> 'ContentBlockList': return self +class PromptTemplate: + """Template for building ContentBlockLists with {placeholder} support. + + Similar to str.format(), but supports multimodal content (ContentBlockList). + + Return type depends on values: + - All strings → returns str (backward compatible) + - Any multimodal content → returns ContentBlockList + + Features: + - Multiple placeholders: {a}, {b}, {c} + - Escaping: {{ and }} for literal braces + - Missing placeholders: left as-is in text + - Extra kwargs: silently ignored (no error) + - Nested templates: if value is PromptTemplate, formats it first + - Mixed values: str, ContentBlockList, or objects with to_content_blocks() + + Examples: + # Define template (can be class attribute) + user_prompt_template = PromptTemplate(''' + Now you see problem instance: + + ================================ + {problem_instance} + ================================ + ''') + + # Format with ContentBlockList (may contain images) + content = user_prompt_template.format( + problem_instance=problem.to_content_blocks() + ) + # Returns ContentBlockList: [TextContent("Now you see..."), *problem_blocks, TextContent("===...")] + + # Multiple placeholders + template = PromptTemplate("User: {user}\\nAssistant: {assistant}") + result = template.format(user=user_blocks, assistant=assistant_blocks) + + # Nested templates + outer = PromptTemplate("Header\\n{body}\\nFooter") + inner = PromptTemplate("Content: {data}") + result = outer.format(body=inner, data="some data") # inner gets same kwargs + + # Escaping braces + template = PromptTemplate('JSON example: {{"key": "{value}"}}') + result = template.format(value="hello") # {"key": "hello"} + + # Extra kwargs are ignored (no error) + result = template.format(value="hello", unused_key="ignored") + + # Missing placeholders left as-is + template = PromptTemplate("Hello {name}, score: {score}") + result = template.format(name="Alice") # "Hello Alice, score: {score}" + """ + + # Regex to find {placeholder} but not {{ or }} + _PLACEHOLDER_PATTERN = None # Lazy compiled + + def __init__(self, template: str): + """Initialize with a template string. + + Args: + template: Template string with {placeholder} syntax. + """ + self.template = template + + @classmethod + def _get_pattern(cls): + """Lazily compile the placeholder regex pattern.""" + if cls._PLACEHOLDER_PATTERN is None: + import re + # Match {name} but not {{ or }} + # Captures the placeholder name + cls._PLACEHOLDER_PATTERN = re.compile(r'\{(\w+)\}') + return cls._PLACEHOLDER_PATTERN + + def format(self, **kwargs) -> Union[str, 'ContentBlockList']: + """Format the template with the given values. + + Similar to str.format(), but supports multimodal content. + Extra kwargs are silently ignored. + + If all values are strings, returns a str (backward compatible). 
+        If any value is a ContentBlockList or multimodal, returns ContentBlockList.
+
+        Args:
+            **kwargs: Placeholder values. Each value can be:
+                - str: inserted as text
+                - ContentBlockList: blocks spliced in at that position
+                - PromptTemplate: formatted first, then spliced in
+                - Object with to_content_blocks(): method called, result spliced
+                - Other: converted to str
+
+        Returns:
+            str: If all values are strings (backward compatible behavior).
+            ContentBlockList: If any value is multimodal content.
+        """
+        # Check if all values are simple strings - if so, use simple string formatting
+        pattern = self._get_pattern()
+        placeholder_names = set(pattern.findall(self.template))
+
+        # Only check values for placeholders that exist in the template
+        relevant_values = {k: v for k, v in kwargs.items() if k in placeholder_names}
+
+        if all(isinstance(v, str) for v in relevant_values.values()):
+            # All strings: use simple string replacement, return str
+            # Handle escaping and missing placeholders
+            result = self.template.replace("{{", "\x00LBRACE\x00").replace("}}", "\x00RBRACE\x00")
+
+            for name in placeholder_names:
+                placeholder = "{" + name + "}"
+                if name in kwargs:
+                    result = result.replace(placeholder, kwargs[name])
+                # Missing placeholders left as-is
+
+            result = result.replace("\x00LBRACE\x00", "{").replace("\x00RBRACE\x00", "}")
+            return result
+
+        # Multimodal content: build ContentBlockList
+        result = ContentBlockList()
+
+        # Handle escaping: replace {{ with a sentinel, }} with another
+        LBRACE_SENTINEL = "\x00LBRACE\x00"
+        RBRACE_SENTINEL = "\x00RBRACE\x00"
+
+        text = self.template.replace("{{", LBRACE_SENTINEL).replace("}}", RBRACE_SENTINEL)
+
+        last_end = 0
+
+        for match in pattern.finditer(text):
+            # Add text before this placeholder
+            prefix = text[last_end:match.start()]
+            if prefix:
+                # Restore escaped braces in prefix
+                prefix = prefix.replace(LBRACE_SENTINEL, "{").replace(RBRACE_SENTINEL, "}")
+                result.append(prefix)
+
+            # Get placeholder name and value
+            placeholder_name = match.group(1)
+
+            if placeholder_name in kwargs:
+                value = kwargs[placeholder_name]
+                # Convert value to ContentBlockList and splice in
+                content = self._value_to_content(value, **kwargs)
+                result.extend(content)
+            else:
+                # Missing placeholder: leave as-is (restore original {name})
+                result.append("{" + placeholder_name + "}")
+
+            last_end = match.end()
+
+        # Add remaining text after last placeholder
+        suffix = text[last_end:]
+        if suffix:
+            suffix = suffix.replace(LBRACE_SENTINEL, "{").replace(RBRACE_SENTINEL, "}")
+            result.append(suffix)
+
+        return result
+
+    def _value_to_content(self, value, **kwargs) -> 'ContentBlockList':
+        """Convert a value to ContentBlockList.
+
+        Args:
+            value: The value to convert
+            **kwargs: Passed to nested PromptTemplate.format()
+
+        Returns:
+            ContentBlockList: The value as content blocks.
+        """
+        if isinstance(value, ContentBlockList):
+            return value
+        elif isinstance(value, PromptTemplate):
+            # Nested template: format it with the same kwargs
+            return value.format(**kwargs)
+        elif hasattr(value, 'to_content_blocks'):
+            # Object with to_content_blocks method (e.g., ProblemInstance)
+            return value.to_content_blocks()
+        elif isinstance(value, str):
+            return ContentBlockList(value)
+        else:
+            # Fallback: convert to string
+            return ContentBlockList(str(value))
+
+    def __repr__(self) -> str:
+        """Return a preview of the template."""
+        preview = self.template[:50] + "..."
if len(self.template) > 50 else self.template + return f"PromptTemplate({preview!r})" + + @dataclass class TextContent(ContentBlock): """Text content block""" diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index df7eeb68..6c12f98e 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -17,7 +17,7 @@ from opto.utils.llm import AbstractModel, LLM from opto.optimizers.buffers import FIFOBuffer from opto.optimizers.backbone import ( - ConversationHistory, UserTurn, AssistantTurn, + ConversationHistory, UserTurn, AssistantTurn, PromptTemplate, ContentBlock, TextContent, ImageContent, ContentBlockList, DEFAULT_IMAGE_PLACEHOLDER ) @@ -587,7 +587,7 @@ class OptoPrimeV3(OptoPrime): """ ) - example_problem_template = dedent( + example_problem_template = PromptTemplate(dedent( """ Here is an example of problem instance and response: @@ -598,9 +598,9 @@ class OptoPrimeV3(OptoPrime): Your response: {example_response} """ - ) + )) - user_prompt_context_template = dedent( + user_prompt_context_template = PromptTemplate(dedent( """ Now you see a new problem instance. Here is some context for this problem: @@ -608,9 +608,9 @@ class OptoPrimeV3(OptoPrime): {context} ================================ """ - ) + )) - user_prompt_template = dedent( + user_prompt_template = PromptTemplate(dedent( """ Now you see problem instance: @@ -619,7 +619,7 @@ class OptoPrimeV3(OptoPrime): ================================ """ - ) + )) final_prompt = dedent( """ From 5ee4860b3d296d45c59d7cd72a6cc97606ecb5b6 Mon Sep 17 00:00:00 2001 From: windweller Date: Wed, 17 Dec 2025 01:49:59 -0500 Subject: [PATCH 37/51] final update to both backbone and adding context. TODO: documentation, writing test cases. --- opto/optimizers/backbone.py | 105 ++++++++++++++++++++++++++++++-- opto/optimizers/optoprime_v3.py | 50 ++++++--------- 2 files changed, 121 insertions(+), 34 deletions(-) diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index 37278740..5795316a 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -27,6 +27,19 @@ def to_dict(self) -> Dict[str, Any]: """ raise NotImplementedError("Subclasses must implement this method") + @classmethod + def build(cls, value: Any, **kwargs) -> 'ContentBlock': + """Build a content block from a value with auto-detection. + + Args: + value: The value to build from (type depends on subclass) + **kwargs: Additional keyword arguments for building + + Returns: + ContentBlock: The built content block + """ + raise NotImplementedError("Subclasses must implement this method") + class ContentBlockList(list): """List of content blocks with automatic type conversion. @@ -416,6 +429,21 @@ class TextContent(ContentBlock): type: Literal["text"] = "text" text: str = "" + @classmethod + def build(cls, value: Any = "", **kwargs) -> 'TextContent': + """Build a text content block from a value. 
+ + Args: + value: String or any value to convert to text + **kwargs: Unused, for compatibility with base class + + Returns: + TextContent: Text content block with the value as text + """ + if isinstance(value, str): + return cls(text=value) + return cls(text=str(value)) + def to_dict(self) -> Dict[str, Any]: return {"type": self.type, "text": self.text} @@ -631,7 +659,7 @@ def from_data_url(cls, data_url: str): return cls(image_data=data_url.split(',')[-1], media_type="image/jpeg") @classmethod - def from_value(cls, value: Any, format: str = "PNG"): + def build(cls, value: Any, format: str = "PNG"): """Auto-detect format and create ImageContent from various input types. Args: @@ -695,7 +723,7 @@ def set_image(self, image: Any, format: str = "PNG") -> None: - Raw bytes format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG """ - result = ImageContent.from_value(image, format=format) + result = ImageContent.build(image, format=format) if result: self.image_url = result.image_url self.image_data = result.image_data @@ -703,13 +731,43 @@ def set_image(self, image: Any, format: str = "PNG") -> None: @dataclass -class PDFContent: +class PDFContent(ContentBlock): """PDF content block""" type: Literal["pdf"] = "pdf" pdf_url: Optional[str] = None pdf_data: Optional[str] = None # base64 encoded filename: Optional[str] = None + @classmethod + def build(cls, value: Any, **kwargs) -> 'PDFContent': + """Build a PDF content block from a value. + + Args: + value: Can be: + - URL string (starting with 'http://' or 'https://') + - Local file path (string) + - Raw bytes + **kwargs: Unused, for compatibility with base class + + Returns: + PDFContent or None if the value cannot be converted + """ + if isinstance(value, str): + # HTTP/HTTPS URL + if value.startswith('http://') or value.startswith('https://'): + return cls(pdf_url=value) + # Assume it's a file path + if Path(value).exists(): + return cls.from_file(value) + return None + + # Handle bytes + if isinstance(value, bytes): + pdf_data = base64.b64encode(value).decode('utf-8') + return cls(pdf_data=pdf_data) + + return None + def to_dict(self) -> Dict[str, Any]: if self.pdf_url: return { @@ -739,7 +797,7 @@ def from_file(cls, filepath: str): @dataclass -class FileContent: +class FileContent(ContentBlock): """Generic file content block (for code, data files, etc.)""" file_data: str # Could be text content or base64 for binary filename: str @@ -747,6 +805,45 @@ class FileContent: mime_type: str = "text/plain" is_binary: bool = False + @classmethod + def build(cls, value: Any, **kwargs) -> 'FileContent': + """Build a file content block from a value. 
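+
+        A minimal illustrative sketch (the filename and payload are made-up values):
+
+            FileContent.build(("notes.txt", "hello"))     # str payload -> is_binary=False
+            FileContent.build(("blob.bin", b"\x00\x01"))  # bytes payload -> base64, is_binary=True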
+ + Args: + value: Can be: + - Local file path (string) + - Tuple of (filename, content) where content is str or bytes + **kwargs: Additional arguments like mime_type + + Returns: + FileContent or None if the value cannot be converted + """ + mime_type = kwargs.get('mime_type') + + if isinstance(value, str): + # Assume it's a file path + if Path(value).exists(): + return cls.from_file(value, mime_type=mime_type) + return None + + # Handle tuple of (filename, content) + if isinstance(value, tuple) and len(value) == 2: + filename, content = value + if isinstance(content, bytes): + file_data = base64.b64encode(content).decode('utf-8') + is_binary = True + else: + file_data = str(content) + is_binary = False + return cls( + file_data=file_data, + filename=filename, + mime_type=mime_type or 'application/octet-stream', + is_binary=is_binary + ) + + return None + def to_dict(self) -> Dict[str, Any]: return { "type": self.type, diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index 6c12f98e..ca639f40 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -9,7 +9,7 @@ from dataclasses import dataclass, field, asdict from opto.optimizers.optoprime import OptoPrime, FunctionFeedback from opto.trace.utils import dedent -from opto.optimizers.utils import truncate_expression, extract_xml_like_data, MultiModalPayload +from opto.optimizers.utils import truncate_expression, extract_xml_like_data from opto.trace.nodes import ParameterNode, Node, MessageNode, is_image from opto.trace.propagators import TraceGraph, GraphPropagator from opto.trace.propagators.propagators import Propagator @@ -29,8 +29,8 @@ def value_to_image_content(value: Any) -> Optional[ImageContent]: """Convert a value to ImageContent if it's an image, otherwise return None. - Uses is_image() from opto.trace.nodes for validation (stricter than ImageContent.from_value, - e.g., only accepts URLs with image extensions), then delegates to ImageContent.from_value(). + Uses is_image() from opto.trace.nodes for validation (stricter than ImageContent.build, + e.g., only accepts URLs with image extensions), then delegates to ImageContent.build(). Supports (via is_image detection): - Base64 data URL strings (data:image/...) 
@@ -40,7 +40,7 @@ def value_to_image_content(value: Any) -> Optional[ImageContent]: """ if not is_image(value): return None - return ImageContent.from_value(value) + return ImageContent.build(value) class OptimizerPromptSymbolSet: """ @@ -396,7 +396,7 @@ class Context(ContentBlockList): ctx = Context("Important background information") # Image context - ctx = Context(ImageContent.from_file("diagram.png")) + ctx = Context(ImageContent.build("diagram.png")) # Mixed content (variadic mode) ctx = Context( @@ -414,7 +414,7 @@ class Context(ContentBlockList): # Manual building ctx = Context() ctx.append("Here's the relevant diagram:") - ctx.append(ImageContent.from_file("diagram.png")) + ctx.append(ImageContent.build("diagram.png")) """ def __init__( @@ -477,7 +477,7 @@ def _build_from_variadic(self, *args, format: str = "PNG") -> None: for arg in args: if isinstance(arg, str): # Check if it could be an image URL or file path - image_content = ImageContent.from_value(arg, format=format) + image_content = ImageContent.build(arg, format=format) if image_content is not None: self.append(image_content) else: @@ -485,7 +485,7 @@ def _build_from_variadic(self, *args, format: str = "PNG") -> None: self.append(arg) else: # Try to convert to image - image_content = ImageContent.from_value(arg, format=format) + image_content = ImageContent.build(arg, format=format) if image_content is not None: self.append(image_content) else: @@ -538,7 +538,7 @@ def _build_from_template( # Add image after each part except the last if i < len(images): - image_content = ImageContent.from_value(images[i], format=format) + image_content = ImageContent.build(images[i], format=format) if image_content is None: raise ValueError( f"Could not convert image at index {i} to ImageContent: {type(images[i])}" @@ -653,7 +653,6 @@ def __init__( self.truncate_expression = truncate_expression self.problem_context: Optional[Context] = None - self.multimodal_payload = MultiModalPayload() self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False self.ignore_extraction_error = ignore_extraction_error @@ -1005,25 +1004,20 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs): user_content_blocks.append(example_text) # Add contecxt here - - # Add problem instance header - user_content_blocks.append(dedent(""" - Now you see problem instance: + user_content_blocks.append(self.user_prompt_context_template.format( + user_prompt_context=self.problem_context, + )) - ================================ - """)) - - # Add problem instance content blocks (may contain images) - user_content_blocks.extend(problem_inst.to_content_blocks()) + # Add problem instance template + user_content_blocks.append(self.user_prompt_template.format( + problem_instance=problem_inst.to_content_blocks(), + )) - # Add footer and final prompt + # Add final prompt var_names = ", ".join(k for k in summary.variables.keys()) - - user_content_blocks.append(dedent(""" - ================================ - - """)) - user_content_blocks.append(self.final_prompt.format(names=var_names)) + user_content_blocks.append(self.final_prompt.format( + names=var_names, + )) return system_prompt, user_content_blocks @@ -1192,10 +1186,6 @@ def call_llm( # Create user turn with content user_turn = UserTurn() - # Add image content from multimodal_payload if available (legacy path) - if self.multimodal_payload.image_data is not None: - user_turn.add_image(url=self.multimodal_payload.image_data) - # Add content blocks 
from user_prompt for block in user_prompt: if isinstance(block, TextContent): From f451250371abd5051b2dd5ade40b6b5642c99282 Mon Sep 17 00:00:00 2001 From: windweller Date: Wed, 17 Dec 2025 12:03:24 -0500 Subject: [PATCH 38/51] updating to take feedback as image. --- opto/optimizers/backbone.py | 76 ++++++++++- opto/optimizers/optimizer.py | 25 +++- opto/optimizers/optoprime_v3.py | 229 ++++++++++++++++++-------------- 3 files changed, 219 insertions(+), 111 deletions(-) diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index 5795316a..82b373ed 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -39,6 +39,14 @@ def build(cls, value: Any, **kwargs) -> 'ContentBlock': ContentBlock: The built content block """ raise NotImplementedError("Subclasses must implement this method") + + def is_empty(self) -> bool: + """Check if the content block is empty (has no meaningful content). + + Returns: + bool: True if the block is empty, False otherwise + """ + raise NotImplementedError("Subclasses must implement this method") class ContentBlockList(list): """List of content blocks with automatic type conversion. @@ -178,7 +186,7 @@ def blocks_to_text(blocks: Iterable['ContentBlock'], text_parts.append(block.text) elif isinstance(block, ImageContent): text_parts.append(image_placeholder) - return "".join(text_parts) + return " ".join(text_parts) def to_text(self, image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: """Convert this list to text representation. @@ -429,6 +437,14 @@ class TextContent(ContentBlock): type: Literal["text"] = "text" text: str = "" + def __post_init__(self): + # Ensure type is always "text" (fixes issue when user passes positional arg) + object.__setattr__(self, 'type', 'text') + + def is_empty(self) -> bool: + """Check if the text content is empty.""" + return not self.text + @classmethod def build(cls, value: Any = "", **kwargs) -> 'TextContent': """Build a text content block from a value. 
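+
+        A minimal illustrative sketch (the reprs shown here are an assumption,
+        not captured output):
+
+            TextContent.build("hi")  # -> TextContent(text="hi")
+            TextContent.build(42)    # non-str values are stringified -> text="42"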
@@ -457,9 +473,9 @@ def __add__(self, other) -> 'TextContent': TextContent: New TextContent with concatenated text """ if isinstance(other, str): - return TextContent(text=self.text + other) + return TextContent(text=self.text + " " + other) elif isinstance(other, TextContent): - return TextContent(text=self.text + other.text) + return TextContent(text=self.text + " " + other.text) else: return NotImplemented @@ -473,7 +489,7 @@ def __radd__(self, other) -> 'TextContent': TextContent: New TextContent with concatenated text """ if isinstance(other, str): - return TextContent(text=other + self.text) + return TextContent(text=other + " " + self.text) else: return NotImplemented @@ -495,6 +511,14 @@ class ImageContent(ContentBlock): media_type: str = "image/jpeg" # image/jpeg, image/png, image/gif, image/webp detail: Optional[str] = None # OpenAI: "auto", "low", "high" + def __post_init__(self): + # Ensure type is always "image" (fixes issue when user passes positional arg) + object.__setattr__(self, 'type', 'image') + + def is_empty(self) -> bool: + """Check if the image content is empty (no URL or data).""" + return not self.image_url and not self.image_data + def to_dict(self) -> Dict[str, Any]: if self.image_url: return { @@ -659,7 +683,7 @@ def from_data_url(cls, data_url: str): return cls(image_data=data_url.split(',')[-1], media_type="image/jpeg") @classmethod - def build(cls, value: Any, format: str = "PNG"): + def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': """Auto-detect format and create ImageContent from various input types. Args: @@ -675,6 +699,17 @@ def build(cls, value: Any, format: str = "PNG"): Returns: ImageContent or None if the value cannot be converted """ + # handle None + if not value: + return cls() + + # handle self + if isinstance(value, cls): + return value + + if not value.strip(): + return cls() + # Handle string inputs if isinstance(value, str): # Data URL @@ -686,7 +721,7 @@ def build(cls, value: Any, format: str = "PNG"): # Assume it's a file path if Path(value).exists(): return cls.from_file(value) - return None + return cls() # Handle bytes if isinstance(value, bytes): @@ -708,7 +743,7 @@ def build(cls, value: Any, format: str = "PNG"): except ImportError: pass - return None + return cls() def set_image(self, image: Any, format: str = "PNG") -> None: """Set the image from various input formats (mutates self). @@ -738,6 +773,14 @@ class PDFContent(ContentBlock): pdf_data: Optional[str] = None # base64 encoded filename: Optional[str] = None + def __post_init__(self): + # Ensure type is always "pdf" (fixes issue when user passes positional arg) + object.__setattr__(self, 'type', 'pdf') + + def is_empty(self) -> bool: + """Check if the PDF content is empty (no URL or data).""" + return not self.pdf_url and not self.pdf_data + @classmethod def build(cls, value: Any, **kwargs) -> 'PDFContent': """Build a PDF content block from a value. 
@@ -844,6 +887,10 @@ def build(cls, value: Any, **kwargs) -> 'FileContent': return None + def is_empty(self) -> bool: + """Check if the file content is empty (no data).""" + return not self.file_data + def to_dict(self) -> Dict[str, Any]: return { "type": self.type, @@ -903,6 +950,10 @@ class ToolCall(ContentBlock): name: Optional[str] = None # function name arguments: Optional[Dict[str, Any]] = None # function arguments + def is_empty(self) -> bool: + """Check if the tool call is empty (no id).""" + return not self.id + def to_dict(self) -> Dict[str, Any]: result = {"id": self.id, "type": self.type} if self.name: @@ -919,6 +970,10 @@ class ToolResult(ContentBlock): content: str # Result as string (can be JSON stringified) is_error: bool = False + def is_empty(self) -> bool: + """Check if the tool result is empty (no tool_call_id).""" + return not self.tool_call_id + def to_dict(self) -> Dict[str, Any]: return { "tool_call_id": self.tool_call_id, @@ -938,6 +993,10 @@ class ToolDefinition(ContentBlock): # Provider-specific fields extra: Dict[str, Any] = field(default_factory=dict) + def is_empty(self) -> bool: + """Check if the tool definition is empty (no type).""" + return not self.type + def to_dict(self) -> Dict[str, Any]: result = {"type": self.type} if self.name: @@ -1022,6 +1081,9 @@ def to_litellm_format(self) -> Dict[str, Any]: """Convert to LiteLLM format (OpenAI-compatible, works with all providers)""" content = [] for block in self.content: + # Skip empty content blocks + if block.is_empty(): + continue if isinstance(block, TextContent): content.append({"type": "text", "text": block.text}) elif isinstance(block, ImageContent): diff --git a/opto/optimizers/optimizer.py b/opto/optimizers/optimizer.py index 9882d986..bc965e48 100644 --- a/opto/optimizers/optimizer.py +++ b/opto/optimizers/optimizer.py @@ -142,7 +142,7 @@ class Optimizer(AbstractOptimizer): update(update_dict) Apply updates to trainable parameters. backward(node, *args, **kwargs) - Propagate feedback through the graph. + Propagate feedback through the graph. Feedback is passed in through *args and **kwargs. zero_feedback() Clear accumulated feedback from all parameters. save(path) @@ -191,6 +191,12 @@ class Optimizer(AbstractOptimizer): ParameterNode : Parameters being optimized Projection : Constraints applied during optimization + Usage + -------- + result = traced_computation(x) + optimizer.zero_feedback() + optimizer.backward(result, 'user feedback') + Examples -------- >>> class MyOptimizer(Optimizer): @@ -368,9 +374,12 @@ def backward(self, node: Node, *args, **kwargs): node : Node Starting node for backward propagation. *args - Additional arguments passed to node.backward(). + Additional arguments passed to node.backward(*args, **kwargs). + This corresponds to the positional arguments in node.backward **kwargs - Additional keyword arguments passed to node.backward(). + Additional keyword arguments passed to node.backward(*args, **kwargs). + This corresponds to the keyword arguments in node.backward + If 'propagator' is not provided, uses the optimizer's propagator. Returns ------- @@ -379,9 +388,15 @@ def backward(self, node: Node, *args, **kwargs): Notes ----- - Uses the optimizer's propagator for feedback processing. + Uses the optimizer's propagator for feedback processing by default. 
+
+        Usage
+        ------
+        optimizer.backward(result, 'make this number bigger', propagator=custom_propagator)
+        optimizer.backward(result, feedback='make this number bigger')
         """
-        return node.backward(*args, propagator=self.propagator, **kwargs)
+        kwargs.setdefault('propagator', self.propagator)
+        return node.backward(*args, **kwargs)
 
     def save(self, path: str):
         """Save the optimizer state to a file."""
diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py
index ca639f40..e4a5dfc6 100644
--- a/opto/optimizers/optoprime_v3.py
+++ b/opto/optimizers/optoprime_v3.py
@@ -26,6 +26,7 @@
 import re
 from typing import Dict, Any
 
+
 def value_to_image_content(value: Any) -> Optional[ImageContent]:
     """Convert a value to ImageContent if it's an image, otherwise return None.
 
@@ -42,6 +43,7 @@ def value_to_image_content(value: Any) -> Optional[ImageContent]:
         return None
     return ImageContent.build(value)
 
+
 class OptimizerPromptSymbolSet:
     """
     By inheriting this class and passing it into the optimizer, people can change the optimizer documentation
@@ -207,6 +209,7 @@ def output_response_extractor(self, response: str) -> Dict[str, Any]:
         optoprime_instance = OptoPrime()
         return optoprime_instance.extract_llm_suggestion(response)
 
+
 class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet):
     variables_section_title = "# Variables"
     inputs_section_title = "# Inputs"
@@ -229,6 +232,42 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet):
     name_tag = "name"
 
 
+@dataclass
+class FunctionFeedback:
+    """Container for structured feedback from function execution traces.
+
+    Used by OptoPrime to organize execution traces into a format suitable
+    for LLM-based optimization.
+
+    Attributes
+    ----------
+    graph : list[tuple[int, str]]
+        Topologically sorted function calls with (depth, representation) pairs.
+    documentation : dict[str, str]
+        Mapping of function names to their documentation strings.
+    others : dict[str, Any]
+        Intermediate variables with (data, description) tuples.
+    roots : dict[str, Any]
+        Input/root variables with (data, description) tuples.
+    output : dict[str, Any]
+        Output/leaf variables with (data, description) tuples.
+    user_feedback : Union[str, ContentBlockList]
+        User-provided feedback about the execution. May include images.
+
+    Notes
+    -----
+    This structure separates the execution trace into logical components
+    that can be formatted into prompts for LLM-based optimization.
+    """
+
+    graph: List[
+        Tuple[int, str]
+    ]  # Each item is a representation of a function call. The items are topologically sorted.
+    documentation: Dict[str, str]  # Function name and its documentation string
+    others: Dict[str, Any]  # Intermediate variable names and their data
+    roots: Dict[str, Any]  # Root variable name and its data
+    output: Dict[str, Any]  # Leaf variable name and its data
+    user_feedback: Union[str, ContentBlockList]  # User feedback at the leaf of the graph (may include images)
 
 
 @dataclass
@@ -251,8 +290,7 @@ class ProblemInstance:
     inputs: ContentBlockList
     others: ContentBlockList
     outputs: ContentBlockList
-    feedback: str
-    context: Optional[str]
+    feedback: ContentBlockList  # May contain images mixed with text
 
     optimizer_prompt_symbol_set: OptimizerPromptSymbolSet
 
@@ -297,19 +335,9 @@ def __repr__(self) -> str:
             inputs=self.inputs.to_text(),
             outputs=self.outputs.to_text(),
             others=self.others.to_text(),
-            feedback=self.feedback
+            feedback=self.feedback.to_text()
         )
 
         return optimization_query
 
     def to_content_blocks(self) -> ContentBlockList:
@@ -324,7 +352,7 @@ def to_content_blocks(self) -> ContentBlockList:
             from variables, inputs, others, or outputs.
         """
         blocks = ContentBlockList()
-        
+
         # Header sections (always text)
         header = dedent(f"""
         # Instruction
@@ -339,31 +367,32 @@ def to_content_blocks(self) -> ContentBlockList:
         # Variables
         """)
         blocks.append(header)
-        
+
         # Variables section (may contain images)
         blocks.extend(self.variables)
-        
+
         # Inputs section
         blocks.append("\n\n# Inputs\n")
         blocks.extend(self.inputs)
-        
+
         # Others section
         blocks.append("\n\n# Others\n")
         blocks.extend(self.others)
-        
+
         # Outputs section
         blocks.append("\n\n# Outputs\n")
         blocks.extend(self.outputs)
-        
-        # Feedback section
-        blocks.append(f"\n\n# Feedback\n{self.feedback}")
-        
+
+        # Feedback section (may contain images)
+        blocks.append("\n\n# Feedback\n")
+        blocks.extend(self.feedback)
+
         # Context section (optional)
         if self.context is not None and self.context.strip() != "":
            blocks.append(f"\n\n# Context\n{self.context}")
-        
+
         return blocks
-    
+
     def has_images(self) -> bool:
         """Check if this problem instance contains any images.
@@ -374,56 +403,62 @@ def has_images(self) -> bool:
             bool: True if any field contains ImageContent blocks.
         """
         return any(
-            field.has_images() 
-            for field in [self.variables, self.inputs, self.others, self.outputs]
+            field.has_images()
+            for field in [self.variables, self.inputs, self.others, self.outputs, self.feedback]
         )
 
 
-class Context(ContentBlockList):
-    """Semantic wrapper providing context to the optimizer agent.
+class Content(ContentBlockList):
+    """Semantic wrapper providing multi-modal content for the optimizer agent.
 
     Inherits all ContentBlockList functionality (append, extend, has_images,
     to_text, __bool__, __repr__, etc.) with a flexible constructor that
     supports multiple input patterns.
+
+    The goal is to provide a flexible interface for users to add mixed text and image content to the optimizer agent.
+ + Primary use cases: + - Building problem context for the optimizer agent + - Providing user feedback Creation patterns: - - Variadic: Context("text", image, "more text") - - Template: Context("See [IMAGE] here", images=[img]) - - Empty: Context() + - Variadic: Content("text", image, "more text") + - Template: Content("See [IMAGE] here", images=[img]) + - Empty: Content() Examples: - # Text-only context - ctx = Context("Important background information") + # Text-only content + ctx = Content("Important background information") - # Image context - ctx = Context(ImageContent.build("diagram.png")) + # Image content + ctx = Content(ImageContent.build("diagram.png")) # Mixed content (variadic mode) - ctx = Context( + ctx = Content( "Here's the diagram:", "diagram.png", # auto-detected as image file "And the analysis." ) # Template mode with placeholders - ctx = Context( + ctx = Content( "Compare [IMAGE] with [IMAGE]:", images=[img1, img2] ) # Manual building - ctx = Context() + ctx = Content() ctx.append("Here's the relevant diagram:") ctx.append(ImageContent.build("diagram.png")) """ - + def __init__( - self, - *args, - images: Optional[List[Any]] = None, - format: str = "PNG" + self, + *args, + images: Optional[List[Any]] = None, + format: str = "PNG" ): - """Initialize a Context from various input patterns. + """Initialize a Content from various input patterns. Supports two usage modes: @@ -431,13 +466,13 @@ def __init__( Pass any mix of text and image sources as arguments. Strings are auto-detected as text or image paths/URLs. - Context("Hello", some_image, "World") - Context("Check this:", "path/to/image.png") + Content("Hello", some_image, "World") + Content("Check this:", "path/to/image.png") **Mode 2: Template (images provided)** Pass a template string with [IMAGE] placeholders and a list of images. - Context( + Content( "Compare [IMAGE] with [IMAGE]", images=[img1, img2] ) @@ -455,14 +490,14 @@ def __init__( """ # Initialize empty list first super().__init__() - + # Build content based on mode if images is not None: self._build_from_template(*args, images=images, format=format) elif args: self._build_from_variadic(*args, format=format) # else: empty context - + def _build_from_variadic(self, *args, format: str = "PNG") -> None: """Populate self from variadic arguments. @@ -478,25 +513,17 @@ def _build_from_variadic(self, *args, format: str = "PNG") -> None: if isinstance(arg, str): # Check if it could be an image URL or file path image_content = ImageContent.build(arg, format=format) - if image_content is not None: + if not image_content.is_empty(): self.append(image_content) else: # It's just text self.append(arg) - else: - # Try to convert to image - image_content = ImageContent.build(arg, format=format) - if image_content is not None: - self.append(image_content) - else: - # Fallback: convert to string - self.append(str(arg)) - + def _build_from_template( - self, - *args, - images: List[Any], - format: str = "PNG" + self, + *args, + images: List[Any], + format: str = "PNG" ) -> None: """Populate self from template with [IMAGE] placeholders. @@ -517,10 +544,10 @@ def _build_from_template( "Template mode requires exactly one template string as the first argument. " f"Got {len(args)} arguments." 
) - + template = args[0] placeholder = DEFAULT_IMAGE_PLACEHOLDER - + # Count placeholders placeholder_count = template.count(placeholder) if placeholder_count != len(images): @@ -528,14 +555,14 @@ def _build_from_template( f"Number of {placeholder} placeholders ({placeholder_count}) " f"does not match number of images ({len(images)})" ) - + # Split template by placeholder and interleave with images parts = template.split(placeholder) - + for i, part in enumerate(parts): if part: # Add text part if non-empty self.append(part) - + # Add image after each part except the last if i < len(images): image_content = ImageContent.build(images[i], format=format) @@ -545,6 +572,10 @@ def _build_from_template( ) self.append(image_content) +# we provide two aliases for the Content class for semantic convenience +Context = Content +Feedback = Content + class OptoPrimeV3(OptoPrime): # This is generic representation prompt, which just explains how to read the problem. representation_prompt = dedent( @@ -646,13 +677,13 @@ def __init__( optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(), use_json_object_format=True, # whether to use json object format for the response when calling LLM truncate_expression=truncate_expression, - problem_context: Optional[Context] = None, + problem_context: Optional[Content] = None, **kwargs, ): super().__init__(parameters, *args, propagator=propagator, **kwargs) self.truncate_expression = truncate_expression - self.problem_context: Optional[Context] = None + self.problem_context: Optional[Content] = None self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False self.ignore_extraction_error = ignore_extraction_error @@ -714,7 +745,7 @@ def parameter_check(self, parameters: List[ParameterNode]): """ # Count image parameters image_params = [param for param in parameters if param.is_image] - + if len(image_params) > 1: param_names = ', '.join([f"'{p.name}'" for p in image_params]) raise AssertionError( @@ -768,8 +799,8 @@ def add_context(self, *args, images: Optional[List[Any]] = None, format: str = " # Text-only context optimizer.add_context("Important background information") """ - ctx = Context(*args, images=images, format=format) - + ctx = Content(*args, images=images, format=format) + # Store the context if self.problem_context is None: self.problem_context = ctx @@ -777,7 +808,7 @@ def add_context(self, *args, images: Optional[List[Any]] = None, format: str = " # Append to existing context with a newline separator self.problem_context.append("\n\n") self.problem_context.extend(ctx.to_content_blocks()) - + def initialize_instruct_prompt(self): self.representation_prompt = self.representation_prompt.format( variable_expression_format=dedent(f""" @@ -873,24 +904,24 @@ def repr_node_value_as_content_blocks(self, node_dict, node_tag="node", For image values, the text before and after the image are separate blocks. 
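
        As an illustration, an image-valued node named "img" would be emitted
        roughly as follows (the name and tags here assume the default symbol
        set; they are configurable):

            <node name="img" type="image">
            <value>
            [ImageContent block]
            </value>
            </node>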
""" blocks = ContentBlockList() - + for k, v in node_dict.items(): value_data = v[0] constraint = v[1] - + if "__code" not in k: # Check if this is an image image_content = value_to_image_content(value_data) - + if image_content is not None: # Image node: output XML structure, then image, then closing type_name = "image" constraint_expr = f"<{constraint_tag}>\n{constraint}\n" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else "" - + xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n" blocks.append(xml_text) blocks.append(image_content) # Image breaks the text flow - + closing_text = f"\n\n{constraint_expr}\n\n" if constraint_expr else f"\n\n\n\n" blocks.append(closing_text) else: @@ -912,35 +943,35 @@ def repr_node_value_as_content_blocks(self, node_dict, node_tag="node", blocks.append( f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n\n{constraint_expr}\n\n\n" ) - + return blocks def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", - value_tag="value", constraint_tag="constraint") -> ContentBlockList: + value_tag="value", constraint_tag="constraint") -> ContentBlockList: """Returns a ContentBlockList with compact representation, including images. Consecutive TextContent blocks are merged for efficiency. Non-image values are truncated. Images break the text flow. """ blocks = ContentBlockList() - + for k, v in node_dict.items(): value_data = v[0] constraint = v[1] - + if "__code" not in k: # Check if this is an image image_content = value_to_image_content(value_data) - + if image_content is not None: # Image node: output XML structure, then image, then closing type_name = "image" constraint_expr = f"<{constraint_tag}>\n{constraint}\n" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else "" - + xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n" blocks.append(xml_text) blocks.append(image_content) # Image breaks the text flow - + closing_text = f"\n\n{constraint_expr}\n\n" if constraint_expr else f"\n\n\n\n" blocks.append(closing_text) else: @@ -964,7 +995,7 @@ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", blocks.append( f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n\n{constraint_expr}\n\n\n" ) - + return blocks def construct_prompt(self, summary, mask=None, *args, **kwargs): @@ -989,12 +1020,12 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs): system_prompt = ( self.representation_prompt + self.output_format_prompt ) # generic representation + output rule - + problem_inst = self.problem_instance(summary, mask=mask) - + # Build user prompt as ContentBlockList (auto-merges consecutive text) user_content_blocks = ContentBlockList() - + # Add example if included if self.include_example: example_text = self.example_problem_template.format( @@ -1012,13 +1043,13 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs): user_content_blocks.append(self.user_prompt_template.format( problem_instance=problem_inst.to_content_blocks(), )) - + # Add final prompt var_names = ", ".join(k for k in summary.variables.keys()) user_content_blocks.append(self.final_prompt.format( names=var_names, )) - + return system_prompt, user_content_blocks def problem_instance(self, summary, mask=None): @@ -1032,7 +1063,7 @@ def problem_instance(self, summary, mask=None): ProblemInstance with content block fields for multimodal support. 
""" mask = mask or [] - + # Use content block representations for multimodal support variables_content = ( self.repr_node_value_as_content_blocks( @@ -1074,7 +1105,7 @@ def problem_instance(self, summary, mask=None): if self.optimizer_prompt_symbol_set.others_section_title not in mask else ContentBlockList() ) - + return ProblemInstance( instruction=self.objective if "#Instruction" not in mask else "", code=( @@ -1091,8 +1122,8 @@ def problem_instance(self, summary, mask=None): inputs=inputs_content, outputs=outputs_content, others=others_content, - feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "", - context=self.problem_context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "", + feedback=ContentBlockList.ensure( + summary.user_feedback) if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else ContentBlockList(), optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) @@ -1110,7 +1141,7 @@ def _step( """ assert isinstance(self.propagator, GraphPropagator) summary = self.summarize() - + system_prompt, user_prompt = self.construct_prompt(summary, mask=mask) response = self.call_llm( @@ -1185,7 +1216,7 @@ def call_llm( # Create user turn with content user_turn = UserTurn() - + # Add content blocks from user_prompt for block in user_prompt: if isinstance(block, TextContent): @@ -1193,7 +1224,7 @@ def call_llm( elif isinstance(block, ImageContent): user_turn.content.append(block) # Handle other content types if needed - + self.conversation_history.add_user_turn(user_turn) # Get messages with conversation length control (truncate from start) From 1122d8bb5b928f960e3da9e5e845ca9b3ae57699 Mon Sep 17 00:00:00 2001 From: windweller Date: Wed, 17 Dec 2025 16:38:11 -0500 Subject: [PATCH 39/51] image as feedback is now roughly correct --- opto/optimizers/backbone.py | 202 +++++++++++++++++++++++++------- opto/optimizers/optoprime_v3.py | 131 +++++++++++++++------ 2 files changed, 254 insertions(+), 79 deletions(-) diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index 82b373ed..cce60832 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -1,9 +1,9 @@ """ Flexible conversation manager for multi-turn LLM conversations. Uses LiteLLM unified format for all providers (OpenAI, Anthropic, Google, etc.). -""" -from typing import List, Dict, Any, Optional, Literal, Union, Iterable +""" +from typing import List, Dict, Any, Optional, Literal, Union, Iterable, Tuple from dataclasses import dataclass, field import json import base64 @@ -14,10 +14,13 @@ # Default placeholder for images that cannot be rendered as text DEFAULT_IMAGE_PLACEHOLDER = "[IMAGE]" - @dataclass class ContentBlock: """Abstract base class for all content blocks.""" + + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) def to_dict(self) -> Dict[str, Any]: """Convert the content block to a dictionary representation. @@ -108,15 +111,15 @@ def append(self, item: Union[str, 'ContentBlock']) -> 'ContentBlockList': they are merged into a single TextContent. 
""" if isinstance(item, str): - # String: merge with last TextContent or create new one + # String: merge with last TextContent or create new one (with a separation mark " ") if self and isinstance(self[-1], TextContent): - self[-1] = TextContent(text=self[-1].text + item) + self[-1] = TextContent(text=self[-1].text + " " + item) else: super().append(TextContent(text=item)) elif isinstance(item, TextContent): - # TextContent: merge with last TextContent or add + # TextContent: merge with last TextContent or add (with a separation mark " ") if self and isinstance(self[-1], TextContent): - self[-1] = TextContent(text=self[-1].text + item.text) + self[-1] = TextContent(text=self[-1].text + " " + item.text) else: super().append(item) else: @@ -124,7 +127,8 @@ def append(self, item: Union[str, 'ContentBlock']) -> 'ContentBlockList': super().append(item) return self - def extend(self, blocks: Union[str, 'ContentBlock', List['ContentBlock'], 'ContentBlockList', None]) -> 'ContentBlockList': + def extend(self, blocks: Union[str, 'ContentBlock', List[ + 'ContentBlock'], 'ContentBlockList', None]) -> 'ContentBlockList': """Extend with blocks, merging consecutive TextContent. Args: @@ -163,10 +167,24 @@ def __radd__(self, other) -> 'ContentBlockList': else: return NotImplemented + def is_empty(self) -> bool: + """Check if the content block list is empty.""" + if len(self) == 0: + return True + return all(block.is_empty() for block in self) + + def has_images(self) -> bool: + """Check if the content block list contains any images.""" + return any(isinstance(block, ImageContent) for block in self) + + def has_text(self) -> bool: + """Check if the content block list contains any text.""" + return any(isinstance(block, TextContent) for block in self) + # --- Multimodal utilities --- @staticmethod - def blocks_to_text(blocks: Iterable['ContentBlock'], + def blocks_to_text(blocks: Iterable['ContentBlock'], image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: """Convert any iterable of ContentBlocks to text representation. @@ -437,9 +455,8 @@ class TextContent(ContentBlock): type: Literal["text"] = "text" text: str = "" - def __post_init__(self): - # Ensure type is always "text" (fixes issue when user passes positional arg) - object.__setattr__(self, 'type', 'text') + def __init__(self, text: str = ""): + super().__init__(text=text) def is_empty(self) -> bool: """Check if the text content is empty.""" @@ -511,9 +528,34 @@ class ImageContent(ContentBlock): media_type: str = "image/jpeg" # image/jpeg, image/png, image/gif, image/webp detail: Optional[str] = None # OpenAI: "auto", "low", "high" - def __post_init__(self): - # Ensure type is always "image" (fixes issue when user passes positional arg) - object.__setattr__(self, 'type', 'image') + def __init__(self, value: Any = None, format: str = "PNG", **kwargs): + """Initialize ImageContentBlock with auto-detection of input type. + + Args: + value: Can be: + - URL string (starting with 'http://' or 'https://') + - Data URL string (starting with 'data:image/') + - Local file path (string) + - Numpy array or array-like RGB image + - PIL Image object + - Raw bytes + - None (empty image) + format: Image format for numpy arrays (PNG, JPEG, etc.). 
Default: PNG + **kwargs: Direct field values (image_url, image_data, media_type, detail) + """ + # If explicit field values are provided, use them directly + if kwargs: + kwargs.setdefault('type', 'image') + kwargs.setdefault('media_type', 'image/jpeg') + super().__init__(**kwargs) + else: + # Use autocast to detect and convert the value + image_url, image_data, media_type = self.autocast(value, format=format) + super().__init__( + image_url=image_url, + image_data=image_data, + media_type=media_type, + ) def is_empty(self) -> bool: """Check if the image content is empty (no URL or data).""" @@ -682,9 +724,9 @@ def from_data_url(cls, data_url: str): # Fallback: assume the whole thing is base64 data return cls(image_data=data_url.split(',')[-1], media_type="image/jpeg") - @classmethod - def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': - """Auto-detect format and create ImageContent from various input types. + @staticmethod + def autocast(value: Any, format: str = "PNG") -> Tuple[Optional[str], Optional[str], str]: + """Auto-detect value type and return image field values. Args: value: Can be: @@ -694,44 +736,72 @@ def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': - Numpy array or array-like RGB image - PIL Image object - Raw bytes + - None (empty image) format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG Returns: - ImageContent or None if the value cannot be converted + Tuple of (image_url, image_data, media_type) """ - # handle None - if not value: - return cls() - - # handle self - if isinstance(value, cls): - return value - - if not value.strip(): - return cls() - + # Handle None or empty + if value is None: + return (None, None, "image/jpeg") + + # Handle ImageContentBlock instance + if isinstance(value, ImageContent): + return (value.image_url, value.image_data, value.media_type) + # Handle string inputs if isinstance(value, str): + if not value.strip(): + return (None, None, "image/jpeg") + # Data URL if value.startswith('data:image/'): - return cls.from_data_url(value) + try: + header, b64_data = value.split(',', 1) + media_type = header.split(':')[1].split(';')[0] + return (None, b64_data, media_type) + except (ValueError, IndexError): + return (None, value.split(',')[-1], "image/jpeg") + # HTTP/HTTPS URL if value.startswith('http://') or value.startswith('https://'): - return cls.from_url(value) - # Assume it's a file path - if Path(value).exists(): - return cls.from_file(value) - return cls() + return (value, None, "image/jpeg") + + # File path + path = Path(value) + if path.exists(): + ext_to_type = { + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.png': 'image/png', + '.gif': 'image/gif', + '.webp': 'image/webp' + } + media_type = ext_to_type.get(path.suffix.lower(), 'image/jpeg') + with open(value, 'rb') as f: + image_data = base64.b64encode(f.read()).decode('utf-8') + return (None, image_data, media_type) + + return (None, None, "image/jpeg") # Handle bytes if isinstance(value, bytes): - return cls.from_bytes(value) + image_data = base64.b64encode(value).decode('utf-8') + return (None, image_data, "image/jpeg") # Handle PIL Image try: from PIL import Image if isinstance(value, Image.Image): - return cls.from_pil(value, format=format) + import io + buffer = io.BytesIO() + img_format = value.format or format.upper() + value.save(buffer, format=img_format) + buffer.seek(0) + image_data = base64.b64encode(buffer.getvalue()).decode('utf-8') + media_type = f"image/{img_format.lower()}" + return (None, image_data, media_type) 
except ImportError: pass @@ -739,11 +809,61 @@ def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': try: import numpy as np if isinstance(value, np.ndarray) or hasattr(value, '__array__'): - return cls.from_array(value, format=format) + try: + from PIL import Image + except ImportError: + raise ImportError("Pillow is required for array conversion. Install with: pip install Pillow") + + import io + + if not isinstance(value, np.ndarray): + value = np.array(value) + + # Normalize to [0, 255] if needed + if value.dtype == np.float32 or value.dtype == np.float64: + if value.max() <= 1.0: + value = (value * 255).astype(np.uint8) + else: + value = value.astype(np.uint8) + elif value.dtype != np.uint8: + value = value.astype(np.uint8) + + image = Image.fromarray(value) + buffer = io.BytesIO() + image.save(buffer, format=format.upper()) + buffer.seek(0) + + image_data = base64.b64encode(buffer.getvalue()).decode('utf-8') + media_type = f"image/{format.lower()}" + return (None, image_data, media_type) except ImportError: pass - return cls() + return (None, None, "image/jpeg") + + @classmethod + def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': + """Auto-detect format and create ImageContent from various input types. + + Args: + value: Can be: + - URL string (starting with 'http://' or 'https://') + - Data URL string (starting with 'data:image/') + - Local file path (string) + - Numpy array or array-like RGB image + - PIL Image object + - Raw bytes + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + + Returns: + ImageContent or None if the value cannot be converted + """ + # Handle ImageContentBlock instance directly + if isinstance(value, cls): + return value + + image_url, image_data, media_type = cls.autocast(value, format=format) + return cls(image_url=image_url, image_data=image_data, media_type=media_type) def set_image(self, image: Any, format: str = "PNG") -> None: """Set the image from various input formats (mutates self). @@ -764,7 +884,6 @@ def set_image(self, image: Any, format: str = "PNG") -> None: self.image_data = result.image_data self.media_type = result.media_type - @dataclass class PDFContent(ContentBlock): """PDF content block""" @@ -936,7 +1055,6 @@ def from_file(cls, filepath: str, mime_type: Optional[str] = None): is_binary=is_binary ) - # Union type alias for common content types (for type hints) # Note: ContentBlock remains the abstract base class for inheritance ContentBlockUnion = Union[TextContent, ImageContent, PDFContent, FileContent] diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index e4a5dfc6..e1435ed6 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -7,7 +7,7 @@ import json from typing import Any, List, Dict, Union, Tuple, Optional from dataclasses import dataclass, field, asdict -from opto.optimizers.optoprime import OptoPrime, FunctionFeedback +from opto.optimizers.optoprime import OptoPrime, node_to_function_feedback from opto.trace.utils import dedent from opto.optimizers.utils import truncate_expression, extract_xml_like_data from opto.trace.nodes import ParameterNode, Node, MessageNode, is_image @@ -493,12 +493,16 @@ def __init__( # Build content based on mode if images is not None: - self._build_from_template(*args, images=images, format=format) + if len(args) != 1 or not isinstance(args[0], str): + raise ValueError( + "Template mode requires exactly one template string as the first argument. " + f"Got {len(args)} arguments." 
+                )
+            self._build_from_template(args[0], images=images, format=format)
         elif args:
-            self._build_from_variadic(*args, format=format)
+            self._build_from_variadic(*args)
 
-    def _build_from_variadic(self, *args, format: str = "PNG") -> None:
+    def _build_from_variadic(self, *args) -> None:
         """Populate self from variadic arguments.
 
         Each argument is either text (str) or an image source.
@@ -510,18 +514,17 @@ def _build_from_variadic(self, *args, format: str = "PNG") -> None:
-            format: Image format for numpy arrays
         """
         for arg in args:
-            if isinstance(arg, str):
-                # Check if it could be an image URL or file path
-                image_content = ImageContent.build(arg, format=format)
-                if not image_content.is_empty():
-                    self.append(image_content)
-                else:
-                    # It's just text
-                    self.append(arg)
+            # for future expansion, we can check if the string is any special content type
+            # by is_empty() on special ContentBlock subclasses
+            image_content = ImageContent.build(arg)
+            if not image_content.is_empty():
+                self.append(image_content)
+            else:
+                self.append(arg)
 
     def _build_from_template(
             self,
             template: str,
             images: List[Any],
             format: str = "PNG"
     ) -> None:
         """Populate self from template with [IMAGE] placeholders.
 
         Each [IMAGE] placeholder in the template is replaced
         by images from the images list in order.
 
         Args:
             template: Template string containing [IMAGE] placeholders
             images: List of image sources to insert at placeholders
             format: Image format for numpy arrays
 
         Raises:
             ValueError: If placeholder count doesn't match the number of images.
         """
         placeholder = DEFAULT_IMAGE_PLACEHOLDER
 
         # Count placeholders
         placeholder_count = template.count(placeholder)
         if placeholder_count != len(images):
             raise ValueError(
                 f"Number of {placeholder} placeholders ({placeholder_count}) "
                 f"does not match number of images ({len(images)})"
             )
 
         # Split template by placeholder and interleave with images
         parts = template.split(placeholder)
 
         for i, part in enumerate(parts):
             if part:  # Add text part if non-empty
                 self.append(part)
 
             # Add image after each part except the last
             if i < len(images):
                 image_content = ImageContent.build(images[i], format=format)
                 if image_content is None:
                     raise ValueError(
                         f"Could not convert image at index {i} to ImageContent: {type(images[i])}"
                     )
                 self.append(image_content)
+
+    @classmethod
+    def ensure(cls, *args, **kwargs) -> 'Content':
+        """Ensure the value is a Content object."""
+        if len(args) == 1 and isinstance(args[0], Content):
+            return args[0]
+        return cls(*args, **kwargs)
 
+# we provide two aliases for the Content class for semantic convenience
+Context = Content
+Feedback = Content
+
 class OptoPrimeV3(OptoPrime):
     # This is generic representation prompt, which just explains how to read the problem.
     representation_prompt = dedent(
@@ -683,7 +684,7 @@ def __init__(
         super().__init__(parameters, *args, propagator=propagator, **kwargs)
         self.truncate_expression = truncate_expression
 
-        self.problem_context: Optional[Content] = None
+        self.problem_context: Optional[Content] = problem_context
 
         self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False
         self.ignore_extraction_error = ignore_extraction_error
 
     def add_context(self, *args, images: Optional[List[Any]] = None, format: str = "PNG"):
         """Add context to the optimizer, supporting both text and images.
-        
+
         Two usage patterns are supported:
-        
+
         **Usage 1: Variadic arguments (alternating text and images)**
-        
+
             optimizer.add_context("text part 1", image_link, "text part 2", image_file)
-        
+
         Each argument is either a string (text) or an image source.
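
            # e.g., mixing literal text with an image path (the path here is hypothetical):
            optimizer.add_context("Here is the current board state:", "board.png")
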
- + **Usage 2: Template with placeholders** - + optimizer.add_context( - "text part 1 [IMAGE] text part 2 [IMAGE]", + "text part 1 [IMAGE] text part 2 [IMAGE]", images=[image_link, image_file] ) - + The text contains `[IMAGE]` placeholders that are replaced by images from the `images` list in order. The number of placeholders must match the number of images. @@ -998,6 +999,62 @@ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node", return blocks + def summarize(self): + """Aggregate feedback from parameters into a structured summary. + + Collects and organizes feedback from all trainable parameters into + a FunctionFeedback structure suitable for problem representation. + + Returns + ------- + FunctionFeedback + Structured feedback containing: + - variables: Trainable parameters with values and descriptions + - inputs: Non-trainable root nodes + - graph: Topologically sorted function calls + - others: Intermediate computation values + - output: Final output values + - documentation: Function documentation strings + - user_feedback: Aggregated user feedback + + Notes + ----- + The method performs several transformations: + 1. Aggregates feedback from all trainable parameters + 2. Converts the trace graph to FunctionFeedback structure + 3. Separates root nodes into variables (trainable) and inputs (non-trainable) + 4. Preserves the computation graph and intermediate values + + Parameters without feedback (disconnected from output) are still + included in the summary but may not receive updates. + """ + # Aggregate feedback from all the parameters + feedbacks = [ + self.propagator.aggregate(node.feedback) + for node in self.parameters + if node.trainable + ] + summary = sum(feedbacks) # TraceGraph + # Construct variables and update others + # Some trainable nodes might not receive feedback, because they might not be connected to the output + summary = node_to_function_feedback(summary) + # Classify the root nodes into variables and others + # summary.variables = {p.py_name: p.data for p in self.parameters if p.trainable and p.py_name in summary.roots} + + trainable_param_dict = {p.py_name: p for p in self.parameters if p.trainable} + summary.variables = { + py_name: data + for py_name, data in summary.roots.items() + if py_name in trainable_param_dict + } + summary.inputs = { + py_name: data + for py_name, data in summary.roots.items() + if py_name not in trainable_param_dict + } # non-variable roots + + return summary + def construct_prompt(self, summary, mask=None, *args, **kwargs): """Construct the system and user prompt. @@ -1034,7 +1091,7 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs): ) user_content_blocks.append(example_text) - # Add contecxt here + # Add context here user_content_blocks.append(self.user_prompt_context_template.format( user_prompt_context=self.problem_context, )) @@ -1052,7 +1109,7 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs): return system_prompt, user_content_blocks - def problem_instance(self, summary, mask=None): + def problem_instance(self, summary: FunctionFeedback, mask=None): """Create a ProblemInstance from the summary. 
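
        User feedback is normalized with Content.ensure below, so a plain
        string and multimodal (image-bearing) feedback are handled uniformly.
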
Args: @@ -1122,8 +1179,8 @@ def problem_instance(self, summary, mask=None): inputs=inputs_content, outputs=outputs_content, others=others_content, - feedback=ContentBlockList.ensure( - summary.user_feedback) if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else ContentBlockList(), + feedback=Content.ensure( + summary.user_feedback) if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else Content(""), optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) From 781b7ce4f580e1d7380f132b6c939dfa92c592b5 Mon Sep 17 00:00:00 2001 From: windweller Date: Wed, 17 Dec 2025 16:48:38 -0500 Subject: [PATCH 40/51] remove not needed files --- docs/tutorials/minibatch.ipynb | 834 ------------------------ opto/features/inference/dspy_example.py | 112 ---- 2 files changed, 946 deletions(-) delete mode 100644 docs/tutorials/minibatch.ipynb delete mode 100644 opto/features/inference/dspy_example.py diff --git a/docs/tutorials/minibatch.ipynb b/docs/tutorials/minibatch.ipynb deleted file mode 100644 index 95076033..00000000 --- a/docs/tutorials/minibatch.ipynb +++ /dev/null @@ -1,834 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Batch Optimization\n", - "\n", - "We provide an example of how to update parameters on a batch of data. In these toy examples, we show different ways to update parameters of functions on data containing multiple inputs. For simplicity, we consider batch update without random sampling." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%pip install trace-opt ipywidgets" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As a preamble, the code below provides a way to specify your API_KEY for calling LLMs using LiteLLM as part of this tutorial notebook. Alternatively, provide the keys by setting environment variables or loading LiteLLM config files." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import ipywidgets as widgets\n", - "from IPython.display import display\n", - "\n", - "# Function to save the environment variable and API key\n", - "def save_env_variable(env_name, api_key):\n", - " # Validate inputs\n", - " if not env_name.strip():\n", - " print(\"⚠️ Environment variable name cannot be empty.\")\n", - " return\n", - " if not api_key.strip():\n", - " print(\"⚠️ API key cannot be empty.\")\n", - " return\n", - " \n", - " # Store the API key as an environment variable\n", - " os.environ[env_name] = api_key\n", - " globals()[env_name] = api_key # Set it as a global variable\n", - " print(f\"✅ API key has been set for environment variable: {env_name}\")\n", - "\n", - "# Create the input widgets\n", - "env_name_input = widgets.Text(\n", - " value=\"OPENAI_API_KEY\", # Default value\n", - " description=\"Env Name:\",\n", - " placeholder=\"Enter env variable name (e.g., MY_API_KEY)\",\n", - ")\n", - "\n", - "api_key_input = widgets.Password(\n", - " description=\"API Key:\",\n", - " placeholder=\"Enter your API key\",\n", - ")\n", - "\n", - "# Create the button to submit the inputs\n", - "submit_button = widgets.Button(description=\"Set API Key\")\n", - "\n", - "# Display the widgets\n", - "display(env_name_input, api_key_input, submit_button)\n", - "\n", - "# Callback function for the button click\n", - "def on_button_click(b):\n", - " env_name = env_name_input.value\n", - " api_key = api_key_input.value\n", - " save_env_variable(env_name, api_key)\n", - "\n", - "# Attach the callback to the button\n", - "submit_button.on_click(on_button_click)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we consider a small linear regression problem. To perform updates on multiple inputs at a time, here we just compute the loss for each input and then sum it up, and perform one `backward` call to tell the optimizer to minimize the loss. Since the optimizer is capable of seeing the graph, it can understand how different inputs and labels are paired and evaluated by the loss function." - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [], - "source": [ - "import random\n", - "import numpy as np\n", - "\n", - "random.seed(0)\n", - "np.random.seed(0)\n", - "\n", - "from opto import trace\n", - "from opto.optimizers import OptoPrime\n", - "\n", - "\n", - "def true_fun(x):\n", - " return 2*x - 3\n", - "\n", - "inputs = [3, 2, 1, 5, 4]\n", - "outputs = [true_fun(x) for x in inputs]\n", - "N = len(inputs)\n", - "\n", - "\n", - "@trace.bundle()\n", - "def loss(y_hat, y):\n", - " \"\"\" A least squares loss function. 
-    "    \"\"\" A least squares loss function. \"\"\"\n",
-    "    return (y_hat - y) ** 2\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 34,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iteration 0 Loss: 85\n",
-      "Iteration 1 Loss: 85\n",
-      "Iteration 2 Loss: 10\n",
-      "Iteration 3 Loss: 15\n",
-      "Iteration 4 Loss: 10\n",
-      "Iteration 5 Loss: 40\n",
-      "Iteration 6 Loss: 0\n",
-      "Iteration 7 Loss: 0\n",
-      "Iteration 8 Loss: 0\n",
-      "Iteration 9 Loss: 0\n",
-      "Iteration 10 Loss: 0\n",
-      "Iteration 11 Loss: 0\n",
-      "Iteration 12 Loss: 0\n",
-      "Iteration 13 Loss: 0\n",
-      "Iteration 14 Loss: 0\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "<base64 PNG data elided: matplotlib line plot of Loss vs. Iteration, decreasing from 85 to 0>",
-      "text/plain": [
-       "<Figure size 640x480 with 1 Axes>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "trace.GRAPH.clear()\n",
-    "\n",
-    "@trace.bundle(trainable=True)\n",
-    "def fun(x):\n",
-    "    \"\"\" A linear predictor function \"\"\"\n",
-    "    return 0\n",
-    "\n",
-    "def compute_loss(inputs, outputs):\n",
-    "    l = 0\n",
-    "    for x,y in zip(inputs, outputs):\n",
-    "        y_hat = fun(x)\n",
-    "        l += loss(y_hat, y)\n",
-    "    return l\n",
-    "\n",
-    "optimizer = OptoPrime(fun.parameters())\n",
-    "\n",
-    "ls = []\n",
-    "for i in range(15):\n",
-    "    try:\n",
-    "        l = compute_loss(inputs, outputs)\n",
-    "        target = l\n",
-    "        feedback = 'Minimize loss'\n",
-    "        print(f'Iteration {i} Loss: {l.data}')\n",
-    "        ls.append(l.data)\n",
-    "    except trace.ExecutionError as e:\n",
-    "        target = e.exception_node\n",
-    "        feedback = str(e.exception_node.data)\n",
-    "\n",
-    "    optimizer.zero_feedback()\n",
-    "    optimizer.backward(target, feedback)\n",
-    "    optimizer.step()\n",
-    "\n",
-    "# plot ls\n",
-    "import matplotlib.pyplot as plt\n",
-    "plt.plot(ls)\n",
-    "plt.xlabel('Iteration')\n",
-    "plt.ylabel('Loss')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In contrast, if we update the parameter without batching, in a purely online fashion (one example at a time), the optimization results can sometimes be noisier."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iteration 0 Loss: 85\n",
-      "Iteration 1 Loss: 10\n",
-      "Iteration 2 Loss: 10\n",
-      "Iteration 3 Loss: 120\n",
-      "Iteration 4 Loss: 120\n",
-      "Iteration 5 Loss: 120\n",
-      "Iteration 6 Loss: 60\n",
-      "Iteration 7 Loss: 30\n",
-      "Iteration 8 Loss: 30\n",
-      "Iteration 9 Loss: 15\n",
-      "Iteration 10 Loss: 10\n",
-      "Iteration 11 Loss: 10\n",
-      "Iteration 12 Loss: 15\n",
-      "Iteration 13 Loss: 55\n",
-      "Iteration 14 Loss: 15\n"
-     ]
-    },
-    {
-     "data": {
"iVBORw0KGgoAAAANSUhEUgAAAjsAAAGwCAYAAABPSaTdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8ekN5oAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVTElEQVR4nO3deXxU5b0/8M+ZNckkM9nIBtlYww7KIuBCCxV3qFrFS90rtxWuIr+6tcW2KCLcqohaqK3XpVertldRqcUiIsq+CYIia0gCIZksJJNtksnM+f0xOSeJsiXMzHPOmc/79crrJZPtmwiTT77P93keSZZlGUREREQGZRJdABEREVE4MewQERGRoTHsEBERkaEx7BAREZGhMewQERGRoTHsEBERkaEx7BAREZGhWUQXoAWBQAClpaVISEiAJEmiyyEiIqJzIMsy6urqkJWVBZPp9P0bhh0ApaWlyM7OFl0GERERdUNJSQl69ep12tcz7ABISEgAEPxmOZ1OwdUQERHRufB4PMjOzlZ/jp8Oww6gLl05nU6GHSIiIp052wgKB5SJiIjI0Bh2iIiIyNAYdoiIiMjQGHaIiIjI0Bh2iIiIyNAYdoiIiMjQGHaIiIjI0Bh2iIiIyNAYdoiIiMjQGHaIiIjI0ISGnc8//xzXXnstsrKyIEkSVqxYob7O5/Ph4YcfxtChQ+FwOJCVlYXbbrsNpaWlnT5GdXU1ZsyYAafTicTERNx9992or6+P8FdCREREWiU07DQ0NGD48OF48cUXv/e6xsZG7Ny5E/PmzcPOnTvx7rvvYv/+/bjuuus6vd2MGTPw9ddfY/Xq1Vi5ciU+//xzzJw5M1JfAhEREWmcJMuyLLoIIHiJ13vvvYdp06ad9m22bduGMWPGoKioCDk5Odi3bx8GDRqEbdu2YdSoUQCAVatW4aqrrsKxY8eQlZV1Tp/b4/HA5XKhtraWF4EaTGNLK6obWkSXIVRinA3xdt75S0TGc64/v3X1DFhbWwtJkpCYmAgA2LRpExITE9WgAwCTJ0+GyWTCli1b8OMf//iUH6e5uRnNzc3qnz0eT1jrJjEq65vxgz98hjpvq+hShIqxmvDxnEuRm+IQXQoRkRC6CTterxcPP/wwbrnlFjW9lZWVIS0trdPbWSwWJCcno6ys7LQfa+HChfj9738f1npJvL3Ha1HnbYUkATZzdM7i+/wBeH0BbDxcxbBDRFFLF2HH5/PhpptugizLWLZs2Xl/vEcffRRz585V/+zxeJCdnX3eH5e0xV0X7N5d0q8HXr9rjOBqxFjwz2/w5y8Ksb+sTnQpRETCaD7sKEGnqKgIn376aac1uYyMDLjd7k5v39raiurqamRkZJz2Y9rtdtjt9rDVTNpQ0RZ20hKi9//1gIzgv5dvy7hUS0TRS9O9fSXoHDx4EJ988glSUlI6vX7cuHGoqanBjh071Mc+/fRTBAIBjB07NtLlksYoYadHFIedgowEAMD+sjpoZC8CEVHECe3s1NfX49ChQ+qfCwsLsWvXLiQnJyMzMxM33ngjdu7ciZUrV8Lv96tzOMnJybDZbBg4cCCuuOIK3HPPPVi+fDl8Ph9mz56N6dOnn/NOLDIud50XQHR3dvqmxcMkAScbfaioa0aaM0Z0SUREESe0s7N9+3aMHDkSI0eOBADMnTsXI0eOxGOPPYbjx4/jgw8+wLFjxzBixAhkZmaqLxs3blQ/xhtvvIGCggJMmjQJV111FS6++GK89NJLor4k0pD2Zazo/QEfYzUjLzU4mPwt53aIKEoJ7exMnDjxjK31c2m7Jycn48033wxlWWQQbi5jAQguZR2paMD+sjpc2r+H6HKIiCJO0zM7ROeDA8pBA9KVIWV2dogoOjHskCHVN7eiscUPgJ2dAcqQcjl3ZBFRdGLYIUNSujoOmxmOKL8qQdmRdbC8Hv4Ad2QRUfRh2CFDcnuCO7GivasDADnJcYi1mtHcGsDRqgbR5RARRRzDDhlSRT13YilMJgn90+MBgCcpE1FUYtghQ3J7uBOrI2Vuh0PKRBSNGHbIkJTODsNOkHJtxH5eG0FEUYhhhwyJnZ3OOl4bQUQUbRh2yJDaZ3YYdoD2Zayi6kY0trQKroaIKLIYdsiQuBurs9R4O1LjbZDl4BZ0IqJowrBDhsR7sb5vAJeyiChKMeyQ4fj8AVQ3tgAA0pzs7Ch4bQQRRSuGHTKcqvoWyDJgNklIjrOJLkczCnhtBBFFKYYdMhxlCSs13gaTSRJcjXZwGYuIohXDDhmOu47DyafSPz0BkgRU1regsm23GhFRNGDYIcPhcPKpxdrMyE2OA8DuDhFFF4YdMhx3W9jpEc/Oznfx2ggiikYMO2Q4ameHO7G+h9dGEFE0Ytghw+HMzunx2ggiikYMO2Q47TM7DDvfpSxjHSivRyAgC66GiCgyGHbIcNSZHYad78lLccBuMaHJ50dxdaPocoiIIoJhhwxFlmXuxjoDs0lCv/R4ABxSJqLowbBDhuLxtqK5NQCAnZ3TUa6N4NwOEUULhh0ylIq24eSEGAtirGbB1WgTr40gomjDsEOG4uZw8lnxrB0iijYMO2QoFRxOPiuls3O0sgFen19wNURE4cewQ4bC4eSz65FgR1KcFQEZOOSuF10OEVHYMeyQoXDb+dlJksSlLCKKKgw7ZCg8UPDcFPDaCCKKIgw7ZCi8KuLcsLNDRNGEYYcMhTM752YA78gioijCsEOGwpmdc9M/PRh23HXNONnQIrgaIqLwYtghw2hu9aOm0QeAMztnE2+3IDs5FgCXsojI+Bh2yDAq64MdCqtZQmKcVXA12td+bQSHlInI2Bh2yDDUAwXj7ZAkSXA12td+bQQ7O0RkbAw7ZBhuD3didQV3ZBFRtGDYIcNoH07mTqxzoXR2DpTVIRCQBVdDRBQ+DDtkGOq2cyc7O+ciL9UBm9mEhhY/jtc0iS6HiChsGHbIMNwdZnbo7KxmE3r3cADgUhYRGRvDDhkGOztdpw4pc0cWERkYww4ZRoVyVQQ7O+dsQNsdWezsEJGRMeyQYbR3djigfK4KeG0EEUUBhh0yBFmWUVHPqyK6Stl+fqSyAc2tfsHVEBGFB8MOGUJNow8+f3D7dGq8TXA1+pHpikFCjAX+gIzD7gbR5RARhQXDDhmCshMrMc4Ku8UsuBr9kCSpw0nKHFImImNi2CFDUOd1uITVZTxJmYiMjmGHDMFdx6siukvZkcUhZSIyKoYdMoT2zg53YnUVd2QRkdEx7JAhtN+Lxc5OV/VPD4adE7Ve1Db6BFdDRBR6DDtkCG7O7HSbK9aKLFewI7a/nN0dIjIeoWHn888/x7XXXousrCxIkoQVK1Z0er0sy3jssceQmZmJ2NhYTJ48GQcPHuz0NtXV1ZgxYwacTicSExNx9913o76+PoJfBWlBBWd2zssAXhtBRAYmNOw0NDRg+PDhePHFF0/5+sWLF2Pp0qVYvnw5tmzZAofDgSlTpsDr9apvM2PGDHz99ddYvXo1Vq5cic8//xwzZ86M1JdAGsFlrPPDayOIyMgsIj/5lVdeiSuvvPKUr5
NlGUuWLMFvfvMbTJ06FQDw+uuvIz09HStWrMD06dOxb98+rFq1Ctu2bcOoUaMAAM8//zyuuuoq/OEPf0BWVlbEvhYSiwPK54dDykRkZJqd2SksLERZWRkmT56sPuZyuTB27Fhs2rQJALBp0yYkJiaqQQcAJk+eDJPJhC1btpz2Yzc3N8Pj8XR6If3y+vyo87YCYGenu9RlrPI6yLIsuBoiotDSbNgpKysDAKSnp3d6PD09XX1dWVkZ0tLSOr3eYrEgOTlZfZtTWbhwIVwul/qSnZ0d4uopkpSujt1igjNGaLNSt/r0iIfFJKHO24rSWu/Z34GISEc0G3bC6dFHH0Vtba36UlJSIrokOg8dDxSUJElwNfpks5jQu4cDAIeUich4NBt2MjIyAADl5eWdHi8vL1dfl5GRAbfb3en1ra2tqK6uVt/mVOx2O5xOZ6cX0i9eFREaHFImIqPSbNjJz89HRkYG1qxZoz7m8XiwZcsWjBs3DgAwbtw41NTUYMeOHerbfPrppwgEAhg7dmzEayYxuBMrNDikTERGJXTAob6+HocOHVL/XFhYiF27diE5ORk5OTmYM2cOnnjiCfTr1w/5+fmYN28esrKyMG3aNADAwIEDccUVV+Cee+7B8uXL4fP5MHv2bEyfPp07saIId2KFxoB0hh0iMiahYWf79u34wQ9+oP557ty5AIDbb78dr776Kh566CE0NDRg5syZqKmpwcUXX4xVq1YhJqb9h9obb7yB2bNnY9KkSTCZTLjhhhuwdOnSiH8tJI7bw85OKCg7sg5X1MPnD8Bq1mzjl4ioS4SGnYkTJ55xm6skSZg/fz7mz59/2rdJTk7Gm2++GY7ySCcq6jmzEwq9kmIRb7egvrkVRyoa1PBDRKR3/NWNdM/NqyJCQpIk9E+PBwB8yx1ZRGQgDDuke8oyFmd2zp+yI4tzO0RkJAw7pGv+gIyqhhYAQJqTnZ3zxR1ZRGREDDuka9UNLfAHZEgSkOKwiS5H95Q5HZ61Q0RGwrBDuqZsO09x2GDh7qHzpnR2jtc0oc7rE1wNEVFo8KcD6ZoynJwazyWsUEiMsyG9bTnwQDm7O0RkDAw7pGvqgYJODieHCq+NICKjYdghXVOvimBnJ2Q4pExERsOwQ7rW3tlh2AkVXhtBREbDsEO6VsHOTsgpO7L2l9ed8YRzIiK9YNghXWNnJ/T6psXDbJJQ0+hTlwmJiPSMYYd0Tb0qgp2dkImxmpGXEgeAQ8pEZAwMO6Rr3I0VHgXqtRG8I4uI9I9hh3SrobkVDS1+ALwENNR4kjIRGQnDDumWMk8SZzMj3m4RXI2xDOD2cyIyEIYd0i11CYtdnZBTzto56K5Hqz8guBoiovPDsEO6pQ4nM+yEXHZSHOJsZrS0BnC0qlF0OURE54Vhh3SrvbPD4eRQM5kk9OPhgkRkEAw7pFvqVRHs7IRFgRp2uCOLiPSNYYd0q4JhJ6y4I4uIjIJhh3SLnZ3wKuhwbQQRkZ4x7JBucTdWeCmdneLqRjS2tAquhoio+xh2SLcquBsrrFLi7UiNt0OWgQPl9aLLISLqNoYd0qVWfwBVDS0AuBsrnNSlLA4pE5GOMeyQLlU1tECWAZMEJDtsossxLA4pE5ERMOyQLrk9wXmd1Hg7zCZJcDXGxWsjiMgIGHZIlyrqOa8TCQUMO0RkAAw7pEtKZ4c7scKrX1oCJCm4bKjsfiMi0huGHdIlXhURGbE2M/JSHADY3SEi/WLYIV3igYKRMyBdGVLmjiwi0ieGHdIltbPjZNgJNw4pE5HeMeyQLrmVAwXjGXbCjddGEJHeMeyQLlXUs7MTKUpn50B5HfwBWXA1RERdx7BDuiPLsrobq0c8B5TDLTfFgRirCV5fAMXVjaLLISLqMoYd0p265lY0twYAcEA5EswmCf3SeG0EEekXww7pjtLVSbBbEGszC64mOvDaCCLSM4Yd0h1lJ1YPzutEDE9SJiI9Y9gJo5LqRqw/WImmFr/oUgyFO7Eij9vPiUjPGHbC6PplG/HTl7fgoJs/IEKp/YwdDidHihJ2jlY1wOtjeCcifWHYCaO8lDgAwNEq7mAJJXUZi52diOkRb0eyw4aADBwsrxddDhFRlzDshFFu251CRZUNgisxFjdPT444SZJ4bQQR6RbDThixsxMe7ZeAMuxEEud2iEivGHbCSO3sVLGzE0rqgDLDTkTx2ggi0iuGnTDKaws77OyEVntnhwPKkcSzdohIrxh2wiinbRmrsr4Z9c2tgqsxhpbWAE42+gCwsxNp/dtmdirqmlHd0CK4GiKic8ewE0auWCuSHTYAXMoKlcq2C0CtZgmJsVbB1UQXh92CnORggOeQMhHpCcNOmOW2dXeKuJQVEspOrNR4O0wmSXA10YdDykSkRww7YdY+t8POTihwJ5ZYvDaCiPSIYSfM1M5OJTs7ocCdWGJxSJmI9IhhJ8zY2Qkt9fRk7sQSQunsHCivQyAgC66GiOjcaDrs+P1+zJs3D/n5+YiNjUWfPn3w+OOPQ5bbn2RlWcZjjz2GzMxMxMbGYvLkyTh48KDAqjvjzE5oudWww86OCHkpDtjMJjS2+HHsZJPocoiIzommw86iRYuwbNkyvPDCC9i3bx8WLVqExYsX4/nnn1ffZvHixVi6dCmWL1+OLVu2wOFwYMqUKfB6vQIrb6d0dso8Xt5+HgJuD2d2RLKYTeiTFg+AO7KISD80HXY2btyIqVOn4uqrr0ZeXh5uvPFGXH755di6dSuAYFdnyZIl+M1vfoOpU6di2LBheP3111FaWooVK1aILb5NYpwVzhgLAKC4mt2d81VRz7AjGoeUiUhvNB12xo8fjzVr1uDAgQMAgN27d2P9+vW48sorAQCFhYUoKyvD5MmT1fdxuVwYO3YsNm3adNqP29zcDI/H0+klXCRJQl4q53ZCpcLDAWXR1CFlXhtBRDphEV3AmTzyyCPweDwoKCiA2WyG3+/HggULMGPGDABAWVkZACA9Pb3T+6Wnp6uvO5WFCxfi97//ffgK/47cFAe+OlbLgwXPkyzL7Z0dJweUReFZO0SkN5ru7Lzzzjt444038Oabb2Lnzp147bXX8Ic//AGvvfbaeX3cRx99FLW1tepLSUlJiCo+Nd5+Hho1jT74/MHh9NR4m+BqopeyjFVY2YDmVs6hEZH2abqz8+CDD+KRRx7B9OnTAQBDhw5FUVERFi5ciNtvvx0ZGRkAgPLycmRmZqrvV15ejhEjRpz249rtdtjtkVsG4e3noaF0dRLjrLBbzIKriV4Zzhg4YyzweFtxyF2PwVku0SUREZ2Rpjs7jY2NMJk6l2g2mxEIBAAA+fn5yMjIwJo1a9TXezwebNmyBePGjYtorWeidnZ4sOB5UXZi9YjnvI5IkiShIMMJgEtZRKQPmu7sXHvttViwYAFycnIwePBgfPnll3jmmWdw1113AQg+6c6ZMwdPPPEE+vXrh/z8fMybNw9ZWVmYNm2a2OI7UDo7pbVNaG71syvRTRX1weHkNCfDjmgDMhKw9Wg1ww4R6YKmw87zzz+PefPm4d5774Xb7UZWV
hb+8z//E4899pj6Ng899BAaGhowc+ZM1NTU4OKLL8aqVasQE6OdAdbUeBscNjMaWvwoqW5C37ZzSqhr2NnRDl4bQUR6oumwk5CQgCVLlmDJkiWnfRtJkjB//nzMnz8/coV1kSRJyE1x4JsTHhRVNTDsdJN6CSh3YgnHs3aISE80PbNjJHmp3JF1vtSrItjZEa5/W9gp83hR2+gTXA0R0Zkx7EQId2Sdv/bODsOOaM4YK3omxgLgtRFEpH0MOxHCs3bOn7uu7fRkdnY0QT1ckCcpE5HGMexECDs758/Nzo6mcEiZiPSCYSdClNvPj51sgs8fEFyN/nh9ftR5WwEAPRI4oKwFHFImIr1g2ImQtAQ7Yqwm+AMyjp9sEl2O7ijzOjaLSb1FnsRSOjsHyuogy7LgaoiITo9hJ0JMJgm5ybz9vLvUJawEOyRJElwNAUDv1HhYTBLqmltxvIYBnoi0i2EngnLbhpSLOKTcZRXKcHIC53W0wmYxoU+P4JlRXMoiIi1j2ImgvFR2drqrokNnh7SDQ8pEpAcMOxHEzk73qQcKMuxoygAOKRORDjDsRJCyI4udna5r7+xwJ5aWcEcWEekBw04EKZ2dkupG+APcvdIV7Oxok9LZOVxRj5ZWHqlARNrEsBNBma5Y2Mwm+PwySrl7pUs4s6NNPRNjkWC3oDUg40hlvehyiIhOiWEngswmCdnJwfuEOLfTNW7uxtIkSZLUS0G5lEVEWsWwE2Gc2+m6QEBGZX0LAM7saBF3ZBGR1jHsRBjvyOq66sYW+AMyJAlIibeJLoe+g0PKRKR1DDsRlpfK28+7yu0Jzuskx9lgNfOvrNYMSGfYISJt40+OCGNnp+sq6rkTS8sKMpwAgOM1TfB4fYKrISL6PoadCMvrcLBggNvPz4nbw+FkLXPFWZHhDM5SHWB3h4g0iGEnwnomxsJiktDcGkB52w4jOjOls8PhZO3ikDIRaRnDToRZzCb0SuL2865QZnbY2dEuDikTkZYx7AjAuZ2uae/sMOxoFe/IIiItY9gRQJnb4Y6sc1PBzo7mtS9jeSDLnEUjIm1h2BGAnZ2uYWdH+/qmxcNskuDxtqLMw1k0ItIWhh0B1LN2KtnZORfcjaV9dosZ+anBEM8hZSLSGoYdATp2dtjyP7OG5lY0tPgBAGlO7sbSMs7tEJFWMewI0CspFiYJaGjxq3c+0akpt53HWs1w2MyCq6EzKeBJykSkUQw7AtgtZmQlKtvPObdzJuq8jtMOSZIEV0NnwrN2iEiruhV2SkpKcOzYMfXPW7duxZw5c/DSSy+FrDCja7/9nHM7Z6KesRPPeR2tU66NOOyuh88fEFwNEVG7boWd//iP/8DatWsBAGVlZfjRj36ErVu34te//jXmz58f0gKNKle9NoKdnTNxt50yneZk2NG6XkmxiLOZ0eIP4Ggl/14TkXZ0K+zs3bsXY8aMAQC88847GDJkCDZu3Ig33ngDr776aijrMyx2ds6NMrPDqyK0z2SS0D+dS1lEpD3dCjs+nw92e/A37U8++QTXXXcdAKCgoAAnTpwIXXUGxs7OuXHX8UBBPeG1EUSkRd0KO4MHD8by5cvxxRdfYPXq1bjiiisAAKWlpUhJSQlpgUaV13YmSWElt5+fSQXDjq5wSJmItKhbYWfRokX405/+hIkTJ+KWW27B8OHDAQAffPCBurxFZ5aTHOzs1HlbUdPoE1yNdrGzoy/qWTvlHsGVEBG1s3TnnSZOnIjKykp4PB4kJSWpj8+cORNxcXEhK87IYqxmZLpicKLWi6NVDUhy2ESXpEntMzsMO3qg7MgqqW5CfXMr4u3deoohIgqpbnV2mpqa0NzcrAadoqIiLFmyBPv370daWlpICzSy9rkdDimfSqs/gKoGdnb0JNlhU/9fHSjnUhYRaUO3ws7UqVPx+uuvAwBqamowduxYPP3005g2bRqWLVsW0gKNrH1HFoeUT6W6oQWyDJgkIMXBsKMXHFImIq3pVtjZuXMnLrnkEgDAP/7xD6Snp6OoqAivv/46li5dGtICjaz9jix2dk5FmddJibfDbOLpyXoxgNdGEJHGdCvsNDY2IiEh+IT273//G9dffz1MJhMuuugiFBUVhbRAI8trW8ZiZ+fUOK+jT+07sjikTETa0K2w07dvX6xYsQIlJSX4+OOPcfnllwMA3G43nE5nSAs0MnZ2zkw5PZnzOvqiDCnvL6vjsQpEpAndCjuPPfYYfvnLXyIvLw9jxozBuHHjAAS7PCNHjgxpgUamDChXN7Sgtonbz7+LnR196pceD5MEnGz0qf8PiYhE6lbYufHGG1FcXIzt27fj448/Vh+fNGkSnn322ZAVZ3QOu0XtWhSzu/M9PGNHn2KsZnX4nocLEpEWdCvsAEBGRgZGjhyJ0tJS9Qb0MWPGoKCgIGTFRQPO7ZyecuM578XSnwHckUVEGtKtsBMIBDB//ny4XC7k5uYiNzcXiYmJePzxxxEIBEJdo6G1z+0w7HxXRT2XsfSK10YQkZZ063jTX//613j55Zfx1FNPYcKECQCA9evX43e/+x28Xi8WLFgQ0iKNrL2zw2Ws7+KAsn4V8NoIItKQboWd1157DX/5y1/U284BYNiwYejZsyfuvfdehp0uYGfn1GRZ7jCgzGUsvRnQtiPrYHk9/AGZ5yQRkVDdWsaqrq4+5WxOQUEBqqurz7uoaNJ+ijI7Ox3VNbfC6wsuibKzoz85yXGIsZrQ3BrgPBoRCdetsDN8+HC88MIL33v8hRdewLBhw867qGiS07aMVVHXjIbmVsHVaIfS1UmwWxBrMwuuhrrKbJLQnycpE5FGdGsZa/Hixbj66qvxySefqGfsbNq0CSUlJfjoo49CWqDRuWKtSHbYUN3QgqKqRgzK4qGMQPtOLHZ19GtAegK+OlaLb8vqcNXQTNHlEFEU61Zn57LLLsOBAwfw4x//GDU1NaipqcH111+Pr7/+Gn/9619DXaPhtd9+zna/QtmJxbCjX+3bzzmkTERidauzAwBZWVnfG0TevXs3Xn75Zbz00kvnXVg0yUtx4MviGs7tdOD2cCeW3nW8NoKISKRuHyoYKcePH8dPf/pTpKSkIDY2FkOHDsX27dvV18uyjMceewyZmZmIjY3F5MmTcfDgQYEVdx07O9/XfsYOd2LpldLZKapuRGML59GISBxNh52TJ09iwoQJsFqt+Ne//oVvvvkGTz/9NJKSktS3Wbx4MZYuXYrly5djy5YtcDgcmDJlCrxer8DKu6Z9RxbDjqKCMzu61yPBjhSHDbIc3IJORCRKt5exImHRokXIzs7GK6+8oj6Wn5+v/rcsy1iyZAl+85vfYOrUqQCA119/Henp6VixYgWmT59+yo/b3NyM5ub2Cwo9HrEzBe2dHS5jKdy8BNQQBmQkYOPhKuwvq8Pw7ETR5RBRlOpS2Ln++uvP+PqamprzqeV7PvjgA0yZMgU/+clPsG7dOvXQwnvuuQcAUFhYiLKyMkyePFl9H5fLhbFjx2LTpk2nDTsLFy7E73//+5DWej6Uzs6JWi+8Pj9irNxqXcFLQA1h
aC8XNh6uwucHK3DT6GzR5RBRlOrSMpbL5TrjS25uLm677baQFXfkyBEsW7YM/fr1w8cff4xf/OIXuO+++/Daa68BAMrKygAA6enpnd4vPT1dfd2pPProo6itrVVfSkpKQlZzdyTGWeGMCebO4mp2d4D2qyLSnAw7enZ125bzT/aVo57nSBGRIF3q7HRcToqEQCCAUaNG4cknnwQAjBw5Env37sXy5ctx++23d/vj2u122O3a+SEqSRLyUh346lgtjlY2qIexRauW1gBONvoAcEBZ74b2dCE/1YHCygas/qYMPx7ZS3RJRBSFND2gnJmZiUGDBnV6bODAgSguLgYAZGRkAADKy8s7vU15ebn6Or1ovyOLnZ3Ktp1YFpOExFir4GrofEiShOuGZwEA3t9VKrgaIopWmg47EyZMwP79+zs9duDAAeTm5gIIDitnZGRgzZo16us9Hg+2bNminuysF+23n3NHVsd5HRMvkNS960YEw84XBytRVd98lrcmIgo9TYedBx54AJs3b8aTTz6JQ4cO4c0338RLL72EWbNmAQj+1jhnzhw88cQT+OCDD7Bnzx7cdtttyMrKwrRp08QW30Xs7LRzczjZUPr0iMfQni74AzI+2nNCdDlEFIU0HXZGjx6N9957D3/7298wZMgQPP7441iyZAlmzJihvs1DDz2E//qv/8LMmTMxevRo1NfXY9WqVYiJ0desBzs77Sq47dxwpo7gUhYRiaPpc3YA4JprrsE111xz2tdLkoT58+dj/vz5Eawq9JTOTmlNE5pb/bBbonf7ubITi50d47hmWBYWfLQP24tOoqS6EdnJcaJLIqIoounOTjRJjbfBYTMjIAPHTjaJLkeo9pkdfXXn6PQyXDG4KD8FAPDhV+zuEFFkMexohCRJHeZ2onspizM7xqQsZX3ApSwiijCGHQ3JS22b26mM7iFlzuwY05VDMmE1S/i2rA7flom9ooWIogvDjoawsxPEqyKMyRVnxcQBaQDY3SGiyGLY0ZD2HVnR29mRZZmdHQNTl7J2l0KWZcHVEFG0YNjREHZ2gNomH1r8AQBAajzDjtFMKkiHw2bGsZNN2FlcI7ocIooSDDsaotx+fuxkE3xtP/CjjTKc7Iq18vZ3A4q1mTFlcPAqlw92HRdcDVF0WPHlcWw8VCm6DKEYdjQkLcGOGKsJrQEZpTXRuf2cS1jGp1wfsfKrE2iN0lBPFCn7Tngw5+1duOf17Whu9YsuRxiGHQ0xmSTkJge7O9E6t8MDBY1vQt9UpDhsqGpowYbDVaLLITK0Lw5WAAAaWvz4MoqXjhl2NCanbUg5Wud22NkxPqvZhKuHZQIA3udSFlFYrT/U/gvFhiheymLY0Rh1R1aUnrXj9nDbeTRQdmV9vLcMXl/0ttaJwqm51Y9thdXqnxl2SDOifUdWRb3S2eFVEUZ2QU4SeiXFoqHFjzX73KLLITKkL4tr0OTzw2ELbvbYfawWdV6f4KrEYNjRGGVHVrTefs7OTnSQJAnXDVduQudSFlE4KDuwJg1MR15KHPwBGVuOVJ/lvYyJYUdjctuWsUqqm+APRN+ha+2dHYYdo5s6oicA4LP9FahtjM7fNonCaX1b2Lm4byom9E3t9Fi0YdjRmKzEWFjNElr8AZyojb7t524Pd2NFiwEZCSjISECLP4BVX58QXQ6RodR5fdh9rBYAML5vihp2Nh5m2CENMJskZCcrO7Kia0jZ6/PD420FwJmdaKGcufM+78oiCqktR6rhD8jITYlDr6Q4jOudAkkCDpTXq79URhOGHQ2K1rkdZdu5zWKCM9YiuBqKhGuHBcPOpiNVKI/CJ2CicNnQ1sFROjpJDhsGZzkBABuj8Hwrhh0Nyk2Jzs6OclVEj3g7JEkSXA1FQnZyHEblJkGWgQ93s7tDFCrKNvMJfVLVx5T/jsa5HYYdDVI7O5XR2dnhvE506XgTOhGdP3edFwfK6yFJwLg+Kerj6tzOoUrIcnRtgGHY0aBo7exUtF0VwZ1Y0eWqoZkwmyR8dawWRyrqRZdDpHsb205NHpTpRLLDpj4+Oi8ZNrMJpbVeFEbZL9MMOxqkdHaKqhsQiKLt5+pVEU6GnWiSEm/HJf2Cv3Gyu0N0/jZ02HLeUazNjAtyE4NvE2VzOww7GtQzKRZmkwSvL6DOsUSD9pkd7sSKNupS1q7SqGuvE4WSLMtq2Bn/nbADtM/tbDgYXXM7DDsaZDWb0CspFkB07chiZyd6/WhQBmKsJhypbMDe4x7R5RDp1tGqRpTWemEzmzA6L+l7r5/Q1kXddKQqqg6uZdjRqGi8I6vjbiyKLvF2CyYPTAfA6yOIzofS1RmZk4g42/eP8BjW04UEuwW1TT58XVob6fKEYdjRKPX28ygaUmZnJ7op10d8+FVpVP3GSRRKp5vXUVjMJoztndL2ttEzt8Owo1HR1tkJBGRU1nPreTS7rH8PuGKtKPc0Y0th9DwJE4WKPyBj05Hgv51TzesoJvQNhp1oujqCYUej1M5OZXR0dk42tqC17bf5VC5jRSWbxYQrh2QACA4qE1HXfFPqQU2jD/F2C4b3cp327ZSuz9bCanh9/kiVJxTDjkZ17OxEw+4UZV4n2WGD1cy/ltFKuSvroz0n0NwaHU/CRKGiXBFxUe9kWM7wPNo3LR5pCXY0twaws/hkpMoTij9VNCo7ORaSBDS0+FFZ3yK6nLBT53W4hBXVxuanIN1ph8fbinX7K0SXQ6Qr6hURZ1jCAgBJktS32RAlV0cw7GiU3WJGliu4/Twa5nbcvCqCAJhNkno56Ps8YJDonHl9fmw7Wg3g7GEHAMb3ia4hZYYdDctLjZ4dWe62qyIYdkjZlfXJN+Wob24VXA2RPuwsPgmvL4AeCXb0S4s/69srgeirYzWobfKFuzzhGHY0LJp2ZPESUFIM6elE71QHmlsD+PfXZaLLIdIF5T6sCX1SIEnSWd8+KzEWvVMdCMjAliPG7+4w7GhYNJ2141ZndnhVRLSTJEkdVH6fu7KIzsn6c5zX6Sia5nYYdjQsGjs7HFAmALhueDDsrD9UqZ6/RESn5vH68NWxGgBdDTttcztRcCkow46GKbefF1Yaf/s5l7Goo9494jGslwv+gIyP9pwQXQ6Rpm0+XIWADPROdSArMfac329c71RIEnDIXY+yWm8YKxSPYUfDcpKDy1h13lbUNBp7gIydHfoupbvDpSyiM9t4WDk1OaVL7+eKs2JoT1fbxzD2UhbDjobF2szIcAZnWIx8+3ljS6u664adHVJcOzwLkgTsKDqJkmrjz60Rddf6s9yHdSbKstd6g8/tMOxoXG7bkHKRgYeUla5OrNWMePv3b+ml6JTujMG4tgsLP+CZO0SnVO7x4pC7HpIEXNS7a50dAJjQJxh2Nh6qMvS4BMOOxilzO0bu7HQ8UPBctkxS9JjatiuLd2URnZqyk2poTxcS42xdfv9ReUmwWUwo83hxuMK4P2cYdjQuNzV6Ojuc16HvumJwJmxmE/aX1+HbMo/ocog0RzkBeXyfri9hAUCM1YxRuUkAjD23w7CjcVHR2fHw9GQ
6NVecFRMH9ADAQWWi75JlucN9WF1fwlKoczsHGXZIkKiY2alnZ4dOT7k+4oNdpQgEjDtTQNRVRyobUObxwmYxYXRecrc/jhJ2Nh2pgt+g/8YYdjROOViwuqHFsPeXuD08Y4dOb9LANDhsZhyvacLO4pOiyyHSDKWrc2FOEmKs5m5/nKE9XUiIsaDO24o9x2tDVZ6mMOxoXLzdgtT4YAgoNmh3h1dF0JnEWM2YMiQDAJeyiDpSws7F/bo3r6MwmyR156NRr45g2NGB9juyjDm3w9OT6WyUpayP9pyAzx8QXA2ReP6AjE3KYYJ9uj+vo1ACE8MOCWP0O7LcDDt0FhP6pCDFYUNVQ4thn4yJumLv8Vp4vK1IiLGopyCfD2U31/aik/D6/Of98bSGYUcHjHz7uT8go7qhbRnLybBDp2Yxm3DNsEwAPHOHCAA2tG0Tv6h3Cizm8/9R3qeHAxnOGLS0BrD9qPFm4xh2dCA31bidnar6ZgRkwCQBKQ6GHTq969qWsj7+ugxNLcb7zZOoKzacxxURpyJJknq31gYDnrfDsKMDRu7sKEtYKfF2mE08PZlO74KcRPRKikVDix9rvi0XXQ6RMF6fH9vaui/nc77OdynByYhLxQw7OpCbHOzsVNQ1o6HtwkyjUIeT49nVoTOTJEm9PoK7siia7Sg6iZbWANKddvTpER+yj6uct7PneC1qG4111Imuws5TTz0FSZIwZ84c9TGv14tZs2YhJSUF8fHxuOGGG1Bebqzf+lxxViTFWQEY73BB9aoIzuvQOVB2ZX223224J2Oic6WemtwnNaT3CaY7Y9A3LR6yDGw6Yqzujm7CzrZt2/CnP/0Jw4YN6/T4Aw88gA8//BB///vfsW7dOpSWluL6668XVGX4GHVHlruu7aoIdnboHPRPT0BBRgJ8fhn/2ntCdDlEQrRfERGaeZ2OJvRRztupCvnHFkkXYae+vh4zZszAn//8ZyQlJamP19bW4uWXX8YzzzyDH/7wh7jwwgvxyiuvYOPGjdi8efNpP15zczM8Hk+nF60z6twOOzvUVUp3h0tZFI1qG33qKcdhCTsGndvRRdiZNWsWrr76akyePLnT4zt27IDP5+v0eEFBAXJycrBp06bTfryFCxfC5XKpL9nZ2WGrPVSM29nhzA51zbXDg1vQNxdWoazWK7gaosjadKQKAbltq7gr9KfOj+2dApMUvHfrRG1TyD++KJoPO2+99RZ27tyJhQsXfu91ZWVlsNlsSExM7PR4eno6ysrKTvsxH330UdTW1qovJSUloS475PJSjXmKcntnh1dF0LnplRSH0XlJkGVg5Vfs7lB02Xg4fEtYAOCKtWJYr0QAxlrK0nTYKSkpwf3334833ngDMTGh+2Fot9vhdDo7vWhde2fHWMtYPD2ZuuM6LmVRlFofxnkdhbKd3UhLWZoOOzt27IDb7cYFF1wAi8UCi8WCdevWYenSpbBYLEhPT0dLSwtqamo6vV95eTkyMjLEFB0meW1h50St1zBHecuyrA4opzHsUBdcPTQTFpOEPcdrcbiiXnQ5RBFxorYJRyoaYJKCJyeHS8e5HVmWw/Z5IknTYWfSpEnYs2cPdu3apb6MGjUKM2bMUP/barVizZo16vvs378fxcXFGDdunMDKQy8pzoqEGAsAoLjaGN2d+uZWeH3BSx3Z2aGuSHbYcEnbxYW8PoKihbKsNLRXIlyx1rB9ngtykmC3mOCua8YhtzF+mdB02ElISMCQIUM6vTgcDqSkpGDIkCFwuVy4++67MXfuXKxduxY7duzAnXfeiXHjxuGiiy4SXX5ISZKkdneOVhpjbkdZwoq3WxBnswiuhvRG2ZX1we5Sw/z2SXQmG9XzdcLX1QGAGKsZY/KTARhnKUvTYedcPPvss7jmmmtwww034NJLL0VGRgbeffdd0WWFRW7b9nOjzO2ow8ns6lA3/GhQOmKsJhRWNqhbcYmMSpZldV4nVPdhnYlyC/p6gwwp6+7X6c8++6zTn2NiYvDiiy/ixRdfFFNQBKmdHYPsyFI6O6kMO9QNDrsFPxqUgQ93l+L9XaXqDhIiIzpcUQ93XTPsFhMuyE06+zucp4v7pmIRgC1HqtDqD4TkZnWR9F19lGFnh6izqcODd2V9uLsU/gCXssi41h8MdnVG5yUjxmoO++cblOWEK9aKuuZWfGWAzinDjo7kpRqts9N2VQTDDnXTpf17wBVrhbuuGVuOGKPdTnQqGw4H/36PD+Et52diNkkY3zYbtNEAczsMOzqidHZKa5rQ3Kr/7eftnR0eKEjdY7OYcNXQ4InKPHOHjKrVH8DmtrATiXkdxfi+ytwOww5FUI94O+JsZgRk4NhJ/R/jXcEDBSkEpo4ILmV9tPeEIX4JIPquPcdrUdfcCmeMBYOzXBH7vMqur51FNWhq0fe/LYYdHZEkyVB3ZHFmh0JhTF4yMpwxqPO24rP9FaLLIQo5Zfv3+D6pMJukiH3e/FQHslwxaPEHsO1odcQ+bzgw7OiMevt5pf6HlHlVBIWCySThurbuDg8YJCNSDhOcEKF5HYUkSepS1obD+l7KYtjRGaN0dnz+AKobWgCws0Pn77q2XVmf7CtHndcnuBqi0Glq8WNH0UkA7TM0kXRxh6sj9IxhR2fUzo7Ot59X1ge7OhaThKQ4m+BqSO8GZznRp4cDza0B/PvrctHlEIXM9qJqtPgDyHTFoHfbjtxIUnZkfV3qwcm2X1D1iGFHZ4zS2XF72g4UjLfDFME1aDImSZLU6yPe382lLDKO9R3mdSQp8s+Vac4Y9E+PhywDm3R8vAPDjs7kpQY7O8dONsHnDwiupvvU4WQnl7AoNJSlrA2HKtW/X0R6t7FtXufifpGd1+lIuTpCz0tZDDs6k54QA7vFhNaAjNIa/W4/V4eT4xl2KDTyUh0Ynp0If0DGR3tOiC6H6LzVNLZgb2nw9GIlcIhghLkdhh2dMZkkQ1wbwc4OhYNyfcT7u44LroTo/G06XAVZBvqlxSPdKe7w1bG9k2E2STha1YhjJ/X5c4dhR4eMMLejXhXBzg6F0DXDMmGSgJ3FNSjW8S8DRED7du8JAnZhdZQQY8XwXsHDDDfq9BZ0hh0dMsKOLPX0ZIG/rZDxpDlj1Hb/h19xUJn0rf18HbFhp2MNej1vh2FHh4zR2eHMDoWHcsDgii+PQ5Z5Ezrp0/GaJhRWNsAkBZeRRFPDzqEqXf67YtjRobwU5fZz/Xd2OLNDoXbFkAzYLCYcdNfj27I60eUQdYsyDDw8OxHOGKvgaoCROYmIsZpQWd+MA+X1osvpMoYdHVIGlIurGuEP6C9hy7LcvozFzg6FmDPGih8OSAPAm9BJvza2hZ0JAndhdWS3mDEmP7j9XY+3oDPs6FBWYiysZgkt/gDKPF7R5XSZp6kVLW1nBPFeLAoH5Sb0D3eXIqDDXwgousmyjA2HtTOvo1BuQd/IsEORYDZJyE5u235eqb+5HWUnljPGghirWXA1ZEQ/KEhDgt2C4zVN2FF8UnQ5RF1y0F2PirpmxFhNuCA3UXQ5KiV4bT5SpbtDbRl2dE
rPczvt8zrciUXhEWM1Y8qQDAA8c4f0Z/3BYOdkdF4y7Bbt/EI4KNOJpDgrGlr8+OpYjehyuoRhR6faDxbUY2eH8zoUfspS1j+/OqG730Ipum3UyPk632UySR2ujtDXeTsW0QVQ97R3dvQYdoLLWNyJReE0rncKUuPtqKxvxlvbSjC0p0tYLX16OJCggR01pH2t/gA2H6kG0H5Ng5aM75uCf+45gfWHKnHfpH6iyzlnDDs6pecrI9RlLA4nUxhZzCZcMywTr248inkr9gqtpXcPBz6cfTEcdj7l0pntPlaL+uZWJMZZMSjTKbqc71EC2JfFJ9HY0oo4mz7+TuujSvqejp0dWZYhSZLgis6duozFsENhdueEPGw7Wo3aJp+wGqobWnCkogFP/PMbLLx+mLA6SB+U83XG90mByaS95/Wc5Dj0TIzF8ZombC2sxsS2Yx60jmFHp3omxcJskuD1BeCuaxZ6SVxXtXd29FMz6VNuigP/vO8SoTVsOlyF//jLZvxtawl+MCANlw/OEFoPaVt72NHeEhYASJKEi/um4u3tJdh4uEo3YYcDyjplNZvQKykWAHBUZ9vP2dmhaDKuTwpmXtobAPDIu3vUmTWi72psacXOtqMStDivoxjft+1wwYP6OW+HYUfH2u/I0tfcDmd2KNrM/VF/DMx0orqhBQ/94ytd3i1E4bft6En4/DJ6Jsaqc5lapHSdvjnhQXVDi+Bqzg3Djo61336un86O1+dX5yfY2aFoYbeY8dz0EbBZTPhsfwX+d3OR6JJIg5QlrAl9UzQ9h9kjwY6CjAQA7dvktY5hR8f02NmprA92dWxmE1yx3IpL0aN/egIevbIAAPDEP/fhkJuXlFJn7WFHu0tYio63oOsBw46O6bGz03FeR8u/uRCFw+3j8nBJv1Q0twYw5+1daGnlYYcUVN3Qgq9LPQC0O5zc0YS2uZ0NOrkni2FHxzp2dvQyA1DB4WSKYiaThD/8ZDgS46zYe9yDJZ8cEF0SacSmtos/B6Qn6OL5cUx+CiwmCcXVjSip1v7qAsOOjmUnx0KSgPrmVlTpZEiMO7Eo2qU7Y/DU9UMBAMvWHcbWwmrBFZEWrNfREhYAxNstGJGdCEAf3R2GHR2zW8zIcgW3n+vljqwKT9tVEQw7FMWuGJKJn1zYC7IMPPD2Lni84g49JG1ovw8rRXAl506d2zms/bkdhh2dy0ttm9up1H4bEQAq6tnZIQKA3143GDnJcThe04Tfvf+16HJIoJLqRhRVNcJskjAmP1l0OedMCTsbD1UiEND2KAXDjs61z+3oo7Pj9vD0ZCIguAzw7M3DYZKAd788jpVflYouiQRRujojshN1dWHsiOxExNnMqGpowf5ybe8uZNjRufYdWfrq7HAZiwi4MDcZs3/QFwDw6/f24kRtk+CKSARl+/aEPvpZwgIAm8WkdqK0PrfDsKNzeu3scBmLKOi/JvXD8F4u1Db58Mu/79b8cgCFlizLHeZ19DGc3NHF6nk7DDsURu23n2u/sxMIyOqhgmlOhh0iIHjP3bM3j0Cs1YwNh6rwPxsKRZdEEbS/vA6V9S2ItZoxMidJdDldppwJtKWwWtPnRjHs6FxOcnAZq7bJh5pGbW8/P9nYgta231pTHAw7RIrePeIx75pBAIDFq/bj2zKP4IooUpTLNMfkJ8Nm0d+P5IKMBKQ4bGhs8WP3sRrR5ZyW/r6z1EmszYwMZ3DYV+vdHWVeJ9lh0+U/aqJwumVMNiYPTEOLP4A5b+2C1+cXXRJFwMa2bdt62nLekckkYVwf7d+Czp84BqDcjqv1uR11XieeXR2i75IkCU/dMAyp8TZ8W1aHP3y8X3RJFGY+fwBbjihhR3/zOgplbkfLl4Iy7BiAOrej8bN2lKsiOK9DdGqp8XYsumEYAOAv6ws1P/RJ52d3SQ0aWvxIdtgwMMMpupxuU4Lal8U1aGhuFVzNqTHsGEBuqk46O3Xs7BCdzaSB6ZgxNgcA8P/e2a35WTzqPuWKiHF9UmAy6fdi5OzkOOQkx6E1IGv2+hOGHQNo35Gl7bCjXgLKzg7RGf366oHonepAmceLX7+3VzcX/VLXbFTP19HvEpZCmTlar9FuJMOOAbTP7Gh7GctdF7wXi50dojOLs1mwZPoIWEwS/rnnBN778rjokijEGppbsbP4JID2mRc9m6Dx83YYdgxAOViwqqFF0xcKutWZHV4VQXQ2w3olYs7kfgCAx97/GiXV2v5lhrpm69FqtAZk9EqKRU7bL6x6Nq53sLPzbVmdep6aljDsGEC83YLUeBsAoFjD3Z1KzuwQdckvJvbFqNwk1De3Yu47u+Dn6cqGsaFtm7YRujoAkBJvx6DM4JD1Rg3egs6wYxC5OpjbcXM3FlGXmE0Snr15BOLtFmw7ehLL1x0WXRKFyIa2QDDeIGEHaJ/b2aDB83YYdgxC63M7jS2tqG/bkshLQInOXXZyHH533WAAwLOrD2DPsVrBFdH5qqxvxr4TwVOyx+vs8s8zUYLbBg2et8OwYxDtZ+1os7Oj7MSKsZoQb7cIroZIX264oCeuGpqB1oCM+9/+Ek0tPF1Zzza1dXUKMhKQaqBl/TF5ybCaJRw72aS5kQpNh52FCxdi9OjRSEhIQFpaGqZNm4b9+zufKur1ejFr1iykpKQgPj4eN9xwA8rLywVVLI7WOzvqgYIJMZAk/Z4nQSSCJElYMG0o0p12HKlowJMf7RNdEp0HZceSUeZ1FA67BSOzg5eZam0LuqbDzrp16zBr1ixs3rwZq1evhs/nw+WXX46GhvbuxQMPPIAPP/wQf//737Fu3TqUlpbi+uuvF1i1GFo/a0c9UJBLWETdkuSw4Q8/GQ4A+OvmIqz91i24IuouZZlHz1dEnM4EjS5laTrsrFq1CnfccQcGDx6M4cOH49VXX0VxcTF27NgBAKitrcXLL7+MZ555Bj/84Q9x4YUX4pVXXsHGjRuxefPm037c5uZmeDyeTi96p4Qdd10zGlu0d1x3e2eHYYeouy7p1wN3TcgHADz4j92a3OJLZ1Zc1YiS6iZYTBLG5CeLLifklCHljYcqEdDQ7kFNh53vqq0NDuYlJwf/guzYsQM+nw+TJ09W36agoAA5OTnYtGnTaT/OwoUL4XK51Jfs7OzwFh4BrjgrEuOsALS5lKUeKMiwQ3ReHrpiAPqnx6OyvgWP/N8enq6sM0rHY2ROIhwGnF8cnp0Ih82Mk40+7CvTTiNBN2EnEAhgzpw5mDBhAoYMGQIAKCsrg81mQ2JiYqe3TU9PR1lZ2Wk/1qOPPora2lr1paSkJJylR4yy/VyLd2Sxs0MUGjFWM5bcPBI2swmf7CvHW9uM8fwVLZRZFiMuYQGA1WzC2LYDBrV0mrJuws6sWbOwd+9evPXWW+f9sex2O5xOZ6cXI8hrG1I+qsnODmd2iEJlUJYTv5zSHwAw/8NvUKjRXZjUWSAgqzuxjBp2gI5XR2jncEFdhJ3Zs2dj5cqVWLt2LXr16qU+npGRgZaWFtTU1HR6+/LycmRkZES4SvH00dnhVRFEofCzi3tjXO8UNPn8m
PP2Lvj8AdEl0VnsK/OguqEFDpsZI7ITRZcTNsrcztbCarS0auPvpabDjizLmD17Nt577z18+umnyM/P7/T6Cy+8EFarFWvWrFEf279/P4qLizFu3LhIlyuc2tmpZGeHyOhMJglP3zQcCTEW7C6pwfOfHhJdEp2Fcsv5mPxkWM2a/vF7XgakJyA13oYmnx9ftl12Kpqmv9uzZs3C//7v/+LNN99EQkICysrKUFZWhqamJgCAy+XC3Xffjblz52Lt2rXYsWMH7rzzTowbNw4XXXSR4OojT6udHX9ARlU9Z3aIQi0rMRYLfjwUAPDCpwexo0gbP1jo1Iw+r6OQJAnj+2jrFnRNh51ly5ahtrYWEydORGZmpvry9ttvq2/z7LPP4pprrsENN9yASy+9FBkZGXj33XcFVi2O0tkprfXC69POCatVDc0IyIAkAckOm+hyiAzluuFZmDYiCwEZeODtXeq1LKQtLa0BbC2sBmD8sAO0H5i4QSOXgmo67MiyfMqXO+64Q32bmJgYvPjii6iurkZDQwPefffdqJzXAYJBIqFtK2NJtXaWstyeYFcnxWGHxcCtWyJRfj91CHomxqK4uhGPf/iN6HLoFHaV1KDJ50eKw4YB6Qmiywm78W1zO7tKalDn9QmuRuNhh7pGkiTkpmpvR1YFl7CIwsoVa8XTNw2HJAFvby/Bqr2nP3qDxFCWsMb3TYXJZPwrc3olxSEvJQ7+gKx2tERi2DEYLc7tVHg4nEwUbhf1TsF/XtoHAPDou1/B7fEKrog62qjM6xjolvOzUW5B18I9WQw7BtN+1o6Gwg47O0QRMfdH/TEo04mTjT788h9f8XRljahvbsWukhoA0TGvo1DmdjZq4Lwdhh2Dae/saGcZS/kNk50dovCyWUx4bvoI2C0mfH6gAq9vKhJdEgHYWliF1oCMnOQ4ZCfHiS4nYsb1ToEkAfvL69Qrg0Rh2DEYLd5+zs4OUeT0S0/Ar64aCAB48qN9OFheJ7giWn/Q+Kcmn0qSw4bBWcEbCjYJ3pXFsGMwyjLW8ZNNmjm50q3O7PD0ZKJIuG1cLi7r3wPNrQHc/9YuzTwXRKuNh5XzdaJnXkcxoe28nfUHxc7tMOwYTI8EO2KtZgRk4NhJbSxlqZ0dJzs7RJEgSRL++8ZhSIqz4psTHixe9S0CAc7viOD2ePFtWbC7phy0F03a78mqFDpDxrBjMJIkIbetu6OFuR1Zlts7O/EMO0SRkuaMwcLrhwEA/rK+EFc+9wXe33UcrbxDKyI8Xh/++NkhXLX0CwDAoExnVB6qOjovGTazCaW1XqFHoliEfWYKm7wUB74tq9PE3E5Dix9Nbac5c0CZKLKuGJKBX11VgKVrDmF/eR3uf2sXnv73Acy8tDduvLAXYqxm0SUaTmV9M/5nfSH+uqkIdW2nWfdMjMX8qYMFVyZGrM2MK4dmwGo2Ce3sMOwYkHKwoBY6O8pOLIfNDIedf92IIm3mpX1w8+gc/HXTUfzPhqMorm7Eb1bsxXNrDuJnF+djxkW5iOe/zfN27GQj/vz5Eby1rQTNbTNSfdPice/EPrh2eJahL/48m+emjxRdAsOOEWlpR5Zy23mak8PJRKK4Yq2Y/cN+uPvi3nhrWzH+/PkRlNZ6sfBf3+LFtYdwx/g83DEhPyqXWc7XIXcdln12JLhE2DYXNTw7EfdO7IMfDUyPitOS9YBhx4C0NLNTUcd5HSKtiLWZceeEfMwYm4sVu45j+brDOFLRgKWfHsKfvyjE9DHZuOeS3shKjBVdquZ9dawGf1x7GB9/UwZldWZC3xTcO7EvxvdJgSQx5GgJw44BKZ2dkupGtPoDQi/fVDo7PbgTi0gzbBYTbhqVjRsu6IWPvy7DHz87hL3HPXhlw1H87+Yi/HhkT/z8sj7o3SNedKmaIssyNh2pwrLPDuOLDlupLx+Ujnt/0BcjshPFFUdnxLBjQBnOGNgsJrS0BlBa40VOirgTO5XODg8UJNIes0nCVUMzceWQDHxxsBIvrj2ELYXVeGf7Mfx9xzFcNSQTv5jYB0N6ukSXKlQgIGPNt2788bND+LK4BkDwezd1eBZ+PrEP+kfBLeZ6x7BjQCaThNzkOBx01+NoVYPQsKMcEc6dWETaJUkSLu3fA5f274EdRdX449rDWPOtG//ccwL/3HMCl/XvgXsn9sGY/OSoWp5p9Qew8qsT+ONnh3CgvB5AsCt286hszLy0d1Rd/aB3DDsGlZviwEF3fdvt5z2E1dHe2eGAMpEeXJibjJfvSMa3ZR4s++wwPtxdinUHKrDuQAVG5Sbh3h/0wQ8GpBk69Hh9fvx9xzG89PlhlFQ3AQDi7RbcOi4Xd03I5y9vOsSwY1Dtt5+LHVJWB5T55ECkKwUZTjw3fSTm/qg//vT5Efxj+zFsLzqJu17djoGZTvxiYh9cPTQTZgPtNqrz+vDGlmK8vL5Qfe5Kcdhw18X5+OlFuXDFWgVXSN3FsGNQuanK7edit59zZodI33JTHHjyx0Nx/6R+eHl9Id7YXIR9Jzy4729f4ul/78fPL+uD6y/oCbtFvwcUVje04JUNhXht41F4vMGDALNcMZh5aW/cPDoHsTb9fm0UxLBjUFro7Pj8AVQ1tABgZ4dI79KdMfjVVQNx78Q+eG1jEV7ZWIiiqkY8+u4eLPnkAH52cW/8x9gcXR0eWlrThD9/cQRvbS1RT3rv3cOBX1zWB1NH9ITNEr0HARqNfv5WUpco28+LqxrhD8hCWs1V9cGgYzZJSI7jYWVERpAYZ8P9k/vhZ5fk429bi/GXLwpR5vFiwUf78OJnh3D7uDzcMT4PSRo+oPBIRT2WrzuM9748Dp8/eEjO0J4u3DuxDy4fnGGopTkKYtgxqExXDKxmCS3+AMo8XvQUcEiYshMrNd7GU0SJDMZht+Bnl/TGreNyseLL41j22WEcrWrEc2sO4s9fHMF/jMnBzy7pjQyXdjYn7D1eiz9+dgj/2tt+EOBFvZNx78S+uKRfqqGHrqMdw45BWcwmZCfF4UhlA3YUnRRyAdu3ZXUAuBOLyMjsFjNuHp2DGy/Mxr/2nsAf1x7GNyc8+Mv6Qry+qQjXX9ATt44TO9xbXNWI5Z8fwecHKtTHJg9Mwy8m9sWFuUnC6qLIYdgxsNyUYNi5729fCq2D8zpExmc2SbhmWBauHpqJzw5UYNnaw9h6tBpvbSvBW9tKRJcHADBJwLXDs/CLiX1QkOEUXQ5FEMOOgU0b2RM7ik6qN/CKYLOYcO3wTGGfn4giS5Ik/GBAGn4wIA3bjlZj2WeHselwFQICusuK4PNQFv7z0t7IbZtnpOgiySLWNzTG4/HA5XKhtrYWTifTPhERkR6c689v7qsjIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkOziC5AC2RZBhC8Kp6IiIj0Qfm5rfwcPx2GHQB1dXUAgOzsbMGVEBERUVfV1dXB5XKd9vWSfLY4
FAUCgQBKS0uRkJAASZJC9nE9Hg+ys7NRUlICp9MZso+rJ9H+PeDXH91fP8DvQbR//QC/B+H8+mVZRl1dHbKysmAynX4yh50dACaTCb169Qrbx3c6nVH5F7yjaP8e8OuP7q8f4Pcg2r9+gN+DcH39Z+roKDigTERERIbGsENERESGxrATRna7Hb/97W9ht9tFlyJMtH8P+PVH99cP8HsQ7V8/wO+BFr5+DigTERGRobGzQ0RERIbGsENERESGxrBDREREhsawQ0RERIbGsBNGL774IvLy8hATE4OxY8di69atokuKiIULF2L06NFISEhAWloapk2bhv3794suS5innnoKkiRhzpw5okuJqOPHj+OnP/0pUlJSEBsbi6FDh2L79u2iy4oIv9+PefPmIT8/H7GxsejTpw8ef/zxs97fo2eff/45rr32WmRlZUGSJKxYsaLT62VZxmOPPYbMzEzExsZi8uTJOHjwoJhiw+BMX7/P58PDDz+MoUOHwuFwICsrC7fddhtKS0vFFRwGZ/s70NHPf/5zSJKEJUuWRKQ2hp0wefvttzF37lz89re/xc6dOzF8+HBMmTIFbrdbdGlht27dOsyaNQubN2/G6tWr4fP5cPnll6OhoUF0aRG3bds2/OlPf8KwYcNElxJRJ0+exIQJE2C1WvGvf/0L33zzDZ5++mkkJSWJLi0iFi1ahGXLluGFF17Avn37sGjRIixevBjPP/+86NLCpqGhAcOHD8eLL754ytcvXrwYS5cuxfLly7FlyxY4HA5MmTIFXq83wpWGx5m+/sbGRuzcuRPz5s3Dzp078e6772L//v247rrrBFQaPmf7O6B47733sHnzZmRlZUWoMgAyhcWYMWPkWbNmqX/2+/1yVlaWvHDhQoFVieF2u2UA8rp160SXElF1dXVyv3795NWrV8uXXXaZfP/994suKWIefvhh+eKLLxZdhjBXX321fNddd3V67Prrr5dnzJghqKLIAiC/99576p8DgYCckZEh//d//7f6WE1NjWy32+W//e1vAioMr+9+/aeydetWGYBcVFQUmaIi7HTfg2PHjsk9e/aU9+7dK+fm5srPPvtsROphZycMWlpasGPHDkyePFl9zGQyYfLkydi0aZPAysSora0FACQnJwuuJLJmzZqFq6++utPfg2jxwQcfYNSoUfjJT36CtLQ0jBw5En/+859FlxUx48ePx5o1a3DgwAEAwO7du7F+/XpceeWVgisTo7CwEGVlZZ3+LbhcLowdOzYqnxOB4POiJElITEwUXUrEBAIB3HrrrXjwwQcxePDgiH5uXgQaBpWVlfD7/UhPT+/0eHp6Or799ltBVYkRCAQwZ84cTJgwAUOGDBFdTsS89dZb2LlzJ7Zt2ya6FCGOHDmCZcuWYe7cufjVr36Fbdu24b777oPNZsPtt98uurywe+SRR+DxeFBQUACz2Qy/348FCxZgxowZoksToqysDABO+ZyovC6aeL1ePPzww7jlllui6mLQRYsWwWKx4L777ov452bYobCaNWsW9u7di/Xr14suJWJKSkpw//33Y/Xq1YiJiRFdjhCBQACjRo3Ck08+CQAYOXIk9u7di+XLl0dF2HnnnXfwxhtv4M0338TgwYOxa9cuzJkzB1lZWVHx9dPp+Xw+3HTTTZBlGcuWLRNdTsTs2LEDzz33HHbu3AlJkiL++bmMFQapqakwm80oLy/v9Hh5eTkyMjIEVRV5s2fPxsqVK7F27Vr06tVLdDkRs2PHDrjdblxwwQWwWCywWCxYt24dli5dCovFAr/fL7rEsMvMzMSgQYM6PTZw4EAUFxcLqiiyHnzwQTzyyCOYPn06hg4diltvvRUPPPAAFi5cKLo0IZTnvWh/TlSCTlFREVavXh1VXZ0vvvgCbrcbOTk56vNiUVER/t//+3/Iy8sL++dn2AkDm82GCy+8EGvWrFEfCwQCWLNmDcaNGyewssiQZRmzZ8/Ge++9h08//RT5+fmiS4qoSZMmYc+ePdi1a5f6MmrUKMyYMQO7du2C2WwWXWLYTZgw4XvHDRw4cAC5ubmCKoqsxsZGmEydn17NZjMCgYCgisTKz89HRkZGp+dEj8eDLVu2RMVzItAedA4ePIhPPvkEKSkpokuKqFtvvRVfffVVp+fFrKwsPPjgg/j444/D/vm5jBUmc+fOxe23345Ro0ZhzJgxWLJkCRoaGnDnnXeKLi3sZs2ahTfffBPvv/8+EhIS1DV5l8uF2NhYwdWFX0JCwvfmkxwOB1JSUqJmbumBBx7A+PHj8eSTT+Kmm27C1q1b8dJLL+Gll14SXVpEXHvttViwYAFycnIwePBgfPnll3jmmWdw1113iS4tbOrr63Ho0CH1z4WFhdi1axeSk5ORk5ODOXPm4IknnkC/fv2Qn5+PefPmISsrC9OmTRNXdAid6evPzMzEjTfeiJ07d2LlypXw+/3q82JycjJsNpuoskPqbH8HvhvwrFYrMjIyMGDAgPAXF5E9X1Hq+eefl3NycmSbzSaPGTNG3rx5s+iSIgLAKV9eeeUV0aUJE21bz2VZlj/88EN5yJAhst1ulwsKCuSXXnpJdEkR4/F45Pvvv1/OycmRY2Ji5N69e8u//vWv5ebmZtGlhc3atWtP+e/+9ttvl2U5uP183rx5cnp6umy32+VJkybJ+/fvF1t0CJ3p6y8sLDzt8+LatWtFlx4yZ/s78F2R3HouybKBj/QkIiKiqMeZHSIiIjI0hh0iIiIyNIYdIiIiMjSGHSIiIjI0hh0iIiIyNIYdIiIiMjSGHSIiIjI0hh0iIiIyNIYdIiIAeXl5WLJkiegyiCgMGHaIKOLuuOMO9U6kiRMnYs6cORH73K+++ioSExO/9/i2bdswc+bMiNVBRJHDi0CJyBBaWlrO60LFHj16hLAaItISdnaISJg77rgD69atw3PPPQdJkiBJEo4ePQoA2Lt3L6688krEx8cjPT0dt956KyorK9X3nThxImbPno05c+YgNTUVU6ZMAQA888wzGDp0KBwOB7Kzs3Hvvfeivr4eAPDZZ5/hzjvvRG1trfr5fve73wH4/jJWcXExpk6divj4eDidTtx0000oLy9XX/+73/0OI0aMwF//+lfk5eXB5XJh+vTpqKurC+83jYi6jGGHiIR57rnnMG7cONxzzz04ceIETpw4gezsbNTU1OCHP/whRo4cie3bt2PVqlUoLy/HTTfd1On9X3vtNdhsNmzYsAHLly8HAJhMJixduhRff/01XnvtNXz66ad46KGHAADjx4/HkiVL4HQ61c/3y1/+8nt1BQIBTJ06FdXV1Vi3bh1Wr16NI0eO4Oabb+70docPH8aKFSuwcuVKrFy5EuvWrcNTTz0Vpu8WEXUXl7GISBiXywWbzYa4uDhkZGSoj7/wwgsYOXIknnzySfWx//mf/0F2djYOHDiA/v37AwD69euHxYsXd/qYHed/8vLy8MQTT+DnP/85/vjHP8Jms8HlckGSpE6f77vWrFmDPXv2oLCwENnZ2QCA119/HYMHD8a2bdswevRoAMFQ9OqrryIhIQE
AcOutt2LNmjVYsGDB+X1jiCik2NkhIs3ZvXs31q5di/j4ePWloKAAQLCborjwwgu/976ffPIJJk2ahJ49eyIhIQG33norqqqq0NjYeM6ff9++fcjOzlaDDgAMGjQIiYmJ2Ldvn/pYXl6eGnQAIDMzE263u0tfKxGFHzs7RKQ59fX1uPbaa7Fo0aLvvS4zM1P9b4fD0el1R48exTXXXINf/OIXWLBgAZKTk7F+/XrcfffdaGlpQVxcXEjrtFqtnf4sSRICgUBIPwcRnT+GHSISymazwe/3d3rsggsuwP/93/8hLy8PFsu5P03t2LEDgUAATz/9NEymYOP6nXfeOevn+66BAweipKQEJSUlanfnm2++QU1NDQYNGnTO9RCRNnAZi4iEysvLw5YtW3D06FFUVlYiEAhg1qxZqK6uxi233IJt27bh8OHD+Pjjj3HnnXeeMaj07dsXPp8Pzz//PI4cOYK//vWv6uByx89XX1+PNWvWoLKy8pTLW5MnT8bQoUMxY8YM7Ny5E1u3bsVtt92Gyy67DKNGjQr594CIwothh4iE+uUvfwmz2YxBgwahR48eKC4uRlZWFjZs2AC/34/LL78cQ4cOxZw5c5CYmKh2bE5l+PDheOaZZ7Bo0SIMGTIEb7zxBhYuXNjpbcaPH4+f//znuPnmm9GjR4/vDTgDweWo999/H0lJSbj00ksxefJk9O7dG2+//XbIv34iCj9JlmVZdBFERERE4cLODhERERkaww4REREZGsMOERERGRrDDhERERkaww4REREZGsMOERERGRrDDhERERkaww4REREZGsMOERERGRrDDhERERkaww4REREZ2v8HIXnv1Uyqb3EAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "\n", - "\n", - "trace.GRAPH.clear()\n", - "\n", - "@trace.bundle(trainable=True)\n", - "def fun(x):\n", - " \"\"\" A linear predictor function \"\"\"\n", - " return 0\n", - "\n", - "optimizer = OptoPrime(fun.parameters())\n", - "\n", - "ls = []\n", - "for i in range(15):\n", - " try:\n", - " l_eval = compute_loss(inputs, outputs)\n", - " print(f'Iteration {i} Loss: {l_eval.data}')\n", - " ls.append(l_eval.data)\n", - "\n", - " ind = np.random.randint(0, N) % N\n", - " target = compute_loss([inputs[ind]], [outputs[ind]])\n", - " feedback = 'Minimize loss'\n", - " except trace.ExecutionError as e:\n", - " target = e.exception_node\n", - " feedback = str(e.exception_node.data)\n", - "\n", - " optimizer.zero_feedback()\n", - " optimizer.backward(target, feedback)\n", - " optimizer.step()\n", - "\n", - "\n", - "\n", - "# plot ls\n", - "import matplotlib.pyplot as plt\n", - "plt.plot(ls)\n", - "plt.xlabel('Iteration')\n", - "plt.ylabel('Loss')\n", - "plt.show()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Batching Non-Commutative Feedbacks\n", - "\n", - "In the earlier numerical example, the loss function was commutative so that we can do `batch_loss += loss(each_input)`. What if the feedbacks received are not commutative? This can happen often with non-numeric (e.g. text) feedbacks. Here we will see a simple design pattern for using `trace` and `OptoPrime` for batch optimization in such cases." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "from opto.trace import bundle\n", - "\n", - "@bundle(trainable=False)\n", - "def concat(*items):\n", - " \"\"\" Concatenate the items into a single string \"\"\"\n", - " output = ''\n", - " for i, item in enumerate(items):\n", - " output += f'ID {[i]}: {item}\\n'\n", - " return output" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the `concat` function when called with a list of feedbacks will concatenate them all with an identifier for each element. This way, the optimizer when given a batch of outputs and a corresponding batch of feedbacks can disambiguate which feedback corresponds to which output." - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Prompt\n", - " \n", - "You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n", - "\n", - "Specifically, a problem will be composed of the following parts:\n", - "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n", - "- #Code: the code defined in the problem.\n", - "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. 
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Prompt\n",
-      " \n",
-      "You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n",
-      "\n",
-      "Specifically, a problem will be composed of the following parts:\n",
-      "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n",
-      "- #Code: the code defined in the problem.\n",
-      "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. You can use the values in #Others to help infer how those functions work.\n",
-      "- #Variables: the input variables that you can change.\n",
-      "- #Constraints: the constraints or descriptions of the variables in #Variables.\n",
-      "- #Inputs: the values of other inputs to the code, which are not changeable.\n",
-      "- #Others: the intermediate values created through the code execution.\n",
-      "- #Outputs: the result of the code output.\n",
-      "- #Feedback: the feedback about the code's execution result.\n",
-      "\n",
-      "In #Variables, #Inputs, #Outputs, and #Others, the format is:\n",
-      "\n",
-      "<data_type> <variable_name> = <value>\n",
-      "\n",
-      "If <type> is (code), it means <value> is the source code of a python code, which may include docstring and definitions.\n",
-      "\n",
-      "Output_format: Your output should be in the following json format, satisfying the json syntax:\n",
-      "\n",
-      "{\n",
-      "\"reasoning\": <Your reasoning>,\n",
-      "\"answer\": <Your answer>,\n",
-      "\"suggestion\": {\n",
-      "    <variable_1>: <suggested_value_1>,\n",
-      "    <variable_2>: <suggested_value_2>,\n",
-      "}\n",
-      "}\n",
-      "\n",
-      "In \"reasoning\", explain the problem: 1. what the #Instruction means 2. what the #Feedback on #Output means to #Variables considering how #Variables are used in #Code and other values in #Documentation, #Inputs, #Others. 3. Reasoning about the suggested changes in #Variables (if needed) and the expected result.\n",
-      "\n",
-      "If #Instruction asks for an answer, write it down in \"answer\".\n",
-      "\n",
-      "If you need to suggest a change in the values of #Variables, write down the suggested values in \"suggestion\". Remember you can change only the values in #Variables, not others. When <type> of a variable is (code), you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.\n",
-      "\n",
-      "If no changes or answer are needed, just output TERMINATE.\n",
-      "\n",
-      "Now you see problem instance:\n",
-      "\n",
-      "================================\n",
-      "\n",
-      "#Instruction\n",
-      "You need to change the <value> of the variables in #Variables to improve the output in accordance to #Feedback.\n",
-      "\n",
-      "#Code\n",
-      "eval90 = eval(lst=lst0, __code=__code1)\n",
-      "eval91 = eval(lst=lst1, __code=__code1)\n",
-      "eval92 = eval(lst=lst2, __code=__code1)\n",
-      "eval93 = eval(lst=lst3, __code=__code1)\n",
-      "eq0 = eq(x=eval90, y=list0)\n",
-      "eq1 = eq(x=eval91, y=list1)\n",
-      "eq2 = eq(x=eval92, y=list2)\n",
-      "eq3 = eq(x=eval93, y=list3)\n",
-      "concat1 = concat(args_0=eq0, args_1=eq1, args_2=eq2, args_3=eq3)\n",
-      "\n",
-      "#Documentation\n",
The output is the result of the evaluation, i.e., __code(*args, **kwargs).\n", - "[eq] This is an eq operator of x and y.\n", - "[concat] Concatenate the items into a single string\n", - "\n", - "#Variables\n", - "(code) __code1:def strange_sort_list(lst):\n", - " '''\n", - " Given list of integers, return list in strange order.\n", - " Strange sorting, is when you start with the minimum value,\n", - " then maximum of the remaining integers, then minimum and so on.\n", - " '''\n", - " lst = sorted(lst)\n", - " return lst\n", - "\n", - "#Constraints\n", - "(code) __code1: The code should start with:\n", - "def strange_sort_list(lst):\n", - "\n", - "#Inputs\n", - "(list) lst1=[5, 5, 5, 5]\n", - "(list) lst2=[]\n", - "(list) lst0=[1, 2, 3, 4]\n", - "(list) lst3=[9, 8, 7, 6, 5, 4]\n", - "(list) list1=[5, 5, 5, 5]\n", - "(list) list2=[]\n", - "(list) list0=[1, 4, 2, 3]\n", - "(list) list3=[4, 9, 5, 8, 6, 7]\n", - "\n", - "#Others\n", - "(list) eval91=[5, 5, 5, 5]\n", - "(list) eval92=[]\n", - "(list) eval90=[1, 2, 3, 4]\n", - "(list) eval93=[4, 5, 6, 7, 8, 9]\n", - "(bool) eq0=False\n", - "(bool) eq1=True\n", - "(bool) eq2=True\n", - "(bool) eq3=False\n", - "\n", - "#Outputs\n", - "(str) concat1=ID [0]: False\n", - "ID [1]: True\n", - "ID [2]: True\n", - "ID [3]: False\n", - "\n", - "\n", - "#Feedback\n", - "ID [0]: test case failed!\n", - "ID [1]: test case passed!\n", - "ID [2]: test case passed!\n", - "ID [3]: test case failed!\n", - "\n", - "\n", - "================================\n", - "\n", - "\n", - "Your response:\n", - "\n", - "LLM response:\n", - " {\n", - "\"reasoning\": \"The #Instruction requires us to modify the values in #Variables, specifically the function __code1, to improve the output according to the feedback. According to #Feedback, test cases 0 and 3 failed, while test cases 1 and 2 passed. The current definition of strange_sort_list only sorts the list in ascending order, which is not sufficient for the 'strange order' specified. The 'strange order' is defined as starting with the minimum, then the maximum of the remaining, then the next minimum, and so forth. Therefore, we need to modify the function strange_sort_list(lst) to implement this logic. \\n\\nThe correct transformation should alternate between taking the smallest and largest remaining values in the list until the list is exhausted. 
This adjustment will ensure lists such as lst0 and lst3 are correctly transformed to match list0 and list3, respectively.\",\n", - "\"answer\": null,\n", - "\"suggestion\": {\n", - " \"__code1\": \"def strange_sort_list(lst):\\n '''\\n Given list of integers, return list in strange order.\\n Strange sorting, is when you start with the minimum value,\\n then maximum of the remaining integers, then minimum and so on.\\n '''\\n lst = sorted(lst)\\n result = []\\n while lst:\\n result.append(lst.pop(0)) # take min\\n if lst:\\n result.append(lst.pop(-1)) # take max\\n return result\"\n", - "}\n", - "}\n" - ] - }, - { - "data": { - "text/plain": [ - "{: \"def strange_sort_list(lst):\\n '''\\n Given list of integers, return list in strange order.\\n Strange sorting, is when you start with the minimum value,\\n then maximum of the remaining integers, then minimum and so on.\\n '''\\n lst = sorted(lst)\\n result = []\\n while lst:\\n result.append(lst.pop(0)) # take min\\n if lst:\\n result.append(lst.pop(-1)) # take max\\n return result\"}" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "@bundle(trainable=True)\n", - "def strange_sort_list(lst):\n", - " '''\n", - " Given list of integers, return list in strange order.\n", - " Strange sorting, is when you start with the minimum value,\n", - " then maximum of the remaining integers, then minimum and so on.\n", - " '''\n", - " lst = sorted(lst)\n", - " return lst\n", - "\n", - "def get_feedback(predict, target):\n", - " if predict == target:\n", - " return \"test case passed!\"\n", - " else:\n", - " return \"test case failed!\"\n", - " \n", - "from opto.optimizers import OptoPrime\n", - "\n", - "test_ground_truths = [[1, 4, 2, 3], [5, 5, 5, 5], [], [4, 9, 5, 8, 6, 7]]\n", - "test_inputs = [[1, 2, 3, 4], [5, 5, 5, 5], [], [9, 8, 7, 6, 5, 4]]\n", - "\n", - "optimizer = OptoPrime(strange_sort_list.parameters())\n", - "\n", - "outputs = []\n", - "feedbacks = []\n", - "for i in range(len(test_inputs)):\n", - " try:\n", - " test_output = strange_sort_list(test_inputs[i])\n", - " feedback = get_feedback(test_output, test_ground_truths[i])\n", - " except trace.ExecutionError as e:\n", - " feedback = e.exception_node.data\n", - " test_output = e.exception_node\n", - " feedbacks.append(feedback)\n", - " \n", - " correctness = test_output.eq(test_ground_truths[i])\n", - " outputs.append(correctness)\n", - "\n", - "batched_feedback = concat(*feedbacks)\n", - "batched_outputs = concat(*outputs)\n", - "optimizer.zero_feedback()\n", - "optimizer.backward(batched_outputs, batched_feedback.data)\n", - "optimizer.step(verbose=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using the functions in `opto.trainer` to perform Batching\n", - "\n", - "In the earlier examples, we wrote our own design patterns for accomplishing batch optimization. However, Trace provides the `MiniBatchAlgorithm` to accomplish this automatically.\n", - "Let us see how the abstractions in `opto.trainer` allow us to scale up optimization, for example, doing minibatch optimization on the GSM 8K Dataset, which is a dataset of math word problems." 
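Before handing control to the trainer, it helps to see the whole per-batch recipe in one place. The sketch below is a minimal, self-contained rendering of the single minibatch step that `MinibatchAlgorithm` automates; `agent`, `guide`, and the hard-coded `batch` are illustrative stand-ins rather than Trace APIs, while `trace.bundle`, `OptoPrime`, and the ID-tagged `concat` pattern are the same ones used above:

    from opto import trace
    from opto.optimizers import OptoPrime

    @trace.bundle(trainable=True)
    def agent(question):
        """A toy student whose body the optimizer is allowed to rewrite."""
        return "answer to: " + question

    @trace.bundle(trainable=False)
    def concat(*items):
        """Tag each item with its batch index so outputs and feedbacks stay paired."""
        output = ''
        for i, item in enumerate(items):
            output += f'ID [{i}]: {item}\n'
        return output

    def guide(question, answer):
        """Stand-in teacher: returns verbal feedback for one example."""
        return "Correct!" if question in answer else "Wrong, please answer the question."

    batch = ["What is 2+2?", "What is 3*3?"]   # one sampled minibatch
    outputs = [agent(q) for q in batch]        # forward pass; returns traced nodes
    feedbacks = [guide(q, o.data) for q, o in zip(batch, outputs)]

    optimizer = OptoPrime(agent.parameters())
    optimizer.zero_feedback()
    optimizer.backward(concat(*outputs), concat(*feedbacks).data)
    optimizer.step()

On top of essentially this loop, the trainer layers batch sampling, multi-threaded forward passes, periodic evaluation, and logging.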
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 19,
- "metadata": {},
- "outputs": [],
- "source": [
- "import datasets\n",
- "import numpy as np\n",
- "\n",
- "train_dataset = datasets.load_dataset('openai/gsm8k', 'main')['train'][:10]\n",
- "train_dataset = dict(inputs=train_dataset['question'], infos=train_dataset['answer'])\n",
- "test_dataset = train_dataset\n",
- "\n",
- "# set seed\n",
- "seed = 42\n",
- "num_epochs = 1\n",
- "batch_size = 2\n",
- "test_frequency = -1\n",
- "num_threads = 3\n",
- "verbose = True\n",
- "\n",
- "np.random.seed(seed)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We define the `Learner` agent, which is a student LLM with a trainable system prompt. Trace will use a generative optimizer to tune the system prompt. Trace also provides a class for LLM-as-Judge called `LLMJudge` that uses a teacher LLM to provide rich feedback to the student LLM."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": "from opto import trace\nfrom opto.utils.llm import LLM\nfrom opto.optimizers import OptoPrime\nfrom opto.trainer.algorithms.basic_algorithms import MinibatchAlgorithm\nfrom opto.trainer.loggers import TensorboardLogger\nfrom opto.trainer.guide import LLMJudge\nfrom opto.features.predefined_agents import BasicLearner\nfrom typing import Any\n\n# Use the predefined BasicLearner instead of defining our own\nLearner = BasicLearner"
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finally, we use the `MinibatchAlgorithm` as the trainer to sample batches from the GSM8K dataset, run the student model on the samples, gather feedback from the teacher model, and present the resulting traced graph to the optimizer."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "STARTING TRAINING\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:06<00:00, 3.12s/it]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "LLM response:\n",
- " {\n",
- "\"reasoning\": \"The #Instruction asks us to change the values of the variables in #Variables to improve the output according to #Feedback. The #Feedback section provides the analysis of the answers generated for each query. Both answers for the queries (regarding Alexis and Weng) are correct, as indicated by the statement 'Correct [TERMINATE]'. The #Output shows that the responses generated for each model (Learner.model0 and Learner.model1) are logical and correct given the input prompts. Therefore, there are no errors in the current setup, and no changes are needed in the variables.\",\n",
- "\"answer\": \"TERMINATE\",\n",
- "\"suggestion\": {}\n",
- "}\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Evaluating agent (iteration 1): 100%|██████████| 10/10 [00:22<00:00, 2.30s/it]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[Step 1] \u001B[92mAverage test score: 1.0\u001B[0m\n",
- "Epoch: 0. 
Iteration: 1\n", - "[Step 1] Instantaneous train score: 1.0\n", - "[Step 1] Average train score: 1.0\n", - "[Step 1] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:09<00:00, 4.65s/it]\n", - "/home/aswaminathan/miniconda3/envs/trace/lib/python3.9/copy.py:263: RuntimeWarning: coroutine 'main' was never awaited\n", - " args = (deepcopy(arg, memo) for arg in args)\n", - "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LLM response:\n", - " {\n", - "\"reasoning\": \"The instruction asks us to change the value of variables if necessary to improve the output based on the feedback provided. In this instance, the feedback for both outputs (ID [0] and ID [1]) states 'Correct' and suggests termination, which indicates that the outputs match the expected results. The variables in the code that we have control over are used to set up prompts for an LLM model to process. The feedback shows the model's output correctly answers the questions based on the inputs, matching the expected correct answers outlined in the feedback. Therefore, no changes to the variables are necessary as the task is operating as intended.\",\n", - "\"answer\": \"TERMINATE\",\n", - "\"suggestion\": {}\n", - "}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Evaluating agent (iteration 2): 100%|██████████| 10/10 [00:18<00:00, 1.88s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Step 2] \u001B[92mAverage test score: 1.0\u001B[0m\n", - "Epoch: 0. Iteration: 2\n", - "[Step 2] Instantaneous train score: 1.0\n", - "[Step 2] Average train score: 1.0\n", - "[Step 2] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:04<00:00, 2.46s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LLM response:\n", - " {\n", - " \"reasoning\": \"The #Instruction asks us to adjust the #Variables to improve the output based on #Feedback. The feedback suggests that the answers provided by the models are correct for both IDs. The output of both Learner.model25 and Learner.model24 correctly represents the calculation processes needed to answer the given queries. As the feedback indicates '[TERMINATE]', it means the current outputs are satisfactory, and no changes to the #Variables are necessary.\",\n", - " \"answer\": \"TERMINATE\"\n", - "}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Evaluating agent (iteration 3): 100%|██████████| 10/10 [00:20<00:00, 2.05s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Step 3] \u001B[92mAverage test score: 1.0\u001B[0m\n", - "Epoch: 0. Iteration: 3\n", - "[Step 3] Instantaneous train score: 1.0\n", - "[Step 3] Average train score: 1.0\n", - "[Step 3] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:08<00:00, 4.16s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LLM response:\n", - " {\n", - "\"reasoning\": \"The #Instruction requires us to change the values in #Variables to improve the output. 
However, based on #Feedback, both IDs in the #Outputs are correctly calculated according to the logic specified in #Documentation and supported by expert feedback. Therefore, no changes are needed to improve the outputs, as they already match the expected results provided in the feedback.\",\n", - "\"answer\": \"Both outputs are correct as per the feedback.\",\n", - "\"suggestion\": {}\n", - "}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Evaluating agent (iteration 4): 100%|██████████| 10/10 [00:19<00:00, 1.91s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Step 4] \u001B[92mAverage test score: 1.0\u001B[0m\n", - "Epoch: 0. Iteration: 4\n", - "[Step 4] Instantaneous train score: 1.0\n", - "[Step 4] Average train score: 1.0\n", - "[Step 4] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:05<00:00, 2.63s/it]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LLM response:\n", - " {\n", - "\"reasoning\": \"The #Instruction requires adjusting the value of the variable in #Variables to improve the output based on #Feedback. In this scenario, the feedback has been provided for both outputs (ID [0] and ID [1]) as correct, with an explicit [TERMINATE] instruction from the expert feedback, indicating that no changes are needed for the variable's value, as the outputs align perfectly with the expected answers. The current settings in #Variables, #Inputs, and #Others, including the prompts and message, are correctly leading to the generation of accurate answers to the queries, both for Julie's reading task and Albert's pizza consumption problem.\",\n", - "\"answer\": \"TERMINATE\",\n", - "\"suggestion\": {}\n", - "}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Evaluating agent (iteration 5): 100%|██████████| 10/10 [00:17<00:00, 1.76s/it]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Step 5] \u001B[92mAverage test score: 1.0\u001B[0m\n", - "Epoch: 0. Iteration: 5\n", - "[Step 5] Instantaneous train score: 1.0\n", - "[Step 5] Average train score: 1.0\n", - "[Step 5] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n", - "FINISHED TRAINING\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "agent = Learner(llm=LLM())\n", - "guide = LLMJudge(llm=LLM())\n", - "optimizer = OptoPrime(agent.parameters(), llm=LLM())\n", - "logger = TensorboardLogger(verbose=True)\n", - "\n", - "alg = MinibatchAlgorithm(\n", - " agent=agent,\n", - " optimizer=optimizer,\n", - " logger=logger)\n", - "\n", - "import nest_asyncio\n", - "nest_asyncio.apply()\n", - "import asyncio\n", - "\n", - "async def wrapper():\n", - " print(\"STARTING TRAINING\")\n", - " alg.train(guide,\n", - " train_dataset,\n", - " num_epochs=num_epochs,\n", - " batch_size=batch_size,\n", - " test_frequency=test_frequency,\n", - " test_dataset=test_dataset,\n", - " num_threads=num_threads,\n", - " verbose='output')\n", - " print(\"FINISHED TRAINING\")\n", - " \n", - "asyncio.run(wrapper())" - ] - }, - { - "cell_type": "markdown", - "source": "## Simplified Training with `trainer.train()`\n\nInstead of manually setting up the algorithm, optimizer, guide, and logger, you can use the simplified `trainer.train()` function that handles all the setup for you. 
This is the recommended approach for most use cases.",
- "metadata": {}
- },
- {
- "cell_type": "code",
- "source": "# Using the simplified trainer.train approach\nfrom opto import trainer\n\n# Create a fresh agent for simplified training\nsimple_agent = Learner(\n system_prompt=\"You're a helpful agent answering math problems.\",\n llm=LLM()\n)\n\nprint(\"STARTING SIMPLIFIED TRAINING\")\nmetrics, final_score = trainer.train(\n model=simple_agent,\n train_dataset=train_dataset,\n algorithm='MinibatchAlgorithm',\n guide=LLMJudge(llm=LLM()),\n # trainer kwargs\n num_epochs=num_epochs,\n batch_size=batch_size,\n eval_frequency=test_frequency,\n test_dataset=test_dataset,\n num_threads=num_threads,\n verbose='output',\n)\nprint(\"FINISHED SIMPLIFIED TRAINING\")\nprint(f\"Final score: {final_score}\")",
- "metadata": {},
- "execution_count": null,
- "outputs": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "trace",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.23"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/opto/features/inference/dspy_example.py b/opto/features/inference/dspy_example.py
deleted file mode 100644
index 25a72ab5..00000000
--- a/opto/features/inference/dspy_example.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import dspy
-import heapq
-from typing import List, Dict, Any, Optional, Tuple
-from dataclasses import dataclass
-
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
-
-
-@dataclass
-class HistoryNode:
- """A node representing a conversation history with its associated score."""
- history: dspy.History
- score: float
- depth: int # Number of questions asked so far
- last_question: str
- last_answer: str
-
- def __lt__(self, other):
- # For max heap behavior, negate the score (heapq is min heap by default)
- return self.score > other.score
-
- def __repr__(self):
- return f"HistoryNode(score={self.score}, depth={self.depth}, last_q='{self.last_question[:30]}...')"
-
-
-class PriorityQueue:
- """Priority queue for storing conversation histories ranked by score."""
-
- def __init__(self, max_size: Optional[int] = None):
- self.heap: List[HistoryNode] = []
- self.max_size = max_size
-
- def pop(self) -> Optional[HistoryNode]:
- """Remove and return the highest-scoring history node."""
- if self.heap:
- return heapq.heappop(self.heap)
- return None
-
- def peek(self) -> Optional[HistoryNode]:
- """Return the highest-scoring history node without removing it."""
- return self.heap[0] if self.heap else None
-
- def is_empty(self) -> bool:
- """Check if the priority queue is empty."""
- return len(self.heap) == 0
-
- def size(self) -> int:
- """Return the number of items in the queue."""
- return len(self.heap)
-
- def get_all_sorted(self) -> List[HistoryNode]:
- """Return all nodes sorted by score (highest first) without modifying the queue."""
- return sorted(self.heap, reverse=True)
-
- def clear(self):
- """Remove all items from the queue."""
- self.heap.clear()
-
-
-class HistoryPriorityQueue(PriorityQueue):
-
- # We keep the signature specific functions here to change things!
- def push(self, history: dspy.History, score: float, question: str, answer: str, depth: int = 0): - """Add a new history node to the priority queue.""" - node = HistoryNode( - history=history, - score=score, - depth=depth, - last_question=question, - last_answer=answer - ) - - heapq.heappush(self.heap, node) - - # Maintain max size if specified - if self.max_size and len(self.heap) > self.max_size: - # Remove the lowest scoring item (at the end after popping highest) - temp_items = [] - # Keep the best items - for _ in range(min(self.max_size, len(self.heap))): - if self.heap: - temp_items.append(heapq.heappop(self.heap)) - - self.heap = temp_items - # Re-heapify - heapq.heapify(self.heap) - -class MySignature(dspy.Signature): - question: str = dspy.InputField() - history: dspy.History = dspy.InputField() - answer: str = dspy.OutputField() - -predict = dspy.Predict(MySignature) -outputs = predict(question="What is the capital of France?") -history = dspy.History(messages=[{"question": "What is the capital of France?", **outputs}]) -outputs_with_history = predict(question="Are you sure?", history=history) - -""" -Idea 1: Greedy explorer -- always pop off the highest scoring node -Idea 2: Discounted sum of reward -- after each proposal, we update the full PATH that leads to the node (need additional structure) -""" - -class GreedyExplorer: - def __init__(self, exploration_budget: int = 20, max_queue_size: int = 100): - self.pq = HistoryPriorityQueue(max_size=max_queue_size) - self.exploration_budget = exploration_budget - self.explored_nodes = 0 - self.final_result = None - self.initial_history = dspy.History(messages=[]) - -if __name__ == '__main__': - pass \ No newline at end of file From 095e7f7ea9f6db47debb50b5702986e3b78c2b17 Mon Sep 17 00:00:00 2001 From: windweller Date: Wed, 17 Dec 2025 16:50:07 -0500 Subject: [PATCH 41/51] add back minibatch.ipynb --- docs/tutorials/minibatch.ipynb | 834 +++++++++++++++++++++++++++++++++ 1 file changed, 834 insertions(+) create mode 100644 docs/tutorials/minibatch.ipynb diff --git a/docs/tutorials/minibatch.ipynb b/docs/tutorials/minibatch.ipynb new file mode 100644 index 00000000..95076033 --- /dev/null +++ b/docs/tutorials/minibatch.ipynb @@ -0,0 +1,834 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Batch Optimization\n", + "\n", + "We provide an example of how to update parameters on a batch of data. In these toy examples, we show different ways to update parameters of functions on data containing multiple inputs. For simplicity, we consider batch update without random sampling." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "%pip install trace-opt ipywidgets" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a preamble, the code below provides a way to specify your API_KEY for calling LLMs using LiteLLM as part of this tutorial notebook. Alternatively, provide the keys by setting environment variables or loading LiteLLM config files." 
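The widget-based helper in the next cell is optional; the key can equally be set as a plain environment variable before any LLM call. A minimal sketch — the variable name assumes an OpenAI-backed LiteLLM route, and the value is a placeholder:

    import os

    # Placeholder value, not a real key; other providers use their own
    # variable names (e.g., ANTHROPIC_API_KEY) under LiteLLM's conventions.
    os.environ["OPENAI_API_KEY"] = "sk-..."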
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import ipywidgets as widgets\n",
+ "from IPython.display import display\n",
+ "\n",
+ "# Function to save the environment variable and API key\n",
+ "def save_env_variable(env_name, api_key):\n",
+ " # Validate inputs\n",
+ " if not env_name.strip():\n",
+ " print(\"⚠️ Environment variable name cannot be empty.\")\n",
+ " return\n",
+ " if not api_key.strip():\n",
+ " print(\"⚠️ API key cannot be empty.\")\n",
+ " return\n",
+ " \n",
+ " # Store the API key as an environment variable\n",
+ " os.environ[env_name] = api_key\n",
+ " globals()[env_name] = api_key # Set it as a global variable\n",
+ " print(f\"✅ API key has been set for environment variable: {env_name}\")\n",
+ "\n",
+ "# Create the input widgets\n",
+ "env_name_input = widgets.Text(\n",
+ " value=\"OPENAI_API_KEY\", # Default value\n",
+ " description=\"Env Name:\",\n",
+ " placeholder=\"Enter env variable name (e.g., MY_API_KEY)\",\n",
+ ")\n",
+ "\n",
+ "api_key_input = widgets.Password(\n",
+ " description=\"API Key:\",\n",
+ " placeholder=\"Enter your API key\",\n",
+ ")\n",
+ "\n",
+ "# Create the button to submit the inputs\n",
+ "submit_button = widgets.Button(description=\"Set API Key\")\n",
+ "\n",
+ "# Display the widgets\n",
+ "display(env_name_input, api_key_input, submit_button)\n",
+ "\n",
+ "# Callback function for the button click\n",
+ "def on_button_click(b):\n",
+ " env_name = env_name_input.value\n",
+ " api_key = api_key_input.value\n",
+ " save_env_variable(env_name, api_key)\n",
+ "\n",
+ "# Attach the callback to the button\n",
+ "submit_button.on_click(on_button_click)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, we consider a small linear regression problem. To perform an update on multiple inputs at once, we simply compute the loss for each input, sum the losses, and make a single `backward` call to tell the optimizer to minimize the total. Since the optimizer can see the whole graph, it can tell how the different inputs and labels are paired and evaluated by the loss function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import random\n",
+ "import numpy as np\n",
+ "\n",
+ "random.seed(0)\n",
+ "np.random.seed(0)\n",
+ "\n",
+ "from opto import trace\n",
+ "from opto.optimizers import OptoPrime\n",
+ "\n",
+ "\n",
+ "def true_fun(x):\n",
+ " return 2*x - 3\n",
+ "\n",
+ "inputs = [3, 2, 1, 5, 4]\n",
+ "outputs = [true_fun(x) for x in inputs]\n",
+ "N = len(inputs)\n",
+ "\n",
+ "\n",
+ "@trace.bundle()\n",
+ "def loss(y_hat, y):\n",
+ " \"\"\" A least squares loss function. \"\"\"\n",
+ " return (y_hat - y) ** 2\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Iteration 0 Loss: 85\n",
+ "Iteration 1 Loss: 85\n",
+ "Iteration 2 Loss: 10\n",
+ "Iteration 3 Loss: 15\n",
+ "Iteration 4 Loss: 10\n",
+ "Iteration 5 Loss: 40\n",
+ "Iteration 6 Loss: 0\n",
+ "Iteration 7 Loss: 0\n",
+ "Iteration 8 Loss: 0\n",
+ "Iteration 9 Loss: 0\n",
+ "Iteration 10 Loss: 0\n",
+ "Iteration 11 Loss: 0\n",
+ "Iteration 12 Loss: 0\n",
+ "Iteration 13 Loss: 0\n",
+ "Iteration 14 Loss: 0\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "[... base64-encoded PNG omitted (matplotlib line plot of Loss vs. Iteration for the batched run; the loss falls from 85 to 0 by iteration 6) ...]",
+ "text/plain": [
+ "<Figure size 640x480 with 1 Axes>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "trace.GRAPH.clear()\n",
+ "\n",
+ "@trace.bundle(trainable=True)\n",
+ "def fun(x):\n",
+ " \"\"\" A linear predictor function \"\"\"\n",
+ " return 0\n",
+ "\n",
+ "def compute_loss(inputs, outputs):\n",
+ " l = 0\n",
+ " for x, y in zip(inputs, outputs):\n",
+ " y_hat = fun(x)\n",
+ " l += loss(y_hat, y)\n",
+ " return l\n",
+ "\n",
+ "optimizer = OptoPrime(fun.parameters())\n",
+ "\n",
+ "ls = []\n",
+ "for i in range(15):\n",
+ " try:\n",
+ " l = compute_loss(inputs, outputs)\n",
+ " target = l\n",
+ " feedback = 'Minimize loss'\n",
+ " print(f'Iteration {i} Loss: {l.data}')\n",
+ " ls.append(l.data)\n",
+ " except trace.ExecutionError as e:\n",
+ " target = e.exception_node\n",
+ " feedback = str(e.exception_node.data)\n",
+ "\n",
+ " optimizer.zero_feedback()\n",
+ " optimizer.backward(target, feedback)\n",
+ " optimizer.step()\n",
+ "\n",
+ "# plot ls\n",
+ "import matplotlib.pyplot as plt\n",
+ "plt.plot(ls)\n",
+ "plt.xlabel('Iteration')\n",
+ "plt.ylabel('Loss')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In contrast, if we update the parameter without batching, in a purely online fashion one example at a time, the optimization results can sometimes be noisier."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Iteration 0 Loss: 85\n",
+ "Iteration 1 Loss: 10\n",
+ "Iteration 2 Loss: 10\n",
+ "Iteration 3 Loss: 120\n",
+ "Iteration 4 Loss: 120\n",
+ "Iteration 5 Loss: 120\n",
+ "Iteration 6 Loss: 60\n",
+ "Iteration 7 Loss: 30\n",
+ "Iteration 8 Loss: 30\n",
+ "Iteration 9 Loss: 15\n",
+ "Iteration 10 Loss: 10\n",
+ "Iteration 11 Loss: 10\n",
+ "Iteration 12 Loss: 15\n",
+ "Iteration 13 Loss: 55\n",
+ "Iteration 14 Loss: 15\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjsAAAGwCAYAAABPSaTdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8ekN5oAAAACXBIWXMAAA9hAAAPYQGoP6dpAABVTElEQVR4nO3deXxU5b0/8M+ZNckkM9nIBtlYww7KIuBCCxV3qFrFS90rtxWuIr+6tcW2KCLcqohaqK3XpVertldRqcUiIsq+CYIia0gCIZksJJNtksnM+f0xOSeJsiXMzHPOmc/79crrJZPtmwiTT77P93keSZZlGUREREQGZRJdABEREVE4MewQERGRoTHsEBERkaEx7BAREZGhMewQERGRoTHsEBERkaEx7BAREZGhWUQXoAWBQAClpaVISEiAJEmiyyEiIqJzIMsy6urqkJWVBZPp9P0bhh0ApaWlyM7OFl0GERERdUNJSQl69ep12tcz7ABISEgAEPxmOZ1OwdUQERHRufB4PMjOzlZ/jp8Oww6gLl05nU6GHSIiIp052wgKB5SJiIjI0Bh2iIiIyNAYdoiIiMjQGHaIiIjI0Bh2iIiIyNAYdoiIiMjQGHaIiIjI0Bh2iIiIyNAYdoiIiMjQGHaIiIjI0ISGnc8//xzXXnstsrKyIEkSVqxYob7O5/Ph4YcfxtChQ+FwOJCVlYXbbrsNpaWlnT5GdXU1ZsyYAafTicTERNx9992or6+P8FdCREREWiU07DQ0NGD48OF48cUXv/e6xsZG7Ny5E/PmzcPOnTvx7rvvYv/+/bjuuus6vd2MGTPw9ddfY/Xq1Vi5ciU+//xzzJw5M1JfAhEREWmcJMuyLLoIIHiJ13vvvYdp06ad9m22bduGMWPGoKioCDk5Odi3bx8GDRqEbdu2YdSoUQCAVatW4aqrrsKxY8eQlZV1Tp/b4/HA5XKhtraWF4EaTGNLK6obWkSXIVRinA3xdt75S0TGc64/v3X1DFhbWwtJkpCYmAgA2LRpExITE9WgAwCTJ0+GyWTCli1b8OMf//iUH6e5uRnNzc3qnz0eT1jrJjEq65vxgz98hjpvq+hShIqxmvDxnEuRm+IQXQoRkRC6CTterxcPP/wwbrnlFjW9lZWVIS0trdPbWSwWJCcno6ys7LQfa+HChfj9738f1npJvL3Ha1HnbYUkATZzdM7i+/wBeH0BbDxcxbBDRFFLF2HH5/PhpptugizLWLZs2Xl/vEcffRRz585V/+zxeJCdnX3eH5e0xV0X7N5d0q8HXr9rjOBqxFjwz2/w5y8Ksb+sTnQpRETCaD7sKEGnqKgIn376aac1uYyMDLjd7k5v39raiurqamRkZJz2Y9rtdtjt9rDVTNpQ0RZ20hKi9//1gIzgv5dvy7hUS0TRS9O9fSXoHDx4EJ988glSUlI6vX7cuHGoqanBjh071Mc+/fRTBAIBjB07NtLlksYoYadHFIedgowEAMD+sjpoZC8CEVHECe3s1NfX49ChQ+qfCwsLsWvXLiQnJyMzMxM33ngjdu7ciZUrV8Lv96tzOMnJybDZbBg4cCCuuOIK3HPPPVi+fDl8Ph9mz56N6dOnn/NOLDIud50XQHR3dvqmxcMkAScbfaioa0aaM0Z0SUREESe0s7N9+3aMHDkSI0eOBADMnTsXI0eOxGOPPYbjx4/jgw8+wLFjxzBixAhkZmaqLxs3blQ/xhtvvIGCggJMmjQJV111FS6++GK89NJLor4k0pD2Zazo/QEfYzUjLzU4mPwt53aIKEoJ7exMnDjxjK31c2m7Jycn48033wxlWWQQbi5jAQguZR2paMD+sjpc2r+H6HKIiCJO0zM7ROeDA8pBA9KVIWV2dogoOjHskCHVN7eiscUPgJ2dAcqQcjl3ZBFRdGLYIUNSujoOmxmOKL8qQdmRdbC8Hv4Ad2QRUfRh2CFDcnuCO7GivasDADnJcYi1mtHcGsDRqgbR5RARRRzDDhlSRT13YilMJgn90+MBgCcpE1FUYtghQ3J7uBOrI2Vuh0PKRBSNGHbIkJTODsNOkHJtxH5eG0FEUYhhhwyJnZ3OOl4bQUQUbRh2yJDaZ3YYdoD2Zayi6kY0trQKroaIKLIYdsiQuBurs9R4O1LjbZDl4BZ0IqJowrBDhsR7sb5vAJeyiChKMeyQ4fj8AVQ3tgAA0pzs7Ch4bQQRRSuGHTKcqvoWyDJgNklIjrOJLkczCnhtBBFFKYYdMhxlCSs13gaTSRJcjXZwGYuIohXDDhmOu47DyafSPz0BkgRU1regsm23GhFRNGDYIcPhcPKpxdrMyE2OA8DuDhFFF4YdMhx3W9jpEc/Oznfx2ggiikYMO2Q4ameHO7G+h9dGEFE0Ytghw+HMzunx2ggiikYMO2Q47TM7DDvfpSxjHSivRyAgC66GiCgyGHbIcNSZHYad78lLccBuMaHJ50dxdaPocoiIIoJhhwxFlmXuxjoDs0lCv/R4ABxSJqLowbBDhuLxtqK5NQCAnZ3TUa6N4NwOEUULhh0ylIq24eSEGAtirGbB1WgTr40gomjDsEOG4uZw8lnxrB0iijYMO2QoFRxOPiuls3O0sgFen19wNURE4cewQ4bC4eSz65FgR1KcFQEZOOSuF10OEVHYMeyQoXDb+dlJksSlLCKKKgw7ZCg8UPDcFPDaCCKKIgw7ZCi8KuLcsLNDRNGEYYcMhTM752YA78gioijCsEOGwpmdc9M/PRh23HXNONnQIrgaIqLwYtghw2hu9aOm0QeAMztnE2+3IDs5FgCXsojI+Bh2yDAq64MdCqtZQmKcVXA12td+bQSHlInI2Bh2yDDUAwXj7ZAkSXA12td+bQQ7O0RkbAw7ZBhuD3didQV3ZBFRtGDYIcNoH07mTqxzoXR2DpTVIRCQBVdDRBQ+DDtkGOq2cyc7O+ciL9UBm9mEhhY/jtc0iS6HiChsGHbIMNwdZnbo7KxmE3r3cADgUhYRGRvDDhkGOztdpw4pc0cWERkYww4ZRoVyVQQ7O+dsQNsdWezsEJGRMeyQYbR3djigfK4KeG0EEUUBhh0yBFmWUVHPqyK6Stl+fqSyAc2tfsHVEBGFB8MOGUJNow8+f3D7dGq8TXA1+pHpikFCjAX+gIzD7gbR5RARhQXDDhmCshMrMc4Ku8UsuBr9kCSpw0nKHFImImNi2CFDUOd1uITVZTxJmYiMjmGHDMFdx6siukvZkcUhZSIyKoYdMoT2zg53YnUVd2QRkdEx7JAhtN+Lxc5OV/VPD4adE7Ve1Db6BFdDRBR6DDtkCG7O7HSbK9aKLFewI7a/nN0dIjIeoWHn888/x7XXXousrCxIkoQVK1Z0er0sy3jssceQmZmJ2NhYTJ48GQcPHuz0NtXV1ZgxYwacTicSExNx9913o76+PoJfBWlBBWd2zssAXhtBRAYmNOw0NDRg+PDhePHFF0/5+sWLF2Pp0qVYvnw5tmzZAofDgSlTpsDr9apvM2PGDHz99ddYvXo1Vq5cic8//xwzZ86M1JdAGsFlrPPDayOIyMgsIj/5lVdeiSuvvPKUr5
NlGUuWLMFvfvMbTJ06FQDw+uuvIz09HStWrMD06dOxb98+rFq1Ctu2bcOoUaMAAM8//zyuuuoq/OEPf0BWVlbEvhYSiwPK54dDykRkZJqd2SksLERZWRkmT56sPuZyuTB27Fhs2rQJALBp0yYkJiaqQQcAJk+eDJPJhC1btpz2Yzc3N8Pj8XR6If3y+vyo87YCYGenu9RlrPI6yLIsuBoiotDSbNgpKysDAKSnp3d6PD09XX1dWVkZ0tLSOr3eYrEgOTlZfZtTWbhwIVwul/qSnZ0d4uopkpSujt1igjNGaLNSt/r0iIfFJKHO24rSWu/Z34GISEc0G3bC6dFHH0Vtba36UlJSIrokOg8dDxSUJElwNfpks5jQu4cDAIeUich4NBt2MjIyAADl5eWdHi8vL1dfl5GRAbfb3en1ra2tqK6uVt/mVOx2O5xOZ6cX0i9eFREaHFImIqPSbNjJz89HRkYG1qxZoz7m8XiwZcsWjBs3DgAwbtw41NTUYMeOHerbfPrppwgEAhg7dmzEayYxuBMrNDikTERGJXTAob6+HocOHVL/XFhYiF27diE5ORk5OTmYM2cOnnjiCfTr1w/5+fmYN28esrKyMG3aNADAwIEDccUVV+Cee+7B8uXL4fP5MHv2bEyfPp07saIId2KFxoB0hh0iMiahYWf79u34wQ9+oP557ty5AIDbb78dr776Kh566CE0NDRg5syZqKmpwcUXX4xVq1YhJqb9h9obb7yB2bNnY9KkSTCZTLjhhhuwdOnSiH8tJI7bw85OKCg7sg5X1MPnD8Bq1mzjl4ioS4SGnYkTJ55xm6skSZg/fz7mz59/2rdJTk7Gm2++GY7ySCcq6jmzEwq9kmIRb7egvrkVRyoa1PBDRKR3/NWNdM/NqyJCQpIk9E+PBwB8yx1ZRGQgDDuke8oyFmd2zp+yI4tzO0RkJAw7pGv+gIyqhhYAQJqTnZ3zxR1ZRGREDDuka9UNLfAHZEgSkOKwiS5H95Q5HZ61Q0RGwrBDuqZsO09x2GDh7qHzpnR2jtc0oc7rE1wNEVFo8KcD6ZoynJwazyWsUEiMsyG9bTnwQDm7O0RkDAw7pGvqgYJODieHCq+NICKjYdghXVOvimBnJ2Q4pExERsOwQ7rW3tlh2AkVXhtBREbDsEO6VsHOTsgpO7L2l9ed8YRzIiK9YNghXWNnJ/T6psXDbJJQ0+hTlwmJiPSMYYd0Tb0qgp2dkImxmpGXEgeAQ8pEZAwMO6Rr3I0VHgXqtRG8I4uI9I9hh3SrobkVDS1+ALwENNR4kjIRGQnDDumWMk8SZzMj3m4RXI2xDOD2cyIyEIYd0i11CYtdnZBTzto56K5Hqz8guBoiovPDsEO6pQ4nM+yEXHZSHOJsZrS0BnC0qlF0OURE54Vhh3SrvbPD4eRQM5kk9OPhgkRkEAw7pFvqVRHs7IRFgRp2uCOLiPSNYYd0q4JhJ6y4I4uIjIJhh3SLnZ3wKuhwbQQRkZ4x7JBucTdWeCmdneLqRjS2tAquhoio+xh2SLcquBsrrFLi7UiNt0OWgQPl9aLLISLqNoYd0qVWfwBVDS0AuBsrnNSlLA4pE5GOMeyQLlU1tECWAZMEJDtsossxLA4pE5ERMOyQLrk9wXmd1Hg7zCZJcDXGxWsjiMgIGHZIlyrqOa8TCQUMO0RkAAw7pEtKZ4c7scKrX1oCJCm4bKjsfiMi0huGHdIlXhURGbE2M/JSHADY3SEi/WLYIV3igYKRMyBdGVLmjiwi0ieGHdIltbPjZNgJNw4pE5HeMeyQLrmVAwXjGXbCjddGEJHeMeyQLlXUs7MTKUpn50B5HfwBWXA1RERdx7BDuiPLsrobq0c8B5TDLTfFgRirCV5fAMXVjaLLISLqMoYd0p265lY0twYAcEA5EswmCf3SeG0EEekXww7pjtLVSbBbEGszC64mOvDaCCLSM4Yd0h1lJ1YPzutEDE9SJiI9Y9gJo5LqRqw/WImmFr/oUgyFO7Eij9vPiUjPGHbC6PplG/HTl7fgoJs/IEKp/YwdDidHihJ2jlY1wOtjeCcifWHYCaO8lDgAwNEq7mAJJXUZi52diOkRb0eyw4aADBwsrxddDhFRlzDshFFu251CRZUNgisxFjdPT444SZJ4bQQR6RbDThixsxMe7ZeAMuxEEud2iEivGHbCSO3sVLGzE0rqgDLDTkTx2ggi0iuGnTDKaws77OyEVntnhwPKkcSzdohIrxh2wiinbRmrsr4Z9c2tgqsxhpbWAE42+gCwsxNp/dtmdirqmlHd0CK4GiKic8ewE0auWCuSHTYAXMoKlcq2C0CtZgmJsVbB1UQXh92CnORggOeQMhHpCcNOmOW2dXeKuJQVEspOrNR4O0wmSXA10YdDykSkRww7YdY+t8POTihwJ5ZYvDaCiPSIYSfM1M5OJTs7ocCdWGJxSJmI9IhhJ8zY2Qkt9fRk7sQSQunsHCivQyAgC66GiOjcaDrs+P1+zJs3D/n5+YiNjUWfPn3w+OOPQ5bbn2RlWcZjjz2GzMxMxMbGYvLkyTh48KDAqjvjzE5oudWww86OCHkpDtjMJjS2+HHsZJPocoiIzommw86iRYuwbNkyvPDCC9i3bx8WLVqExYsX4/nnn1ffZvHixVi6dCmWL1+OLVu2wOFwYMqUKfB6vQIrb6d0dso8Xt5+HgJuD2d2RLKYTeiTFg+AO7KISD80HXY2btyIqVOn4uqrr0ZeXh5uvPFGXH755di6dSuAYFdnyZIl+M1vfoOpU6di2LBheP3111FaWooVK1aILb5NYpwVzhgLAKC4mt2d81VRz7AjGoeUiUhvNB12xo8fjzVr1uDAgQMAgN27d2P9+vW48sorAQCFhYUoKyvD5MmT1fdxuVwYO3YsNm3adNqP29zcDI/H0+klXCRJQl4q53ZCpcLDAWXR1CFlXhtBRDphEV3AmTzyyCPweDwoKCiA2WyG3+/HggULMGPGDABAWVkZACA9Pb3T+6Wnp6uvO5WFCxfi97//ffgK/47cFAe+OlbLgwXPkyzL7Z0dJweUReFZO0SkN5ru7Lzzzjt444038Oabb2Lnzp147bXX8Ic//AGvvfbaeX3cRx99FLW1tepLSUlJiCo+Nd5+Hho1jT74/MHh9NR4m+BqopeyjFVY2YDmVs6hEZH2abqz8+CDD+KRRx7B9OnTAQBDhw5FUVERFi5ciNtvvx0ZGRkAgPLycmRmZqrvV15ejhEjRpz249rtdtjtkVsG4e3noaF0dRLjrLBbzIKriV4Zzhg4YyzweFtxyF2PwVku0SUREZ2Rpjs7jY2NMJk6l2g2mxEIBAAA+fn5yMjIwJo1a9TXezwebNmyBePGjYtorWeidnZ4sOB5UXZi9YjnvI5IkiShIMMJgEtZRKQPmu7sXHvttViwYAFycnIwePBgfPnll3jmmWdw1113AQg+6c6ZMwdPPPEE+vXrh/z8fMybNw9ZWVmYNm2a2OI7UDo7pbVNaG71syvRTRX1weHkNCfDjmgDMhKw9Wg1ww4R6YKmw87zzz+PefPm4d5774Xb7UZWV
hb+8z//E4899pj6Ng899BAaGhowc+ZM1NTU4OKLL8aqVasQE6OdAdbUeBscNjMaWvwoqW5C37ZzSqhr2NnRDl4bQUR6oumwk5CQgCVLlmDJkiWnfRtJkjB//nzMnz8/coV1kSRJyE1x4JsTHhRVNTDsdJN6CSh3YgnHs3aISE80PbNjJHmp3JF1vtSrItjZEa5/W9gp83hR2+gTXA0R0Zkx7EQId2Sdv/bODsOOaM4YK3omxgLgtRFEpH0MOxHCs3bOn7uu7fRkdnY0QT1ckCcpE5HGMexECDs758/Nzo6mcEiZiPSCYSdClNvPj51sgs8fEFyN/nh9ftR5WwEAPRI4oKwFHFImIr1g2ImQtAQ7Yqwm+AMyjp9sEl2O7ijzOjaLSb1FnsRSOjsHyuogy7LgaoiITo9hJ0JMJgm5ybz9vLvUJawEOyRJElwNAUDv1HhYTBLqmltxvIYBnoi0i2EngnLbhpSLOKTcZRXKcHIC53W0wmYxoU+P4JlRXMoiIi1j2ImgvFR2drqrokNnh7SDQ8pEpAcMOxHEzk73qQcKMuxoygAOKRORDjDsRJCyI4udna5r7+xwJ5aWcEcWEekBw04EKZ2dkupG+APcvdIV7Oxok9LZOVxRj5ZWHqlARNrEsBNBma5Y2Mwm+PwySrl7pUs4s6NNPRNjkWC3oDUg40hlvehyiIhOiWEngswmCdnJwfuEOLfTNW7uxtIkSZLUS0G5lEVEWsWwE2Gc2+m6QEBGZX0LAM7saBF3ZBGR1jHsRBjvyOq66sYW+AMyJAlIibeJLoe+g0PKRKR1DDsRlpfK28+7yu0Jzuskx9lgNfOvrNYMSGfYISJt40+OCGNnp+sq6rkTS8sKMpwAgOM1TfB4fYKrISL6PoadCMvrcLBggNvPz4nbw+FkLXPFWZHhDM5SHWB3h4g0iGEnwnomxsJiktDcGkB52w4jOjOls8PhZO3ikDIRaRnDToRZzCb0SuL2865QZnbY2dEuDikTkZYx7AjAuZ2uae/sMOxoFe/IIiItY9gRQJnb4Y6sc1PBzo7mtS9jeSDLnEUjIm1h2BGAnZ2uYWdH+/qmxcNskuDxtqLMw1k0ItIWhh0B1LN2KtnZORfcjaV9dosZ+anBEM8hZSLSGoYdATp2dtjyP7OG5lY0tPgBAGlO7sbSMs7tEJFWMewI0CspFiYJaGjxq3c+0akpt53HWs1w2MyCq6EzKeBJykSkUQw7AtgtZmQlKtvPObdzJuq8jtMOSZIEV0NnwrN2iEiruhV2SkpKcOzYMfXPW7duxZw5c/DSSy+FrDCja7/9nHM7Z6KesRPPeR2tU66NOOyuh88fEFwNEVG7boWd//iP/8DatWsBAGVlZfjRj36ErVu34te//jXmz58f0gKNKle9NoKdnTNxt50yneZk2NG6XkmxiLOZ0eIP4Ggl/14TkXZ0K+zs3bsXY8aMAQC88847GDJkCDZu3Ig33ngDr776aijrMyx2ds6NMrPDqyK0z2SS0D+dS1lEpD3dCjs+nw92e/A37U8++QTXXXcdAKCgoAAnTpwIXXUGxs7OuXHX8UBBPeG1EUSkRd0KO4MHD8by5cvxxRdfYPXq1bjiiisAAKWlpUhJSQlpgUaV13YmSWElt5+fSQXDjq5wSJmItKhbYWfRokX405/+hIkTJ+KWW27B8OHDAQAffPCBurxFZ5aTHOzs1HlbUdPoE1yNdrGzoy/qWTvlHsGVEBG1s3TnnSZOnIjKykp4PB4kJSWpj8+cORNxcXEhK87IYqxmZLpicKLWi6NVDUhy2ESXpEntMzsMO3qg7MgqqW5CfXMr4u3deoohIgqpbnV2mpqa0NzcrAadoqIiLFmyBPv370daWlpICzSy9rkdDimfSqs/gKoGdnb0JNlhU/9fHSjnUhYRaUO3ws7UqVPx+uuvAwBqamowduxYPP3005g2bRqWLVsW0gKNrH1HFoeUT6W6oQWyDJgkIMXBsKMXHFImIq3pVtjZuXMnLrnkEgDAP/7xD6Snp6OoqAivv/46li5dGtICjaz9jix2dk5FmddJibfDbOLpyXoxgNdGEJHGdCvsNDY2IiEh+IT273//G9dffz1MJhMuuugiFBUVhbRAI8trW8ZiZ+fUOK+jT+07sjikTETa0K2w07dvX6xYsQIlJSX4+OOPcfnllwMA3G43nE5nSAs0MnZ2zkw5PZnzOvqiDCnvL6vjsQpEpAndCjuPPfYYfvnLXyIvLw9jxozBuHHjAAS7PCNHjgxpgUamDChXN7Sgtonbz7+LnR196pceD5MEnGz0qf8PiYhE6lbYufHGG1FcXIzt27fj448/Vh+fNGkSnn322ZAVZ3QOu0XtWhSzu/M9PGNHn2KsZnX4nocLEpEWdCvsAEBGRgZGjhyJ0tJS9Qb0MWPGoKCgIGTFRQPO7ZyecuM578XSnwHckUVEGtKtsBMIBDB//ny4XC7k5uYiNzcXiYmJePzxxxEIBEJdo6G1z+0w7HxXRT2XsfSK10YQkZZ063jTX//613j55Zfx1FNPYcKECQCA9evX43e/+x28Xi8WLFgQ0iKNrL2zw2Ws7+KAsn4V8NoIItKQboWd1157DX/5y1/U284BYNiwYejZsyfuvfdehp0uYGfn1GRZ7jCgzGUsvRnQtiPrYHk9/AGZ5yQRkVDdWsaqrq4+5WxOQUEBqqurz7uoaNJ+ijI7Ox3VNbfC6wsuibKzoz85yXGIsZrQ3BrgPBoRCdetsDN8+HC88MIL33v8hRdewLBhw867qGiS07aMVVHXjIbmVsHVaIfS1UmwWxBrMwuuhrrKbJLQnycpE5FGdGsZa/Hixbj66qvxySefqGfsbNq0CSUlJfjoo49CWqDRuWKtSHbYUN3QgqKqRgzK4qGMQPtOLHZ19GtAegK+OlaLb8vqcNXQTNHlEFEU61Zn57LLLsOBAwfw4x//GDU1NaipqcH111+Pr7/+Gn/9619DXaPhtd9+zna/QtmJxbCjX+3bzzmkTERidauzAwBZWVnfG0TevXs3Xn75Zbz00kvnXVg0yUtx4MviGs7tdOD2cCeW3nW8NoKISKRuHyoYKcePH8dPf/pTpKSkIDY2FkOHDsX27dvV18uyjMceewyZmZmIjY3F5MmTcfDgQYEVdx07O9/XfsYOd2LpldLZKapuRGML59GISBxNh52TJ09iwoQJsFqt+Ne//oVvvvkGTz/9NJKSktS3Wbx4MZYuXYrly5djy5YtcDgcmDJlCrxer8DKu6Z9RxbDjqKCMzu61yPBjhSHDbIc3IJORCRKt5exImHRokXIzs7GK6+8oj6Wn5+v/rcsy1iyZAl+85vfYOrUqQCA119/Henp6VixYgWmT59+yo/b3NyM5ub2Cwo9HrEzBe2dHS5jKdy8BNQQBmQkYOPhKuwvq8Pw7ETR5RBRlOpS2Ln++uvP+PqamprzqeV7PvjgA0yZMgU/+clPsG7dOvXQwnvuuQcAUFhYiLKyMkyePFl9H5fLhbFjx2LTpk2nDTsLFy7E73//+5DWej6Uzs6JWi+8Pj9irNxqXcFLQA1h
aC8XNh6uwucHK3DT6GzR5RBRlOrSMpbL5TrjS25uLm677baQFXfkyBEsW7YM/fr1w8cff4xf/OIXuO+++/Daa68BAMrKygAA6enpnd4vPT1dfd2pPProo6itrVVfSkpKQlZzdyTGWeGMCebO4mp2d4D2qyLSnAw7enZ125bzT/aVo57nSBGRIF3q7HRcToqEQCCAUaNG4cknnwQAjBw5Env37sXy5ctx++23d/vj2u122O3a+SEqSRLyUh346lgtjlY2qIexRauW1gBONvoAcEBZ74b2dCE/1YHCygas/qYMPx7ZS3RJRBSFND2gnJmZiUGDBnV6bODAgSguLgYAZGRkAADKy8s7vU15ebn6Or1ovyOLnZ3Ktp1YFpOExFir4GrofEiShOuGZwEA3t9VKrgaIopWmg47EyZMwP79+zs9duDAAeTm5gIIDitnZGRgzZo16us9Hg+2bNminuysF+23n3NHVsd5HRMvkNS960YEw84XBytRVd98lrcmIgo9TYedBx54AJs3b8aTTz6JQ4cO4c0338RLL72EWbNmAQj+1jhnzhw88cQT+OCDD7Bnzx7cdtttyMrKwrRp08QW30Xs7LRzczjZUPr0iMfQni74AzI+2nNCdDlEFIU0HXZGjx6N9957D3/7298wZMgQPP7441iyZAlmzJihvs1DDz2E//qv/8LMmTMxevRo1NfXY9WqVYiJ0desBzs77Sq47dxwpo7gUhYRiaPpc3YA4JprrsE111xz2tdLkoT58+dj/vz5Eawq9JTOTmlNE5pb/bBbonf7ubITi50d47hmWBYWfLQP24tOoqS6EdnJcaJLIqIoounOTjRJjbfBYTMjIAPHTjaJLkeo9pkdfXXn6PQyXDG4KD8FAPDhV+zuEFFkMexohCRJHeZ2onspizM7xqQsZX3ApSwiijCGHQ3JS22b26mM7iFlzuwY05VDMmE1S/i2rA7flom9ooWIogvDjoawsxPEqyKMyRVnxcQBaQDY3SGiyGLY0ZD2HVnR29mRZZmdHQNTl7J2l0KWZcHVEFG0YNjREHZ2gNomH1r8AQBAajzDjtFMKkiHw2bGsZNN2FlcI7ocIooSDDsaotx+fuxkE3xtP/CjjTKc7Iq18vZ3A4q1mTFlcPAqlw92HRdcDVF0WPHlcWw8VCm6DKEYdjQkLcGOGKsJrQEZpTXRuf2cS1jGp1wfsfKrE2iN0lBPFCn7Tngw5+1duOf17Whu9YsuRxiGHQ0xmSTkJge7O9E6t8MDBY1vQt9UpDhsqGpowYbDVaLLITK0Lw5WAAAaWvz4MoqXjhl2NCanbUg5Wud22NkxPqvZhKuHZQIA3udSFlFYrT/U/gvFhiheymLY0Rh1R1aUnrXj9nDbeTRQdmV9vLcMXl/0ttaJwqm51Y9thdXqnxl2SDOifUdWRb3S2eFVEUZ2QU4SeiXFoqHFjzX73KLLITKkL4tr0OTzw2ELbvbYfawWdV6f4KrEYNjRGGVHVrTefs7OTnSQJAnXDVduQudSFlE4KDuwJg1MR15KHPwBGVuOVJ/lvYyJYUdjctuWsUqqm+APRN+ha+2dHYYdo5s6oicA4LP9FahtjM7fNonCaX1b2Lm4byom9E3t9Fi0YdjRmKzEWFjNElr8AZyojb7t524Pd2NFiwEZCSjISECLP4BVX58QXQ6RodR5fdh9rBYAML5vihp2Nh5m2CENMJskZCcrO7Kia0jZ6/PD420FwJmdaKGcufM+78oiCqktR6rhD8jITYlDr6Q4jOudAkkCDpTXq79URhOGHQ2K1rkdZdu5zWKCM9YiuBqKhGuHBcPOpiNVKI/CJ2CicNnQ1sFROjpJDhsGZzkBABuj8Hwrhh0Nyk2Jzs6OclVEj3g7JEkSXA1FQnZyHEblJkGWgQ93s7tDFCrKNvMJfVLVx5T/jsa5HYYdDVI7O5XR2dnhvE506XgTOhGdP3edFwfK6yFJwLg+Kerj6tzOoUrIcnRtgGHY0aBo7exUtF0VwZ1Y0eWqoZkwmyR8dawWRyrqRZdDpHsb205NHpTpRLLDpj4+Oi8ZNrMJpbVeFEbZL9MMOxqkdHaKqhsQiKLt5+pVEU6GnWiSEm/HJf2Cv3Gyu0N0/jZ02HLeUazNjAtyE4NvE2VzOww7GtQzKRZmkwSvL6DOsUSD9pkd7sSKNupS1q7SqGuvE4WSLMtq2Bn/nbADtM/tbDgYXXM7DDsaZDWb0CspFkB07chiZyd6/WhQBmKsJhypbMDe4x7R5RDp1tGqRpTWemEzmzA6L+l7r5/Q1kXddKQqqg6uZdjRqGi8I6vjbiyKLvF2CyYPTAfA6yOIzofS1RmZk4g42/eP8BjW04UEuwW1TT58XVob6fKEYdjRKPX28ygaUmZnJ7op10d8+FVpVP3GSRRKp5vXUVjMJoztndL2ttEzt8Owo1HR1tkJBGRU1nPreTS7rH8PuGKtKPc0Y0th9DwJE4WKPyBj05Hgv51TzesoJvQNhp1oujqCYUej1M5OZXR0dk42tqC17bf5VC5jRSWbxYQrh2QACA4qE1HXfFPqQU2jD/F2C4b3cp327ZSuz9bCanh9/kiVJxTDjkZ17OxEw+4UZV4n2WGD1cy/ltFKuSvroz0n0NwaHU/CRKGiXBFxUe9kWM7wPNo3LR5pCXY0twaws/hkpMoTij9VNCo7ORaSBDS0+FFZ3yK6nLBT53W4hBXVxuanIN1ph8fbinX7K0SXQ6Qr6hURZ1jCAgBJktS32RAlV0cw7GiU3WJGliu4/Twa5nbcvCqCAJhNkno56Ps8YJDonHl9fmw7Wg3g7GEHAMb3ia4hZYYdDctLjZ4dWe62qyIYdkjZlfXJN+Wob24VXA2RPuwsPgmvL4AeCXb0S4s/69srgeirYzWobfKFuzzhGHY0LJp2ZPESUFIM6elE71QHmlsD+PfXZaLLIdIF5T6sCX1SIEnSWd8+KzEWvVMdCMjAliPG7+4w7GhYNJ2141ZndnhVRLSTJEkdVH6fu7KIzsn6c5zX6Sia5nYYdjQsGjs7HFAmALhueDDsrD9UqZ6/RESn5vH68NWxGgBdDTttcztRcCkow46GKbefF1Yaf/s5l7Goo9494jGslwv+gIyP9pwQXQ6Rpm0+XIWADPROdSArMfac329c71RIEnDIXY+yWm8YKxSPYUfDcpKDy1h13lbUNBp7gIydHfoupbvDpSyiM9t4WDk1OaVL7+eKs2JoT1fbxzD2UhbDjobF2szIcAZnWIx8+3ljS6u664adHVJcOzwLkgTsKDqJkmrjz60Rddf6s9yHdSbKstd6g8/tMOxoXG7bkHKRgYeUla5OrNWMePv3b+ml6JTujMG4tgsLP+CZO0SnVO7x4pC7HpIEXNS7a50dAJjQJxh2Nh6qMvS4BMOOxilzO0bu7HQ8UPBctkxS9JjatiuLd2URnZqyk2poTxcS42xdfv9ReUmwWUwo83hxuMK4P2cYdjQuNzV6Ojuc16HvumJwJmxmE/aX1+HbMo/ocog0RzkBeXyfri9hAUCM1YxRuUkAjD23w7CjcVHR2fHw9GQ
6NVecFRMH9ADAQWWi75JlucN9WF1fwlKoczsHGXZIkKiY2alnZ4dOT7k+4oNdpQgEjDtTQNRVRyobUObxwmYxYXRecrc/jhJ2Nh2pgt+g/8YYdjROOViwuqHFsPeXuD08Y4dOb9LANDhsZhyvacLO4pOiyyHSDKWrc2FOEmKs5m5/nKE9XUiIsaDO24o9x2tDVZ6mMOxoXLzdgtT4YAgoNmh3h1dF0JnEWM2YMiQDAJeyiDpSws7F/bo3r6MwmyR156NRr45g2NGB9juyjDm3w9OT6WyUpayP9pyAzx8QXA2ReP6AjE3KYYJ9uj+vo1ACE8MOCWP0O7LcDDt0FhP6pCDFYUNVQ4thn4yJumLv8Vp4vK1IiLGopyCfD2U31/aik/D6/Of98bSGYUcHjHz7uT8go7qhbRnLybBDp2Yxm3DNsEwAPHOHCAA2tG0Tv6h3Cizm8/9R3qeHAxnOGLS0BrD9qPFm4xh2dCA31bidnar6ZgRkwCQBKQ6GHTq969qWsj7+ugxNLcb7zZOoKzacxxURpyJJknq31gYDnrfDsKMDRu7sKEtYKfF2mE08PZlO74KcRPRKikVDix9rvi0XXQ6RMF6fH9vaui/nc77OdynByYhLxQw7OpCbHOzsVNQ1o6HtwkyjUIeT49nVoTOTJEm9PoK7siia7Sg6iZbWANKddvTpER+yj6uct7PneC1qG4111Imuws5TTz0FSZIwZ84c9TGv14tZs2YhJSUF8fHxuOGGG1Bebqzf+lxxViTFWQEY73BB9aoIzuvQOVB2ZX223224J2Oic6WemtwnNaT3CaY7Y9A3LR6yDGw6Yqzujm7CzrZt2/CnP/0Jw4YN6/T4Aw88gA8//BB///vfsW7dOpSWluL6668XVGX4GHVHlruu7aoIdnboHPRPT0BBRgJ8fhn/2ntCdDlEQrRfERGaeZ2OJvRRztupCvnHFkkXYae+vh4zZszAn//8ZyQlJamP19bW4uWXX8YzzzyDH/7wh7jwwgvxyiuvYOPGjdi8efNpP15zczM8Hk+nF60z6twOOzvUVUp3h0tZFI1qG33qKcdhCTsGndvRRdiZNWsWrr76akyePLnT4zt27IDP5+v0eEFBAXJycrBp06bTfryFCxfC5XKpL9nZ2WGrPVSM29nhzA51zbXDg1vQNxdWoazWK7gaosjadKQKAbltq7gr9KfOj+2dApMUvHfrRG1TyD++KJoPO2+99RZ27tyJhQsXfu91ZWVlsNlsSExM7PR4eno6ysrKTvsxH330UdTW1qovJSUloS475PJSjXmKcntnh1dF0LnplRSH0XlJkGVg5Vfs7lB02Xg4fEtYAOCKtWJYr0QAxlrK0nTYKSkpwf3334833ngDMTGh+2Fot9vhdDo7vWhde2fHWMtYPD2ZuuM6LmVRlFofxnkdhbKd3UhLWZoOOzt27IDb7cYFF1wAi8UCi8WCdevWYenSpbBYLEhPT0dLSwtqamo6vV95eTkyMjLEFB0meW1h50St1zBHecuyrA4opzHsUBdcPTQTFpOEPcdrcbiiXnQ5RBFxorYJRyoaYJKCJyeHS8e5HVmWw/Z5IknTYWfSpEnYs2cPdu3apb6MGjUKM2bMUP/barVizZo16vvs378fxcXFGDdunMDKQy8pzoqEGAsAoLjaGN2d+uZWeH3BSx3Z2aGuSHbYcEnbxYW8PoKihbKsNLRXIlyx1rB9ngtykmC3mOCua8YhtzF+mdB02ElISMCQIUM6vTgcDqSkpGDIkCFwuVy4++67MXfuXKxduxY7duzAnXfeiXHjxuGiiy4SXX5ISZKkdneOVhpjbkdZwoq3WxBnswiuhvRG2ZX1we5Sw/z2SXQmG9XzdcLX1QGAGKsZY/KTARhnKUvTYedcPPvss7jmmmtwww034NJLL0VGRgbeffdd0WWFRW7b9nOjzO2ow8ns6lA3/GhQOmKsJhRWNqhbcYmMSpZldV4nVPdhnYlyC/p6gwwp6+7X6c8++6zTn2NiYvDiiy/ixRdfFFNQBKmdHYPsyFI6O6kMO9QNDrsFPxqUgQ93l+L9XaXqDhIiIzpcUQ93XTPsFhMuyE06+zucp4v7pmIRgC1HqtDqD4TkZnWR9F19lGFnh6izqcODd2V9uLsU/gCXssi41h8MdnVG5yUjxmoO++cblOWEK9aKuuZWfGWAzinDjo7kpRqts9N2VQTDDnXTpf17wBVrhbuuGVuOGKPdTnQqGw4H/36PD+Et52diNkkY3zYbtNEAczsMOzqidHZKa5rQ3Kr/7eftnR0eKEjdY7OYcNXQ4InKPHOHjKrVH8DmtrATiXkdxfi+ytwOww5FUI94O+JsZgRk4NhJ/R/jXcEDBSkEpo4ILmV9tPeEIX4JIPquPcdrUdfcCmeMBYOzXBH7vMqur51FNWhq0fe/LYYdHZEkyVB3ZHFmh0JhTF4yMpwxqPO24rP9FaLLIQo5Zfv3+D6pMJukiH3e/FQHslwxaPEHsO1odcQ+bzgw7OiMevt5pf6HlHlVBIWCySThurbuDg8YJCNSDhOcEKF5HYUkSepS1obD+l7KYtjRGaN0dnz+AKobWgCws0Pn77q2XVmf7CtHndcnuBqi0Glq8WNH0UkA7TM0kXRxh6sj9IxhR2fUzo7Ot59X1ge7OhaThKQ4m+BqSO8GZznRp4cDza0B/PvrctHlEIXM9qJqtPgDyHTFoHfbjtxIUnZkfV3qwcm2X1D1iGFHZ4zS2XF72g4UjLfDFME1aDImSZLU6yPe382lLDKO9R3mdSQp8s+Vac4Y9E+PhywDm3R8vAPDjs7kpQY7O8dONsHnDwiupvvU4WQnl7AoNJSlrA2HKtW/X0R6t7FtXufifpGd1+lIuTpCz0tZDDs6k54QA7vFhNaAjNIa/W4/V4eT4xl2KDTyUh0Ynp0If0DGR3tOiC6H6LzVNLZgb2nw9GIlcIhghLkdhh2dMZkkQ1wbwc4OhYNyfcT7u44LroTo/G06XAVZBvqlxSPdKe7w1bG9k2E2STha1YhjJ/X5c4dhR4eMMLejXhXBzg6F0DXDMmGSgJ3FNSjW8S8DRED7du8JAnZhdZQQY8XwXsHDDDfq9BZ0hh0dMsKOLPX0ZIG/rZDxpDlj1Hb/h19xUJn0rf18HbFhp2MNej1vh2FHh4zR2eHMDoWHcsDgii+PQ5Z5Ezrp0/GaJhRWNsAkBZeRRFPDzqEqXf67YtjRobwU5fZz/Xd2OLNDoXbFkAzYLCYcdNfj27I60eUQdYsyDDw8OxHOGKvgaoCROYmIsZpQWd+MA+X1osvpMoYdHVIGlIurGuEP6C9hy7LcvozFzg6FmDPGih8OSAPAm9BJvza2hZ0JAndhdWS3mDEmP7j9XY+3oDPs6FBWYiysZgkt/gDKPF7R5XSZp6kVLW1nBPFeLAoH5Sb0D3eXIqDDXwgousmyjA2HtTOvo1BuQd/IsEORYDZJyE5u235eqb+5HWUnljPGghirWXA1ZEQ/KEhDgt2C4zVN2FF8UnQ5RF1y0F2PirpmxFhNuCA3UXQ5KiV4bT5SpbtDbRl2dE
rPczvt8zrciUXhEWM1Y8qQDAA8c4f0Z/3BYOdkdF4y7Bbt/EI4KNOJpDgrGlr8+OpYjehyuoRhR6faDxbUY2eH8zoUfspS1j+/OqG730Ipum3UyPk632UySR2ujtDXeTsW0QVQ97R3dvQYdoLLWNyJReE0rncKUuPtqKxvxlvbSjC0p0tYLX16OJCggR01pH2t/gA2H6kG0H5Ng5aM75uCf+45gfWHKnHfpH6iyzlnDDs6pecrI9RlLA4nUxhZzCZcMywTr248inkr9gqtpXcPBz6cfTEcdj7l0pntPlaL+uZWJMZZMSjTKbqc71EC2JfFJ9HY0oo4mz7+TuujSvqejp0dWZYhSZLgis6duozFsENhdueEPGw7Wo3aJp+wGqobWnCkogFP/PMbLLx+mLA6SB+U83XG90mByaS95/Wc5Dj0TIzF8ZombC2sxsS2Yx60jmFHp3omxcJskuD1BeCuaxZ6SVxXtXd29FMz6VNuigP/vO8SoTVsOlyF//jLZvxtawl+MCANlw/OEFoPaVt72NHeEhYASJKEi/um4u3tJdh4uEo3YYcDyjplNZvQKykWAHBUZ9vP2dmhaDKuTwpmXtobAPDIu3vUmTWi72psacXOtqMStDivoxjft+1wwYP6OW+HYUfH2u/I0tfcDmd2KNrM/VF/DMx0orqhBQ/94ytd3i1E4bft6En4/DJ6Jsaqc5lapHSdvjnhQXVDi+Bqzg3Djo61336un86O1+dX5yfY2aFoYbeY8dz0EbBZTPhsfwX+d3OR6JJIg5QlrAl9UzQ9h9kjwY6CjAQA7dvktY5hR8f02NmprA92dWxmE1yx3IpL0aN/egIevbIAAPDEP/fhkJuXlFJn7WFHu0tYio63oOsBw46O6bGz03FeR8u/uRCFw+3j8nBJv1Q0twYw5+1daGnlYYcUVN3Qgq9LPQC0O5zc0YS2uZ0NOrkni2FHxzp2dvQyA1DB4WSKYiaThD/8ZDgS46zYe9yDJZ8cEF0SacSmtos/B6Qn6OL5cUx+CiwmCcXVjSip1v7qAsOOjmUnx0KSgPrmVlTpZEiMO7Eo2qU7Y/DU9UMBAMvWHcbWwmrBFZEWrNfREhYAxNstGJGdCEAf3R2GHR2zW8zIcgW3n+vljqwKT9tVEQw7FMWuGJKJn1zYC7IMPPD2Lni84g49JG1ovw8rRXAl506d2zms/bkdhh2dy0ttm9up1H4bEQAq6tnZIQKA3143GDnJcThe04Tfvf+16HJIoJLqRhRVNcJskjAmP1l0OedMCTsbD1UiEND2KAXDjs61z+3oo7Pj9vD0ZCIguAzw7M3DYZKAd788jpVflYouiQRRujojshN1dWHsiOxExNnMqGpowf5ybe8uZNjRufYdWfrq7HAZiwi4MDcZs3/QFwDw6/f24kRtk+CKSARl+/aEPvpZwgIAm8WkdqK0PrfDsKNzeu3scBmLKOi/JvXD8F4u1Db58Mu/79b8cgCFlizLHeZ19DGc3NHF6nk7DDsURu23n2u/sxMIyOqhgmlOhh0iIHjP3bM3j0Cs1YwNh6rwPxsKRZdEEbS/vA6V9S2ItZoxMidJdDldppwJtKWwWtPnRjHs6FxOcnAZq7bJh5pGbW8/P9nYgta231pTHAw7RIrePeIx75pBAIDFq/bj2zKP4IooUpTLNMfkJ8Nm0d+P5IKMBKQ4bGhs8WP3sRrR5ZyW/r6z1EmszYwMZ3DYV+vdHWVeJ9lh0+U/aqJwumVMNiYPTEOLP4A5b+2C1+cXXRJFwMa2bdt62nLekckkYVwf7d+Czp84BqDcjqv1uR11XieeXR2i75IkCU/dMAyp8TZ8W1aHP3y8X3RJFGY+fwBbjihhR3/zOgplbkfLl4Iy7BiAOrej8bN2lKsiOK9DdGqp8XYsumEYAOAv6ws1P/RJ52d3SQ0aWvxIdtgwMMMpupxuU4Lal8U1aGhuFVzNqTHsGEBuqk46O3Xs7BCdzaSB6ZgxNgcA8P/e2a35WTzqPuWKiHF9UmAy6fdi5OzkOOQkx6E1IGv2+hOGHQNo35Gl7bCjXgLKzg7RGf366oHonepAmceLX7+3VzcX/VLXbFTP19HvEpZCmTlar9FuJMOOAbTP7Gh7GctdF7wXi50dojOLs1mwZPoIWEwS/rnnBN778rjokijEGppbsbP4JID2mRc9m6Dx83YYdgxAOViwqqFF0xcKutWZHV4VQXQ2w3olYs7kfgCAx97/GiXV2v5lhrpm69FqtAZk9EqKRU7bL6x6Nq53sLPzbVmdep6aljDsGEC83YLUeBsAoFjD3Z1KzuwQdckvJvbFqNwk1De3Yu47u+Dn6cqGsaFtm7YRujoAkBJvx6DM4JD1Rg3egs6wYxC5OpjbcXM3FlGXmE0Snr15BOLtFmw7ehLL1x0WXRKFyIa2QDDeIGEHaJ/b2aDB83YYdgxC63M7jS2tqG/bkshLQInOXXZyHH533WAAwLOrD2DPsVrBFdH5qqxvxr4TwVOyx+vs8s8zUYLbBg2et8OwYxDtZ+1os7Oj7MSKsZoQb7cIroZIX264oCeuGpqB1oCM+9/+Ek0tPF1Zzza1dXUKMhKQaqBl/TF5ybCaJRw72aS5kQpNh52FCxdi9OjRSEhIQFpaGqZNm4b9+zufKur1ejFr1iykpKQgPj4eN9xwA8rLywVVLI7WOzvqgYIJMZAk/Z4nQSSCJElYMG0o0p12HKlowJMf7RNdEp0HZceSUeZ1FA67BSOzg5eZam0LuqbDzrp16zBr1ixs3rwZq1evhs/nw+WXX46GhvbuxQMPPIAPP/wQf//737Fu3TqUlpbi+uuvF1i1GFo/a0c9UJBLWETdkuSw4Q8/GQ4A+OvmIqz91i24IuouZZlHz1dEnM4EjS5laTrsrFq1CnfccQcGDx6M4cOH49VXX0VxcTF27NgBAKitrcXLL7+MZ555Bj/84Q9x4YUX4pVXXsHGjRuxefPm037c5uZmeDyeTi96p4Qdd10zGlu0d1x3e2eHYYeouy7p1wN3TcgHADz4j92a3OJLZ1Zc1YiS6iZYTBLG5CeLLifklCHljYcqEdDQ7kFNh53vqq0NDuYlJwf/guzYsQM+nw+TJ09W36agoAA5OTnYtGnTaT/OwoUL4XK51Jfs7OzwFh4BrjgrEuOsALS5lKUeKMiwQ3ReHrpiAPqnx6OyvgWP/N8enq6sM0rHY2ROIhwGnF8cnp0Ih82Mk40+7CvTTiNBN2EnEAhgzpw5mDBhAoYMGQIAKCsrg81mQ2JiYqe3TU9PR1lZ2Wk/1qOPPora2lr1paSkJJylR4yy/VyLd2Sxs0MUGjFWM5bcPBI2swmf7CvHW9uM8fwVLZRZFiMuYQGA1WzC2LYDBrV0mrJuws6sWbOwd+9evPXWW+f9sex2O5xOZ6cXI8hrG1I+qsnODmd2iEJlUJYTv5zSHwAw/8NvUKjRXZjUWSAgqzuxjBp2gI5XR2jncEFdhJ3Zs2dj5cqVWLt2LXr16qU+npGRgZaWFtTU1HR6+/LycmRkZES4SvH00dnhVRFEofCzi3tjXO8UNPn8m
PP2Lvj8AdEl0VnsK/OguqEFDpsZI7ITRZcTNsrcztbCarS0auPvpabDjizLmD17Nt577z18+umnyM/P7/T6Cy+8EFarFWvWrFEf279/P4qLizFu3LhIlyuc2tmpZGeHyOhMJglP3zQcCTEW7C6pwfOfHhJdEp2Fcsv5mPxkWM2a/vF7XgakJyA13oYmnx9ftl12Kpqmv9uzZs3C//7v/+LNN99EQkICysrKUFZWhqamJgCAy+XC3Xffjblz52Lt2rXYsWMH7rzzTowbNw4XXXSR4OojT6udHX9ARlU9Z3aIQi0rMRYLfjwUAPDCpwexo0gbP1jo1Iw+r6OQJAnj+2jrFnRNh51ly5ahtrYWEydORGZmpvry9ttvq2/z7LPP4pprrsENN9yASy+9FBkZGXj33XcFVi2O0tkprfXC69POCatVDc0IyIAkAckOm+hyiAzluuFZmDYiCwEZeODtXeq1LKQtLa0BbC2sBmD8sAO0H5i4QSOXgmo67MiyfMqXO+64Q32bmJgYvPjii6iurkZDQwPefffdqJzXAYJBIqFtK2NJtXaWstyeYFcnxWGHxcCtWyJRfj91CHomxqK4uhGPf/iN6HLoFHaV1KDJ50eKw4YB6Qmiywm78W1zO7tKalDn9QmuRuNhh7pGkiTkpmpvR1YFl7CIwsoVa8XTNw2HJAFvby/Bqr2nP3qDxFCWsMb3TYXJZPwrc3olxSEvJQ7+gKx2tERi2DEYLc7tVHg4nEwUbhf1TsF/XtoHAPDou1/B7fEKrog62qjM6xjolvOzUW5B18I9WQw7BtN+1o6Gwg47O0QRMfdH/TEo04mTjT788h9f8XRljahvbsWukhoA0TGvo1DmdjZq4Lwdhh2Dae/saGcZS/kNk50dovCyWUx4bvoI2C0mfH6gAq9vKhJdEgHYWliF1oCMnOQ4ZCfHiS4nYsb1ToEkAfvL69Qrg0Rh2DEYLd5+zs4OUeT0S0/Ar64aCAB48qN9OFheJ7giWn/Q+Kcmn0qSw4bBWcEbCjYJ3pXFsGMwyjLW8ZNNmjm50q3O7PD0ZKJIuG1cLi7r3wPNrQHc/9YuzTwXRKuNh5XzdaJnXkcxoe28nfUHxc7tMOwYTI8EO2KtZgRk4NhJbSxlqZ0dJzs7RJEgSRL++8ZhSIqz4psTHixe9S0CAc7viOD2ePFtWbC7phy0F03a78mqFDpDxrBjMJIkIbetu6OFuR1Zlts7O/EMO0SRkuaMwcLrhwEA/rK+EFc+9wXe33UcrbxDKyI8Xh/++NkhXLX0CwDAoExnVB6qOjovGTazCaW1XqFHoliEfWYKm7wUB74tq9PE3E5Dix9Nbac5c0CZKLKuGJKBX11VgKVrDmF/eR3uf2sXnv73Acy8tDduvLAXYqxm0SUaTmV9M/5nfSH+uqkIdW2nWfdMjMX8qYMFVyZGrM2MK4dmwGo2Ce3sMOwYkHKwoBY6O8pOLIfNDIedf92IIm3mpX1w8+gc/HXTUfzPhqMorm7Eb1bsxXNrDuJnF+djxkW5iOe/zfN27GQj/vz5Eby1rQTNbTNSfdPice/EPrh2eJahL/48m+emjxRdAsOOEWlpR5Zy23mak8PJRKK4Yq2Y/cN+uPvi3nhrWzH+/PkRlNZ6sfBf3+LFtYdwx/g83DEhPyqXWc7XIXcdln12JLhE2DYXNTw7EfdO7IMfDUyPitOS9YBhx4C0NLNTUcd5HSKtiLWZceeEfMwYm4sVu45j+brDOFLRgKWfHsKfvyjE9DHZuOeS3shKjBVdquZ9dawGf1x7GB9/UwZldWZC3xTcO7EvxvdJgSQx5GgJw44BKZ2dkupGtPoDQi/fVDo7PbgTi0gzbBYTbhqVjRsu6IWPvy7DHz87hL3HPXhlw1H87+Yi/HhkT/z8sj7o3SNedKmaIssyNh2pwrLPDuOLDlupLx+Ujnt/0BcjshPFFUdnxLBjQBnOGNgsJrS0BlBa40VOirgTO5XODg8UJNIes0nCVUMzceWQDHxxsBIvrj2ELYXVeGf7Mfx9xzFcNSQTv5jYB0N6ukSXKlQgIGPNt2788bND+LK4BkDwezd1eBZ+PrEP+kfBLeZ6x7BjQCaThNzkOBx01+NoVYPQsKMcEc6dWETaJUkSLu3fA5f274EdRdX449rDWPOtG//ccwL/3HMCl/XvgXsn9sGY/OSoWp5p9Qew8qsT+ONnh3CgvB5AsCt286hszLy0d1Rd/aB3DDsGlZviwEF3fdvt5z2E1dHe2eGAMpEeXJibjJfvSMa3ZR4s++wwPtxdinUHKrDuQAVG5Sbh3h/0wQ8GpBk69Hh9fvx9xzG89PlhlFQ3AQDi7RbcOi4Xd03I5y9vOsSwY1Dtt5+LHVJWB5T55ECkKwUZTjw3fSTm/qg//vT5Efxj+zFsLzqJu17djoGZTvxiYh9cPTQTZgPtNqrz+vDGlmK8vL5Qfe5Kcdhw18X5+OlFuXDFWgVXSN3FsGNQuanK7edit59zZodI33JTHHjyx0Nx/6R+eHl9Id7YXIR9Jzy4729f4ul/78fPL+uD6y/oCbtFvwcUVje04JUNhXht41F4vMGDALNcMZh5aW/cPDoHsTb9fm0UxLBjUFro7Pj8AVQ1tABgZ4dI79KdMfjVVQNx78Q+eG1jEV7ZWIiiqkY8+u4eLPnkAH52cW/8x9gcXR0eWlrThD9/cQRvbS1RT3rv3cOBX1zWB1NH9ITNEr0HARqNfv5WUpco28+LqxrhD8hCWs1V9cGgYzZJSI7jYWVERpAYZ8P9k/vhZ5fk429bi/GXLwpR5vFiwUf78OJnh3D7uDzcMT4PSRo+oPBIRT2WrzuM9748Dp8/eEjO0J4u3DuxDy4fnGGopTkKYtgxqExXDKxmCS3+AMo8XvQUcEiYshMrNd7GU0SJDMZht+Bnl/TGreNyseLL41j22WEcrWrEc2sO4s9fHMF/jMnBzy7pjQyXdjYn7D1eiz9+dgj/2tt+EOBFvZNx78S+uKRfqqGHrqMdw45BWcwmZCfF4UhlA3YUnRRyAdu3ZXUAuBOLyMjsFjNuHp2DGy/Mxr/2nsAf1x7GNyc8+Mv6Qry+qQjXX9ATt44TO9xbXNWI5Z8fwecHKtTHJg9Mwy8m9sWFuUnC6qLIYdgxsNyUYNi5729fCq2D8zpExmc2SbhmWBauHpqJzw5UYNnaw9h6tBpvbSvBW9tKRJcHADBJwLXDs/CLiX1QkOEUXQ5FEMOOgU0b2RM7ik6qN/CKYLOYcO3wTGGfn4giS5Ik/GBAGn4wIA3bjlZj2WeHselwFQICusuK4PNQFv7z0t7IbZtnpOgiySLWNzTG4/HA5XKhtrYWTifTPhERkR6c689v7qsjIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkNj2CEiIiJDY9ghIiIiQ2PYISIiIkOziC5AC2RZBhC8Kp6IiIj0Qfm5rfwcPx2GHQB1dXUAgOzsbMGVEBERUVfV1dXB5XKd9vWSfLY4
FAUCgQBKS0uRkJAASZJC9nE9Hg+ys7NRUlICp9MZso+rJ9H+PeDXH91fP8DvQbR//QC/B+H8+mVZRl1dHbKysmAynX4yh50dACaTCb169Qrbx3c6nVH5F7yjaP8e8OuP7q8f4Pcg2r9+gN+DcH39Z+roKDigTERERIbGsENERESGxrATRna7Hb/97W9ht9tFlyJMtH8P+PVH99cP8HsQ7V8/wO+BFr5+DigTERGRobGzQ0RERIbGsENERESGxrBDREREhsawQ0RERIbGsBNGL774IvLy8hATE4OxY8di69atokuKiIULF2L06NFISEhAWloapk2bhv3794suS5innnoKkiRhzpw5okuJqOPHj+OnP/0pUlJSEBsbi6FDh2L79u2iy4oIv9+PefPmIT8/H7GxsejTpw8ef/zxs97fo2eff/45rr32WmRlZUGSJKxYsaLT62VZxmOPPYbMzEzExsZi8uTJOHjwoJhiw+BMX7/P58PDDz+MoUOHwuFwICsrC7fddhtKS0vFFRwGZ/s70NHPf/5zSJKEJUuWRKQ2hp0wefvttzF37lz89re/xc6dOzF8+HBMmTIFbrdbdGlht27dOsyaNQubN2/G6tWr4fP5cPnll6OhoUF0aRG3bds2/OlPf8KwYcNElxJRJ0+exIQJE2C1WvGvf/0L33zzDZ5++mkkJSWJLi0iFi1ahGXLluGFF17Avn37sGjRIixevBjPP/+86NLCpqGhAcOHD8eLL754ytcvXrwYS5cuxfLly7FlyxY4HA5MmTIFXq83wpWGx5m+/sbGRuzcuRPz5s3Dzp078e6772L//v247rrrBFQaPmf7O6B47733sHnzZmRlZUWoMgAyhcWYMWPkWbNmqX/2+/1yVlaWvHDhQoFVieF2u2UA8rp160SXElF1dXVyv3795NWrV8uXXXaZfP/994suKWIefvhh+eKLLxZdhjBXX321fNddd3V67Prrr5dnzJghqKLIAiC/99576p8DgYCckZEh//d//7f6WE1NjWy32+W//e1vAioMr+9+/aeydetWGYBcVFQUmaIi7HTfg2PHjsk9e/aU9+7dK+fm5srPPvtsROphZycMWlpasGPHDkyePFl9zGQyYfLkydi0aZPAysSora0FACQnJwuuJLJmzZqFq6++utPfg2jxwQcfYNSoUfjJT36CtLQ0jBw5En/+859FlxUx48ePx5o1a3DgwAEAwO7du7F+/XpceeWVgisTo7CwEGVlZZ3+LbhcLowdOzYqnxOB4POiJElITEwUXUrEBAIB3HrrrXjwwQcxePDgiH5uXgQaBpWVlfD7/UhPT+/0eHp6Or799ltBVYkRCAQwZ84cTJgwAUOGDBFdTsS89dZb2LlzJ7Zt2ya6FCGOHDmCZcuWYe7cufjVr36Fbdu24b777oPNZsPtt98uurywe+SRR+DxeFBQUACz2Qy/348FCxZgxowZoksToqysDABO+ZyovC6aeL1ePPzww7jlllui6mLQRYsWwWKx4L777ov452bYobCaNWsW9u7di/Xr14suJWJKSkpw//33Y/Xq1YiJiRFdjhCBQACjRo3Ck08+CQAYOXIk9u7di+XLl0dF2HnnnXfwxhtv4M0338TgwYOxa9cuzJkzB1lZWVHx9dPp+Xw+3HTTTZBlGcuWLRNdTsTs2LEDzz33HHbu3AlJkiL++bmMFQapqakwm80oLy/v9Hh5eTkyMjIEVRV5s2fPxsqVK7F27Vr06tVLdDkRs2PHDrjdblxwwQWwWCywWCxYt24dli5dCovFAr/fL7rEsMvMzMSgQYM6PTZw4EAUFxcLqiiyHnzwQTzyyCOYPn06hg4diltvvRUPPPAAFi5cKLo0IZTnvWh/TlSCTlFREVavXh1VXZ0vvvgCbrcbOTk56vNiUVER/t//+3/Iy8sL++dn2AkDm82GCy+8EGvWrFEfCwQCWLNmDcaNGyewssiQZRmzZ8/Ge++9h08//RT5+fmiS4qoSZMmYc+ePdi1a5f6MmrUKMyYMQO7du2C2WwWXWLYTZgw4XvHDRw4cAC5ubmCKoqsxsZGmEydn17NZjMCgYCgisTKz89HRkZGp+dEj8eDLVu2RMVzItAedA4ePIhPPvkEKSkpokuKqFtvvRVfffVVp+fFrKwsPPjgg/j444/D/vm5jBUmc+fOxe23345Ro0ZhzJgxWLJkCRoaGnDnnXeKLi3sZs2ahTfffBPvv/8+EhIS1DV5l8uF2NhYwdWFX0JCwvfmkxwOB1JSUqJmbumBBx7A+PHj8eSTT+Kmm27C1q1b8dJLL+Gll14SXVpEXHvttViwYAFycnIwePBgfPnll3jmmWdw1113iS4tbOrr63Ho0CH1z4WFhdi1axeSk5ORk5ODOXPm4IknnkC/fv2Qn5+PefPmISsrC9OmTRNXdAid6evPzMzEjTfeiJ07d2LlypXw+/3q82JycjJsNpuoskPqbH8HvhvwrFYrMjIyMGDAgPAXF5E9X1Hq+eefl3NycmSbzSaPGTNG3rx5s+iSIgLAKV9eeeUV0aUJE21bz2VZlj/88EN5yJAhst1ulwsKCuSXXnpJdEkR4/F45Pvvv1/OycmRY2Ji5N69e8u//vWv5ebmZtGlhc3atWtP+e/+9ttvl2U5uP183rx5cnp6umy32+VJkybJ+/fvF1t0CJ3p6y8sLDzt8+LatWtFlx4yZ/s78F2R3HouybKBj/QkIiKiqMeZHSIiIjI0hh0iIiIyNIYdIiIiMjSGHSIiIjI0hh0iIiIyNIYdIiIiMjSGHSIiIjI0hh0iIiIyNIYdIiIAeXl5WLJkiegyiCgMGHaIKOLuuOMO9U6kiRMnYs6cORH73K+++ioSExO/9/i2bdswc+bMiNVBRJHDi0CJyBBaWlrO60LFHj16hLAaItISdnaISJg77rgD69atw3PPPQdJkiBJEo4ePQoA2Lt3L6688krEx8cjPT0dt956KyorK9X3nThxImbPno05c+YgNTUVU6ZMAQA888wzGDp0KBwOB7Kzs3Hvvfeivr4eAPDZZ5/hzjvvRG1trfr5fve73wH4/jJWcXExpk6divj4eDidTtx0000oLy9XX/+73/0OI0aMwF//+lfk5eXB5XJh+vTpqKurC+83jYi6jGGHiIR57rnnMG7cONxzzz04ceIETpw4gezsbNTU1OCHP/whRo4cie3bt2PVqlUoLy/HTTfd1On9X3vtNdhsNmzYsAHLly8HAJhMJixduhRff/01XnvtNXz66ad46KGHAADjx4/HkiVL4HQ61c/3y1/+8nt1BQIBTJ06FdXV1Vi3bh1Wr16NI0eO4Oabb+70docPH8aKFSuwcuVKrFy5EuvWrcNTTz0Vpu8WEXUXl7GISBiXywWbzYa4uDhkZGSoj7/wwgsYOXIknnzySfWx//mf/0F2djYOHDiA/v37AwD69euHxYsXd/qYHed/8vLy8MQTT+DnP/85/vjHP8Jms8HlckGSpE6f77vWrFmDPXv2oLCwENnZ2QCA119/HYMHD8a2bdswevRoAMFQ9OqrryIhIQE
AcOutt2LNmjVYsGDB+X1jiCik2NkhIs3ZvXs31q5di/j4ePWloKAAQLCborjwwgu/976ffPIJJk2ahJ49eyIhIQG33norqqqq0NjYeM6ff9++fcjOzlaDDgAMGjQIiYmJ2Ldvn/pYXl6eGnQAIDMzE263u0tfKxGFHzs7RKQ59fX1uPbaa7Fo0aLvvS4zM1P9b4fD0el1R48exTXXXINf/OIXWLBgAZKTk7F+/XrcfffdaGlpQVxcXEjrtFqtnf4sSRICgUBIPwcRnT+GHSISymazwe/3d3rsggsuwP/93/8hLy8PFsu5P03t2LEDgUAATz/9NEymYOP6nXfeOevn+66BAweipKQEJSUlanfnm2++QU1NDQYNGnTO9RCRNnAZi4iEysvLw5YtW3D06FFUVlYiEAhg1qxZqK6uxi233IJt27bh8OHD+Pjjj3HnnXeeMaj07dsXPp8Pzz//PI4cOYK//vWv6uByx89XX1+PNWvWoLKy8pTLW5MnT8bQoUMxY8YM7Ny5E1u3bsVtt92Gyy67DKNGjQr594CIwothh4iE+uUvfwmz2YxBgwahR48eKC4uRlZWFjZs2AC/34/LL78cQ4cOxZw5c5CYmKh2bE5l+PDheOaZZ7Bo0SIMGTIEb7zxBhYuXNjpbcaPH4+f//znuPnmm9GjR4/vDTgDweWo999/H0lJSbj00ksxefJk9O7dG2+//XbIv34iCj9JlmVZdBFERERE4cLODhERERkaww4REREZGsMOERERGRrDDhERERkaww4REREZGsMOERERGRrDDhERERkaww4REREZGsMOERERGRrDDhERERkaww4REREZ2v8HIXnv1Uyqb3EAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "\n", + "trace.GRAPH.clear()\n", + "\n", + "@trace.bundle(trainable=True)\n", + "def fun(x):\n", + " \"\"\" A linear predictor function \"\"\"\n", + " return 0\n", + "\n", + "optimizer = OptoPrime(fun.parameters())\n", + "\n", + "ls = []\n", + "for i in range(15):\n", + " try:\n", + " l_eval = compute_loss(inputs, outputs)\n", + " print(f'Iteration {i} Loss: {l_eval.data}')\n", + " ls.append(l_eval.data)\n", + "\n", + " ind = np.random.randint(0, N) % N\n", + " target = compute_loss([inputs[ind]], [outputs[ind]])\n", + " feedback = 'Minimize loss'\n", + " except trace.ExecutionError as e:\n", + " target = e.exception_node\n", + " feedback = str(e.exception_node.data)\n", + "\n", + " optimizer.zero_feedback()\n", + " optimizer.backward(target, feedback)\n", + " optimizer.step()\n", + "\n", + "\n", + "\n", + "# plot ls\n", + "import matplotlib.pyplot as plt\n", + "plt.plot(ls)\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel('Loss')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Batching Non-Commutative Feedbacks\n", + "\n", + "In the earlier numerical example, the loss function was commutative so that we can do `batch_loss += loss(each_input)`. What if the feedbacks received are not commutative? This can happen often with non-numeric (e.g. text) feedbacks. Here we will see a simple design pattern for using `trace` and `OptoPrime` for batch optimization in such cases." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "from opto.trace import bundle\n", + "\n", + "@bundle(trainable=False)\n", + "def concat(*items):\n", + " \"\"\" Concatenate the items into a single string \"\"\"\n", + " output = ''\n", + " for i, item in enumerate(items):\n", + " output += f'ID {[i]}: {item}\\n'\n", + " return output" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the `concat` function when called with a list of feedbacks will concatenate them all with an identifier for each element. This way, the optimizer when given a batch of outputs and a corresponding batch of feedbacks can disambiguate which feedback corresponds to which output." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt\n", + " \n", + "You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n", + "\n", + "Specifically, a problem will be composed of the following parts:\n", + "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n", + "- #Code: the code defined in the problem.\n", + "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. 
You can use the values in #Others to help infer how those functions work.\n", + "- #Variables: the input variables that you can change.\n", + "- #Constraints: the constraints or descriptions of the variables in #Variables.\n", + "- #Inputs: the values of other inputs to the code, which are not changeable.\n", + "- #Others: the intermediate values created through the code execution.\n", + "- #Outputs: the result of the code output.\n", + "- #Feedback: the feedback about the code's execution result.\n", + "\n", + "In #Variables, #Inputs, #Outputs, and #Others, the format is:\n", + "\n", + " = \n", + "\n", + "If is (code), it means is the source code of a python code, which may include docstring and definitions.\n", + "\n", + "Output_format: Your output should be in the following json format, satisfying the json syntax:\n", + "\n", + "{\n", + "\"reasoning\": ,\n", + "\"answer\": ,\n", + "\"suggestion\": {\n", + " : ,\n", + " : ,\n", + "}\n", + "}\n", + "\n", + "In \"reasoning\", explain the problem: 1. what the #Instruction means 2. what the #Feedback on #Output means to #Variables considering how #Variables are used in #Code and other values in #Documentation, #Inputs, #Others. 3. Reasoning about the suggested changes in #Variables (if needed) and the expected result.\n", + "\n", + "If #Instruction asks for an answer, write it down in \"answer\".\n", + "\n", + "If you need to suggest a change in the values of #Variables, write down the suggested values in \"suggestion\". Remember you can change only the values in #Variables, not others. When of a variable is (code), you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.\n", + "\n", + "If no changes or answer are needed, just output TERMINATE.\n", + "\n", + "Now you see problem instance:\n", + "\n", + "================================\n", + "\n", + "#Instruction\n", + "You need to change the of the variables in #Variables to improve the output in accordance to #Feedback.\n", + "\n", + "#Code\n", + "eval90 = eval(lst=lst0, __code=__code1)\n", + "eval91 = eval(lst=lst1, __code=__code1)\n", + "eval92 = eval(lst=lst2, __code=__code1)\n", + "eval93 = eval(lst=lst3, __code=__code1)\n", + "eq0 = eq(x=eval90, y=list0)\n", + "eq1 = eq(x=eval91, y=list1)\n", + "eq2 = eq(x=eval92, y=list2)\n", + "eq3 = eq(x=eval93, y=list3)\n", + "concat1 = concat(args_0=eq0, args_1=eq1, args_2=eq2, args_3=eq3)\n", + "\n", + "#Documentation\n", + "[eval] This operator eval(__code, *args, **kwargs) evaluates the code block, where __code is the code (str) and *args and **kwargs are the arguments of the function. 
The output is the result of the evaluation, i.e., __code(*args, **kwargs).\n", + "[eq] This is an eq operator of x and y.\n", + "[concat] Concatenate the items into a single string\n", + "\n", + "#Variables\n", + "(code) __code1:def strange_sort_list(lst):\n", + " '''\n", + " Given list of integers, return list in strange order.\n", + " Strange sorting, is when you start with the minimum value,\n", + " then maximum of the remaining integers, then minimum and so on.\n", + " '''\n", + " lst = sorted(lst)\n", + " return lst\n", + "\n", + "#Constraints\n", + "(code) __code1: The code should start with:\n", + "def strange_sort_list(lst):\n", + "\n", + "#Inputs\n", + "(list) lst1=[5, 5, 5, 5]\n", + "(list) lst2=[]\n", + "(list) lst0=[1, 2, 3, 4]\n", + "(list) lst3=[9, 8, 7, 6, 5, 4]\n", + "(list) list1=[5, 5, 5, 5]\n", + "(list) list2=[]\n", + "(list) list0=[1, 4, 2, 3]\n", + "(list) list3=[4, 9, 5, 8, 6, 7]\n", + "\n", + "#Others\n", + "(list) eval91=[5, 5, 5, 5]\n", + "(list) eval92=[]\n", + "(list) eval90=[1, 2, 3, 4]\n", + "(list) eval93=[4, 5, 6, 7, 8, 9]\n", + "(bool) eq0=False\n", + "(bool) eq1=True\n", + "(bool) eq2=True\n", + "(bool) eq3=False\n", + "\n", + "#Outputs\n", + "(str) concat1=ID [0]: False\n", + "ID [1]: True\n", + "ID [2]: True\n", + "ID [3]: False\n", + "\n", + "\n", + "#Feedback\n", + "ID [0]: test case failed!\n", + "ID [1]: test case passed!\n", + "ID [2]: test case passed!\n", + "ID [3]: test case failed!\n", + "\n", + "\n", + "================================\n", + "\n", + "\n", + "Your response:\n", + "\n", + "LLM response:\n", + " {\n", + "\"reasoning\": \"The #Instruction requires us to modify the values in #Variables, specifically the function __code1, to improve the output according to the feedback. According to #Feedback, test cases 0 and 3 failed, while test cases 1 and 2 passed. The current definition of strange_sort_list only sorts the list in ascending order, which is not sufficient for the 'strange order' specified. The 'strange order' is defined as starting with the minimum, then the maximum of the remaining, then the next minimum, and so forth. Therefore, we need to modify the function strange_sort_list(lst) to implement this logic. \\n\\nThe correct transformation should alternate between taking the smallest and largest remaining values in the list until the list is exhausted. 
This adjustment will ensure lists such as lst0 and lst3 are correctly transformed to match list0 and list3, respectively.\",\n", + "\"answer\": null,\n", + "\"suggestion\": {\n", + " \"__code1\": \"def strange_sort_list(lst):\\n '''\\n Given list of integers, return list in strange order.\\n Strange sorting, is when you start with the minimum value,\\n then maximum of the remaining integers, then minimum and so on.\\n '''\\n lst = sorted(lst)\\n result = []\\n while lst:\\n result.append(lst.pop(0)) # take min\\n if lst:\\n result.append(lst.pop(-1)) # take max\\n return result\"\n", + "}\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "{: \"def strange_sort_list(lst):\\n '''\\n Given list of integers, return list in strange order.\\n Strange sorting, is when you start with the minimum value,\\n then maximum of the remaining integers, then minimum and so on.\\n '''\\n lst = sorted(lst)\\n result = []\\n while lst:\\n result.append(lst.pop(0)) # take min\\n if lst:\\n result.append(lst.pop(-1)) # take max\\n return result\"}" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "@bundle(trainable=True)\n", + "def strange_sort_list(lst):\n", + " '''\n", + " Given list of integers, return list in strange order.\n", + " Strange sorting, is when you start with the minimum value,\n", + " then maximum of the remaining integers, then minimum and so on.\n", + " '''\n", + " lst = sorted(lst)\n", + " return lst\n", + "\n", + "def get_feedback(predict, target):\n", + " if predict == target:\n", + " return \"test case passed!\"\n", + " else:\n", + " return \"test case failed!\"\n", + " \n", + "from opto.optimizers import OptoPrime\n", + "\n", + "test_ground_truths = [[1, 4, 2, 3], [5, 5, 5, 5], [], [4, 9, 5, 8, 6, 7]]\n", + "test_inputs = [[1, 2, 3, 4], [5, 5, 5, 5], [], [9, 8, 7, 6, 5, 4]]\n", + "\n", + "optimizer = OptoPrime(strange_sort_list.parameters())\n", + "\n", + "outputs = []\n", + "feedbacks = []\n", + "for i in range(len(test_inputs)):\n", + " try:\n", + " test_output = strange_sort_list(test_inputs[i])\n", + " feedback = get_feedback(test_output, test_ground_truths[i])\n", + " except trace.ExecutionError as e:\n", + " feedback = e.exception_node.data\n", + " test_output = e.exception_node\n", + " feedbacks.append(feedback)\n", + " \n", + " correctness = test_output.eq(test_ground_truths[i])\n", + " outputs.append(correctness)\n", + "\n", + "batched_feedback = concat(*feedbacks)\n", + "batched_outputs = concat(*outputs)\n", + "optimizer.zero_feedback()\n", + "optimizer.backward(batched_outputs, batched_feedback.data)\n", + "optimizer.step(verbose=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using the functions in `opto.trainer` to perform Batching\n", + "\n", + "In the earlier examples, we wrote our own design patterns for accomplishing batch optimization. However, Trace provides the `MinibatchAlgorithm` to accomplish this automatically.\n", + "Let us see how the abstractions in `opto.trainer` allow us to scale up optimization, for example, doing minibatch optimization on the GSM8K dataset, which is a dataset of math word problems.\n",
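+    "\n",
+    "The trainer consumes a dataset as a dict of two parallel lists, `inputs` (the queries) and `infos` (the reference answers), which is exactly what the next cell builds from GSM8K. A minimal sketch with made-up values:\n",
+    "\n",
+    "```python\n",
+    "dataset = dict(inputs=['What is 2 + 3?'], infos=['5'])  # one question paired with its reference answer\n",
+    "```"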
+ ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "import datasets\n", + "import numpy as np\n", + "\n", + "train_dataset = datasets.load_dataset('openai/gsm8k', 'main')['train'][:10]\n", + "train_dataset = dict(inputs=train_dataset['question'], infos=train_dataset['answer'])\n", + "test_dataset = train_dataset\n", + "\n", + "# set seed\n", + "seed = 42\n", + "num_epochs = 1\n", + "batch_size = 2\n", + "test_frequency = -1\n", + "num_threads = 3\n", + "verbose = True\n", + "\n", + "np.random.seed(seed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We define the `Learner` agent, which is a student LLM with a trainable system prompt. Trace will use a generative optimizer to tune the system prompt. Trace also provides an LLM-as-Judge class called `LLMJudge` that uses a teacher LLM to provide rich feedback to the student LLM." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": "from opto import trace\nfrom opto.utils.llm import LLM\nfrom opto.optimizers import OptoPrime\nfrom opto.trainer.algorithms.basic_algorithms import MinibatchAlgorithm\nfrom opto.trainer.loggers import TensorboardLogger\nfrom opto.trainer.guide import LLMJudge\nfrom opto.features.predefined_agents import BasicLearner\nfrom typing import Any\n\n# Use the predefined BasicLearner instead of defining our own\nLearner = BasicLearner" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we use the `MinibatchAlgorithm` as the trainer to sample batches from the GSM8K dataset, run the student model on the samples, gather feedback from the teacher model, and present the resulting traced graph to the optimizer." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "STARTING TRAINING\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:06<00:00, 3.12s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LLM response:\n", + " {\n", + "\"reasoning\": \"The #Instruction asks us to change the values of the variables in #Variables to improve the output according to #Feedback. The #Feedback section provides the analysis of the answers generated for each query. Both answers for the queries (regarding Alexis and Weng) are correct, as indicated by the statement 'Correct [TERMINATE]'. The #Output shows that the responses generated for each model (Learner.model0 and Learner.model1) are logical and correct given the input prompts. Therefore, there are no errors in the current setup, and no changes are needed in the variables.\",\n", + "\"answer\": \"TERMINATE\",\n", + "\"suggestion\": {}\n", + "}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating agent (iteration 1): 100%|██████████| 10/10 [00:22<00:00, 2.30s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Step 1] \u001B[92mAverage test score: 1.0\u001B[0m\n", + "Epoch: 0.
Iteration: 1\n", + "[Step 1] Instantaneous train score: 1.0\n", + "[Step 1] Average train score: 1.0\n", + "[Step 1] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:09<00:00, 4.65s/it]\n", + "/home/aswaminathan/miniconda3/envs/trace/lib/python3.9/copy.py:263: RuntimeWarning: coroutine 'main' was never awaited\n", + " args = (deepcopy(arg, memo) for arg in args)\n", + "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LLM response:\n", + " {\n", + "\"reasoning\": \"The instruction asks us to change the value of variables if necessary to improve the output based on the feedback provided. In this instance, the feedback for both outputs (ID [0] and ID [1]) states 'Correct' and suggests termination, which indicates that the outputs match the expected results. The variables in the code that we have control over are used to set up prompts for an LLM model to process. The feedback shows the model's output correctly answers the questions based on the inputs, matching the expected correct answers outlined in the feedback. Therefore, no changes to the variables are necessary as the task is operating as intended.\",\n", + "\"answer\": \"TERMINATE\",\n", + "\"suggestion\": {}\n", + "}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating agent (iteration 2): 100%|██████████| 10/10 [00:18<00:00, 1.88s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Step 2] \u001B[92mAverage test score: 1.0\u001B[0m\n", + "Epoch: 0. Iteration: 2\n", + "[Step 2] Instantaneous train score: 1.0\n", + "[Step 2] Average train score: 1.0\n", + "[Step 2] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:04<00:00, 2.46s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LLM response:\n", + " {\n", + " \"reasoning\": \"The #Instruction asks us to adjust the #Variables to improve the output based on #Feedback. The feedback suggests that the answers provided by the models are correct for both IDs. The output of both Learner.model25 and Learner.model24 correctly represents the calculation processes needed to answer the given queries. As the feedback indicates '[TERMINATE]', it means the current outputs are satisfactory, and no changes to the #Variables are necessary.\",\n", + " \"answer\": \"TERMINATE\"\n", + "}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating agent (iteration 3): 100%|██████████| 10/10 [00:20<00:00, 2.05s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Step 3] \u001B[92mAverage test score: 1.0\u001B[0m\n", + "Epoch: 0. Iteration: 3\n", + "[Step 3] Instantaneous train score: 1.0\n", + "[Step 3] Average train score: 1.0\n", + "[Step 3] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:08<00:00, 4.16s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LLM response:\n", + " {\n", + "\"reasoning\": \"The #Instruction requires us to change the values in #Variables to improve the output. 
However, based on #Feedback, both IDs in the #Outputs are correctly calculated according to the logic specified in #Documentation and supported by expert feedback. Therefore, no changes are needed to improve the outputs, as they already match the expected results provided in the feedback.\",\n", + "\"answer\": \"Both outputs are correct as per the feedback.\",\n", + "\"suggestion\": {}\n", + "}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating agent (iteration 4): 100%|██████████| 10/10 [00:19<00:00, 1.91s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Step 4] \u001B[92mAverage test score: 1.0\u001B[0m\n", + "Epoch: 0. Iteration: 4\n", + "[Step 4] Instantaneous train score: 1.0\n", + "[Step 4] Average train score: 1.0\n", + "[Step 4] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Forward pass (batch size: 2): 100%|██████████| 2/2 [00:05<00:00, 2.63s/it]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LLM response:\n", + " {\n", + "\"reasoning\": \"The #Instruction requires adjusting the value of the variable in #Variables to improve the output based on #Feedback. In this scenario, the feedback has been provided for both outputs (ID [0] and ID [1]) as correct, with an explicit [TERMINATE] instruction from the expert feedback, indicating that no changes are needed for the variable's value, as the outputs align perfectly with the expected answers. The current settings in #Variables, #Inputs, and #Others, including the prompts and message, are correctly leading to the generation of accurate answers to the queries, both for Julie's reading task and Albert's pizza consumption problem.\",\n", + "\"answer\": \"TERMINATE\",\n", + "\"suggestion\": {}\n", + "}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating agent (iteration 5): 100%|██████████| 10/10 [00:17<00:00, 1.76s/it]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Step 5] \u001B[92mAverage test score: 1.0\u001B[0m\n", + "Epoch: 0. Iteration: 5\n", + "[Step 5] Instantaneous train score: 1.0\n", + "[Step 5] Average train score: 1.0\n", + "[Step 5] \u001B[91mParameter: str:20: You're a helpful agent\u001B[0m\n", + "FINISHED TRAINING\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "agent = Learner(llm=LLM())\n", + "guide = LLMJudge(llm=LLM())\n", + "optimizer = OptoPrime(agent.parameters(), llm=LLM())\n", + "logger = TensorboardLogger(verbose=True)\n", + "\n", + "alg = MinibatchAlgorithm(\n", + " agent=agent,\n", + " optimizer=optimizer,\n", + " logger=logger)\n", + "\n", + "import nest_asyncio\n", + "nest_asyncio.apply()\n", + "import asyncio\n", + "\n", + "async def wrapper():\n", + " print(\"STARTING TRAINING\")\n", + " alg.train(guide,\n", + " train_dataset,\n", + " num_epochs=num_epochs,\n", + " batch_size=batch_size,\n", + " test_frequency=test_frequency,\n", + " test_dataset=test_dataset,\n", + " num_threads=num_threads,\n", + " verbose='output')\n", + " print(\"FINISHED TRAINING\")\n", + " \n", + "asyncio.run(wrapper())" + ] + }, + { + "cell_type": "markdown", + "source": "## Simplified Training with `trainer.train()`\n\nInstead of manually setting up the algorithm, optimizer, guide, and logger, you can use the simplified `trainer.train()` function that handles all the setup for you. 
This is the recommended approach for most use cases.", + "metadata": {} + }, + { + "cell_type": "code", + "source": "# Using the simplified trainer.train approach\nfrom opto import trainer\n\n# Create a fresh agent for simplified training\nsimple_agent = Learner(\n system_prompt=\"You're a helpful agent answering math problems.\",\n llm=LLM()\n)\n\nprint(\"STARTING SIMPLIFIED TRAINING\")\nmetrics, final_score = trainer.train(\n model=simple_agent,\n train_dataset=train_dataset,\n algorithm='MinibatchAlgorithm',\n guide=LLMJudge(llm=LLM()),\n # trainer kwargs\n num_epochs=num_epochs,\n batch_size=batch_size,\n eval_frequency=test_frequency,  # reuse the test_frequency value defined in the setup cell\n test_dataset=test_dataset,\n num_threads=num_threads,\n verbose='output',\n)\nprint(\"FINISHED SIMPLIFIED TRAINING\")\nprint(f\"Final score: {final_score}\")", + "metadata": {}, + "execution_count": null, + "outputs": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "trace", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.23" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 7221e611179b14d84301cab500414fc0e570bfba Mon Sep 17 00:00:00 2001 From: windweller Date: Mon, 22 Dec 2025 11:39:57 -0500 Subject: [PATCH 42/51] refactored LLMFactory -- removing "cheap", "premium" descriptions because these meanings are shifting (the "premium" model of 2025 will be the "cheap" model of 2027, which causes confusion and unreliability for the users). --- opto/optimizers/backbone.py | 1 - opto/utils/llm.py | 335 ++++++++++++++---- setup.py | 3 +- .../test_gepa_benchmark.py | 10 +- tests/llm_optimizers_tests/test_optimizer.py | 2 +- .../test_optimizer_backbone.py | 7 + tests/unit_tests/test_optimizer_backbone.py | 29 +- 7 files changed, 311 insertions(+), 76 deletions(-) create mode 100644 tests/llm_optimizers_tests/test_optimizer_backbone.py diff --git a/opto/optimizers/backbone.py b/opto/optimizers/backbone.py index cce60832..3ef0bc0d 100644 --- a/opto/optimizers/backbone.py +++ b/opto/optimizers/backbone.py @@ -1,7 +1,6 @@ """ Flexible conversation manager for multi-turn LLM conversations. Uses LiteLLM unified format for all providers (OpenAI, Anthropic, Google, etc.). - """ from typing import List, Dict, Any, Optional, Literal, Union, Iterable, Tuple from dataclasses import dataclass, field diff --git a/opto/utils/llm.py b/opto/utils/llm.py index b6fbd4fe..71dfa586 100644 --- a/opto/utils/llm.py +++ b/opto/utils/llm.py @@ -1,3 +1,10 @@ +""" +This adapter is for three cases: +1. OpenAI's response API (which is new and not fully supported by LiteLLM yet) +2. Google's MultiPart API design (not supported by LiteLLM response API at all) +3. Generic fallback option for all other providers (through LiteLLM) +""" + from typing import List, Tuple, Dict, Any, Callable, Union import os import time @@ -6,6 +13,10 @@ import warnings from .auto_retry import retry_with_exponential_backoff +import openai +from google import genai +from google.genai import types + try: import autogen # We import autogen here to avoid the need of installing autogen except ImportError: @@ -249,10 +260,13 @@ class LiteLLM(AbstractModel): default model name through the environment variable TRACE_LITELLM_MODEL.
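    For example, `os.environ['TRACE_LITELLM_MODEL'] = 'gpt-4o-mini'` makes that the
    fallback model whenever no model name is passed in (an illustrative value; any
    LiteLLM-routable model id works here).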
When using Azure models via token provider, you can set the Azure token provider scope through the environment variable AZURE_TOKEN_PROVIDER_SCOPE. + + This class now supports storing default completion parameters (like temperature, + top_p, max_tokens, etc.) that will be used for all calls unless overridden. """ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, - cache=True, max_retries=10, base_delay=1.0) -> None: + cache=True, max_retries=10, base_delay=1.0, **default_params) -> None: if model is None: model = os.environ.get('TRACE_LITELLM_MODEL') if model is None: @@ -261,11 +275,12 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] self.model_name = model self.cache = cache - factory = lambda: self._factory(self.model_name, max_retries=max_retries, base_delay=base_delay) # an LLM instance uses a fixed model + self.default_params = default_params # Store default completion parameters + factory = lambda: self._factory(self.model_name, self.default_params, max_retries=max_retries, base_delay=base_delay) super().__init__(factory, reset_freq) @classmethod - def _factory(cls, model_name: str, max_retries=10, base_delay=1.0): + def _factory(cls, model_name: str, default_params: dict, max_retries=10, base_delay=1.0): import litellm if model_name.startswith('azure/'): # azure model azure_token_provider_scope = os.environ.get('AZURE_TOKEN_PROVIDER_SCOPE', None) @@ -274,13 +289,13 @@ def _factory(cls, model_name: str, max_retries=10, base_delay=1.0): credential = get_bearer_token_provider(DefaultAzureCredential(), azure_token_provider_scope) return lambda *args, **kwargs: retry_with_exponential_backoff( lambda: litellm.completion(model_name, *args, - azure_ad_token_provider=credential, **kwargs), + azure_ad_token_provider=credential, **{**default_params, **kwargs}), max_retries=max_retries, base_delay=base_delay, operation_name="LiteLLM_completion" ) return lambda *args, **kwargs: retry_with_exponential_backoff( - lambda: litellm.completion(model_name, *args, **kwargs), + lambda: litellm.completion(model_name, *args, **{**default_params, **kwargs}), max_retries=max_retries, base_delay=base_delay, operation_name="LiteLLM_completion" @@ -340,75 +355,203 @@ def create(self, **config: Any): } class LLMFactory: - """Factory for creating LLM instances with predefined profiles. 
- - The code comes with these built-in profiles: - - llm_default = LLM(profile="default") # gpt-4o-mini - llm_premium = LLM(profile="premium") # gpt-4 - llm_cheap = LLM(profile="cheap") # gpt-4o-mini - llm_fast = LLM(profile="fast") # gpt-3.5-turbo-mini - llm_reasoning = LLM(profile="reasoning") # o1-mini - - You can override those built-in profiles: - - LLMFactory.register_profile("default", "LiteLLM", model="gpt-4o", temperature=0.5) - LLMFactory.register_profile("premium", "LiteLLM", model="o1-preview", max_tokens=8000) - LLMFactory.register_profile("cheap", "LiteLLM", model="gpt-3.5-turbo", temperature=0.9) - LLMFactory.register_profile("fast", "LiteLLM", model="gpt-3.5-turbo", max_tokens=500) - LLMFactory.register_profile("reasoning", "LiteLLM", model="o1-preview") - - An Example of using Different Backends - - # Register custom profiles for different use cases - LLMFactory.register_profile("advanced_reasoning", "LiteLLM", model="o1-preview", max_tokens=4000) - LLMFactory.register_profile("claude_sonnet", "LiteLLM", model="claude-3-5-sonnet-latest", temperature=0.3) - LLMFactory.register_profile("custom_server", "CustomLLM", model="llama-3.1-8b") - - # Use in different contexts - reasoning_llm = LLM(profile="advanced_reasoning") # For complex reasoning - claude_llm = LLM(profile="claude_sonnet") # For Claude responses - local_llm = LLM(profile="custom_server") # For local deployment - - # Single LLM optimizer with custom profile - optimizer1 = OptoPrime(parameters, llm=LLM(profile="advanced_reasoning")) + """Factory for creating LLM instances with named profiles. + + Profiles allow you to save and reuse LLM configurations with specific settings. + Each profile can include any LiteLLM-supported parameters like model, temperature, + top_p, max_tokens, etc. + + The default profile uses 'gpt-4o-mini' with standard settings. 
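+
+    Precedence (a summary of the resolution logic implemented in get_llm below):
+    an explicit model= argument bypasses profile lookup entirely, and call-time
+    keyword arguments override the stored profile values. For example:
+
+        LLMFactory.create_profile("fast", model="gpt-4o-mini", temperature=0)
+        llm = LLMFactory.get_llm(profile="fast", temperature=0.7)  # call-time value wins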
+ + Basic Usage: + # Use default model (gpt-4o-mini) + llm = LLM() + + # Specify a model directly + llm = LLM(model="gpt-4o") + + # Use a named profile + llm = LLM(profile="my_profile") + + Creating Custom Profiles: + # Register a profile with full LiteLLM configuration + LLMFactory.create_profile( + "creative_writer", + backend="LiteLLM", + model="gpt-4o", + temperature=0.9, + top_p=0.95, + max_tokens=2000, + presence_penalty=0.6 + ) + + # Register a reasoning profile + LLMFactory.create_profile( + "deep_thinker", + backend="LiteLLM", + model="o1-preview", + max_completion_tokens=8000 + ) + + # Register a profile with specific formatting + LLMFactory.create_profile( + "json_responder", + backend="LiteLLM", + model="gpt-4o-mini", + temperature=0.3, + response_format={"type": "json_object"} + ) - # Multi-LLM optimizer with multiple profiles - optimizer2 = OptoPrimeMulti(parameters, llm_profiles=["cheap", "premium", "claude_sonnet"], generation_technique="multi_llm") + Using Profiles: + # Use your custom profile + llm = LLM(profile="creative_writer") + + # In optimizers + optimizer = OptoPrime(parameters, llm=LLM(profile="deep_thinker")) + + Profile Management: + # List all available profiles + profiles = LLMFactory.list_profiles() + + # Get profile configuration + config = LLMFactory.get_profile_info("creative_writer") + + # Override existing profile + LLMFactory.create_profile("default", "LiteLLM", model="gpt-4o", temperature=0.5) + + Supported LiteLLM Parameters: + See https://docs.litellm.ai/docs/completion/input for full list: + - model: Model name (required) + - temperature: Sampling temperature (0-2) + - top_p: Nucleus sampling parameter + - max_tokens: Maximum tokens to generate + - max_completion_tokens: Upper bound for completion tokens + - presence_penalty: Penalize new tokens based on presence + - frequency_penalty: Penalize new tokens based on frequency + - stop: Stop sequences (string or list) + - stream: Enable streaming responses + - response_format: Output format specification + - seed: Deterministic sampling seed + - tools: Function calling tools + - tool_choice: Control function calling behavior + - logprobs: Return log probabilities + - top_logprobs: Number of most likely tokens to return + - n: Number of completions to generate + - and many more... """ - # Default profiles for different use cases + # Default profile - just gpt-4o-mini with no opinionated settings _profiles = { 'default': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o-mini'}}, - 'premium': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4'}}, - 'cheap': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o-mini'}}, - 'fast': {'backend': 'LiteLLM', 'params': {'model': 'gpt-3.5-turbo-mini'}}, - 'reasoning': {'backend': 'LiteLLM', 'params': {'model': 'o1-mini'}}, } @classmethod - def get_llm(cls, profile: str = 'default') -> AbstractModel: - """Get an LLM instance for the specified profile.""" + def get_llm(cls, profile: str = 'default', model: str = None, **kwargs) -> AbstractModel: + """Get an LLM instance for the specified profile or model. + + Args: + profile: Name of the profile to use. Defaults to 'default'. + model: Model name to use directly. If provided, overrides profile. + **kwargs: Additional parameters to pass to the backend (e.g., temperature, top_p). + These override profile settings if both are specified. + + Returns: + An LLM instance configured according to the profile/model and parameters. 
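+
+        Raises:
+            ValueError: if `profile` is not a registered profile name and no
+                `model` is supplied to bypass the profile lookup.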
+ + Examples: + # Use default profile + llm = LLMFactory.get_llm() + + # Use specific model + llm = LLMFactory.get_llm(model="gpt-4o") + + # Use named profile + llm = LLMFactory.get_llm(profile="creative_writer") + + # Use model with custom parameters + llm = LLMFactory.get_llm(model="gpt-4o", temperature=0.7, max_tokens=1000) + + # Override profile settings + llm = LLMFactory.get_llm(profile="creative_writer", temperature=0.5) + """ + # If model is specified directly, create a simple config + if model is not None: + backend = kwargs.pop('backend', None) + if backend is None: + backend = 'LiteLLM' + backend_cls = _LLM_REGISTRY[backend] + params = {'model': model, **kwargs} + return backend_cls(**params) + + # Otherwise use profile if profile not in cls._profiles: - raise ValueError(f"Unknown profile '{profile}'. Available profiles: {list(cls._profiles.keys())}") + raise ValueError( + f"Unknown profile '{profile}'. Available profiles: {list(cls._profiles.keys())}. " + f"Use LLMFactory.create_profile() to create custom profiles, or pass model= directly." + ) - config = cls._profiles[profile] + config = cls._profiles[profile].copy() backend_cls = _LLM_REGISTRY[config['backend']] - return backend_cls(**config['params']) + + # Merge profile params with any override kwargs + params = config['params'].copy() + params.update(kwargs) + + return backend_cls(**params) @classmethod - def register_profile(cls, name: str, backend: str, **params): - """Register a new LLM profile.""" + def create_profile(cls, name: str, backend: str = 'LiteLLM', **params): + """Register a new LLM profile with custom configuration. + + Args: + name: Profile name to register. + backend: Backend to use ('LiteLLM', 'AutoGen', or 'CustomLLM'). Defaults to 'LiteLLM'. + **params: Configuration parameters for the backend. For LiteLLM, this can include + any parameters from https://docs.litellm.ai/docs/completion/input + + Examples: + # Simple profile with just a model + LLMFactory.create_profile("gpt4", model="gpt-4o") + + # Profile with temperature and token settings + LLMFactory.create_profile( + "creative", + model="gpt-4o", + temperature=0.9, + max_tokens=2000 + ) + + # Profile with advanced settings + LLMFactory.create_profile( + "structured_json", + model="gpt-4o-mini", + temperature=0.3, + response_format={"type": "json_object"}, + max_tokens=1500, + top_p=0.9 + ) + """ + if backend not in _LLM_REGISTRY: + raise ValueError( + f"Unknown backend '{backend}'. Valid options: {list(_LLM_REGISTRY.keys())}" + ) cls._profiles[name] = {'backend': backend, 'params': params} @classmethod def list_profiles(cls): - """List all available profiles.""" + """List all available profile names.""" return list(cls._profiles.keys()) @classmethod def get_profile_info(cls, profile: str = None): - """Get information about a profile or all profiles.""" + """Get configuration information about one or all profiles. + + Args: + profile: Profile name to get info for. If None, returns all profiles. + + Returns: + Dictionary with profile configuration(s). + """ if profile: return cls._profiles.get(profile) return cls._profiles @@ -446,21 +589,79 @@ class LLM: """ A unified entry point for all supported LLM backends. 
- Usage: - # pick by env var (default: LiteLLM) - llm = LLM() - # or override explicitly - llm = LLM(backend="AutoGen", config_list=my_configs) - # or use predefined profiles - llm = LLM(profile="premium") # Use premium model - llm = LLM(profile="cheap") # Use cheaper model - llm = LLM(profile="reasoning") # Use reasoning/thinking model + The LLM class provides a simple interface for creating language model instances. + By default, it uses gpt-4o-mini through LiteLLM. + + Basic Usage: + # Use default model (gpt-4o-mini) + llm = LLM() + + # Specify a model directly + llm = LLM(model="gpt-4o") + llm = LLM(model="claude-3-5-sonnet-latest") + llm = LLM(model="o1-preview") + + # Add LiteLLM parameters + llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=2000) + llm = LLM(model="gpt-4o-mini", temperature=0.3, top_p=0.9) + + Using Named Profiles: + # Use a saved profile + llm = LLM(profile="my_custom_profile") + + # Create profiles with LLMFactory + LLMFactory.create_profile("creative", model="gpt-4o", temperature=0.9) + llm = LLM(profile="creative") + + Using Different Backends: + # Explicitly specify backend (default: LiteLLM) + llm = LLM(backend="AutoGen", config_list=my_configs) + llm = LLM(backend="CustomLLM", model="llama-3.1-8b") + + # Or set via environment variable + # export TRACE_DEFAULT_LLM_BACKEND=AutoGen + llm = LLM() + + Examples with LiteLLM Parameters: + # Structured output + llm = LLM( + model="gpt-4o-mini", + response_format={"type": "json_object"}, + temperature=0.3 + ) + + # High creativity + llm = LLM( + model="gpt-4o", + temperature=0.9, + top_p=0.95, + presence_penalty=0.6 + ) + + # Deterministic responses + llm = LLM( + model="gpt-4o-mini", + temperature=0, + seed=42 + ) + + See Also: + - LLMFactory: For managing named profiles + - https://docs.litellm.ai/docs/completion/input: Full list of LiteLLM parameters """ - def __new__(cls, *args, profile: str = None, backend: str = None, **kwargs): - # New: if profile is specified, use LLMFactory + def __new__(cls, model: str = None, profile: str = None, backend: str = None, **kwargs): + # Priority 1: If profile is specified, use LLMFactory if profile: - return LLMFactory.get_llm(profile) - # Decide which backend to use + return LLMFactory.get_llm(profile=profile, **kwargs) + + # Priority 2: If model is specified, use LLMFactory with model + if model: + if backend is not None: + kwargs['backend'] = backend + return LLMFactory.get_llm(model=model, **kwargs) + + # Priority 3: Use backend-specific instantiation (for AutoGen, CustomLLM, etc.) + # This path is for when neither profile nor model is specified name = backend or os.getenv("TRACE_DEFAULT_LLM_BACKEND", "LiteLLM") try: backend_cls = _LLM_REGISTRY[name] @@ -468,4 +669,4 @@ def __new__(cls, *args, profile: str = None, backend: str = None, **kwargs): raise ValueError(f"Unknown LLM backend: {name}. 
" f"Valid options are: {list(_LLM_REGISTRY)}") # Instantiate and return the chosen subclass - return backend_cls(*args, **kwargs) \ No newline at end of file + return backend_cls(**kwargs) \ No newline at end of file diff --git a/setup.py b/setup.py index ade71ac2..394d4046 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,8 @@ install_requires = [ "graphviz>=0.20.1", "pytest", - "litellm==1.75.0", + "litellm==1.80.8", + "google-genai", "black", "scikit-learn", "pillow", diff --git a/tests/llm_optimizers_tests/test_gepa_benchmark.py b/tests/llm_optimizers_tests/test_gepa_benchmark.py index 2811d4ec..0b7d0906 100644 --- a/tests/llm_optimizers_tests/test_gepa_benchmark.py +++ b/tests/llm_optimizers_tests/test_gepa_benchmark.py @@ -66,12 +66,12 @@ def test_gepa_benchmark_gsm8k_real_llm(): train = ds["train"][:6] train_dataset = {"inputs": train["question"], "infos": train["answer"]} - # Teacher/judge with a low-cost profile - guide = LLMJudge(llm=LLM(profile="cheap")) + # Teacher/judge with default model (gpt-4o-mini is cost-effective) + guide = LLMJudge(llm=LLM(model='gpt-4o-mini')) - # Agent and optimizer (low-cost profile) - agent = Learner(llm=LLM(profile="cheap")) - optimizer = OptoPrimeV2(agent.parameters(), llm=LLM(profile="cheap")) + # Agent and optimizer (using default model) + agent = Learner(llm=LLM(model='gpt-4o-mini')) + optimizer = OptoPrimeV2(agent.parameters(), llm=LLM(model='gpt-4o-mini')) algos = [ ("GEPA-Base", GEPAAlgorithmBase(agent, optimizer=optimizer, logger=None, num_threads=2), dict(num_iters=2, train_batch_size=1, merge_every=2)), diff --git a/tests/llm_optimizers_tests/test_optimizer.py b/tests/llm_optimizers_tests/test_optimizer.py index aa278d8e..ccabdd73 100644 --- a/tests/llm_optimizers_tests/test_optimizer.py +++ b/tests/llm_optimizers_tests/test_optimizer.py @@ -84,7 +84,7 @@ def model_profile(request, monkeypatch): # Register a runtime profile (does not modify source files) # Use CustomLLM backend which uses OpenAI-compatible calls. - LLMFactory.register_profile(profile_name, backend="CustomLLM", model=model_id) + LLMFactory.create_profile(profile_name, backend="CustomLLM", model=model_id) return profile_name diff --git a/tests/llm_optimizers_tests/test_optimizer_backbone.py b/tests/llm_optimizers_tests/test_optimizer_backbone.py new file mode 100644 index 00000000..e4d369db --- /dev/null +++ b/tests/llm_optimizers_tests/test_optimizer_backbone.py @@ -0,0 +1,7 @@ +""" +We need to test a few things: +1. Various use cases of ContentBlock and specialized ones +2. UserTurn, AssistantTurn and conversation manager +3. Multi-modal use of conversation manager, including multi-turn and image as output +""" + diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index e41f9a49..d36fab88 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -1,6 +1,11 @@ """ Comprehensive tests for optimizer backbone components (ConversationHistory, UserTurn, AssistantTurn) Tests include: truncation strategies, multimodal content, and conversation management + +We need to test a few things: +1. Various use cases of ContentBlock and specialized ones +2. UserTurn, AssistantTurn and conversation manager +3. 
Multi-modal use of conversation manager, including multi-turn and image as output
 """
 import os
 import pytest
@@ -471,7 +476,7 @@ def test_real_llm_multi_turn_with_images():
     print("   Which of these flowers would be better for a romantic gift and why?")
 
     response2 = llm(messages=messages, max_tokens=300)
-    response2_content = response2.choicbes[0].message.content
+    response2_content = response2.choices[0].message.content
 
     print("\n🤖 Turn 2 - Assistant:")
     print(f"   {response2_content[:200]}...")
@@ -487,3 +492,25 @@
     print("\n✅ Multi-turn conversation with images completed successfully!")
 
 
+if __name__ == '__main__':
+    import litellm
+    import base64
+
+    # Gemini image generation models don't require tools parameter
+    response = litellm.responses(
+        model="gemini/gemini-2.5-flash-image",
+        input="Generate a cute cat playing with yarn"
+    )
+
+    # Access generated images from output
+    for item in response.output:
+        if item.type == "image_generation_call":
+            # item.result contains pure base64 (no data: prefix)
+            image_bytes = base64.b64decode(item.result)
+
+            # Save the image
+            with open(f"generated_{item.id}.png", "wb") as f:
+                f.write(image_bytes)
+
+            print(f"Image saved: generated_{item.id}.png")
+

From 8becf51c4288b1db6bf06a560a2c18bc5dde01c6 Mon Sep 17 00:00:00 2001
From: windweller
Date: Sat, 27 Dec 2025 00:31:19 -0500
Subject: [PATCH 43/51] Add GoogleGenAILLM backend. Reworked the LLM logic.
 Added test cases (automatically generated to increase coverage)

---
 opto/optimizers/opro_v3.py                    |  11 +-
 opto/optimizers/optoprime_v3.py               |  13 +-
 opto/{optimizers => utils}/backbone.py        | 556 ++++++++++++++++--
 opto/utils/llm.py                             | 382 ++++++++++--
 .../llm_optimizers_tests/test_optoprime_v3.py |  18 +-
 tests/unit_tests/test_llm.py                  | 446 +++++++++++++-
 tests/unit_tests/test_optimizer_backbone.py   |  59 +-
 7 files changed, 1368 insertions(+), 117 deletions(-)
 rename opto/{optimizers => utils}/backbone.py (68%)

diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py
index 4b7af6a5..a52fcc2a 100644
--- a/opto/optimizers/opro_v3.py
+++ b/opto/optimizers/opro_v3.py
@@ -5,15 +5,14 @@
 3. Break from the OptoPrime style template, support more customizable template from user, for brevity and streamlined usage.
""" -import json from textwrap import dedent -from dataclasses import dataclass, asdict -from typing import Dict, Optional, List, Union, Any -from opto.trace.nodes import ParameterNode, is_image +from dataclasses import dataclass +from typing import Dict, Optional, List, Union +from opto.trace.nodes import ParameterNode from opto.optimizers.optoprime_v3 import OptoPrimeV3, OptimizerPromptSymbolSet -from opto.optimizers.backbone import ( - ContentBlock, TextContent, ImageContent, ContentBlockList, +from opto.utils.backbone import ( + ContentBlock, ImageContent, ContentBlockList, DEFAULT_IMAGE_PLACEHOLDER ) diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index e1435ed6..52376da4 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -5,25 +5,24 @@ """ import json -from typing import Any, List, Dict, Union, Tuple, Optional -from dataclasses import dataclass, field, asdict +from typing import List, Union, Tuple, Optional +from dataclasses import dataclass from opto.optimizers.optoprime import OptoPrime, node_to_function_feedback from opto.trace.utils import dedent from opto.optimizers.utils import truncate_expression, extract_xml_like_data -from opto.trace.nodes import ParameterNode, Node, MessageNode, is_image -from opto.trace.propagators import TraceGraph, GraphPropagator +from opto.trace.nodes import ParameterNode, is_image +from opto.trace.propagators import GraphPropagator from opto.trace.propagators.propagators import Propagator from opto.utils.llm import AbstractModel, LLM from opto.optimizers.buffers import FIFOBuffer -from opto.optimizers.backbone import ( +from opto.utils.backbone import ( ConversationHistory, UserTurn, AssistantTurn, PromptTemplate, - ContentBlock, TextContent, ImageContent, ContentBlockList, + TextContent, ImageContent, ContentBlockList, DEFAULT_IMAGE_PLACEHOLDER ) import copy import pickle -import re from typing import Dict, Any diff --git a/opto/optimizers/backbone.py b/opto/utils/backbone.py similarity index 68% rename from opto/optimizers/backbone.py rename to opto/utils/backbone.py index 3ef0bc0d..f966b70d 100644 --- a/opto/optimizers/backbone.py +++ b/opto/utils/backbone.py @@ -1,6 +1,24 @@ """ Flexible conversation manager for multi-turn LLM conversations. Uses LiteLLM unified format for all providers (OpenAI, Anthropic, Google, etc.). + +The class here follows this philosophy: +1. Every class is a data class (pickable/jsonable) +2. Most classes have `autocast` feature that takes in some data form and tries to automatically determine how to parse them into the right structured format. + +In order to support three types of data class construction methods: +1. Direct construction: `text = TextContent("Hello, world!")` +2. Build from a value: `text = TextContent.build("Hello, world!")` +3. Data class construction: `text = TextContent(text="Hello, world!")` + +We use this approach: +`autocast()` method is the main automatic conversion method that determines how to parse the data. +It will return a sequence of values that map to the fields of the data class. + +In `__init__()` method, if `kwargs` are provided, we follow path 3 to construct the data class. +If not, we do autocast to construct the data class (path 1) + +Alternatively, people can call `.build()` to construct the class. 
""" from typing import List, Dict, Any, Optional, Literal, Union, Iterable, Tuple from dataclasses import dataclass, field @@ -513,6 +531,15 @@ def __radd__(self, other) -> 'TextContent': @dataclass class ImageContent(ContentBlock): """Image content block - supports URLs, base64, file paths, and numpy arrays. + + OpenAI uses base64 encoded images in the image_data field and recombine it into a base64 string of the format `"image_url": f"data:image/jpeg;base64,{base64_image}"` when sending to the API. + Gemini uses raw bytes in the image_bytes field: + ``` + types.Part.from_bytes( + data=image_bytes, + mime_type='image/jpeg', + ) + ``` Supports multiple ways to create an ImageContent: 1. Direct instantiation with image_url or image_data @@ -524,6 +551,7 @@ class ImageContent(ContentBlock): type: Literal["image"] = "image" image_url: Optional[str] = None image_data: Optional[str] = None # base64 encoded + image_bytes: Optional[bytes] = None media_type: str = "image/jpeg" # image/jpeg, image/png, image/gif, image/webp detail: Optional[str] = None # OpenAI: "auto", "low", "high" @@ -549,30 +577,37 @@ def __init__(self, value: Any = None, format: str = "PNG", **kwargs): super().__init__(**kwargs) else: # Use autocast to detect and convert the value - image_url, image_data, media_type = self.autocast(value, format=format) - super().__init__( - image_url=image_url, - image_data=image_data, - media_type=media_type, - ) + value_dict = self.autocast(value, format=format) + super().__init__(**value_dict) + + def __str__(self) -> str: + # Truncate image_data and image_bytes for readability + image_data_str = f"{self.image_data[:10]}..." if self.image_data and len(self.image_data) > 10 else self.image_data + image_bytes_str = f"{str(self.image_bytes[:10])}..." if self.image_bytes and len(self.image_bytes) > 10 else self.image_bytes + return f"ImageContent(image_url={self.image_url}, image_data={image_data_str}, image_bytes={image_bytes_str}, media_type={self.media_type})" + + def __repr__(self) -> str: + # Truncate image_data and image_bytes for readability + image_data_str = f"{self.image_data[:10]}..." if self.image_data and len(self.image_data) > 10 else self.image_data + image_bytes_str = f"{str(self.image_bytes[:10])}..." 
if self.image_bytes and len(self.image_bytes) > 10 else self.image_bytes + return f"ImageContent(image_url={self.image_url}, image_data={image_data_str}, image_bytes={image_bytes_str}, media_type={self.media_type})" def is_empty(self) -> bool: """Check if the image content is empty (no URL or data).""" - return not self.image_url and not self.image_data + return not self.image_url and not self.image_data and not self.image_bytes def to_dict(self) -> Dict[str, Any]: + result = { + "type": self.type, + "media_type": self.media_type + } if self.image_url: - return { - "type": self.type, - "image_url": self.image_url, - "media_type": self.media_type - } - else: - return { - "type": self.type, - "image_data": self.image_data, - "media_type": self.media_type - } + result["image_url"] = self.image_url + if self.image_data: + result["image_data"] = self.image_data + if self.image_bytes: + result["image_bytes"] = self.image_bytes + return result @classmethod def from_file(cls, filepath: str, media_type: Optional[str] = None): @@ -687,7 +722,7 @@ def from_bytes(cls, data: bytes, media_type: str = "image/jpeg"): media_type: MIME type of the image (default: image/jpeg) Returns: - ImageContent with base64-encoded image data + ImageContent with base64-encoded data """ image_data = base64.b64encode(data).decode('utf-8') return cls(image_data=image_data, media_type=media_type) @@ -724,7 +759,7 @@ def from_data_url(cls, data_url: str): return cls(image_data=data_url.split(',')[-1], media_type="image/jpeg") @staticmethod - def autocast(value: Any, format: str = "PNG") -> Tuple[Optional[str], Optional[str], str]: + def autocast(value: Any, format: str = "PNG") -> Dict[str, Any]: """Auto-detect value type and return image field values. Args: @@ -739,33 +774,38 @@ def autocast(value: Any, format: str = "PNG") -> Tuple[Optional[str], Optional[s format: Image format for numpy arrays (PNG, JPEG, etc.). 
Default: PNG Returns: - Tuple of (image_url, image_data, media_type) + Dictionary with keys: image_url, image_data, image_bytes, media_type """ # Handle None or empty if value is None: - return (None, None, "image/jpeg") + return {"image_url": None, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"} # Handle ImageContentBlock instance if isinstance(value, ImageContent): - return (value.image_url, value.image_data, value.media_type) + return { + "image_url": value.image_url, + "image_data": value.image_data, + "image_bytes": value.image_bytes, + "media_type": value.media_type + } # Handle string inputs if isinstance(value, str): if not value.strip(): - return (None, None, "image/jpeg") + return {"image_url": None, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"} # Data URL if value.startswith('data:image/'): try: header, b64_data = value.split(',', 1) media_type = header.split(':')[1].split(';')[0] - return (None, b64_data, media_type) + return {"image_url": None, "image_data": b64_data, "image_bytes": None, "media_type": media_type} except (ValueError, IndexError): - return (None, value.split(',')[-1], "image/jpeg") + return {"image_url": None, "image_data": value.split(',')[-1], "image_bytes": None, "media_type": "image/jpeg"} # HTTP/HTTPS URL if value.startswith('http://') or value.startswith('https://'): - return (value, None, "image/jpeg") + return {"image_url": value, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"} # File path path = Path(value) @@ -780,14 +820,12 @@ def autocast(value: Any, format: str = "PNG") -> Tuple[Optional[str], Optional[s media_type = ext_to_type.get(path.suffix.lower(), 'image/jpeg') with open(value, 'rb') as f: image_data = base64.b64encode(f.read()).decode('utf-8') - return (None, image_data, media_type) - - return (None, None, "image/jpeg") - - # Handle bytes + return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": media_type} + + # Handle bytes - store as base64 for portability if isinstance(value, bytes): image_data = base64.b64encode(value).decode('utf-8') - return (None, image_data, "image/jpeg") + return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": "image/jpeg"} # Handle PIL Image try: @@ -800,7 +838,7 @@ def autocast(value: Any, format: str = "PNG") -> Tuple[Optional[str], Optional[s buffer.seek(0) image_data = base64.b64encode(buffer.getvalue()).decode('utf-8') media_type = f"image/{img_format.lower()}" - return (None, image_data, media_type) + return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": media_type} except ImportError: pass @@ -834,11 +872,11 @@ def autocast(value: Any, format: str = "PNG") -> Tuple[Optional[str], Optional[s image_data = base64.b64encode(buffer.getvalue()).decode('utf-8') media_type = f"image/{format.lower()}" - return (None, image_data, media_type) + return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": media_type} except ImportError: pass - return (None, None, "image/jpeg") + return {"image_url": None, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"} @classmethod def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': @@ -861,8 +899,8 @@ def build(cls, value: Any, format: str = "PNG") -> 'ImageContent': if isinstance(value, cls): return value - image_url, image_data, media_type = cls.autocast(value, format=format) - return cls(image_url=image_url, image_data=image_data, media_type=media_type) + 
value_dict = cls.autocast(value, format=format) + return cls(**value_dict) def set_image(self, image: Any, format: str = "PNG") -> None: """Set the image from various input formats (mutates self). @@ -881,7 +919,48 @@ def set_image(self, image: Any, format: str = "PNG") -> None: if result: self.image_url = result.image_url self.image_data = result.image_data + # Only copy image_bytes if it was explicitly set (e.g., from Google API) + if result.image_bytes: + self.image_bytes = result.image_bytes self.media_type = result.media_type + + def get_bytes(self) -> Optional[bytes]: + """Get raw image bytes. + + Returns image_bytes if available, otherwise decodes image_data from base64. + + Returns: + Raw image bytes or None if no image data available + """ + if self.image_bytes: + return self.image_bytes + elif self.image_data: + return base64.b64decode(self.image_data) + return None + + def get_base64(self) -> Optional[str]: + """Get base64-encoded image data. + + Returns image_data if available, otherwise encodes image_bytes to base64. + + Returns: + Base64-encoded string or None if no image data available + """ + if self.image_data: + return self.image_data + elif self.image_bytes: + return base64.b64encode(self.image_bytes).decode('utf-8') + return None + + def ensure_bytes(self) -> None: + """Ensure image_bytes is populated (converts from image_data if needed).""" + if not self.image_bytes and self.image_data: + self.image_bytes = base64.b64decode(self.image_data) + + def ensure_base64(self) -> None: + """Ensure image_data is populated (converts from image_bytes if needed).""" + if not self.image_data and self.image_bytes: + self.image_data = base64.b64encode(self.image_bytes).decode('utf-8') @dataclass class PDFContent(ContentBlock): @@ -1130,6 +1209,8 @@ def to_dict(self) -> Dict[str, Any]: @dataclass class UserTurn: """Represents a user message turn in the conversation""" + role: str = "user" + content: List[ContentBlock] = field(default_factory=list) tools: List[ToolDefinition] = field(default_factory=list) @@ -1235,10 +1316,17 @@ def to_litellm_format(self) -> Dict[str, Any]: "content": content } +@dataclass +class Turn: + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + @dataclass -class AssistantTurn: +class AssistantTurn(Turn): """Represents an assistant message turn in the conversation""" + role: str = "assistant" content: List[ContentBlock] = field(default_factory=list) # Tool usage (Option B: Everything in AssistantTurn) @@ -1258,6 +1346,392 @@ class AssistantTurn: timestamp: Optional[str] = None metadata: Dict[str, Any] = field(default_factory=dict) + def __init__(self, *args, **kwargs): + """ + Initialize AssistantTurn from a raw response. + """ + if len(args) > 0 and len(kwargs) == 0: + value_dict = self.autocast(args[0]) + super().__init__(**value_dict) + else: + assert len(kwargs) > 0, "Either provide a raw response or keyword arguments" + super().__init__(**kwargs) + + @staticmethod + def from_google_genai(value: Any) -> Dict[str, Any]: + """Parse a Google GenAI response into a dictionary of AssistantTurn fields. + + Supports both the legacy generate_content API and the new Interactions API. 
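+
+        A rough usage sketch (assuming `resp` is a raw `generate_content` result;
+        `AssistantTurn.autocast` dispatches Google responses here automatically):
+
+            turn = AssistantTurn(resp)
+            print(turn.get_text())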
+ + Args: + value: Raw response from Google GenAI API + + Returns: + Dict[str, Any]: Dictionary with keys corresponding to AssistantTurn fields + """ + # Initialize the result dictionary with default values + result = { + "role": "assistant", + "content": [], + "tool_calls": [], + "tool_results": [], + "reasoning": None, + "finish_reason": None, + "prompt_tokens": None, + "completion_tokens": None, + "model": None, + "timestamp": None, + "metadata": {} + } + + # Check if this is a normalized response (from our GoogleGenAILLM) + if hasattr(value, 'raw_response'): + raw_response = value.raw_response + else: + raw_response = value + + # Handle Interactions API format (new) + if hasattr(raw_response, 'outputs'): + # This is an Interaction object + interaction = raw_response + + # Extract text from outputs + if interaction.outputs and len(interaction.outputs) > 0: + for output in interaction.outputs: + if hasattr(output, 'text') and output.text: + result["content"].append(TextContent(text=output.text)) + # Handle other output types if they exist + elif hasattr(output, 'content'): + # Content could be a list of parts + if isinstance(output.content, list): + for part in output.content: + if hasattr(part, 'text') and part.text: + result["content"].append(TextContent(text=part.text)) + else: + result["content"].append(TextContent(text=str(output.content))) + + # Extract model info + if hasattr(interaction, 'model'): + result["model"] = interaction.model + + # Extract status as finish_reason + if hasattr(interaction, 'status'): + result["finish_reason"] = interaction.status + + # Extract token usage from Interactions API + if hasattr(interaction, 'usage'): + usage = interaction.usage + if hasattr(usage, 'input_tokens'): + result["prompt_tokens"] = usage.input_tokens + elif hasattr(usage, 'prompt_token_count'): + result["prompt_tokens"] = usage.prompt_token_count + + if hasattr(usage, 'output_tokens'): + result["completion_tokens"] = usage.output_tokens + elif hasattr(usage, 'candidates_token_count'): + result["completion_tokens"] = usage.candidates_token_count + + # Extract interaction ID as metadata + if hasattr(interaction, 'id'): + result["metadata"]['interaction_id'] = interaction.id + + # Handle legacy generate_content API format + else: + # Extract thinking/reasoning (for Gemini 2.5+ models) + if hasattr(raw_response, 'thoughts') and raw_response.thoughts: + # Gemini's thinking budget feature + result["reasoning"] = str(raw_response.thoughts) + + # Extract model info + if hasattr(raw_response, 'model_version'): + result["model"] = raw_response.model_version + + # Extract token usage (if available) + if hasattr(raw_response, 'usage_metadata'): + usage = raw_response.usage_metadata + if hasattr(usage, 'prompt_token_count'): + result["prompt_tokens"] = usage.prompt_token_count + if hasattr(usage, 'candidates_token_count'): + result["completion_tokens"] = usage.candidates_token_count + + # Handle multimodal content from Gemini (candidates with parts) + content_extracted = False + if hasattr(raw_response, 'candidates') and raw_response.candidates: + candidate = raw_response.candidates[0] + + # Extract from parts (supports multimodal responses with text and images) + if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'): + for part in candidate.content.parts: + # Handle text parts + if hasattr(part, 'text') and part.text: + result["content"].append(TextContent(text=part.text)) + content_extracted = True + # Handle inline data (images, generated images, etc.) 
+ elif hasattr(part, 'inline_data'): + # Try to extract image data, preferring direct inline_data access + inline = part.inline_data + image_bytes = None + image_data = None + media_type = 'image/jpeg' + + + # Extract from inline_data Blob (most reliable method) + # Google's Blob.data should be raw bytes + if hasattr(inline, 'data'): + data = inline.data + # Check if it's bytes or string + if isinstance(data, bytes): + # Store raw bytes for Gemini compatibility + # (Gemini prefers raw bytes when sending images) + image_bytes = data + elif isinstance(data, str): + # Already base64-encoded string + image_data = data + # Don't decode to bytes - keep as base64 for portability + + if hasattr(inline, 'mime_type'): + media_type = inline.mime_type + + # If we got the data, create ImageContent + # Store image_bytes only if we got raw bytes from Google + if image_data or image_bytes: + result["content"].append(ImageContent( + image_data=image_data, + image_bytes=image_bytes if isinstance(data, bytes) else None, + media_type=media_type + )) + content_extracted = True + + # Extract finish reason + if hasattr(candidate, 'finish_reason'): + result["finish_reason"] = str(candidate.finish_reason) + + # Fallback: Extract simple text content if no candidates/parts were found + if not content_extracted: + if hasattr(raw_response, 'text'): + result["content"].append(TextContent(text=raw_response.text)) + elif hasattr(value, 'choices'): + # Fallback to normalized format + result["content"].append(TextContent(text=value.choices[0].message.content)) + + return result + + @staticmethod + def from_litellm_openai_response_api(value: Any) -> Dict[str, Any]: + """Parse a LiteLLM/OpenAI-style response into a dictionary of AssistantTurn fields. + + Handles both formats: + - New Responses API: Has 'output' field with ResponseOutputMessage objects + - Legacy Completion API: Has 'choices' field with message objects + + Args: + value: Response from LiteLLM/OpenAI API (Responses API or Completion API) + + Returns: + Dict[str, Any]: Dictionary with keys corresponding to AssistantTurn fields + """ + # Initialize the result dictionary with default values + result = { + "role": "assistant", + "content": [], + "tool_calls": [], + "tool_results": [], + "reasoning": None, + "finish_reason": None, + "prompt_tokens": None, + "completion_tokens": None, + "model": None, + "timestamp": None, + "metadata": {} + } + + # Handle Responses API format (new format with 'output' field) + if hasattr(value, 'output') and hasattr(value, 'object') and value.object == 'response': + # Extract metadata + if hasattr(value, 'id'): + result["metadata"]['response_id'] = value.id + if hasattr(value, 'created_at'): + result["timestamp"] = str(value.created_at) + + # Extract model info + if hasattr(value, 'model'): + result["model"] = value.model + + # Extract status as finish_reason + if hasattr(value, 'status'): + result["finish_reason"] = value.status + + # Extract content from output + if value.output and len(value.output) > 0: + for output_item in value.output: + # Handle ImageGenerationCall + if hasattr(output_item, 'type') and output_item.type == 'image_generation_call': + # Extract generated image + if hasattr(output_item, 'result') and output_item.result: + # Determine media type from output_format + media_type = 'image/jpeg' # default + if hasattr(output_item, 'output_format'): + format_map = { + 'png': 'image/png', + 'jpeg': 'image/jpeg', + 'jpg': 'image/jpeg', + 'webp': 'image/webp', + 'gif': 'image/gif' + } + media_type = 
format_map.get(output_item.output_format.lower(), 'image/jpeg') + + # Add image to content + result["content"].append(ImageContent( + image_data=output_item.result, + media_type=media_type + )) + + # Store additional metadata about the image generation + if hasattr(output_item, 'revised_prompt') and output_item.revised_prompt: + if 'image_generation' not in result["metadata"]: + result["metadata"]['image_generation'] = [] + result["metadata"]['image_generation'].append({ + 'id': output_item.id if hasattr(output_item, 'id') else None, + 'revised_prompt': output_item.revised_prompt, + 'size': output_item.size if hasattr(output_item, 'size') else None, + 'quality': output_item.quality if hasattr(output_item, 'quality') else None, + 'status': output_item.status if hasattr(output_item, 'status') else None + }) + + # Handle ResponseOutputMessage + elif hasattr(output_item, 'type') and output_item.type == 'message': + # Extract role + if hasattr(output_item, 'role'): + result["role"] = output_item.role + + # Extract status for this message + if hasattr(output_item, 'status') and not result["finish_reason"]: + result["finish_reason"] = output_item.status + + # Extract content items + if hasattr(output_item, 'content') and output_item.content: + for content_item in output_item.content: + # Handle text content + if hasattr(content_item, 'type') and content_item.type == 'output_text': + if hasattr(content_item, 'text') and content_item.text: + result["content"].append(TextContent(text=content_item.text)) + # Handle other content types as they become available + elif hasattr(content_item, 'text') and content_item.text: + result["content"].append(TextContent(text=str(content_item.text))) + + # Extract reasoning (for models with reasoning capabilities) + if hasattr(value, 'reasoning'): + reasoning_parts = [] + if isinstance(value.reasoning, dict): + if value.reasoning.get('summary'): + reasoning_parts.append(f"Summary: {value.reasoning['summary']}") + if value.reasoning.get('effort'): + reasoning_parts.append(f"Effort: {value.reasoning['effort']}") + if reasoning_parts: + result["reasoning"] = "\n".join(reasoning_parts) + elif value.reasoning: + result["reasoning"] = str(value.reasoning) + + # Extract token usage (Responses API format) + if hasattr(value, 'usage'): + if hasattr(value.usage, 'input_tokens'): + result["prompt_tokens"] = value.usage.input_tokens + if hasattr(value.usage, 'output_tokens'): + result["completion_tokens"] = value.usage.output_tokens + + # Handle legacy Completion API format (has 'choices' field) + elif hasattr(value, 'choices') and len(value.choices) > 0: + choice = value.choices[0] + message = choice.message if hasattr(choice, 'message') else choice + + # Extract text content + if hasattr(message, 'content') and message.content: + result["content"].append(TextContent(text=str(message.content))) + + # Extract tool calls + if hasattr(message, 'tool_calls') and message.tool_calls: + for tc in message.tool_calls: + tool_call = ToolCall( + id=tc.id if hasattr(tc, 'id') else None, + type=tc.type if hasattr(tc, 'type') else "function", + name=tc.function.name if hasattr(tc, 'function') else tc.name, + arguments=json.loads(tc.function.arguments) if hasattr(tc, 'function') and hasattr(tc.function, 'arguments') else {} + ) + result["tool_calls"].append(tool_call) + + # Extract finish reason + if hasattr(choice, 'finish_reason'): + result["finish_reason"] = choice.finish_reason + + # Extract reasoning/thinking (for OpenAI o1/o3 models) + if hasattr(message, 'reasoning') and 
message.reasoning: + result["reasoning"] = message.reasoning + + # Extract token usage (Completion API format) + if hasattr(value, 'usage'): + if hasattr(value.usage, 'prompt_tokens'): + result["prompt_tokens"] = value.usage.prompt_tokens + if hasattr(value.usage, 'completion_tokens'): + result["completion_tokens"] = value.usage.completion_tokens + + # Extract model info + if hasattr(value, 'model'): + result["model"] = value.model + + return result + + @staticmethod + def autocast(value: Any) -> Dict[str, Any]: + """Automatically parse a response from any API into a dictionary of AssistantTurn fields. + + Automatically detects the response format and uses the appropriate parser: + - Google GenAI (generate_content or Interactions API) + - LiteLLM/OpenAI Responses API (new format with 'output' field) + - LiteLLM/OpenAI Completion API (legacy format with 'choices' field) + + Args: + value: Raw response from any supported API + + Returns: + Dict[str, Any]: Dictionary with keys corresponding to AssistantTurn fields + """ + # Check if this is a normalized response (from our GoogleGenAILLM) + raw_response = value.raw_response if hasattr(value, 'raw_response') else value + + # Detect Google GenAI format (Interactions API or generate_content) + # Google GenAI has 'outputs' (Google Interactions API) or 'candidates' (generate_content) + # Note: 'outputs' is for Google's Interactions API, 'output' is for LiteLLM Responses API + if hasattr(raw_response, 'outputs') or \ + (hasattr(raw_response, 'candidates') and not hasattr(value, 'choices')) or \ + hasattr(raw_response, 'usage_metadata'): + return AssistantTurn.from_google_genai(value) + + # Detect LiteLLM/OpenAI format (Responses API or Completion API) + # Responses API has 'output' field and object='response' + # Completion API has 'choices' field + elif hasattr(value, 'output') or hasattr(value, 'choices'): + return AssistantTurn.from_litellm_openai_response_api(value) + + # Fallback: if has 'text' attribute, might be a simple Google response + elif hasattr(raw_response, 'text'): + return AssistantTurn.from_google_genai(value) + + # Default to empty result if format is not recognized + else: + return { + "role": "assistant", + "content": [], + "tool_calls": [], + "tool_results": [], + "reasoning": None, + "finish_reason": None, + "prompt_tokens": None, + "completion_tokens": None, + "model": None, + "timestamp": None, + "metadata": {} + } + def add_text(self, text: str) -> 'AssistantTurn': """Add text content""" self.content.append(TextContent(text=text)) @@ -1293,7 +1767,7 @@ def get_text(self) -> str: def to_dict(self) -> Dict[str, Any]: """Convert to dictionary format""" return { - "role": "assistant", + "role": self.role, "content": [c.to_dict() for c in self.content], "tool_calls": [tc.to_dict() for tc in self.tool_calls] if self.tool_calls else None, "tool_results": [tr.to_dict() for tr in self.tool_results] if self.tool_results else None, @@ -1307,7 +1781,7 @@ def to_dict(self) -> Dict[str, Any]: def to_litellm_format(self) -> Dict[str, Any]: """Convert to LiteLLM format (OpenAI-compatible, works with all providers)""" - result = {"role": "assistant"} + result = {"role": self.role} if self.content: # For multimodal or simple text response diff --git a/opto/utils/llm.py b/opto/utils/llm.py index 71dfa586..826f1502 100644 --- a/opto/utils/llm.py +++ b/opto/utils/llm.py @@ -1,11 +1,13 @@ """ This adapater is for three cases: -1. OpenAI's response API (which is new and not fully supported by LiteLLM yet) -2. 
Google's MultiPart API design (not supported by LiteLLM response API at all) -3. Generic fallback option for all other providers (through LiteLLM) +When MM (multimodal) is enabled, we primarily either use: +1. LiteLLM's response API +2. Google's Interaction API design (not supported by LiteLLM response API at all) +When MM is disabled, for backward compatibility, we use: +1. LiteLLM's completion API """ -from typing import List, Tuple, Dict, Any, Callable, Union +from typing import List, Tuple, Dict, Any, Callable, Union, Optional import os import time import json @@ -17,6 +19,9 @@ from google import genai from google.genai import types +# Import AssistantTurn and related types for mm_beta mode +from .backbone import AssistantTurn, TextContent, ImageContent, ToolCall, ToolResult + try: import autogen # We import autogen here to avoid the need of installing autogen except ImportError: @@ -35,6 +40,9 @@ class AbstractModel: reset_freq : int or None, optional Number of seconds after which to refresh the model. If None, the model is never refreshed. + mm_beta : bool, optional + If True, returns AssistantTurn objects with rich multimodal content. + If False (default), returns raw API responses in legacy format. Attributes ---------- @@ -42,13 +50,17 @@ class AbstractModel: The factory function for creating model instances. reset_freq : int or None Refresh frequency in seconds. + mm_beta : bool + Whether to use multimodal beta mode. + model : Any Property that returns the current model instance. Methods ------- __call__(*args, **kwargs) - Execute the model, refreshing if needed. + Execute the model, refreshing if needed. Returns AssistantTurn if mm_beta=True, + otherwise returns raw API response. Notes ----- @@ -56,8 +68,9 @@ class AbstractModel: 1. **Automatic Refreshing**: Recreates the model instance periodically to prevent issues with long-running connections. 2. **Serialization**: Supports pickling by recreating the model on load. - 3. **Consistent Interface**: Ensures responses are available at - `response['choices'][0]['message']['content']`. + 3. **Response Formats**: + - Legacy (mm_beta=False): `response['choices'][0]['message']['content']` + - Multimodal (mm_beta=True): AssistantTurn object with .content, .tool_calls, etc. Subclasses should override the `model` property to customize behavior. @@ -67,17 +80,21 @@ class AbstractModel: LiteLLM : Concrete implementation using LiteLLM """ - def __init__(self, factory: Callable, reset_freq: Union[int, None] = None) -> None: + def __init__(self, factory: Callable, reset_freq: Union[int, None] = None, + mm_beta: bool = False) -> None: """ Args: factory: A function that takes no arguments and returns a model that is callable. reset_freq: The number of seconds after which the model should be refreshed. If None, the model is never refreshed. + mm_beta: If True, returns AssistantTurn objects with rich multimodal content. + If False (default), returns raw API responses in legacy format. """ self.factory = factory self._model = self.factory() self.reset_freq = reset_freq self._init_time = time.time() + self.mm_beta = mm_beta # Overwrite this `model` property when subclassing. @property @@ -88,11 +105,22 @@ def model(self): # This is the main API def __call__(self, *args, **kwargs) -> Any: """ The call function handles refreshing the model if needed. 
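+
+        A rough sketch of the dispatch (refresh logic elided; the raw response
+        is wrapped only when mm_beta is enabled):
+
+            response = self.model(*args, **kwargs)
+            return AssistantTurn(response) if self.mm_beta else response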
+ + Returns: + If mm_beta=False: Raw completion API response (backward compatible) + If mm_beta=True: AssistantTurn object with parsed multimodal content """ if self.reset_freq is not None and time.time() - self._init_time > self.reset_freq: self._model = self.factory() self._init_time = time.time() - return self.model(*args, **kwargs) + + response = self.model(*args, **kwargs) + + # Parse to AssistantTurn if mm_beta mode is enabled + if self.mm_beta: + return AssistantTurn(response) + + return response def __getstate__(self): state = self.__dict__.copy() @@ -162,7 +190,8 @@ class AutoGenLLM(AbstractModel): >>> response = llm(messages=[{"role": "user", "content": "Hello"}]) """ - def __init__(self, config_list: List = None, filter_dict: Dict = None, reset_freq: Union[int, None] = None) -> None: + def __init__(self, config_list: List = None, filter_dict: Dict = None, + reset_freq: Union[int, None] = None, mm_beta: bool = False) -> None: if config_list is None: try: config_list = autogen.config_list_from_json("OAI_CONFIG_LIST") @@ -175,7 +204,7 @@ def __init__(self, config_list: List = None, filter_dict: Dict = None, reset_fre config_list = autogen.filter_config(config_list, filter_dict) factory = lambda *args, **kwargs: self._factory(config_list) - super().__init__(factory, reset_freq) + super().__init__(factory, reset_freq, mm_beta=mm_beta) @classmethod def _factory(cls, config_list): @@ -254,6 +283,7 @@ class LiteLLM(AbstractModel): This is an LLM backend supported by LiteLLM library. https://docs.litellm.ai/docs/completion/input + https://docs.litellm.ai/docs/response_api To use this, set the credentials through the environment variable as instructed in the LiteLLM documentation. For convenience, you can set the @@ -263,10 +293,17 @@ class LiteLLM(AbstractModel): This class now supports storing default completion parameters (like temperature, top_p, max_tokens, etc.) that will be used for all calls unless overridden. + + Responses API Support: + When mm_beta=True, the Responses API is used for rich multimodal content. + When mm_beta=False (default), the Completion API is used for backward compatibility. 
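+
+    A minimal sketch of the two modes (assuming OPENAI_API_KEY is set;
+    `get_text()` is provided by the returned AssistantTurn):
+
+        llm = LiteLLM(model="gpt-4o-mini")                   # Completion API
+        resp = llm(messages=[{"role": "user", "content": "Hi"}])
+        text = resp.choices[0].message.content
+
+        llm_mm = LiteLLM(model="gpt-4o-mini", mm_beta=True)  # Responses API
+        turn = llm_mm(messages=[{"role": "user", "content": "Hi"}])
+        text = turn.get_text()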
+ + See: https://docs.litellm.ai/docs/response_api """ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, - cache=True, max_retries=10, base_delay=1.0, **default_params) -> None: + cache=True, max_retries=10, base_delay=1.0, + mm_beta: bool = False, **default_params) -> None: if model is None: model = os.environ.get('TRACE_LITELLM_MODEL') if model is None: @@ -276,38 +313,92 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] self.model_name = model self.cache = cache self.default_params = default_params # Store default completion parameters - factory = lambda: self._factory(self.model_name, self.default_params, max_retries=max_retries, base_delay=base_delay) - super().__init__(factory, reset_freq) + + factory = lambda: self._factory( + self.model_name, + self.default_params, + mm_beta, + max_retries=max_retries, + base_delay=base_delay + ) + super().__init__(factory, reset_freq, mm_beta=mm_beta) @classmethod - def _factory(cls, model_name: str, default_params: dict, max_retries=10, base_delay=1.0): + def _factory(cls, model_name: str, default_params: dict, mm_beta: bool, + max_retries=10, base_delay=1.0): import litellm + + # Use Responses API when mm_beta=True, otherwise use Completion API + api_func = litellm.responses if mm_beta else litellm.completion + operation_name = "LiteLLM_responses" if mm_beta else "LiteLLM_completion" + if model_name.startswith('azure/'): # azure model azure_token_provider_scope = os.environ.get('AZURE_TOKEN_PROVIDER_SCOPE', None) if azure_token_provider_scope is not None: from azure.identity import DefaultAzureCredential, get_bearer_token_provider credential = get_bearer_token_provider(DefaultAzureCredential(), azure_token_provider_scope) - return lambda *args, **kwargs: retry_with_exponential_backoff( - lambda: litellm.completion(model_name, *args, - azure_ad_token_provider=credential, **{**default_params, **kwargs}), + if mm_beta: + # Responses API: model as keyword argument, convert messages to input + def azure_responses_wrapper(*args, **kwargs): + # Convert 'messages' to 'input' for Responses API + if 'messages' in kwargs and 'input' not in kwargs: + kwargs['input'] = kwargs.pop('messages') + return retry_with_exponential_backoff( + lambda: api_func(model=model_name, + azure_ad_token_provider=credential, **{**default_params, **kwargs}), + max_retries=max_retries, + base_delay=base_delay, + operation_name=operation_name + ) + return azure_responses_wrapper + else: + # Completion API: model as positional argument + return lambda *args, **kwargs: retry_with_exponential_backoff( + lambda: api_func(model_name, *args, + azure_ad_token_provider=credential, **{**default_params, **kwargs}), + max_retries=max_retries, + base_delay=base_delay, + operation_name=operation_name + ) + + if mm_beta: + # Responses API: model as keyword argument, convert messages to input + def responses_wrapper(*args, **kwargs): + # Convert 'messages' to 'input' for Responses API + if 'messages' in kwargs and 'input' not in kwargs: + kwargs['input'] = kwargs.pop('messages') + return retry_with_exponential_backoff( + lambda: api_func(model=model_name, **{**default_params, **kwargs}), max_retries=max_retries, base_delay=base_delay, - operation_name="LiteLLM_completion" + operation_name=operation_name ) - return lambda *args, **kwargs: retry_with_exponential_backoff( - lambda: litellm.completion(model_name, *args, **{**default_params, **kwargs}), - max_retries=max_retries, - base_delay=base_delay, - 
operation_name="LiteLLM_completion" - ) + return responses_wrapper + else: + # Completion API: model as positional argument + return lambda *args, **kwargs: retry_with_exponential_backoff( + lambda: api_func(model_name, *args, **{**default_params, **kwargs}), + max_retries=max_retries, + base_delay=base_delay, + operation_name=operation_name + ) @property def model(self): """ - response = litellm.completion( - model=self.model, - messages=[{"content": message, "role": "user"}] - ) + Calls either litellm.completion() or litellm.responses() depending on mm_beta. + + For completion API (mm_beta=False): + response = litellm.completion( + model=self.model, + messages=[{"content": message, "role": "user"}] + ) + + For responses API (mm_beta=True): + response = litellm.responses( + model=self.model, + input="Your input text" + ) """ return lambda *args, **kwargs: self._model(*args, **kwargs) @@ -319,7 +410,7 @@ class CustomLLM(AbstractModel): """ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, - cache=True) -> None: + cache=True, mm_beta: bool = False) -> None: if model is None: model = os.environ.get('TRACE_CUSTOMLLM_MODEL', 'gpt-4o') base_url = os.environ.get('TRACE_CUSTOMLLM_URL', 'http://xx.xx.xxx.xx:4000/') @@ -330,7 +421,7 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] self.model_name = model self.cache = cache factory = lambda: self._factory(base_url, server_api_key) # an LLM instance uses a fixed model - super().__init__(factory, reset_freq) + super().__init__(factory, reset_freq, mm_beta=mm_beta) @classmethod def _factory(cls, base_url: str, server_api_key: str): @@ -347,11 +438,123 @@ def create(self, **config: Any): config['model'] = self.model_name return self._model.chat.completions.create(**config) +class GoogleGenAILLM(AbstractModel): + """ + This is an LLM backend using Google's GenAI SDK with the Interactions API. + + https://ai.google.dev/gemini-api/docs/text-generation + + The Interactions API is a unified interface for interacting with Gemini models, + similar to OpenAI's Response API. It provides better state management, tool + orchestration, and support for long-running tasks. + + To use this, set the GEMINI_API_KEY environment variable with your API key. + For convenience, you can set the default model name through the environment + variable TRACE_GOOGLE_GENAI_MODEL. + + Supported models: + - Gemini 3: gemini-3-flash-preview, gemini-3-pro-preview + - Gemini 2.5: gemini-2.5-flash, gemini-2.5-pro, gemini-2.5-flash-lite + + This class supports storing default generation parameters (like temperature, + max_output_tokens, etc.) that will be used for all calls unless overridden. + + Note system_instruction is supported. + Example: + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) + response = llm( + messages=[ + {"role": "user", "content": "Hello!"} + ], + system_instruction="You are a helpful assistant." 
+        )
+    """
+
+    def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
+                 cache=True, mm_beta: bool = False, **default_params) -> None:
+        if model is None:
+            model = os.environ.get('TRACE_GOOGLE_GENAI_MODEL', 'gemini-2.5-flash')
+
+        self.model_name = model
+        self.cache = cache
+        self.default_params = default_params  # Store default generation parameters
+        factory = lambda: self._factory(self.model_name, self.default_params)
+        super().__init__(factory, reset_freq, mm_beta=mm_beta)
+
+    @classmethod
+    def _factory(cls, model_name: str, default_params: dict):
+        """Create a Google GenAI client wrapper around the generate_content API."""
+        # Get API key from environment variable
+        api_key = os.environ.get('GEMINI_API_KEY')
+        if api_key:
+            client = genai.Client(api_key=api_key)
+        else:
+            # Try without API key (will use default credentials or fail gracefully)
+            client = genai.Client()
+
+        # Build config if there are generation parameters
+        config_params = {}
+
+        # Handle thinking config for Gemini 2.5+ models
+        if 'thinking_budget' in default_params:
+            thinking_budget = default_params.pop('thinking_budget')
+            config_params['thinking_config'] = types.ThinkingConfig(
+                thinking_budget=thinking_budget
+            )
+
+        def api_func(model_name, *args, **kwargs):
+            # Extract system_instruction if present (needs to be at config level, not in kwargs)
+            system_instruction = kwargs.pop('system_instruction', None)
+
+            # Pop messages/contents so they are not forwarded into GenerateContentConfig,
+            # which only accepts generation settings
+            messages = kwargs.pop('messages', None)
+            contents = kwargs.pop('contents', None)
+            if contents is None:
+                contents = list(args)
+            elif not isinstance(contents, list):
+                contents = [contents]
+
+            # Handle messages parameter for automatic system instruction extraction
+            if messages:
+                # If system_instruction is explicitly passed, drop any system messages
+                if system_instruction is not None:
+                    messages = [msg for msg in messages if msg.get('role') != 'system']
+                else:
+                    # If system_instruction not passed, check if first message is system
+                    if messages[0].get('role') == 'system':
+                        system_instruction = messages[0].get('content')
+                        # Remove the system message from messages
+                        messages = messages[1:]
+                # Convert the remaining chat messages into genai Content objects
+                # (genai uses role 'model' where chat APIs use 'assistant')
+                contents.extend(
+                    types.Content(
+                        role='model' if msg.get('role') == 'assistant' else 'user',
+                        parts=[types.Part.from_text(text=msg.get('content'))],
+                    )
+                    for msg in messages
+                )
+
+            if system_instruction:
+                config_params_with_system = {**config_params, 'system_instruction': system_instruction}
+            else:
+                config_params_with_system = config_params
+
+            return client.models.generate_content(
+                model=model_name,
+                contents=contents,
+                config=types.GenerateContentConfig(**{**config_params_with_system, **kwargs})
+            )
+
+        return lambda *args, **kwargs: retry_with_exponential_backoff(
+            lambda: api_func(model_name, *args, **{**default_params, **kwargs}),
+            max_retries=5,
+            base_delay=1,
+            operation_name=f"{model_name}"
+        )
+
+    @property
+    def model(self):
+        """
+        Wrapper around the configured Google GenAI call.
+
+        Example:
+            response = llm(contents="How does AI work?")
+        """
+        return lambda *args, **kwargs: self._model(*args, **kwargs)
+
 
 # Registry of available backends
 _LLM_REGISTRY = {
     "LiteLLM": LiteLLM,
     "AutoGen": AutoGenLLM,
     "CustomLLM": CustomLLM,
+    "GoogleGenAI": GoogleGenAILLM,
 }
 
 class LLMFactory:
@@ -442,16 +645,17 @@ class LLMFactory:
 
-    # Default profile - just gpt-4o-mini with no opinionated settings
+    # Default profile - just gpt-4o with no opinionated settings
     _profiles = {
-        'default': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o-mini'}},
+        'default': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o'}},
     }
 
     @classmethod
-    def get_llm(cls, profile: str = 'default', model: str = None, **kwargs) -> AbstractModel:
+    def get_llm(cls, profile: str = 'default', model: str = None, mm_beta: bool = False, **kwargs) -> AbstractModel:
         """Get an LLM instance for the specified profile or model.
 
         Args:
             profile: Name of the profile to use. Defaults to 'default'.
             model: Model name to use directly. If provided, overrides profile.
+            mm_beta: If True, returns AssistantTurn objects with rich multimodal content.
+                     If False (default), returns raw API responses in legacy format.
             **kwargs: Additional parameters to pass to the backend (e.g., temperature, top_p).
                       These override profile settings if both are specified.
 
             # Override profile settings
             llm = LLMFactory.get_llm(profile="creative_writer", temperature=0.5)
+
+            # Use mm_beta mode for multimodal responses
+            llm = LLMFactory.get_llm(model="gpt-4o", mm_beta=True)
         """
         # If model is specified directly, create a simple config
         if model is not None:
             backend = kwargs.pop('backend', None)
-            if backend is None:
-                backend = 'LiteLLM'
-            backend_cls = _LLM_REGISTRY[backend]
-            params = {'model': model, **kwargs}
+
+            # Determine backend with priority: Gemini models > explicit backend > default
+            if model.startswith('gemini'):
+                # Gemini models use the GoogleGenAI backend (highest priority)
+                backend_cls = _LLM_REGISTRY['GoogleGenAI']
+                # Strip 'gemini/' prefix if present (LiteLLM format: gemini/gemini-pro)
+                if model.startswith('gemini/'):
+                    model = model[len('gemini/'):]
+            elif backend is not None:
+                # Explicit backend specified
+                backend_cls = _LLM_REGISTRY[backend]
+            else:
+                # Default to LiteLLM for other models
+                backend_cls = _LLM_REGISTRY['LiteLLM']
+
+            params = {'model': model, 'mm_beta': mm_beta, **kwargs}
             return backend_cls(**params)
-        # Otherwise use profile
         if profile not in cls._profiles:
             raise ValueError(
@@ -495,6 +713,7 @@
 
         # Merge profile params with any override kwargs
         params = config['params'].copy()
+        params['mm_beta'] = mm_beta
         params.update(kwargs)
 
         return backend_cls(**params)
@@ -562,10 +781,11 @@ class DummyLLM(AbstractModel):
 
     def __init__(self,
                  callable,
-                 reset_freq: Union[int, None] = None) -> None:
+                 reset_freq: Union[int, None] = None,
+                 mm_beta: bool = False) -> None:
         # self.message = message
         self.callable = callable
-        super().__init__(self._factory, reset_freq)
+        super().__init__(self._factory, reset_freq, mm_beta=mm_beta)
 
     def _factory(self):
@@ -584,7 +804,6 @@ def __init__(self, content):
 
         return lambda *args, **kwargs: Response(self.callable(*args, **kwargs))
 
-
 class LLM:
     """
     A unified entry point for all supported LLM backends. 
@@ -605,6 +824,62 @@ class LLM: llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=2000) llm = LLM(model="gpt-4o-mini", temperature=0.3, top_p=0.9) + Using Multimodal Beta Mode: + # Enable mm_beta for rich AssistantTurn responses + llm = LLM(model="gpt-4o", mm_beta=True) + response = llm(messages=[{"role": "user", "content": "Hello"}]) + # response is now an AssistantTurn object with .content, .tool_calls, etc. + + # Legacy mode (default, mm_beta=False) + llm = LLM(model="gpt-4o") + response = llm(messages=[{"role": "user", "content": "Hello"}]) + # response is raw API response: response.choices[0].message.content + + Using System Messages: + + # LiteLLM (OpenAI, Anthropic, etc.) - Use messages array with role="system" + llm = LLM(model="gpt-4o-mini", mm_beta=True) + response = llm(messages=[ + {"role": "system", "content": "You are a helpful math tutor."}, + {"role": "user", "content": "What is 2+2?"} + ]) + print(response.get_text()) # AssistantTurn object + + # LiteLLM Legacy mode (mm_beta=False) + llm = LLM(model="gpt-4o-mini") + response = llm(messages=[ + {"role": "system", "content": "You are a pirate assistant."}, + {"role": "user", "content": "Hello!"} + ]) + print(response.choices[0].message.content) # Raw API response + + # Google Gemini - Use system_instruction parameter (not in messages array) + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) + response = llm( + "Hello there", + system_instruction="You are a helpful assistant." + ) + print(response.get_text()) # AssistantTurn object + + # Gemini with messages format (system_instruction separate from messages) + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) + response = llm( + messages=[ + {"role": "user", "content": "What is your purpose?"} + ], + system_instruction="You are a creative writing instructor." + ) + + # Our Gemini wrapper also automatically extracts system instruction from messages array if not passed explicitly + messages = [ + {"role": "system", "content": "You are a Shakespearean poet."}, + {"role": "user", "content": "Tell me about the sun."} + ] + response1 = llm(messages=messages) + messages.append({"role": "assistant", "content": response1.get_text()}) + messages.append({"role": "user", "content": "And the moon?"}) + response2 = llm(messages=messages) # System message still applies + Using Named Profiles: # Use a saved profile llm = LLM(profile="my_custom_profile") @@ -617,6 +892,7 @@ class LLM: # Explicitly specify backend (default: LiteLLM) llm = LLM(backend="AutoGen", config_list=my_configs) llm = LLM(backend="CustomLLM", model="llama-3.1-8b") + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash") # Or set via environment variable # export TRACE_DEFAULT_LLM_BACKEND=AutoGen @@ -645,20 +921,35 @@ class LLM: seed=42 ) + Key Differences Between Backends: + LiteLLM (OpenAI, Anthropic, etc.): + - System message: Include in messages array with role="system" + - Format: messages=[{"role": "system", "content": "..."}] + - Works with: OpenAI, Anthropic, Cohere, etc. + + Google Gemini: + - System instruction: Pass as system_instruction parameter + - Format: system_instruction="You are a helpful assistant." + - Separate from messages array + - Works with: gemini-2.5-flash, gemini-2.5-pro, etc. 
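+
+    A combined sketch (a minimal example, assuming OPENAI_API_KEY and
+    GEMINI_API_KEY are set; with mm_beta=True both backends return an AssistantTurn):
+
+        openai_llm = LLM(model="gpt-4o-mini", mm_beta=True)
+        gemini_llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True)
+        for llm in (openai_llm, gemini_llm):
+            turn = llm(messages=[
+                {"role": "system", "content": "Answer in one word."},
+                {"role": "user", "content": "What color is the sky?"},
+            ])
+            print(turn.get_text())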
+
     See Also:
         - LLMFactory: For managing named profiles
+        - AssistantTurn: Returned when mm_beta=True
         - https://docs.litellm.ai/docs/completion/input: Full list of LiteLLM parameters
+        - https://ai.google.dev/gemini-api/docs/system-instructions: Gemini system instructions
     """
 
-    def __new__(cls, model: str = None, profile: str = None, backend: str = None, **kwargs):
+    def __new__(cls, model: str = None, profile: str = None, backend: str = None,
+                mm_beta: bool = False, **kwargs):
         # Priority 1: If profile is specified, use LLMFactory
         if profile:
-            return LLMFactory.get_llm(profile=profile, **kwargs)
+            return LLMFactory.get_llm(profile=profile, mm_beta=mm_beta, **kwargs)
 
         # Priority 2: If model is specified, use LLMFactory with model
         if model:
             if backend is not None:
                 kwargs['backend'] = backend
-            return LLMFactory.get_llm(model=model, **kwargs)
+            return LLMFactory.get_llm(model=model, mm_beta=mm_beta, **kwargs)
+
+        # If nothing is explicitly requested, fall back to the 'default' profile.
+        # (The parameter default stays profile=None so that an implicit profile
+        # cannot shadow an explicitly passed model= or backend=.)
+        if backend is None and os.getenv("TRACE_DEFAULT_LLM_BACKEND") is None:
+            return LLMFactory.get_llm(profile='default', mm_beta=mm_beta, **kwargs)
 
         # Priority 3: Use backend-specific instantiation (for AutoGen, CustomLLM, etc.)
         # This path is for when neither profile nor model is specified
         name = backend or os.getenv("TRACE_DEFAULT_LLM_BACKEND", "LiteLLM")
         try:
             backend_cls = _LLM_REGISTRY[name]
@@ -669,4 +960,5 @@
         raise ValueError(f"Unknown LLM backend: {name}. "
                          f"Valid options are: {list(_LLM_REGISTRY)}")
         # Instantiate and return the chosen subclass
+        kwargs['mm_beta'] = mm_beta
         return backend_cls(**kwargs)
\ No newline at end of file
diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py
index 0b9f5a3e..38d67efa 100644
--- a/tests/llm_optimizers_tests/test_optoprime_v3.py
+++ b/tests/llm_optimizers_tests/test_optoprime_v3.py
@@ -1,20 +1,14 @@
 import os
 import pytest
-from opto.trace import bundle, node, GRAPH
-import opto.optimizers
-import importlib
-import inspect
-import json
-import pickle
+from opto.trace import GRAPH
 from opto.utils.llm import LLM
-from opto import trace
 from opto.trace import node, bundle
 from opto.optimizers.optoprime_v3 import (
     OptoPrimeV3, OptimizerPromptSymbolSet2, ProblemInstance,
     OptimizerPromptSymbolSet, value_to_image_content
 )
-from opto.optimizers.backbone import TextContent, ImageContent, ContentBlock, ContentBlockList
+from opto.utils.backbone import TextContent, ImageContent
 
 # You can override for temporarily testing a specific optimizer
 ALL_OPTIMIZERS = [TextGrad] # [OptoPrimeMulti]
 ALL_OPTIMIZERS = [OptoPrime]
@@ -187,7 +181,7 @@ def test_extraction_pipeline():
 
 def test_problem_instance_text_only():
     """Test that ProblemInstance with text-only content works correctly."""
-    from opto.optimizers.backbone import ContentBlockList
+    from opto.utils.backbone import ContentBlockList
 
     symbol_set = OptimizerPromptSymbolSet()
     instance = ProblemInstance(
@@ -222,7 +216,7 @@ def test_problem_instance_text_only():
 
 def test_problem_instance_with_content_blocks():
     """Test ProblemInstance with ContentBlockList fields containing images."""
-    from opto.optimizers.backbone import ContentBlockList
+    from opto.utils.backbone import ContentBlockList
 
     symbol_set = OptimizerPromptSymbolSet()
     # Create content blocks with an image
@@ -265,7 +259,7 @@ def test_problem_instance_with_content_blocks():
 
 def test_problem_instance_mixed_content():
     """Test ProblemInstance with mixed text and image content in multiple fields."""
-    from opto.optimizers.backbone import ContentBlockList
+    from opto.utils.backbone import ContentBlockList
 
     symbol_set = OptimizerPromptSymbolSet()
     # Variables with image
@@ -493,7 +487,7 @@ def test_optimizer_step_with_content_blocks():
     system_prompt, user_prompt = 
optimizer.construct_prompt(summary) # Verify content blocks structure - from opto.optimizers.backbone import ContentBlockList + from opto.utils.backbone import ContentBlockList assert isinstance(user_prompt, ContentBlockList) assert len(user_prompt) > 0 diff --git a/tests/unit_tests/test_llm.py b/tests/unit_tests/test_llm.py index 9435bf33..1b4a9934 100644 --- a/tests/unit_tests/test_llm.py +++ b/tests/unit_tests/test_llm.py @@ -1,8 +1,22 @@ -from opto.utils.llm import LLM +from opto.utils.llm import LLM, LLMFactory from opto.optimizers.utils import print_color import os +import pytest +from opto.utils.backbone import ( + ConversationHistory, + UserTurn, + AssistantTurn +) + +# Skip tests if no API credentials are available +SKIP_REASON = "No API credentials found" +HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get( + "OPENAI_API_KEY") or os.environ.get("GEMINI_API_KEY") + + def test_llm_init(): + """Test basic LLM initialization with legacy mode (mm_beta=False)""" if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"): llm = LLM() system_prompt = 'You are a helpful assistant.' @@ -22,3 +36,433 @@ def test_llm_init(): print_color(f'System: {system_prompt}', 'red') print_color(f'User: {user_prompt}', 'blue') print_color(f'LLM: {response}', 'green') + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +class TestLLMMMBetaMode: + """Test suite for LLM class with mm_beta=True and mm_beta=False modes""" + + def test_mm_beta_false_legacy_response_format(self): + """Test that mm_beta=False returns raw API response (legacy format)""" + llm = LLM(mm_beta=False) + messages = [{"role": "user", "content": "Say 'test' and nothing else."}] + + response = llm(messages=messages) + + # Legacy mode should return raw API response with .choices attribute + assert hasattr(response, 'choices'), "Legacy mode should return raw API response" + assert hasattr(response.choices[0], 'message'), "Response should have message attribute" + assert hasattr(response.choices[0].message, 'content'), "Message should have content attribute" + + # Should NOT be an AssistantTurn object + assert not isinstance(response, AssistantTurn), "Legacy mode should not return AssistantTurn" + + content = response.choices[0].message.content + assert isinstance(content, str), "Content should be a string" + assert len(content) > 0, "Content should not be empty" + + print_color(f"✓ Legacy mode (mm_beta=False) returns raw API response", 'green') + + def test_mm_beta_true_assistant_turn_response(self): + """Test that mm_beta=True returns AssistantTurn object""" + llm = LLM(mm_beta=True) + messages = [{"role": "user", "content": "Say 'test' and nothing else."}] + + response = llm(messages=messages) + + # mm_beta mode should return AssistantTurn object + assert isinstance(response, AssistantTurn), "mm_beta mode should return AssistantTurn object" + + # Check AssistantTurn attributes + assert hasattr(response, 'content'), "AssistantTurn should have content attribute" + assert hasattr(response, 'tool_calls'), "AssistantTurn should have tool_calls attribute" + assert hasattr(response, 'role'), "AssistantTurn should have role attribute" + assert response.role == "assistant", "Role should be 'assistant'" + + # Content should be accessible + assert response.content is not None, "Content should not be None" + + print_color(f"✓ Multimodal mode (mm_beta=True) returns AssistantTurn object", 'green') + + def 
test_mm_beta_with_explicit_model(self): + """Test mm_beta parameter works with explicit model specification""" + # Test with mm_beta=False + llm_legacy = LLM(model="gpt-4o-mini", mm_beta=False) + messages = [{"role": "user", "content": "Hi"}] + + response_legacy = llm_legacy(messages=messages) + assert hasattr(response_legacy, 'choices'), "Should return raw API response" + assert not isinstance(response_legacy, AssistantTurn), "Should not be AssistantTurn" + + # Test with mm_beta=True + llm_mm = LLM(model="gpt-4o-mini", mm_beta=True) + response_mm = llm_mm(messages=messages) + assert isinstance(response_mm, AssistantTurn), "Should return AssistantTurn" + + print_color(f"✓ mm_beta parameter works correctly with explicit model", 'green') + + def test_mm_beta_with_profile(self): + """Test mm_beta parameter works with profile-based instantiation""" + # Create a test profile + LLMFactory.create_profile("test_profile", backend="LiteLLM", model="gpt-4o-mini", temperature=0.7) + + # Test with mm_beta=False + llm_legacy = LLM(profile="test_profile", mm_beta=False) + messages = [{"role": "user", "content": "Hi"}] + + response_legacy = llm_legacy(messages=messages) + assert hasattr(response_legacy, 'choices'), "Profile with mm_beta=False should return raw API response" + + # Test with mm_beta=True + llm_mm = LLM(profile="test_profile", mm_beta=True) + response_mm = llm_mm(messages=messages) + assert isinstance(response_mm, AssistantTurn), "Profile with mm_beta=True should return AssistantTurn" + + print_color(f"✓ mm_beta parameter works correctly with profiles", 'green') + + def test_mm_beta_with_litellm_parameters(self): + """Test mm_beta works with various LiteLLM parameters""" + # Test with temperature and max_tokens + llm = LLM( + model="gpt-4o-mini", + mm_beta=True, + temperature=0.3, + max_tokens=100 + ) + + messages = [{"role": "user", "content": "Say hello"}] + response = llm(messages=messages) + + assert isinstance(response, AssistantTurn), "Should return AssistantTurn with LiteLLM params" + assert response.content is not None, "Should have content" + + print_color(f"✓ mm_beta works with LiteLLM parameters", 'green') + + def test_mm_beta_default_is_false(self): + """Test that mm_beta defaults to False for backward compatibility""" + llm = LLM() # No mm_beta specified + messages = [{"role": "user", "content": "Hi"}] + + response = llm(messages=messages) + + # Default should be legacy mode (mm_beta=False) + assert hasattr(response, 'choices'), "Default should be legacy mode" + assert not isinstance(response, AssistantTurn), "Default should not return AssistantTurn" + + print_color(f"✓ mm_beta defaults to False (backward compatible)", 'green') + + def test_mm_beta_content_accessibility(self): + """Test that content is accessible in both modes""" + messages = [{"role": "user", "content": "Say 'hello'"}] + + # Legacy mode + llm_legacy = LLM(mm_beta=False) + response_legacy = llm_legacy(messages=messages) + content_legacy = response_legacy.choices[0].message.content + assert isinstance(content_legacy, str), "Legacy content should be string" + assert len(content_legacy) > 0, "Legacy content should not be empty" + + # mm_beta mode + llm_mm = LLM(mm_beta=True) + response_mm = llm_mm(messages=messages) + # AssistantTurn content is a list of ContentBlock objects + assert response_mm.content is not None, "mm_beta content should not be None" + + print_color(f"✓ Content accessible in both modes", 'green') + + def test_mm_beta_with_different_backends(self): + """Test mm_beta parameter with different 
backend specifications"""
+        # Test with explicit LiteLLM backend
+        llm = LLM(backend="LiteLLM", model="gpt-4o-mini", mm_beta=True)
+        messages = [{"role": "user", "content": "Hi"}]
+
+        response = llm(messages=messages)
+        assert isinstance(response, AssistantTurn), "LiteLLM backend with mm_beta=True should return AssistantTurn"
+
+        print_color(f"✓ mm_beta works with explicit backend specification", 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+class TestLLMConstructorPriorities:
+    """Test the priority logic in LLM constructor"""
+
+    def test_priority_profile_over_default(self):
+        """Test that profile parameter takes priority"""
+        LLMFactory.create_profile("priority_test", backend="LiteLLM", model="gpt-4o-mini", temperature=0.5)
+
+        llm = LLM(profile="priority_test", mm_beta=True)
+        messages = [{"role": "user", "content": "Hi"}]
+
+        response = llm(messages=messages)
+        assert isinstance(response, AssistantTurn), "Profile-based LLM should respect mm_beta"
+
+        print_color(f"✓ Profile parameter takes priority", 'green')
+
+    def test_priority_model_over_profile(self):
+        """Test that model parameter takes priority over default profile"""
+        # When model is specified, it should use that model regardless of default profile
+        llm = LLM(model="gpt-4o-mini", mm_beta=True)
+        messages = [{"role": "user", "content": "Hi"}]
+
+        response = llm(messages=messages)
+        assert isinstance(response, AssistantTurn), "Model-based LLM should respect mm_beta"
+
+        print_color(f"✓ Model parameter creates correct LLM instance", 'green')
+
+    def test_backend_fallback(self):
+        """Test that an explicit backend parameter is honored when a model is also given"""
+        # Note: a model is still supplied here (a live call needs one), so this
+        # routes through LLMFactory with backend forwarded via kwargs rather
+        # than exercising the bare Priority 3 fallback in __new__.
+        llm = LLM(backend="LiteLLM", mm_beta=True, model="gpt-4o-mini")
+        messages = [{"role": "user", "content": "Hi"}]
+
+        response = llm(messages=messages)
+        assert isinstance(response, AssistantTurn), "Backend-based LLM should respect mm_beta"
+
+        print_color(f"✓ Backend parameter works correctly", 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+class TestLLMDocumentationExamples:
+    """Test examples from LLM class documentation"""
+
+    def test_basic_usage_default_model(self):
+        """Test: llm = LLM()"""
+        llm = LLM()
+        messages = [{"role": "user", "content": "Hi"}]
+        response = llm(messages=messages)
+
+        # Default is mm_beta=False
+        assert hasattr(response, 'choices'), "Default usage should return raw API response"
+        print_color(f"✓ Basic usage with default model works", 'green')
+
+    def test_specify_model_directly(self):
+        """Test: llm = LLM(model='gpt-4o')"""
+        llm = LLM(model="gpt-4o-mini")  # Using mini for cost efficiency
+        messages = [{"role": "user", "content": "Hi"}]
+        response = llm(messages=messages)
+
+        assert hasattr(response, 'choices'), "Model specification should work"
+        print_color(f"✓ Model specification works", 'green')
+
+    def test_multimodal_beta_mode_example(self):
+        """Test example from 'Using Multimodal Beta Mode' section"""
+        # Enable mm_beta for rich AssistantTurn responses
+        llm = LLM(model="gpt-4o-mini", mm_beta=True)
+        response = llm(messages=[{"role": "user", "content": "Hello"}])
+
+        # response is now an AssistantTurn object with .content, .tool_calls, etc.
+ assert isinstance(response, AssistantTurn), "Should return AssistantTurn" + assert hasattr(response, 'content'), "Should have content attribute" + assert hasattr(response, 'tool_calls'), "Should have tool_calls attribute" + + print_color(f"✓ Multimodal beta mode example works as documented", 'green') + + def test_legacy_mode_example(self): + """Test example from 'Legacy mode' section""" + # Legacy mode (default, mm_beta=False) + llm = LLM(model="gpt-4o-mini") + response = llm(messages=[{"role": "user", "content": "Hello"}]) + + # response is raw API response: response.choices[0].message.content + assert hasattr(response, 'choices'), "Should return raw API response" + content = response.choices[0].message.content + assert isinstance(content, str), "Content should be string" + + print_color(f"✓ Legacy mode example works as documented", 'green') + + def test_litellm_parameters_example(self): + """Test examples with LiteLLM parameters""" + # High creativity example + llm = LLM( + model="gpt-4o-mini", + temperature=0.9, + top_p=0.95, + presence_penalty=0.6 + ) + messages = [{"role": "user", "content": "Hi"}] + response = llm(messages=messages) + + assert hasattr(response, 'choices'), "LiteLLM parameters should work" + + print_color(f"✓ LiteLLM parameters example works", 'green') + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_mm_beta_integration_with_conversation(): + """Test mm_beta mode with a multi-turn conversation""" + llm = LLM(model="gpt-4o-mini", mm_beta=True) + + # First turn + messages = [ + {"role": "user", "content": "My name is Alice."} + ] + response1 = llm(messages=messages) + assert isinstance(response1, AssistantTurn), "First response should be AssistantTurn" + + # Second turn - reference previous context + messages.append({"role": "assistant", "content": str(response1.content)}) + messages.append({"role": "user", "content": "What is my name?"}) + + response2 = llm(messages=messages) + assert isinstance(response2, AssistantTurn), "Second response should be AssistantTurn" + + print_color(f"✓ mm_beta mode works with multi-turn conversations", 'green') + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +class TestSystemMessages: + """Test suite for system message handling in different LLM backends""" + + def test_litellm_completion_api_system_message(self): + """Test system message with LiteLLM Completion API (mm_beta=False)""" + llm = LLM(model="gpt-4o-mini", mm_beta=False) + + messages = [ + {"role": "system", "content": "You are a cat. Your name is Neko. Always respond as a cat would."}, + {"role": "user", "content": "What is your name?"} + ] + + response = llm(messages=messages) + + # Legacy mode should return raw API response + assert hasattr(response, 'choices'), "Should return raw API response" + content = response.choices[0].message.content + assert isinstance(content, str), "Content should be a string" + assert len(content) > 0, "Content should not be empty" + + # Check that the response reflects the system message (should mention being a cat or Neko) + content_lower = content.lower() + assert 'neko' in content_lower or 'cat' in content_lower, \ + f"Response should reflect system message about being a cat named Neko. 
Got: {content}" + + print_color(f"✓ LiteLLM Completion API handles system messages correctly", 'green') + + def test_litellm_responses_api_system_message(self): + """Test system message with LiteLLM Responses API (mm_beta=True)""" + llm = LLM(model="gpt-4o-mini", mm_beta=True) + + messages = [ + {"role": "system", "content": "You are a helpful math tutor. Always explain concepts clearly."}, + {"role": "user", "content": "What is 2+2?"} + ] + + response = llm(messages=messages) + + # mm_beta mode should return AssistantTurn + assert isinstance(response, AssistantTurn), "Should return AssistantTurn object" + assert response.content is not None, "Content should not be None" + + # Get text content + text_content = response.get_text() + assert isinstance(text_content, str), "Text content should be a string" + assert len(text_content) > 0, "Text content should not be empty" + assert '4' in text_content, f"Response should contain the answer '4'. Got: {text_content}" + + print_color(f"✓ LiteLLM Responses API handles system messages correctly", 'green') + + @pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No Gemini API key found") + def test_gemini_system_instruction_legacy_mode(self): + """Test system_instruction with Gemini API in legacy mode (mm_beta=False)""" + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=False) + + # For Gemini, system_instruction is passed as a parameter + response = llm( + "Hello there", + system_instruction="You are a cat. Your name is Neko. Always respond as a cat would." + ) + + # Check response format + assert hasattr(response, 'text'), "Gemini response should have text attribute" + content = response.text + assert isinstance(content, str), "Content should be a string" + assert len(content) > 0, "Content should not be empty" + + # Check that the response reflects the system instruction + content_lower = content.lower() + assert 'neko' in content_lower or 'cat' in content_lower or 'meow' in content_lower, \ + f"Response should reflect system instruction about being a cat named Neko. Got: {content}" + + print_color(f"✓ Gemini API handles system_instruction correctly (legacy mode)", 'green') + + @pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No Gemini API key found") + def test_gemini_system_instruction_mm_beta_mode(self): + """Test system_instruction with Gemini API in mm_beta mode""" + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) + + # For Gemini, system_instruction is passed as a parameter + response = llm( + "What is your name?", + system_instruction="You are a helpful assistant named Claude. Always introduce yourself." + ) + + # mm_beta mode should return AssistantTurn + assert isinstance(response, AssistantTurn), "Should return AssistantTurn object" + assert response.content is not None, "Content should not be None" + + # Get text content + text_content = response.get_text() + assert isinstance(text_content, str), "Text content should be a string" + assert len(text_content) > 0, "Text content should not be empty" + + # Check that the response reflects the system instruction + text_lower = text_content.lower() + assert 'claude' in text_lower or 'assistant' in text_lower, \ + f"Response should reflect system instruction about being Claude. 
Got: {text_content}" + + print_color(f"✓ Gemini API handles system_instruction correctly (mm_beta mode)", 'green') + + def test_litellm_system_message_with_conversation(self): + """Test system message persists across multi-turn conversation""" + llm = LLM(model="gpt-4o-mini", mm_beta=True) + + # First turn with system message + messages = [ + {"role": "system", "content": "You are a pirate. Always talk like a pirate."}, + {"role": "user", "content": "Hello"} + ] + + response1 = llm(messages=messages) + assert isinstance(response1, AssistantTurn), "First response should be AssistantTurn" + text1 = response1.get_text() + + # Check pirate-like language in first response + pirate_indicators = ['arr', 'matey', 'ahoy', 'ye', 'aye'] + has_pirate_language = any(indicator in text1.lower() for indicator in pirate_indicators) + assert has_pirate_language, f"First response should use pirate language. Got: {text1}" + + # Second turn - system message should still apply + messages.append({"role": "assistant", "content": text1}) + messages.append({"role": "user", "content": "What's the weather like?"}) + + response2 = llm(messages=messages) + assert isinstance(response2, AssistantTurn), "Second response should be AssistantTurn" + text2 = response2.get_text() + + # Check pirate-like language persists + has_pirate_language_2 = any(indicator in text2.lower() for indicator in pirate_indicators) + assert has_pirate_language_2, f"Second response should still use pirate language. Got: {text2}" + + print_color(f"✓ System message persists across conversation turns", 'green') + + @pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No Gemini API key found") + def test_gemini_system_instruction_with_config_params(self): + """Test system_instruction works with other config parameters""" + llm = LLM( + backend="GoogleGenAI", + model="gemini-2.5-flash", + mm_beta=True, + temperature=0.7, + max_output_tokens=100 + ) + + response = llm( + "Tell me a short joke", + system_instruction="You are a comedian who tells very short jokes." + ) + + assert isinstance(response, AssistantTurn), "Should return AssistantTurn object" + text_content = response.get_text() + assert len(text_content) > 0, "Should have content" + + print_color(f"✓ Gemini system_instruction works with other config parameters", 'green') + diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index d36fab88..da52dc2d 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -9,13 +9,10 @@ """ import os import pytest -import base64 -from opto.optimizers.backbone import ( +from opto.utils.backbone import ( ConversationHistory, UserTurn, - AssistantTurn, - TextContent, - ImageContent + AssistantTurn ) # Skip tests if no API credentials are available @@ -492,6 +489,57 @@ def test_real_llm_multi_turn_with_images(): print("\n✅ Multi-turn conversation with images completed successfully!") +# ==== Testing the Automatic Raw Response Parsing into AssistantTurn === +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_automatic_openai_raw_response_parsing_into_assistant_turn(): + import litellm + import base64 + + # Simple OpenAI text generation + response = litellm.responses( + model="openai/gpt-4o", + input="Hello, how are you?" 
+ ) + assistant_turn = AssistantTurn(response) + assert "Hello" in assistant_turn.content[0].text + + print(assistant_turn) + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_automatic_openai_multimodal_raw_response_parsing_into_assistant_turn(): + import litellm + import base64 + + # OpenAI models require tools parameter for image generation + response = litellm.responses( + model="openai/gpt-4o", + input="Generate a futuristic city at sunset and describe it in a sentence.", + tools=[{"type": "image_generation"}] + ) + + assistant_turn = AssistantTurn(response) + print(assistant_turn) + + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_automatic_google_generate_content_raw_response_parsing_into_assistant_turn(): + from google import genai + from google.genai import types + + client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY")) + + response = client.models.generate_content( + model="gemini-2.5-flash-image", + contents="A kawaii-style sticker of a happy red panda wearing a tiny bamboo hat. It's munching on a green bamboo leaf. The design features bold, clean outlines, simple cel-shading, and a vibrant color palette. The background must be white.", + ) + + assistant_turn = AssistantTurn(response) + print(assistant_turn) + + assert not assistant_turn.content[1].is_empty() + + + if __name__ == '__main__': import litellm import base64 @@ -514,3 +562,4 @@ def test_real_llm_multi_turn_with_images(): print(f"Image saved: generated_{response.output[0].id}.png") + From 87775289b67ed31cdc0a7e3a2e3049be8151022f Mon Sep 17 00:00:00 2001 From: windweller Date: Sat, 27 Dec 2025 00:41:53 -0500 Subject: [PATCH 44/51] fix test errors --- opto/utils/backbone.py | 27 ++++++++++++++++++--- opto/utils/llm.py | 1 - tests/unit_tests/test_optimizer_backbone.py | 3 ++- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/opto/utils/backbone.py b/opto/utils/backbone.py index f966b70d..20ec0381 100644 --- a/opto/utils/backbone.py +++ b/opto/utils/backbone.py @@ -1348,14 +1348,35 @@ class AssistantTurn(Turn): def __init__(self, *args, **kwargs): """ - Initialize AssistantTurn from a raw response. + Initialize AssistantTurn from a raw response or with explicit fields. + + Three ways to initialize: + 1. Empty: AssistantTurn() - creates empty turn with defaults + 2. From raw response: AssistantTurn(response) - autocasts the response + 3. With fields: AssistantTurn(role="assistant", content=[...]) - explicit fields """ if len(args) > 0 and len(kwargs) == 0: + # Case 2: Single positional arg - autocast from raw response value_dict = self.autocast(args[0]) super().__init__(**value_dict) - else: - assert len(kwargs) > 0, "Either provide a raw response or keyword arguments" + elif len(kwargs) > 0: + # Case 3: Keyword arguments - use them directly super().__init__(**kwargs) + else: + # Case 1: No arguments - initialize with defaults + super().__init__( + role="assistant", + content=[], + tool_calls=[], + tool_results=[], + reasoning=None, + finish_reason=None, + prompt_tokens=None, + completion_tokens=None, + model=None, + timestamp=None, + metadata={} + ) @staticmethod def from_google_genai(value: Any) -> Dict[str, Any]: diff --git a/opto/utils/llm.py b/opto/utils/llm.py index 826f1502..42555c0e 100644 --- a/opto/utils/llm.py +++ b/opto/utils/llm.py @@ -1,5 +1,4 @@ """ -This adapater is for three cases: When MM (multimodal) is enabled, we primarily either use: 1. LiteLLM's response API 2. 
Google's Interaction API design (not supported by LiteLLM response API at all) diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index da52dc2d..4adbe39e 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -8,6 +8,7 @@ 3. Multi-modal use of conversation manager, including multi-turn and image as output """ import os +import base64 import pytest from opto.utils.backbone import ( ConversationHistory, @@ -473,7 +474,7 @@ def test_real_llm_multi_turn_with_images(): print(" Which of these flowers would be better for a romantic gift and why?") response2 = llm(messages=messages, max_tokens=300) - response2_content = response2.choicbes[0].message.content + response2_content = response2.choices[0].message.content print("\n🤖 Turn 2 - Assistant:") print(f" {response2_content[:200]}...") From 2252ccd6fa509990a51d788305b9d1152a5ae929 Mon Sep 17 00:00:00 2001 From: windweller Date: Sat, 27 Dec 2025 01:43:41 -0500 Subject: [PATCH 45/51] updated google genai llm to turn litellm format history messages into Gemini-compatible history. --- opto/utils/llm.py | 90 ++++++++++--- tests/unit_tests/test_optimizer_backbone.py | 133 +++++++++++++++++++- 2 files changed, 207 insertions(+), 16 deletions(-) diff --git a/opto/utils/llm.py b/opto/utils/llm.py index 42555c0e..1ea88dc6 100644 --- a/opto/utils/llm.py +++ b/opto/utils/llm.py @@ -506,31 +506,90 @@ def api_func(model_name, *args, **kwargs): system_instruction = kwargs.pop('system_instruction', None) # Handle messages parameter for automatic system instruction extraction - messages = kwargs.get('messages', None) + messages = kwargs.pop('messages', None) if messages: # If system_instruction is explicitly passed, drop any system messages if system_instruction is not None: # Filter out system messages filtered_messages = [msg for msg in messages if msg.get('role') != 'system'] - kwargs['messages'] = filtered_messages else: # If system_instruction not passed, check if first message is system if messages and messages[0].get('role') == 'system': system_instruction = messages[0].get('content') # Remove the system message from messages - kwargs['messages'] = messages[1:] + filtered_messages = messages[1:] + else: + filtered_messages = messages + + # Convert messages to Google GenAI contents format + # Google GenAI expects contents as a list of content items + contents = [] + for msg in filtered_messages: + role = msg.get('role') + content = msg.get('content') + + # Map roles: user -> user, assistant -> model + if role == 'assistant': + role = 'model' + + # Handle content (can be string or list of content blocks) + if isinstance(content, str): + contents.append({'role': role, 'parts': [{'text': content}]}) + elif isinstance(content, list): + # Convert content blocks to parts + parts = [] + for block in content: + if block.get('type') == 'text': + parts.append({'text': block.get('text', '')}) + elif block.get('type') == 'image_url': + # Handle image URLs + image_url = block.get('image_url', {}).get('url', '') + if image_url.startswith('data:'): + # Extract base64 data + import re + match = re.match(r'data:([^;]+);base64,(.+)', image_url) + if match: + mime_type, data = match.groups() + parts.append({'inline_data': {'mime_type': mime_type, 'data': data}}) + else: + # External URL + parts.append({'file_data': {'file_uri': image_url}}) + if parts: + contents.append({'role': role, 'parts': parts}) + + # Use converted contents instead of args + # Don't wrap in tuple 
since we're passing as keyword argument + contents_to_use = contents if contents else args[0] if args else None + else: + # No messages parameter, use args as-is + contents_to_use = args[0] if args else None + + # Map max_tokens to max_output_tokens for Google GenAI + if 'max_tokens' in kwargs: + kwargs['max_output_tokens'] = kwargs.pop('max_tokens') + + # Remove any other parameters that shouldn't go to GenerateContentConfig + # Keep only valid config parameters + valid_config_params = { + 'temperature', 'max_output_tokens', 'top_p', 'top_k', + 'stop_sequences', 'candidate_count', 'presence_penalty', + 'frequency_penalty', 'response_mime_type', 'response_schema' + } + config_kwargs = {k: v for k, v in kwargs.items() if k in valid_config_params} if system_instruction: config_params_with_system = {**config_params, 'system_instruction': system_instruction} else: config_params_with_system = config_params - return client.models.generate_content( + response = client.models.generate_content( model=model_name, - contents=args, - config=types.GenerateContentConfig(**{**config_params_with_system, **kwargs}) + contents=contents_to_use, + config=types.GenerateContentConfig(**{**config_params_with_system, **config_kwargs}) ) + return response + return lambda *args, **kwargs: retry_with_exponential_backoff( lambda: api_func(model_name, *args, **{**default_params, **kwargs}), max_retries=5, @@ -687,7 +746,7 @@ def get_llm(cls, profile: str = 'default', model: str = None, mm_beta: bool = Fa # Determine backend with priority: Gemini models > explicit backend > default if model.startswith('gemini'): # Gemini models use GoogleGenAILLM backend (highest priority) - backend_cls = _LLM_REGISTRY['GoogleGenAILLM'] + backend_cls = _LLM_REGISTRY['GoogleGenAI'] # Strip 'gemini/' prefix if present (LiteLLM format: gemini/gemini-pro) if model.startswith('gemini/'): model = model[len('gemini/'):] @@ -853,7 +912,7 @@ class LLM: print(response.choices[0].message.content) # Raw API response # Google Gemini - Use system_instruction parameter (not in messages array) - llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash-image", mm_beta=True) response = llm( "Hello there", system_instruction="You are a helpful assistant." 
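The registry-key fix above ('GoogleGenAILLM' -> 'GoogleGenAI') is what lets Gemini model names reach the Google backend, and the prefix-stripping branch additionally accepts LiteLLM-style names. A minimal sketch of the equivalence this is meant to establish (illustrative; it assumes the lookup and stripping behave exactly as written above):

    # Both spellings should resolve to the same GoogleGenAI-backed class;
    # the 'gemini/' prefix is stripped before the model name is stored.
    llm_a = LLM(model="gemini-2.5-flash", mm_beta=True)
    llm_b = LLM(model="gemini/gemini-2.5-flash", mm_beta=True)
    assert type(llm_a) is type(llm_b)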
@@ -861,7 +920,7 @@ class LLM: print(response.get_text()) # AssistantTurn object # Gemini with messages format (system_instruction separate from messages) - llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash-image", mm_beta=True) response = llm( messages=[ {"role": "user", "content": "What is your purpose?"} @@ -891,7 +950,7 @@ class LLM: # Explicitly specify backend (default: LiteLLM) llm = LLM(backend="AutoGen", config_list=my_configs) llm = LLM(backend="CustomLLM", model="llama-3.1-8b") - llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash") + llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash-image") # Or set via environment variable # export TRACE_DEFAULT_LLM_BACKEND=AutoGen @@ -940,16 +999,17 @@ class LLM: """ def __new__(cls, model: str = None, profile: str = 'default', backend: str = None, mm_beta: bool = False, **kwargs): - # Priority 1: If profile is specified, use LLMFactory - if profile: - return LLMFactory.get_llm(profile=profile, mm_beta=mm_beta, **kwargs) - - # Priority 2: If model is specified, use LLMFactory with model + + # Priority 1: If model is specified, use LLMFactory with model if model: if backend is not None: kwargs['backend'] = backend return LLMFactory.get_llm(model=model, mm_beta=mm_beta, **kwargs) + # Priority 2: If profile is specified, use LLMFactory + if profile: + return LLMFactory.get_llm(profile=profile, mm_beta=mm_beta, **kwargs) + # Priority 3: Use backend-specific instantiation (for AutoGen, CustomLLM, etc.) # This path is for when neither profile nor model is specified name = backend or os.getenv("TRACE_DEFAULT_LLM_BACKEND", "LiteLLM") diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index 4adbe39e..bf63101a 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -490,6 +490,132 @@ def test_real_llm_multi_turn_with_images(): print("\n✅ Multi-turn conversation with images completed successfully!") + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_real_llm_multi_turn_with_images_updated_assistant_turn(): + """Test a multi-turn conversation with images. + + First turn: Ask about images + Second turn: Follow-up question about the same images + """ + from opto.utils.llm import LLM + + history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.") + llm = LLM() + + print("\n" + "="*80) + print("MULTI-TURN CONVERSATION WITH IMAGES") + print("="*80) + + # Turn 1: Send images and ask about them + user_turn1 = (UserTurn() + .add_text("What type of flowers are shown in these images?") + .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg") + .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg")) + + history.add_user_turn(user_turn1) + messages = history.to_litellm_format() + + print("\n📷 Turn 1 - User:") + print(" What type of flowers are shown in these images? 
[+ 2 images]") + + response1 = llm(messages=messages, max_tokens=300) + at = AssistantTurn(response1) + + print("\n🤖 Turn 1 - Assistant:") + print(f" {at.get_text()[:200]}...") + + history.add_assistant_turn(at) + + # Turn 2: Follow-up question (no new images, but context from previous turn) + user_turn2 = UserTurn().add_text("Which of these flowers would be better for a romantic gift and why?") + history.add_user_turn(user_turn2) + + messages = history.to_litellm_format() + + print("\n📷 Turn 2 - User:") + print(" Which of these flowers would be better for a romantic gift and why?") + + response2 = llm(messages=messages, max_tokens=300) + response2_content = response2.choices[0].message.content + + print("\n🤖 Turn 2 - Assistant:") + print(f" {response2_content[:200]}...") + + # Verify responses + assert at.get_text() is not None and len(at.get_text()) > 20 + assert response2_content is not None and len(response2_content) > 20 + + # Turn 2 should reference the context from turn 1 + response2_lower = response2_content.lower() + assert any(word in response2_lower for word in ["flower", "rose", "romantic", "gift", "love"]), \ + "Turn 2 response doesn't seem to reference the flower context" + + print("\n✅ Multi-turn conversation with images completed successfully!") + +@pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No GEMINI_API_KEY found") +def test_real_google_genai_multi_turn_with_images_updated(): + """Test multi-turn conversation with images using Google Gemini image generation model""" + from opto.utils.llm import LLM + + print("\n" + "="*80) + print("Testing Multi-turn Conversation with Gemini Image Generation") + print("="*80) + + # Initialize conversation history + history = ConversationHistory() + history.system_prompt = "You are a helpful assistant that can generate and discuss images." 
+ + # Use a Gemini model that supports image generation + model = "gemini-2.5-flash-image" + llm = LLM(model=model) + + print("="*80) + + # Turn 1: Ask to generate an image + user_turn1 = UserTurn().add_text("Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.") + + history.add_user_turn(user_turn1) + messages = history.to_litellm_format() + + print("\n📷 Turn 1 - User:") + print(" Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.") + + response1 = llm(messages=messages, max_tokens=300) + at = AssistantTurn(response1) + + print("\n🤖 Turn 1 - Assistant:") + print(f" {at.get_text()[:200] if at.get_text() else '[Image generated]'}...") + + history.add_assistant_turn(at) + + # Turn 2: Follow-up question about the generated image + user_turn2 = UserTurn().add_text("Can you describe the colors and mood of the image you just generated?") + history.add_user_turn(user_turn2) + + messages = history.to_litellm_format() + + print("\n📷 Turn 2 - User:") + print(" Can you describe the colors and mood of the image you just generated?") + + response2 = llm(messages=messages, max_tokens=300) + at2 = AssistantTurn(response2) + response2_content = at2.get_text() + + print("\n🤖 Turn 2 - Assistant:") + print(f" {response2_content[:200]}...") + + # Verify responses + assert at.content is not None and len(at.content) > 0 + assert response2_content is not None and len(response2_content) > 20 + + # Turn 2 should reference the context from turn 1 + response2_lower = response2_content.lower() + assert any(word in response2_lower for word in ["mountain", "sunrise", "lake", "color", "mood", "landscape"]), \ + "Turn 2 response doesn't seem to reference the image generation context" + + print("\n✅ Multi-turn conversation with Gemini image generation completed successfully!") + # ==== Testing the Automatic Raw Response Parsing into AssistantTurn === @pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) def test_automatic_openai_raw_response_parsing_into_assistant_turn(): @@ -522,7 +648,7 @@ def test_automatic_openai_multimodal_raw_response_parsing_into_assistant_turn(): print(assistant_turn) -@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +@pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No GEMINI_API_KEY found") def test_automatic_google_generate_content_raw_response_parsing_into_assistant_turn(): from google import genai from google.genai import types @@ -563,4 +689,9 @@ def test_automatic_google_generate_content_raw_response_parsing_into_assistant_t print(f"Image saved: generated_{response.output[0].id}.png") + from google import genai + + client = genai.Client() + chat = client.chats.create(model="gemini-2.5-flash") + From c171201d2d78df095c176f5c1d772792b82f4657 Mon Sep 17 00:00:00 2001 From: windweller Date: Sat, 27 Dec 2025 02:03:17 -0500 Subject: [PATCH 46/51] amend refactor and moved gemini input message history conversion to `ConversationHistory` --- opto/utils/backbone.py | 153 +++++++++++++++++++- opto/utils/llm.py | 97 +++++-------- tests/unit_tests/test_optimizer_backbone.py | 4 +- 3 files changed, 186 insertions(+), 68 deletions(-) diff --git a/opto/utils/backbone.py b/opto/utils/backbone.py index 20ec0381..52e2aca8 100644 --- a/opto/utils/backbone.py +++ b/opto/utils/backbone.py @@ -1948,7 +1948,19 @@ def to_messages( protected_rounds: Optional[int] = None ) -> List[Dict[str, Any]]: """ - Alias for to_litellm_format() for convenience + Smart message format conversion that auto-detects the 
appropriate format. + + This method automatically chooses between Gemini format and LiteLLM format based on + the model name found in the most recent AssistantTurn. If a Gemini model is detected, + it uses to_gemini_format(), otherwise it uses to_litellm_format(). + + Model detection: + - If any AssistantTurn has a model name containing "gemini" (case-insensitive), + uses Gemini format + - Otherwise, uses LiteLLM format (default) + + Note: This detection may not work for custom LLM backends with Gemini model names. + In such cases, call to_gemini_format() or to_litellm_format() explicitly. Args: n: Number of historical rounds (user+assistant pairs) to include. @@ -1961,9 +1973,144 @@ def to_messages( If None, uses self.protected_rounds. Counts towards n. Returns: - List of message dictionaries in LiteLLM format + List of message dictionaries in the appropriate format + + Example: + # Automatically uses Gemini format if model is Gemini + history = ConversationHistory() + history.system_prompt = "You are helpful." + history.add_user_turn(UserTurn().add_text("Hello")) + + # If you used a Gemini model, this will auto-detect and use Gemini format + messages = history.to_messages() + + # Or be explicit: + messages = history.to_gemini_format() # Force Gemini format + messages = history.to_litellm_format() # Force LiteLLM format + """ + # Check if any AssistantTurn has a Gemini model + use_gemini_format = False + for turn in self.turns: + if isinstance(turn, AssistantTurn) and turn.model: + if 'gemini' in turn.model.lower(): + use_gemini_format = True + break + + # Use the appropriate format + if use_gemini_format: + return self.to_gemini_format( + n=n, + truncate_strategy=truncate_strategy, + protected_rounds=protected_rounds + ) + else: + return self.to_litellm_format( + n=n, + truncate_strategy=truncate_strategy, + protected_rounds=protected_rounds + ) + + def to_gemini_format( + self, + n: int = -1, + truncate_strategy: Literal["from_start", "from_end"] = "from_start", + protected_rounds: Optional[int] = None + ) -> List[Dict[str, Any]]: + """ + Convert to Google Gemini format (messages with 'model' role instead of 'assistant') + + This method converts the conversation history to a format compatible with Google's + Gemini API. The main differences from LiteLLM format are: + - Uses 'model' instead of 'assistant' for role names + - Content is structured as 'parts' (list of text/image parts) + - System message (if present) remains as first message with role='system' + + The GoogleGenAILLM class will extract the system message and convert it to + system_instruction when making the API call. + + Args: + n: Number of historical rounds (user+assistant pairs) to include. + -1 means all history (default: -1). + The current (potentially incomplete) round is always included. + truncate_strategy: How to truncate when n is specified: + - "from_start": Remove oldest rounds, keep the most recent n rounds (default) + - "from_end": Remove newest rounds, keep the oldest n rounds + protected_rounds: Number of initial rounds to never truncate (task definition). + If None, uses self.protected_rounds. These rounds count towards n. + + Returns: + List of message dictionaries in Gemini format with 'role' and 'parts'. + System message (if present) is included as first message with role='system'. + + Example: + from opto.utils.llm import LLM + from opto.utils.backbone import ConversationHistory, UserTurn + + # Create conversation + history = ConversationHistory() + history.system_prompt = "You are a helpful assistant." 
+ history.add_user_turn(UserTurn().add_text("Hello!")) + + # Convert to Gemini format + messages = history.to_gemini_format() + + # Use with GoogleGenAILLM + llm = LLM(model="gemini-2.5-flash") + response = llm(messages=messages) """ - return self.to_litellm_format(n=n, truncate_strategy=truncate_strategy, protected_rounds=protected_rounds) + # Get the LiteLLM format messages first (handles truncation logic) + litellm_messages = self.to_litellm_format( + n=n, + truncate_strategy=truncate_strategy, + protected_rounds=protected_rounds + ) + + # Convert messages to Google GenAI format + gemini_messages = [] + + for msg in litellm_messages: + role = msg.get('role') + content = msg.get('content') + + # Keep system messages as-is (will be extracted by GoogleGenAILLM) + if role == 'system': + gemini_messages.append({'role': 'system', 'content': content}) + continue + + # Map roles: user -> user, assistant -> model + if role == 'assistant': + role = 'model' + elif role == 'tool': + # Skip tool messages for now - Gemini handles these differently + # TODO: Handle tool results properly if needed + continue + + # Handle content (can be string or list of content blocks) + if isinstance(content, str): + gemini_messages.append({'role': role, 'parts': [{'text': content}]}) + elif isinstance(content, list): + # Convert content blocks to parts + parts = [] + for block in content: + if block.get('type') == 'text': + parts.append({'text': block.get('text', '')}) + elif block.get('type') == 'image_url': + # Handle image URLs + image_url = block.get('image_url', {}).get('url', '') + if image_url.startswith('data:'): + # Extract base64 data + import re + match = re.match(r'data:([^;]+);base64,(.+)', image_url) + if match: + mime_type, data = match.groups() + parts.append({'inline_data': {'mime_type': mime_type, 'data': data}}) + else: + # External URL + parts.append({'file_data': {'file_uri': image_url}}) + if parts: + gemini_messages.append({'role': role, 'parts': parts}) + + return gemini_messages def save_to_file(self, filepath: str): """Save conversation history to JSON file""" diff --git a/opto/utils/llm.py b/opto/utils/llm.py index 1ea88dc6..9409505d 100644 --- a/opto/utils/llm.py +++ b/opto/utils/llm.py @@ -458,15 +458,28 @@ class GoogleGenAILLM(AbstractModel): This class supports storing default generation parameters (like temperature, max_output_tokens, etc.) that will be used for all calls unless overridden. - Note system_instruction is supported. + Note: Use ConversationHistory.to_gemini_format() to convert conversation history + to the format expected by Google GenAI. + Example: - llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True) - response = llm( - messages=[ - {"role": "user", "content": "Hello!"} - ], - system_instruction="You are a helpful assistant." - ) + from opto.utils.llm import LLM + from opto.utils.backbone import ConversationHistory, UserTurn, AssistantTurn + + # Initialize LLM + llm = LLM(model="gemini-2.5-flash") + + # Create conversation history + history = ConversationHistory() + history.system_prompt = "You are a helpful assistant." 
+ history.add_user_turn(UserTurn().add_text("What is AI?")) + + # Convert to Gemini format and call LLM + messages = history.to_gemini_format() + response = llm(messages=messages, max_tokens=100) + + # Parse response + at = AssistantTurn(response) + print(at.get_text()) """ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, @@ -505,64 +518,22 @@ def api_func(model_name, *args, **kwargs): # Extract system_instruction if present (needs to be at config level, not in kwargs) system_instruction = kwargs.pop('system_instruction', None) - # Handle messages parameter for automatic system instruction extraction + # Handle messages parameter (from history.to_gemini_format()) messages = kwargs.pop('messages', None) + contents = kwargs.pop('contents', None) + if messages: - # If system_instruction is explicitly passed, drop any system messages - if system_instruction is not None: - # Filter out system messages - filtered_messages = [msg for msg in messages if msg.get('role') != 'system'] - else: - # If system_instruction not passed, check if first message is system - if messages and messages[0].get('role') == 'system': + # Extract system message if present and not explicitly overridden + if messages and messages[0].get('role') == 'system': + if system_instruction is None: system_instruction = messages[0].get('content') - # Remove the system message from messages - filtered_messages = messages[1:] - else: - filtered_messages = messages - - # Convert messages to Google GenAI contents format - # Google GenAI expects contents as a list of content items - contents = [] - for msg in filtered_messages: - role = msg.get('role') - content = msg.get('content') - - # Map roles: user -> user, assistant -> model - if role == 'assistant': - role = 'model' - - # Handle content (can be string or list of content blocks) - if isinstance(content, str): - contents.append({'role': role, 'parts': [{'text': content}]}) - elif isinstance(content, list): - # Convert content blocks to parts - parts = [] - for block in content: - if block.get('type') == 'text': - parts.append({'text': block.get('text', '')}) - elif block.get('type') == 'image_url': - # Handle image URLs - image_url = block.get('image_url', {}).get('url', '') - if image_url.startswith('data:'): - # Extract base64 data - import re - match = re.match(r'data:([^;]+);base64,(.+)', image_url) - if match: - mime_type, data = match.groups() - parts.append({'inline_data': {'mime_type': mime_type, 'data': data}}) - else: - # External URL - parts.append({'file_data': {'file_uri': image_url}}) - if parts: - contents.append({'role': role, 'parts': parts}) - - # Use converted contents instead of args - # Don't wrap in tuple since we're passing as keyword argument - contents_to_use = contents if contents else args[0] if args else None - else: - # No messages parameter, use args as-is - contents_to_use = args[0] if args else None + # Remove system message from contents + contents = messages[1:] + else: + contents = messages + + # Use contents if provided, otherwise use positional args + contents_to_use = contents if contents is not None else (args[0] if args else None) # Map max_tokens to max_output_tokens for Google GenAI if 'max_tokens' in kwargs: diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index bf63101a..193add49 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -576,7 +576,7 @@ def 
test_real_google_genai_multi_turn_with_images_updated(): user_turn1 = UserTurn().add_text("Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.") history.add_user_turn(user_turn1) - messages = history.to_litellm_format() + messages = history.to_gemini_format() print("\n📷 Turn 1 - User:") print(" Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.") @@ -593,7 +593,7 @@ def test_real_google_genai_multi_turn_with_images_updated(): user_turn2 = UserTurn().add_text("Can you describe the colors and mood of the image you just generated?") history.add_user_turn(user_turn2) - messages = history.to_litellm_format() + messages = history.to_gemini_format() print("\n📷 Turn 2 - User:") print(" Can you describe the colors and mood of the image you just generated?") From d9231c182a9c0bf5e01f593a4e9d1839b31f8633 Mon Sep 17 00:00:00 2001 From: windweller Date: Thu, 8 Jan 2026 22:45:45 -0500 Subject: [PATCH 47/51] fixed a few bugs, multi-turn multi-modal optimizer is runnable end-to-end now. --- opto/optimizers/optoprime_v3.py | 189 +++++--- opto/trace/nodes.py | 10 + opto/utils/backbone.py | 444 ++++++++++++++---- opto/utils/llm.py | 209 +++++++-- .../test_optimizer_backbone.py | 7 - .../llm_optimizers_tests/test_optoprime_v3.py | 6 +- tests/unit_tests/test_llm.py | 10 +- tests/unit_tests/test_optimizer_backbone.py | 8 +- 8 files changed, 683 insertions(+), 200 deletions(-) delete mode 100644 tests/llm_optimizers_tests/test_optimizer_backbone.py diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index 52376da4..900b1f6c 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -4,6 +4,7 @@ 2. Support multimodal node (both trainable and non-trainable) """ +import re import json from typing import List, Union, Tuple, Optional from dataclasses import dataclass @@ -76,6 +77,9 @@ class OptimizerPromptSymbolSet: improved_variable_tag = "variable" name_tag = "name" + # only used by JSON format + suggestion_tag = "suggestion" + expect_json = False # this will stop `enforce_json` arguments passed to LLM calls # custom output format @@ -168,7 +172,9 @@ def default_prompt_symbols(self) -> Dict[str, str]: "instruction": self.instruction_section_title, "code": self.code_section_title, "documentation": self.documentation_section_title, - "context": self.context_section_title + "context": self.context_section_title, + "reasoning": self.reasoning_tag, + "suggestion": self.suggestion_tag } @@ -178,13 +184,13 @@ class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet): expect_json = True custom_output_format_instruction = dedent(""" - {{ + { "reasoning": , - "suggestion": {{ + "suggestion": { : , : , - }} - }} + } + } """) def example_output(self, reasoning, variables): @@ -205,8 +211,62 @@ def output_response_extractor(self, response: str) -> Dict[str, Any]: Extracts reasoning and suggestion variables from the LLM response using OptoPrime's extraction logic. 
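        For example (illustrative values; the variable name "x" and the
        symbol-set instance are arbitrary):

            symbol_set = OptimizerPromptSymbolSetJSON()
            raw = '{"reasoning": "b must increase", "suggestion": {"x": "5"}}'
            out = symbol_set.output_response_extractor(raw)
            # -> {"reasoning": "b must increase", "variables": {"x": "5"}}

        Note the rename: the model's "suggestion" object comes back under the
        "variables" key, which is what construct_update_dict consumes downstream.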
""" # Use the centralized extraction logic from OptoPrime - optoprime_instance = OptoPrime() - return optoprime_instance.extract_llm_suggestion(response) + suggestion_tag = self.default_prompt_symbols.get("suggestion", "suggestion") + reasoning_tag = self.default_prompt_symbols.get("reasoning", "reasoning") + + ignore_extraction_error = True + + reasoning = "(Unable to extract, possibly due to parsing failure)" + + if "```" in response: + match = re.findall(r"```(.*?)```", response, re.DOTALL) + if len(match) > 0: + response = match[0] + + json_extracted = {} + suggestion = {} + attempt_n = 0 + while attempt_n < 2: + try: + json_extracted = json.loads(response) + if isinstance(json_extracted, dict): # trim all whitespace keys in the json_extracted + json_extracted = {k.strip(): v for k, v in json_extracted.items()} + suggestion = json_extracted.get(suggestion_tag, json_extracted) + reasoning = json_extracted.get(reasoning_tag, "") + break + except json.JSONDecodeError: + response = re.findall(r"{.*}", response, re.DOTALL) + if len(response) > 0: + response = response[0] + attempt_n += 1 + except Exception: + attempt_n += 1 + + if not isinstance(suggestion, dict): + suggestion = json_extracted if isinstance(json_extracted, dict) else {} + + if len(suggestion) == 0: + pattern = rf'"{suggestion_tag}"\s*:\s*\{{(.*?)\}}' + suggestion_match = re.search(pattern, str(response), re.DOTALL) + if suggestion_match: + suggestion = {} + suggestion_content = suggestion_match.group(1) + pair_pattern = r'"([a-zA-Z0-9_]+)"\s*:\s*"(.*)"' + pairs = re.findall(pair_pattern, suggestion_content, re.DOTALL) + for key, value in pairs: + suggestion[key] = value + + if len(suggestion) == 0 and not ignore_extraction_error: + print(f"Cannot extract {suggestion_tag} from LLM's response:\n{response}") + + keys_to_remove = [] + for key, value in suggestion.items(): + if "__code" in key and value.strip() == "": + keys_to_remove.append(key) + for key in keys_to_remove: + del suggestion[key] + + return {"reasoning": reasoning, "variables": suggestion} class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet): @@ -290,6 +350,7 @@ class ProblemInstance: others: ContentBlockList outputs: ContentBlockList feedback: ContentBlockList # May contain images mixed with text + context: Optional[ContentBlockList] optimizer_prompt_symbol_set: OptimizerPromptSymbolSet @@ -382,14 +443,15 @@ def to_content_blocks(self) -> ContentBlockList: blocks.append("\n\n# Outputs\n") blocks.extend(self.outputs) + # Context section (optional) + if self.context is not None and self.context.to_text().strip() != "": + blocks.append(f"\n\n# Context\n") # section name + blocks.extend(self.context) # extend the blocks + # Feedback section (may contain images) blocks.append("\n\n# Feedback\n") blocks.extend(self.feedback) - # Context section (optional) - if self.context is not None and self.context.strip() != "": - blocks.append(f"\n\n# Context\n{self.context}") - return blocks def has_images(self) -> bool: @@ -566,11 +628,6 @@ def _build_from_template( ) self.append(image_content) - def ensure(self, *args, **kwargs) -> 'Content': - """Ensure the value is a Content object.""" - if len(args) == 1 and isinstance(args[0], Content): - return args[0] - return Content(args, **kwargs) # we provide two aliases for the Content class for semantic convenience Context = Content @@ -590,6 +647,7 @@ class OptoPrimeV3(OptoPrime): - {others_section_title}: the intermediate values created through the code execution. - {outputs_section_title}: the result of the code output. 
- {feedback_section_title}: the feedback about the code's execution result. + - {context_section_title}: the context information that might be useful to solve the problem. In `{variables_section_title}`, `{inputs_section_title}`, `{outputs_section_title}`, and `{others_section_title}`, the format is: @@ -604,11 +662,9 @@ class OptoPrimeV3(OptoPrime): output_format_prompt_template = dedent( """ - Output_format: Your output should be in the following XML/HTML format: + Output_format: Your output should be in the following XML or JSON format: - ``` {output_format} - ``` In <{reasoning_tag}>, explain the problem: 1. what the {instruction_section_title} means 2. what the {feedback_section_title} on {outputs_section_title} means to {variables_section_title} considering how {variables_section_title} are used in {code_section_title} and other values in {documentation_section_title}, {inputs_section_title}, {others_section_title}. 3. Reasoning about the suggested changes in {variables_section_title} (if needed) and the expected result. @@ -631,16 +687,6 @@ class OptoPrimeV3(OptoPrime): """ )) - user_prompt_context_template = PromptTemplate(dedent( - """ - Now you see a new problem instance. Here is some context for this problem: - - ================================ - {context} - ================================ - """ - )) - user_prompt_template = PromptTemplate(dedent( """ Now you see problem instance: @@ -665,6 +711,7 @@ def __init__( parameters: List[ParameterNode], llm: AbstractModel = None, *args, + image_llm: AbstractModel = None, propagator: Propagator = None, objective: Union[None, str] = None, ignore_extraction_error: bool = True, @@ -677,17 +724,22 @@ def __init__( optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(), use_json_object_format=True, # whether to use json object format for the response when calling LLM truncate_expression=truncate_expression, - problem_context: Optional[Content] = None, + problem_context: Optional[ContentBlockList] = None, **kwargs, ): super().__init__(parameters, *args, propagator=propagator, **kwargs) self.truncate_expression = truncate_expression - self.problem_context: Optional[Content] = problem_context + self.problem_context: Optional[ContentBlockList] = problem_context + self.output_contains_image = False self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False self.ignore_extraction_error = ignore_extraction_error - self.llm = llm or LLM() + self.llm = llm or LLM(mm_beta=True) + self.image_llm = image_llm + + assert self.llm.mm_beta, "OptoPrimeV3 enables multi-modal LLM backbone by default. Please use LLM(model='...', mm_beta=True)." + self.objective = objective or self.default_objective.format(value_tag=optimizer_prompt_symbol_set.value_tag, variables_section_title=optimizer_prompt_symbol_set.variables_section_title, feedback_section_title=optimizer_prompt_symbol_set.feedback_section_title) @@ -752,6 +804,8 @@ def parameter_check(self, parameters: List[ParameterNode]): f"OptoPrimeV3 supports at most one image parameter, but found {len(image_params)}: " f"{param_names}. LLMs can only generate one image at a time." ) + if len(image_params) == 1: + self.output_contains_image = True def add_context(self, *args, images: Optional[List[Any]] = None, format: str = "PNG"): """Add context to the optimizer, supporting both text and images. 
@@ -830,6 +884,7 @@ def initialize_instruct_prompt(self): code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""), documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""), others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""), + context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "") ) self.output_format_prompt = self.output_format_prompt_template.format( output_format=self.optimizer_prompt_symbol_set.output_format, @@ -1090,12 +1145,8 @@ def construct_prompt(self, summary, mask=None, *args, **kwargs): ) user_content_blocks.append(example_text) - # Add context here - user_content_blocks.append(self.user_prompt_context_template.format( - user_prompt_context=self.problem_context, - )) - # Add problem instance template + # context is part of the problem instance user_content_blocks.append(self.user_prompt_template.format( problem_instance=problem_inst.to_content_blocks(), )) @@ -1131,6 +1182,13 @@ def problem_instance(self, summary: FunctionFeedback, mask=None): if self.optimizer_prompt_symbol_set.variables_section_title not in mask else ContentBlockList() ) + + # we add a temporary check here to ensure no more than 1 parameter is an image + variable_stats = variables_content.count_blocks() + if 'ImageContent' in variable_stats: + assert variable_stats['ImageContent'] <= 1, "Currently we do not support generating multiple images (more than 1 parameter is an image)" + self.output_contains_image = True + inputs_content = ( self.repr_node_value_compact_as_content_blocks( summary.inputs, @@ -1178,8 +1236,8 @@ def problem_instance(self, summary: FunctionFeedback, mask=None): inputs=inputs_content, outputs=outputs_content, others=others_content, - feedback=Content.ensure( - summary.user_feedback) if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else Content(""), + feedback=Content(summary.user_feedback) if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else Content(""), + context=self.problem_context, optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set ) @@ -1198,22 +1256,30 @@ def _step( assert isinstance(self.propagator, GraphPropagator) summary = self.summarize() - system_prompt, user_prompt = self.construct_prompt(summary, mask=mask) + system_prompt, user_content_blocks = self.construct_prompt(summary, mask=mask) response = self.call_llm( system_prompt=system_prompt, - user_prompt=user_prompt, + user_prompt=user_content_blocks, verbose=verbose, max_tokens=self.max_tokens, ) - if "TERMINATE" in response: + if "TERMINATE" in response.to_text(): return {} - suggestion = self.extract_llm_suggestion(response) + suggestion = self.extract_llm_suggestion(response.to_text()) update_dict = self.construct_update_dict(suggestion['variables']) # suggestion has two keys: reasoning, and variables + # for update_dict, we manually update the image according to the variable name + if response.get_images().has_images(): + images = response.get_images() + assert len(images) == 1, "Currently we only allow at most one image parameter" + # find the variable name + image_param = [param for param in self.parameters if param.is_image][0] + update_dict[image_param] = images[0].as_image() # parameter as PIL Image + if self.log is not None: # For logging, use text representation log_user_prompt = str(self.problem_instance(summary)) @@ -1248,7 +1314,7 @@ def call_llm( user_prompt: ContentBlockList, verbose: 
Union[bool, str] = False, max_tokens: int = 4096, - ): + ) -> AssistantTurn: """Call the LLM with a prompt and return the response. Args: @@ -1258,7 +1324,7 @@ def call_llm( max_tokens: Maximum tokens in the response. Returns: - The LLM response content as a string. + assistant_turn: AssistantTurn object """ if verbose not in (False, "output"): # Print text portions, indicate if images present @@ -1271,16 +1337,7 @@ def call_llm( self.conversation_history.system_prompt = system_prompt # Create user turn with content - user_turn = UserTurn() - - # Add content blocks from user_prompt - for block in user_prompt: - if isinstance(block, TextContent): - user_turn.content.append(block) - elif isinstance(block, ImageContent): - user_turn.content.append(block) - # Handle other content types if needed - + user_turn = UserTurn(user_prompt) self.conversation_history.add_user_turn(user_turn) # Get messages with conversation length control (truncate from start) @@ -1288,23 +1345,27 @@ def call_llm( # The current user turn is automatically included by to_messages() messages = self.conversation_history.to_messages( n=self.conversation_length if self.conversation_length > 0 else -1, - truncate_strategy="from_start" + truncate_strategy="from_start", + model_name=self.llm.model_name ) response_format = {"type": "json_object"} if self.use_json_object_format else None - response = self.llm(messages=messages, max_tokens=max_tokens, response_format=response_format) + # Prepare common arguments + llm_kwargs = {"messages": messages, "max_tokens": max_tokens, "response_format": response_format} + + # Add image generation tool only for non-Gemini models when output contains image + if self.output_contains_image and 'gemini' not in self.llm.model_name: + llm_kwargs["tools"] = [{"type": "image_generation"}] + + assistant_turn = self.llm(**llm_kwargs) - response_content = response.choices[0].message.content + if verbose: + print("LLM response:\n", assistant_turn) - # Store assistant response in conversation history - assistant_turn = AssistantTurn() - assistant_turn.add_text(response_content) self.conversation_history.add_assistant_turn(assistant_turn) - if verbose: - print("LLM response:\n", response_content) - return response_content + return assistant_turn def save(self, path: str): """Save the optimizer state to a file.""" diff --git a/opto/trace/nodes.py b/opto/trace/nodes.py index 696c5339..0e721706 100644 --- a/opto/trace/nodes.py +++ b/opto/trace/nodes.py @@ -391,6 +391,15 @@ def is_image(data) -> bool: except (ValueError, AttributeError): pass + # Check if it's a specialized container class + # We don't use isinstance check because we can't import other files into nodes.py, this file should have no + # external dependencies on other files. + try: + if 'ImageContent' in data.__class__.__name__: + return True + except AttributeError: + pass + return False class AbstractNode(Generic[T]): @@ -484,6 +493,7 @@ def is_image(self) -> bool: 2. PIL Image object 3. Raw image bytes 4. URL string pointing to an image (pattern-based check, no network request) + 5. An ImageContent (customized data container) For URLs, this performs a fast pattern-based check only. For verification with a network request, use verify_image_url() method. diff --git a/opto/utils/backbone.py b/opto/utils/backbone.py index 52e2aca8..3143f60c 100644 --- a/opto/utils/backbone.py +++ b/opto/utils/backbone.py @@ -20,16 +20,19 @@ Alternatively, people can call `.build()` to construct the class. 
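    A minimal usage sketch of the content-block primitives defined below
    (illustrative only; it assumes the ContentBlockList, TextContent, and
    ImageContent classes in this module, and a hypothetical image URL):

        blocks = ContentBlockList("Describe this image:")   # str is auto-wrapped in TextContent
        blocks.append("Be concise.")                        # merged into the previous text block
        blocks.append(ImageContent(image_url="https://example.com/cat.png"))

        blocks.to_text()                        # images render as the [IMAGE] placeholder
        blocks.to_litellm_format(role="user")   # Response API content blocks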
""" -from typing import List, Dict, Any, Optional, Literal, Union, Iterable, Tuple +from typing import List, Dict, Any, Optional, Literal, Union, Iterable, Tuple, TypeVar, Generic from dataclasses import dataclass, field import json import base64 from pathlib import Path import warnings +from PIL import Image +import io + # Default placeholder for images that cannot be rendered as text -DEFAULT_IMAGE_PLACEHOLDER = "[IMAGE]" +DEFAULT_IMAGE_PLACEHOLDER = "\n[IMAGE]\n" @dataclass class ContentBlock: @@ -77,6 +80,10 @@ class ContentBlockList(list): - ImageContent -> [ImageContent] - List[ContentBlock] -> ContentBlockList - None/empty -> [] + + Note: This list can contain mixed types of ContentBlocks (text, images, PDFs, etc.). + Type annotations like ContentBlockList[TextContent] are used for documentation + purposes in specialized methods but don't restrict the actual content. """ def __init__(self, content: Union[str, 'ContentBlock', List['ContentBlock'], None] = None): @@ -115,11 +122,27 @@ def ensure(cls, content: Union[str, 'ContentBlock', List['ContentBlock'], None]) if isinstance(content, cls): return content return cls(content) + + def __getitem__(self, key: Union[int, slice]) -> Union['ContentBlock', 'ContentBlockList']: + """Support indexing and slicing. + + Args: + key: Integer index or slice object + + Returns: + ContentBlock for single index, ContentBlockList for slices + """ + if isinstance(key, slice): + # Return a new ContentBlockList with the sliced items + return ContentBlockList(list.__getitem__(self, key)) + else: + # Return the single item for integer index + return list.__getitem__(self, key) def to_dict(self) -> Dict[str, Any]: return {"type": "list", "blocks": [b.to_dict() for b in self]} - def append(self, item: Union[str, 'ContentBlock']) -> 'ContentBlockList': + def append(self, item: Union[str, 'ContentBlock', 'ContentBlockList']) -> 'ContentBlockList': """Append a string or ContentBlock, merging consecutive text. Args: @@ -139,6 +162,9 @@ def append(self, item: Union[str, 'ContentBlock']) -> 'ContentBlockList': self[-1] = TextContent(text=self[-1].text + " " + item.text) else: super().append(item) + elif isinstance(item, ContentBlockList): + # we silently call extend here + super().extend(item) else: # Other ContentBlock types (ImageContent, etc.): just add super().append(item) @@ -193,23 +219,22 @@ def is_empty(self) -> bool: def has_images(self) -> bool: """Check if the content block list contains any images.""" return any(isinstance(block, ImageContent) for block in self) - + def has_text(self) -> bool: """Check if the content block list contains any text.""" return any(isinstance(block, TextContent) for block in self) # --- Multimodal utilities --- - @staticmethod def blocks_to_text(blocks: Iterable['ContentBlock'], image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: """Convert any iterable of ContentBlocks to text representation. This is a utility that can be used by composite classes containing - multiple ContentBlockLists. + multiple ContentBlockLists. Handles nested ContentBlockLists recursively. 
Args: - blocks: Iterable of ContentBlock objects + blocks: Iterable of ContentBlock objects (may include nested ContentBlockLists) image_placeholder: Placeholder string for images (default: "[IMAGE]") Returns: @@ -221,8 +246,13 @@ def blocks_to_text(blocks: Iterable['ContentBlock'], text_parts.append(block.text) elif isinstance(block, ImageContent): text_parts.append(image_placeholder) + elif isinstance(block, ContentBlockList): + # Recursively handle nested ContentBlockList + nested_text = ContentBlockList.blocks_to_text(block, image_placeholder) + if nested_text: + text_parts.append(nested_text) return " ".join(text_parts) - + def to_text(self, image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: """Convert this list to text representation. @@ -234,14 +264,6 @@ def to_text(self, image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str: """ return self.blocks_to_text(self, image_placeholder) - def has_images(self) -> bool: - """Check if any image content exists in this list. - - Returns: - bool: True if any ImageContent block is present. - """ - return any(isinstance(block, ImageContent) for block in self) - def __bool__(self) -> bool: """Check if there's any actual content (not just empty text). @@ -276,6 +298,96 @@ def to_content_blocks(self) -> 'ContentBlockList': ContentBlockList: Self reference. """ return self + + def count_blocks(self) -> Dict[str, int]: + """Count blocks by type, including nested structures. + + Recursively traverses the content block structure and counts + each block type by its class name. + + Returns: + Dict[str, int]: Dictionary mapping block class names to counts. + Example: {"TextContent": 3, "ImageContent": 1} + """ + counts: Dict[str, int] = {} + + def _count_recursive(item: Any) -> None: + """Recursively count blocks in nested structures.""" + if isinstance(item, ContentBlock): + # Count this block + class_name = item.__class__.__name__ + counts[class_name] = counts.get(class_name, 0) + 1 + + # Check if this block has any attributes that might contain nested blocks + if hasattr(item, '__dict__'): + for attr_value in item.__dict__.values(): + if isinstance(attr_value, (ContentBlockList, list)): + for nested_item in attr_value: + _count_recursive(nested_item) + elif isinstance(attr_value, ContentBlock): + _count_recursive(attr_value) + elif isinstance(item, (ContentBlockList, list)): + # Recursively count items in lists + for nested_item in item: + _count_recursive(nested_item) + + # Count all blocks in this list + for block in self: + _count_recursive(block) + + return counts + + def to_litellm_format(self, role: Optional[str] = None) -> List[Dict[str, Any]]: + """Convert content blocks to LiteLLM Response API format. + + Args: + role: Optional role context ("user" or "assistant") to determine the correct type. + If not provided, defaults to "user" for backward compatibility. 
+ + Returns: + List[Dict[str, Any]]: List of content block dictionaries in Response API format + """ + if role is None: + role = "user" + + content = [] + for block in self: + # Skip empty content blocks + if block.is_empty(): + continue + + # Handle different content block types + if isinstance(block, TextContent): + # Pass role context to TextContent for proper type selection + content.append(block.to_litellm_format(role=role)) + elif isinstance(block, ImageContent): + # ImageContent always uses input_image for user messages + content.append(block.to_litellm_format()) + elif isinstance(block, PDFContent): + # LiteLLM supports PDFs for providers like Claude + # Use input_file type with PDF data URL for Response API + if block.pdf_url: + warnings.warn("PDF URLs may not be supported by all providers through LiteLLM") + content.append({"type": "input_text", "text": f"[PDF: {block.pdf_url}]"}) + else: + # Encode as data URL for providers that support PDFs + data_url = f"data:application/pdf;base64,{block.pdf_data}" + content.append({"type": "input_file", "input_file": {"url": data_url}}) + elif isinstance(block, FileContent): + # For file content, add as text or data URL based on type + if block.is_binary: + data_url = f"data:{block.mime_type};base64,{block.file_data}" + content.append({"type": "input_file", "input_file": {"url": data_url}}) + else: + content.append({"type": "input_text", "text": f"[File: {block.filename}]\n{block.file_data}"}) + elif hasattr(block, 'to_litellm_format'): + # Fallback: use block's own to_litellm_format method + content.append(block.to_litellm_format()) + else: + # Last resort: use to_dict() + content.append(block.to_dict()) + + return content class PromptTemplate: @@ -495,8 +607,22 @@ def build(cls, value: Any = "", **kwargs) -> 'TextContent': return cls(text=str(value)) def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for serialization.""" return {"type": self.type, "text": self.text} + def to_litellm_format(self, role: str = "user") -> Dict[str, Any]: + """Convert to LiteLLM/OpenAI Response API compatible format. + + Args: + role: The role context ("user" or "assistant") to determine the correct type + + Returns dict in format: + - {"type": "input_text", "text": "..."} for user messages + - {"type": "output_text", "text": "..."} for assistant messages + """ + text_type = "input_text" if role == "user" else "output_text" + return {"type": text_type, "text": self.text} + def __add__(self, other) -> 'TextContent': """Concatenate text content with strings or other TextContent objects. @@ -597,6 +723,10 @@ def is_empty(self) -> bool: return not self.image_url and not self.image_data and not self.image_bytes def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for serialization (not LiteLLM format). + + For LiteLLM format, use to_litellm_format() instead. + """ result = { "type": self.type, "media_type": self.media_type @@ -607,6 +737,41 @@ def to_dict(self) -> Dict[str, Any]: result["image_data"] = self.image_data if self.image_bytes: result["image_bytes"] = self.image_bytes + if self.detail: + result["detail"] = self.detail + return result + + def to_litellm_format(self) -> Dict[str, Any]: + """Convert to LiteLLM Response API compatible format. 
+ + Returns dict in format: + {"type": "input_image", "image_url": {"url": "..."}} + """ + # Determine the URL to use + if self.image_url: + url = self.image_url + elif self.image_data: + # Convert base64 data to data URL + url = f"data:{self.media_type};base64,{self.image_data}" + elif self.image_bytes: + # Convert bytes to base64 and then to data URL + import base64 + b64_data = base64.b64encode(self.image_bytes).decode('utf-8') + url = f"data:{self.media_type};base64,{b64_data}" + else: + # Empty image + return {"type": "input_image", "image_url": ""} + + # Build the result in Response API format + result = { + "type": "input_image", + "image_url": url + } + + # Add detail if specified (OpenAI-specific) + if self.detail: + result["detail"] = self.detail + return result @classmethod @@ -923,6 +1088,46 @@ def set_image(self, image: Any, format: str = "PNG") -> None: if result.image_bytes: self.image_bytes = result.image_bytes self.media_type = result.media_type + + def as_image(self) -> Image.Image: + """Convert the image to a PIL Image. + + Fetches the image from URL if necessary (including HTTP/HTTPS URLs). + + Returns: + PIL Image object + + Raises: + ValueError: If no image data is available + requests.RequestException: If fetching from URL fails + """ + # Try to get image bytes from any available source + image_bytes = self.get_bytes() + + if image_bytes: + return Image.open(io.BytesIO(image_bytes)) + elif self.image_url: + if self.image_url.startswith(('http://', 'https://')): + # Fetch image from URL + try: + import requests + response = requests.get(self.image_url, timeout=30) + response.raise_for_status() + return Image.open(io.BytesIO(response.content)) + except ImportError: + # Fallback to urllib if requests is not available + from urllib.request import urlopen + with urlopen(self.image_url, timeout=30) as response: + return Image.open(io.BytesIO(response.read())) + else: + # If it's a local file path + return Image.open(self.image_url) + else: + raise ValueError("No image data available to convert to PIL Image") + + def show(self) -> Image.Image: + """A convenience alias for as_image()""" + return self.as_image() def get_bytes(self) -> Optional[bytes]: """Get raw image bytes. @@ -1211,7 +1416,7 @@ class UserTurn: """Represents a user message turn in the conversation""" role: str = "user" - content: List[ContentBlock] = field(default_factory=list) + content: ContentBlockList = field(default_factory=ContentBlockList) tools: List[ToolDefinition] = field(default_factory=list) # Provider-specific settings @@ -1223,6 +1428,57 @@ class UserTurn: timestamp: Optional[str] = None metadata: Dict[str, Any] = field(default_factory=dict) + def __init__(self, content=None, tools=None, **kwargs): + """ + Initialize UserTurn with content and tools. + + Four ways to initialize: + 1. Empty: UserTurn() - creates empty turn with defaults + 2. Copy: UserTurn(existing_turn) - creates a copy of an existing UserTurn + 3. Positional args: UserTurn(content, tools) - pass content and/or tools + 4. Keyword args: UserTurn(content=..., tools=..., temperature=...) 
- explicit fields + + Args: + content: ContentBlockList, list of content blocks, UserTurn (for copying), or None + tools: List of ToolDefinition or None + **kwargs: Additional fields (temperature, max_tokens, top_p, timestamp, metadata) + """ + self.output_contains_image = False + + # Handle copy constructor: UserTurn(existing_turn) + if isinstance(content, UserTurn): + source = content + self.role = source.role + self.content = ContentBlockList(source.content) # Deep copy the content list + self.tools = list(source.tools) # Shallow copy the tools list + self.temperature = source.temperature + self.max_tokens = source.max_tokens + self.top_p = source.top_p + self.timestamp = source.timestamp + self.metadata = dict(source.metadata) # Copy the metadata dict + return + + # Handle content + if content is None: + content = ContentBlockList() + elif not isinstance(content, ContentBlockList): + # If it's a list, wrap it in ContentBlockList + content = ContentBlockList(content) if isinstance(content, list) else ContentBlockList([content]) + + # Handle tools + if tools is None: + tools = [] + + # Set all fields + self.role = kwargs.get('role', "user") + self.content = content + self.tools = tools + self.temperature = kwargs.get('temperature', None) + self.max_tokens = kwargs.get('max_tokens', None) + self.top_p = kwargs.get('top_p', None) + self.timestamp = kwargs.get('timestamp', None) + self.metadata = kwargs.get('metadata', {}) + def add_text(self, text: str) -> 'UserTurn': """Add text content""" self.content.append(TextContent(text=text)) @@ -1275,47 +1531,17 @@ def to_dict(self) -> Dict[str, Any]: "metadata": self.metadata } - def to_litellm_format(self) -> Dict[str, Any]: - """Convert to LiteLLM format (OpenAI-compatible, works with all providers)""" - content = [] - for block in self.content: - # Skip empty content blocks - if block.is_empty(): - continue - if isinstance(block, TextContent): - content.append({"type": "text", "text": block.text}) - elif isinstance(block, ImageContent): - if block.image_url: - img_dict = {"type": "image_url", "image_url": {"url": block.image_url}} - if block.detail: - img_dict["image_url"]["detail"] = block.detail - content.append(img_dict) - else: - data_url = f"data:{block.media_type};base64,{block.image_data}" - content.append({"type": "image_url", "image_url": {"url": data_url}}) - elif isinstance(block, PDFContent): - # LiteLLM supports PDFs for providers like Claude - # Use image_url type with PDF data URL for compatibility - if block.pdf_url: - warnings.warn("PDF URLs may not be supported by all providers through LiteLLM") - content.append({"type": "text", "text": f"[PDF: {block.pdf_url}]"}) - else: - # Encode as data URL for providers that support PDFs - data_url = f"data:application/pdf;base64,{block.pdf_data}" - content.append({"type": "image_url", "image_url": {"url": data_url}}) - elif isinstance(block, FileContent): - # For file content, add as text or data URL based on type - if block.is_binary: - data_url = f"data:{block.mime_type};base64,{block.file_data}" - content.append({"type": "text", "text": f"[File: {block.filename}]\n{data_url}"}) - else: - content.append({"type": "text", "text": f"[File: {block.filename}]\n{block.file_data}"}) + def enable_image_generation(self): + self.output_contains_image = True + def to_litellm_format(self) -> Dict[str, Any]: + """Convert to LiteLLM Response API format (OpenAI Response API compatible)""" return { "role": "user", - "content": content + "content": self.content.to_litellm_format(role="user") } + 
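# Illustrative sketch (not part of the patch itself): the UserTurn constructor
# paths defined above, and the Response API conversion they feed into; the URL
# is a hypothetical placeholder.
#
#     turn = UserTurn().add_text("What is in this image?")
#     turn.content.append(ImageContent(image_url="https://example.com/a.png"))
#     copied = UserTurn(turn)              # copy constructor
#     turn.to_litellm_format()
#     # -> {"role": "user",
#     #     "content": [{"type": "input_text", "text": "What is in this image?"},
#     #                 {"type": "input_image", "image_url": "https://example.com/a.png"}]}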
@dataclass class Turn: def __init__(self, **kwargs): @@ -1327,7 +1553,7 @@ def __init__(self, **kwargs): class AssistantTurn(Turn): """Represents an assistant message turn in the conversation""" role: str = "assistant" - content: List[ContentBlock] = field(default_factory=list) + content: ContentBlockList = field(default_factory=ContentBlockList) # Tool usage (Option B: Everything in AssistantTurn) tool_calls: List[ToolCall] = field(default_factory=list) @@ -1355,6 +1581,24 @@ def __init__(self, *args, **kwargs): 2. From raw response: AssistantTurn(response) - autocasts the response 3. With fields: AssistantTurn(role="assistant", content=[...]) - explicit fields """ + if len(args) == 1 and isinstance(args[0], AssistantTurn): + # Case: Copy constructor - create a copy of another AssistantTurn + other = args[0] + super().__init__( + role=other.role, + content=ContentBlockList(other.content), + tool_calls=list(other.tool_calls), + tool_results=list(other.tool_results), + reasoning=other.reasoning, + finish_reason=other.finish_reason, + prompt_tokens=other.prompt_tokens, + completion_tokens=other.completion_tokens, + model=other.model, + timestamp=other.timestamp, + metadata=dict(other.metadata) + ) + return + if len(args) > 0 and len(kwargs) == 0: # Case 2: Single positional arg - autocast from raw response value_dict = self.autocast(args[0]) @@ -1366,7 +1610,7 @@ def __init__(self, *args, **kwargs): # Case 1: No arguments - initialize with defaults super().__init__( role="assistant", - content=[], + content=ContentBlockList(), tool_calls=[], tool_results=[], reasoning=None, @@ -1393,7 +1637,7 @@ def from_google_genai(value: Any) -> Dict[str, Any]: # Initialize the result dictionary with default values result = { "role": "assistant", - "content": [], + "content": ContentBlockList(), "tool_calls": [], "tool_results": [], "reasoning": None, @@ -1554,7 +1798,7 @@ def from_litellm_openai_response_api(value: Any) -> Dict[str, Any]: # Initialize the result dictionary with default values result = { "role": "assistant", - "content": [], + "content": ContentBlockList(), "tool_calls": [], "tool_results": [], "reasoning": None, @@ -1741,7 +1985,7 @@ def autocast(value: Any) -> Dict[str, Any]: else: return { "role": "assistant", - "content": [], + "content": ContentBlockList(), "tool_calls": [], "tool_results": [], "reasoning": None, @@ -1755,7 +1999,7 @@ def autocast(value: Any) -> Dict[str, Any]: def add_text(self, text: str) -> 'AssistantTurn': """Add text content""" - self.content.append(TextContent(text=text)) + self.content.append(text) return self def add_image(self, url: Optional[str] = None, data: Optional[str] = None, @@ -1778,12 +2022,9 @@ def add_tool_result(self, result: ToolResult) -> 'AssistantTurn': self.tool_results.append(result) return self - def get_text(self) -> str: - """Get all text content concatenated""" - return " ".join( - block.text for block in self.content - if isinstance(block, TextContent) - ) + def to_text(self) -> str: + """Get all text content concatenated. Images will be presented as placeholder text.""" + return self.content.to_text() def to_dict(self) -> Dict[str, Any]: """Convert to dictionary format""" @@ -1800,15 +2041,36 @@ def to_dict(self) -> Dict[str, Any]: "metadata": self.metadata } + def get_text(self) -> ContentBlockList: + """Get all text content blocks. 
+ + Returns: + ContentBlockList: List containing only TextContent blocks + """ + text_blocks = ContentBlockList() + for block in self.content: + if isinstance(block, TextContent): + text_blocks.append(block) + return text_blocks + + def get_images(self) -> ContentBlockList: + """Get all image content blocks. + + Returns: + ContentBlockList: List containing only ImageContent blocks + """ + image_blocks = ContentBlockList() + for block in self.content: + if isinstance(block, ImageContent): + image_blocks.append(block) + return image_blocks + def to_litellm_format(self) -> Dict[str, Any]: - """Convert to LiteLLM format (OpenAI-compatible, works with all providers)""" + """Convert to LiteLLM Response API format (OpenAI Response API compatible)""" result = {"role": self.role} - if self.content: - # For multimodal or simple text response - text = self.get_text() - if text: - result["content"] = text + # Handle content blocks (text, images, etc.) - delegate to ContentBlockList + result["content"] = self.content.to_litellm_format(role=self.role) if self.tool_calls: result["tool_calls"] = [ @@ -1945,19 +2207,17 @@ def to_messages( self, n: int = -1, truncate_strategy: Literal["from_start", "from_end"] = "from_start", - protected_rounds: Optional[int] = None + protected_rounds: Optional[int] = None, + model_name: Optional[str] = None ) -> List[Dict[str, Any]]: """ Smart message format conversion that auto-detects the appropriate format. This method automatically chooses between Gemini format and LiteLLM format based on - the model name found in the most recent AssistantTurn. If a Gemini model is detected, - it uses to_gemini_format(), otherwise it uses to_litellm_format(). - - Model detection: - - If any AssistantTurn has a model name containing "gemini" (case-insensitive), - uses Gemini format - - Otherwise, uses LiteLLM format (default) + the model name. Detection priority: + 1. If model_name argument is provided and contains "gemini", uses Gemini format + 2. Otherwise, checks if any AssistantTurn has a model name containing "gemini" + 3. If no Gemini model detected, uses LiteLLM format (default) Note: This detection may not work for custom LLM backends with Gemini model names. In such cases, call to_gemini_format() or to_litellm_format() explicitly. @@ -1971,6 +2231,8 @@ def to_messages( - "from_end": Remove newest rounds, keep the oldest n rounds protected_rounds: Number of initial rounds to never truncate (task definition). If None, uses self.protected_rounds. Counts towards n. + model_name: Optional model name to use for format detection. If provided and + contains "gemini" (case-insensitive), forces Gemini format. Returns: List of message dictionaries in the appropriate format @@ -1981,20 +2243,24 @@ def to_messages( history.system_prompt = "You are helpful." 
history.add_user_turn(UserTurn().add_text("Hello")) - # If you used a Gemini model, this will auto-detect and use Gemini format - messages = history.to_messages() + # Force Gemini format by providing model name + messages = history.to_messages(model_name="gemini-2.5-flash") # Or be explicit: messages = history.to_gemini_format() # Force Gemini format messages = history.to_litellm_format() # Force LiteLLM format """ - # Check if any AssistantTurn has a Gemini model + # Check if model_name argument indicates Gemini (highest priority) use_gemini_format = False - for turn in self.turns: - if isinstance(turn, AssistantTurn) and turn.model: - if 'gemini' in turn.model.lower(): - use_gemini_format = True - break + if model_name and 'gemini' in model_name.lower(): + use_gemini_format = True + else: + # Check if any AssistantTurn has a Gemini model + for turn in self.turns: + if isinstance(turn, AssistantTurn) and turn.model: + if 'gemini' in turn.model.lower(): + use_gemini_format = True + break # Use the appropriate format if use_gemini_format: @@ -2094,9 +2360,9 @@ def to_gemini_format( for block in content: if block.get('type') == 'text': parts.append({'text': block.get('text', '')}) - elif block.get('type') == 'image_url': + elif block.get('type') == 'image': # Handle image URLs - image_url = block.get('image_url', {}).get('url', '') + image_url = block.get('image_url', '') if image_url.startswith('data:'): # Extract base64 data import re diff --git a/opto/utils/llm.py b/opto/utils/llm.py index 9409505d..ec42ce59 100644 --- a/opto/utils/llm.py +++ b/opto/utils/llm.py @@ -26,6 +26,25 @@ except ImportError: pass + +def _is_image_generation_model(model_name: str) -> bool: + """Detect if a model is for image generation based on its name. + + Detects: + - OpenAI: gpt-image-1, gpt-image-1.5, gpt-image-1-mini, dall-e-2, dall-e-3 + - Gemini: gemini-2.5-flash-image, gemini-2.5-pro-image, etc. + + Args: + model_name: The name of the model to check + + Returns: + bool: True if the model is an image generation model, False otherwise + """ + if model_name is None: + return False + model_lower = model_name.lower() + return 'image' in model_lower or 'dall-e' in model_lower + class AbstractModel: """Abstract base class for LLM model wrappers with automatic refreshing. @@ -42,6 +61,9 @@ class AbstractModel: mm_beta : bool, optional If True, returns AssistantTurn objects with rich multimodal content. If False (default), returns raw API responses in legacy format. + model_name : str or None, optional + The name of the model being used (e.g., "gpt-4o", "claude-3-5-sonnet-latest"). + If None, no model name is stored. Attributes ---------- @@ -51,6 +73,10 @@ class AbstractModel: Refresh frequency in seconds. mm_beta : bool Whether to use multimodal beta mode. + model_name : str or None + The name of the model being used. + is_image_model : bool + Whether the model is for image generation (auto-detected from model name). model : Any Property that returns the current model instance. @@ -80,7 +106,7 @@ class AbstractModel: """ def __init__(self, factory: Callable, reset_freq: Union[int, None] = None, - mm_beta: bool = False) -> None: + mm_beta: bool = False, model_name: Union[str, None] = None) -> None: """ Args: factory: A function that takes no arguments and returns a model that is callable. @@ -88,18 +114,29 @@ def __init__(self, factory: Callable, reset_freq: Union[int, None] = None, refreshed. If None, the model is never refreshed. mm_beta: If True, returns AssistantTurn objects with rich multimodal content. 
If False (default), returns raw API responses in legacy format. + model_name: The name of the model being used (e.g., "gpt-4o", "claude-3-5-sonnet-latest"). + If None, no model name is stored. """ self.factory = factory self._model = self.factory() self.reset_freq = reset_freq self._init_time = time.time() self.mm_beta = mm_beta + self.model_name = model_name # Overwrite this `model` property when subclassing. @property def model(self): """When self.model is called, text responses should always be available at `response['choices'][0]['message']['content']`""" return self._model + + @property + def is_image_model(self) -> bool: + """Check if this model is for image generation based on model name. + + Returns True if the model name contains 'image' or 'dall-e', False otherwise. + """ + return _is_image_generation_model(self.model_name) # This is the main API def __call__(self, *args, **kwargs) -> Any: @@ -202,8 +239,11 @@ def __init__(self, config_list: List = None, filter_dict: Dict = None, if filter_dict is not None: config_list = autogen.filter_config(config_list, filter_dict) + # Extract model name from config_list if available + model_name = config_list[0].get('model') if config_list and len(config_list) > 0 else None + factory = lambda *args, **kwargs: self._factory(config_list) - super().__init__(factory, reset_freq, mm_beta=mm_beta) + super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model_name) @classmethod def _factory(cls, config_list): @@ -283,6 +323,7 @@ class LiteLLM(AbstractModel): https://docs.litellm.ai/docs/completion/input https://docs.litellm.ai/docs/response_api + https://docs.litellm.ai/docs/image_generation To use this, set the credentials through the environment variable as instructed in the LiteLLM documentation. For convenience, you can set the @@ -293,11 +334,23 @@ class LiteLLM(AbstractModel): This class now supports storing default completion parameters (like temperature, top_p, max_tokens, etc.) that will be used for all calls unless overridden. - Responses API Support: + Text Generation: When mm_beta=True, the Responses API is used for rich multimodal content. When mm_beta=False (default), the Completion API is used for backward compatibility. See: https://docs.litellm.ai/docs/response_api + + Image Generation: + Automatically detects image generation models (containing 'image' or 'dall-e' in name). + Uses litellm.image_generation() API for models like: + - gpt-image-1, gpt-image-1.5, gpt-image-1-mini + - dall-e-2, dall-e-3 + + Image models require a single string prompt: + llm = LLM(model="gpt-image-1.5") + result = llm(prompt="A serene mountain landscape") + + Check llm.is_image_model to determine if a model is for image generation. 
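    Model detection is a name heuristic (illustrative examples; see
    _is_image_generation_model above):

        LLM(model="dall-e-3").is_image_model          # True  ('dall-e' in the name)
        LLM(model="gpt-image-1-mini").is_image_model  # True  ('image' in the name)
        LLM(model="gpt-4o").is_image_model            # False (text completion path)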
""" def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, @@ -312,7 +365,7 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] self.model_name = model self.cache = cache self.default_params = default_params # Store default completion parameters - + factory = lambda: self._factory( self.model_name, self.default_params, @@ -320,13 +373,36 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] max_retries=max_retries, base_delay=base_delay ) - super().__init__(factory, reset_freq, mm_beta=mm_beta) + super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model) @classmethod def _factory(cls, model_name: str, default_params: dict, mm_beta: bool, max_retries=10, base_delay=1.0): import litellm + # Check if this is an image generation model + is_image_model = _is_image_generation_model(model_name) + + if is_image_model: + # Image generation API + api_func = litellm.image_generation + operation_name = "LiteLLM_image_generation" + + # Standard image generation wrapper + def image_wrapper(prompt, **kwargs): + assert isinstance(prompt, str), ( + f"Image generation requires a single string prompt. " + f"Got {type(prompt).__name__}. " + f"Usage: llm(prompt='your prompt here')" + ) + return retry_with_exponential_backoff( + lambda: api_func(model=model_name, prompt=prompt, **{**default_params, **kwargs}), + max_retries=max_retries, + base_delay=base_delay, + operation_name=operation_name + ) + return image_wrapper + # Use Responses API when mm_beta=True, otherwise use Completion API api_func = litellm.responses if mm_beta else litellm.completion operation_name = "LiteLLM_responses" if mm_beta else "LiteLLM_completion" @@ -420,7 +496,7 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] self.model_name = model self.cache = cache factory = lambda: self._factory(base_url, server_api_key) # an LLM instance uses a fixed model - super().__init__(factory, reset_freq, mm_beta=mm_beta) + super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model) @classmethod def _factory(cls, base_url: str, server_api_key: str): @@ -442,6 +518,7 @@ class GoogleGenAILLM(AbstractModel): This is an LLM backend using Google's GenAI SDK with the Interactions API. https://ai.google.dev/gemini-api/docs/text-generation + https://ai.google.dev/gemini-api/docs/image-generation The Interactions API is a unified interface for interacting with Gemini models, similar to OpenAI's Response API. It provides better state management, tool @@ -452,34 +529,45 @@ class GoogleGenAILLM(AbstractModel): variable TRACE_GOOGLE_GENAI_MODEL. Supported models: - - Gemini 3: gemini-3-flash-preview, gemini-3-pro-preview - - Gemini 2.5: gemini-2.5-flash, gemini-2.5-pro, gemini-2.5-flash-lite + - Text: gemini-2.5-flash, gemini-2.5-pro, gemini-2.5-flash-lite + - Image: gemini-2.5-flash-image, gemini-2.5-pro-image This class supports storing default generation parameters (like temperature, max_output_tokens, etc.) that will be used for all calls unless overridden. - Note: Use ConversationHistory.to_gemini_format() to convert conversation history - to the format expected by Google GenAI. - - Example: - from opto.utils.llm import LLM - from opto.utils.backbone import ConversationHistory, UserTurn, AssistantTurn - - # Initialize LLM - llm = LLM(model="gemini-2.5-flash") + Text Generation: + Use ConversationHistory.to_gemini_format() to convert conversation history + to the format expected by Google GenAI. 
- # Create conversation history - history = ConversationHistory() - history.system_prompt = "You are a helpful assistant." - history.add_user_turn(UserTurn().add_text("What is AI?")) + Example: + from opto.utils.llm import LLM + from opto.utils.backbone import ConversationHistory, UserTurn, AssistantTurn + + # Initialize LLM + llm = LLM(model="gemini-2.5-flash") + + # Create conversation history + history = ConversationHistory() + history.system_prompt = "You are a helpful assistant." + history.add_user_turn(UserTurn().add_text("What is AI?")) + + # Convert to Gemini format and call LLM + messages = history.to_gemini_format() + response = llm(messages=messages, max_tokens=100) + + # Parse response + at = AssistantTurn(response) + print(at.get_text()) + + Image Generation: + Automatically detects image generation models (containing 'image' in name). + Uses client.models.generate_images() API for models like gemini-2.5-flash-image. - # Convert to Gemini format and call LLM - messages = history.to_gemini_format() - response = llm(messages=messages, max_tokens=100) + Image models require a single string prompt: + llm = LLM(model="gemini-2.5-flash-image") + result = llm(prompt="A serene mountain landscape", number_of_images=2) - # Parse response - at = AssistantTurn(response) - print(at.get_text()) + Check llm.is_image_model to determine if a model is for image generation. """ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, @@ -491,7 +579,7 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] self.cache = cache self.default_params = default_params # Store default generation parameters factory = lambda: self._factory(self.model_name, self.default_params) - super().__init__(factory, reset_freq, mm_beta=mm_beta) + super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model) @classmethod def _factory(cls, model_name: str, default_params: dict): @@ -504,6 +592,39 @@ def _factory(cls, model_name: str, default_params: dict): # Try without API key (will use default credentials or fail gracefully) client = genai.Client() + # Check if this is an image generation model + is_image_model = _is_image_generation_model(model_name) + + if is_image_model: + # Image generation for Gemini + def image_api_func(prompt, **kwargs): + assert isinstance(prompt, str), ( + f"Image generation requires a single string prompt. " + f"Got {type(prompt).__name__}. 
" + f"Usage: llm(prompt='your prompt here')" + ) + + # Gemini image generation API + # https://ai.google.dev/gemini-api/docs/image-generation + # Filter kwargs to only valid parameters for generate_images + valid_params = { + k: v for k, v in kwargs.items() + if k in ['number_of_images', 'aspect_ratio', 'safety_filter_level'] + } + response = client.models.generate_images( + model=model_name, + prompt=prompt, + **valid_params + ) + return response + + return lambda *args, **kwargs: retry_with_exponential_backoff( + lambda: image_api_func(*args, **{**default_params, **kwargs}), + max_retries=5, + base_delay=1, + operation_name=f"{model_name}_image_gen" + ) + # Build config if there are generation parameters config_params = {} @@ -811,10 +932,11 @@ class DummyLLM(AbstractModel): def __init__(self, callable, reset_freq: Union[int, None] = None, - mm_beta: bool = False) -> None: + mm_beta: bool = False, + model_name: Union[str, None] = None) -> None: # self.message = message self.callable = callable - super().__init__(self._factory, reset_freq, mm_beta=mm_beta) + super().__init__(self._factory, reset_freq, mm_beta=mm_beta, model_name=model_name) def _factory(self): @@ -852,6 +974,30 @@ class LLM: # Add LiteLLM parameters llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=2000) llm = LLM(model="gpt-4o-mini", temperature=0.3, top_p=0.9) + + Image Generation: + # OpenAI image models (auto-detected by 'image' or 'dall-e' in name) + img_llm = LLM(model="gpt-image-1.5") + print(img_llm.is_image_model) # True + result = img_llm(prompt="A serene mountain landscape at sunset") + + # With additional parameters + img_llm = LLM(model="gpt-image-1", size="1024x1024", quality="hd") + result = img_llm(prompt="A futuristic cityscape") + + # DALL-E models + dalle = LLM(model="dall-e-3") + result = dalle(prompt="A cat astronaut in space", size="1024x1792") + + # Gemini image models + gemini_img = LLM(model="gemini-2.5-flash-image") + result = gemini_img(prompt="Abstract art", number_of_images=2) + + # Check if model generates images + if llm.is_image_model: + result = llm(prompt="Your prompt here") + else: + result = llm(messages=[{"role": "user", "content": "Your message"}]) Using Multimodal Beta Mode: # Enable mm_beta for rich AssistantTurn responses @@ -971,6 +1117,9 @@ class LLM: def __new__(cls, model: str = None, profile: str = 'default', backend: str = None, mm_beta: bool = False, **kwargs): + if _is_image_generation_model(model): + mm_beta = True + # Priority 1: If model is specified, use LLMFactory with model if model: if backend is not None: diff --git a/tests/llm_optimizers_tests/test_optimizer_backbone.py b/tests/llm_optimizers_tests/test_optimizer_backbone.py deleted file mode 100644 index e4d369db..00000000 --- a/tests/llm_optimizers_tests/test_optimizer_backbone.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -We need to test a few things: -1. Various use cases of ContentBlock and specialized ones -2. UserTurn, AssistantTurn and conversation manager -3. 
Multi-modal use of conversation manager, including multi-turn and image as output -""" - diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py index 38d67efa..36126114 100644 --- a/tests/llm_optimizers_tests/test_optoprime_v3.py +++ b/tests/llm_optimizers_tests/test_optoprime_v3.py @@ -15,7 +15,7 @@ # Skip tests if no API credentials are available SKIP_REASON = "No API credentials found" HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get( - "OPENAI_API_KEY") + "OPENAI_API_KEY") or os.environ.get("GEMINI_API_KEY") llm = LLM() @@ -503,3 +503,7 @@ def test_optimizer_step_with_content_blocks(): # Verify the step completed assert optimizer.log is not None assert len(optimizer.log) > 0 + +@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON) +def test_optimizer_multimodal_parameter_update(): + pass \ No newline at end of file diff --git a/tests/unit_tests/test_llm.py b/tests/unit_tests/test_llm.py index 1b4a9934..244bbf2e 100644 --- a/tests/unit_tests/test_llm.py +++ b/tests/unit_tests/test_llm.py @@ -353,7 +353,7 @@ def test_litellm_responses_api_system_message(self): assert response.content is not None, "Content should not be None" # Get text content - text_content = response.get_text() + text_content = response.to_text() assert isinstance(text_content, str), "Text content should be a string" assert len(text_content) > 0, "Text content should not be empty" assert '4' in text_content, f"Response should contain the answer '4'. Got: {text_content}" @@ -400,7 +400,7 @@ def test_gemini_system_instruction_mm_beta_mode(self): assert response.content is not None, "Content should not be None" # Get text content - text_content = response.get_text() + text_content = response.to_text() assert isinstance(text_content, str), "Text content should be a string" assert len(text_content) > 0, "Text content should not be empty" @@ -423,7 +423,7 @@ def test_litellm_system_message_with_conversation(self): response1 = llm(messages=messages) assert isinstance(response1, AssistantTurn), "First response should be AssistantTurn" - text1 = response1.get_text() + text1 = response1.to_text() # Check pirate-like language in first response pirate_indicators = ['arr', 'matey', 'ahoy', 'ye', 'aye'] @@ -436,7 +436,7 @@ def test_litellm_system_message_with_conversation(self): response2 = llm(messages=messages) assert isinstance(response2, AssistantTurn), "Second response should be AssistantTurn" - text2 = response2.get_text() + text2 = response2.to_text() # Check pirate-like language persists has_pirate_language_2 = any(indicator in text2.lower() for indicator in pirate_indicators) @@ -461,7 +461,7 @@ def test_gemini_system_instruction_with_config_params(self): ) assert isinstance(response, AssistantTurn), "Should return AssistantTurn object" - text_content = response.get_text() + text_content = response.to_text() assert len(text_content) > 0, "Should have content" print_color(f"✓ Gemini system_instruction works with other config parameters", 'green') diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index 193add49..283a309e 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -523,7 +523,7 @@ def test_real_llm_multi_turn_with_images_updated_assistant_turn(): at = AssistantTurn(response1) print("\n🤖 Turn 1 - Assistant:") - print(f" {at.get_text()[:200]}...") + print(f" {at.to_text()[:200]}...") 
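    # Illustrative aside (not part of the original test): with mm_beta=True the
    # llm() call above already returns an AssistantTurn, so its content can be
    # inspected as blocks before it is appended to the history, e.g.:
    #     at.get_text()    # ContentBlockList holding only the TextContent blocks
    #     at.to_text()     # plain str; image blocks render as text placeholders
    #     at.get_images()  # ContentBlockList holding only the ImageContent blocks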
history.add_assistant_turn(at) @@ -543,7 +543,7 @@ def test_real_llm_multi_turn_with_images_updated_assistant_turn(): print(f" {response2_content[:200]}...") # Verify responses - assert at.get_text() is not None and len(at.get_text()) > 20 + assert at.to_text() is not None and len(at.to_text()) > 20 assert response2_content is not None and len(response2_content) > 20 # Turn 2 should reference the context from turn 1 @@ -585,7 +585,7 @@ def test_real_google_genai_multi_turn_with_images_updated(): at = AssistantTurn(response1) print("\n🤖 Turn 1 - Assistant:") - print(f" {at.get_text()[:200] if at.get_text() else '[Image generated]'}...") + print(f" {at.to_text()[:200] if at.to_text() else '[Image generated]'}...") history.add_assistant_turn(at) @@ -600,7 +600,7 @@ def test_real_google_genai_multi_turn_with_images_updated(): response2 = llm(messages=messages, max_tokens=300) at2 = AssistantTurn(response2) - response2_content = at2.get_text() + response2_content = at2.to_text() print("\n🤖 Turn 2 - Assistant:") print(f" {response2_content[:200]}...") From 2ae6d107e3ec470abdfa1e2b6eab7b820bc6eb39 Mon Sep 17 00:00:00 2001 From: windweller Date: Thu, 8 Jan 2026 23:53:02 -0500 Subject: [PATCH 48/51] update tests to use the response API --- tests/unit_tests/test_optimizer_backbone.py | 76 +++++++++++---------- 1 file changed, 40 insertions(+), 36 deletions(-) diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py index 283a309e..dfb4a2e3 100644 --- a/tests/unit_tests/test_optimizer_backbone.py +++ b/tests/unit_tests/test_optimizer_backbone.py @@ -87,11 +87,13 @@ def test_truncate_from_start(): assert messages[1]["role"] == "user" assert "umbrella" in messages[1]["content"][0]["text"] assert messages[2]["role"] == "assistant" - assert "umbrella" in messages[2]["content"] + # Content is now a list of dicts with type and text fields + assert any("umbrella" in item.get("text", "") for item in messages[2]["content"]) assert messages[3]["role"] == "user" assert "Thanks" in messages[3]["content"][0]["text"] assert messages[4]["role"] == "assistant" - assert "welcome" in messages[4]["content"] + # Content is now a list of dicts with type and text fields + assert any("welcome" in item.get("text", "") for item in messages[4]["content"]) def test_truncate_from_end(): @@ -111,11 +113,13 @@ def test_truncate_from_end(): assert messages[1]["role"] == "user" assert "Hello" in messages[1]["content"][0]["text"] assert messages[2]["role"] == "assistant" - assert "sunny" in messages[2]["content"] + # Content is now a list of dicts with type and text fields + assert any("sunny" in item.get("text", "") for item in messages[2]["content"]) assert messages[3]["role"] == "user" assert "tomorrow" in messages[3]["content"][0]["text"] assert messages[4]["role"] == "assistant" - assert "rainy" in messages[4]["content"] + # Content is now a list of dicts with type and text fields + assert any("rainy" in item.get("text", "") for item in messages[4]["content"]) def test_truncate_zero_turns(): @@ -216,16 +220,16 @@ def test_user_turn_multiple_images(): assert len(user_msg["content"]) == 3 # Check first item is text - assert user_msg["content"][0]["type"] == "text" + assert user_msg["content"][0]["type"] == "input_text" assert user_msg["content"][0]["text"] == "What are in these images? Is there any difference between them?" 
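    # Illustrative aside (assumed shape, not asserted by the original test): the
    # Response API format flattens each image block to
    #     {"type": "input_image", "image_url": "<url or data: URL>"}
    # i.e. image_url is a plain string, not the nested {"url": ...} dict used by
    # the older Chat Completions format that these assertions previously targeted.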
# Check second item is first image - assert user_msg["content"][1]["type"] == "image_url" - assert user_msg["content"][1]["image_url"]["url"] == "https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg" + assert user_msg["content"][1]["type"] == "input_image" + assert user_msg["content"][1]["image_url"] == "https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg" # Check third item is second image - assert user_msg["content"][2]["type"] == "image_url" - assert user_msg["content"][2]["image_url"]["url"] == "https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg" + assert user_msg["content"][2]["type"] == "input_image" + assert user_msg["content"][2]["image_url"] == "https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg" def test_assistant_turn_multiple_images(): @@ -246,8 +250,8 @@ def test_assistant_turn_multiple_images(): assert len(messages) == 1 assert messages[0]["role"] == "assistant" - # Assistant should have text content - assert "Here are two generated images" in messages[0]["content"] + # Assistant should have text content (now in list format) + assert any("Here are two generated images" in item.get("text", "") for item in messages[0]["content"]) def test_mixed_content_types_in_turn(): @@ -270,10 +274,10 @@ def test_mixed_content_types_in_turn(): # Should have 4 content blocks: text, image, image, text assert len(user_msg["content"]) == 4 - assert user_msg["content"][0]["type"] == "text" - assert user_msg["content"][1]["type"] == "image_url" - assert user_msg["content"][2]["type"] == "image_url" - assert user_msg["content"][3]["type"] == "text" + assert user_msg["content"][0]["type"] == "input_text" + assert user_msg["content"][1]["type"] == "input_image" + assert user_msg["content"][2]["type"] == "input_image" + assert user_msg["content"][3]["type"] == "input_text" def test_multiple_images_with_base64(): @@ -300,11 +304,11 @@ def test_multiple_images_with_base64(): assert len(user_msg["content"]) == 3 # Check base64 data URLs are properly formatted - assert user_msg["content"][1]["type"] == "image_url" - assert user_msg["content"][1]["image_url"]["url"].startswith("data:image/png;base64,") + assert user_msg["content"][1]["type"] == "input_image" + assert user_msg["content"][1]["image_url"].startswith("data:image/png;base64,") - assert user_msg["content"][2]["type"] == "image_url" - assert user_msg["content"][2]["image_url"]["url"].startswith("data:image/jpeg;base64,") + assert user_msg["content"][2]["type"] == "input_image" + assert user_msg["content"][2]["image_url"].startswith("data:image/jpeg;base64,") def test_conversation_with_multiple_multi_image_turns(): @@ -366,7 +370,7 @@ def test_truncate_multimodal_conversation(): # Check that multimodal content is preserved assert len(messages[1]["content"]) == 2 # text + image - assert messages[1]["content"][1]["type"] == "image_url" + assert messages[1]["content"][1]["type"] == "input_image" # ============================================================================ # Real LLM Call Tests with Images @@ -399,11 +403,12 @@ def test_real_llm_call_with_multiple_images(): print("="*80) print(f"\nSending {len(user_turn.content)} content blocks (1 text + 2 images)...") - # Make the LLM call - llm = LLM() + # Make the LLM call with mm_beta=True for Response API format + llm = LLM(mm_beta=True) response = llm(messages=messages, max_tokens=500) - response_content = 
response.choices[0].message.content + # response is now an AssistantTurn object + response_content = response.to_text() print("\n📷 User Query:") print(" What are in these images? Is there any difference between them?") @@ -413,8 +418,7 @@ def test_real_llm_call_with_multiple_images(): print("-" * 40) # Store assistant response in history - assistant_turn = AssistantTurn().add_text(response_content) - history.add_assistant_turn(assistant_turn) + history.add_assistant_turn(response) # Verify we got a meaningful response assert response_content is not None @@ -438,7 +442,7 @@ def test_real_llm_multi_turn_with_images(): from opto.utils.llm import LLM history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.") - llm = LLM() + llm = LLM(mm_beta=True) print("\n" + "="*80) print("MULTI-TURN CONVERSATION WITH IMAGES") @@ -457,12 +461,12 @@ def test_real_llm_multi_turn_with_images(): print(" What type of flowers are shown in these images? [+ 2 images]") response1 = llm(messages=messages, max_tokens=300) - response1_content = response1.choices[0].message.content + response1_content = response1.to_text() print("\n🤖 Turn 1 - Assistant:") print(f" {response1_content[:200]}...") - history.add_assistant_turn(AssistantTurn().add_text(response1_content)) + history.add_assistant_turn(response1) # Turn 2: Follow-up question (no new images, but context from previous turn) user_turn2 = UserTurn().add_text("Which of these flowers would be better for a romantic gift and why?") @@ -474,7 +478,7 @@ def test_real_llm_multi_turn_with_images(): print(" Which of these flowers would be better for a romantic gift and why?") response2 = llm(messages=messages, max_tokens=300) - response2_content = response2.choices[0].message.content + response2_content = response2.to_text() print("\n🤖 Turn 2 - Assistant:") print(f" {response2_content[:200]}...") @@ -501,7 +505,7 @@ def test_real_llm_multi_turn_with_images_updated_assistant_turn(): from opto.utils.llm import LLM history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.") - llm = LLM() + llm = LLM(mm_beta=True) print("\n" + "="*80) print("MULTI-TURN CONVERSATION WITH IMAGES") @@ -519,8 +523,7 @@ def test_real_llm_multi_turn_with_images_updated_assistant_turn(): print("\n📷 Turn 1 - User:") print(" What type of flowers are shown in these images? 
[+ 2 images]") - response1 = llm(messages=messages, max_tokens=300) - at = AssistantTurn(response1) + at = llm(messages=messages, max_tokens=300) print("\n🤖 Turn 1 - Assistant:") print(f" {at.to_text()[:200]}...") @@ -537,7 +540,7 @@ def test_real_llm_multi_turn_with_images_updated_assistant_turn(): print(" Which of these flowers would be better for a romantic gift and why?") response2 = llm(messages=messages, max_tokens=300) - response2_content = response2.choices[0].message.content + response2_content = response2.to_text() print("\n🤖 Turn 2 - Assistant:") print(f" {response2_content[:200]}...") @@ -568,7 +571,7 @@ def test_real_google_genai_multi_turn_with_images_updated(): # Use a Gemini model that supports image generation model = "gemini-2.5-flash-image" - llm = LLM(model=model) + llm = LLM(model=model, mm_beta=True) print("="*80) @@ -576,12 +579,13 @@ def test_real_google_genai_multi_turn_with_images_updated(): user_turn1 = UserTurn().add_text("Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.") history.add_user_turn(user_turn1) - messages = history.to_gemini_format() print("\n📷 Turn 1 - User:") print(" Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.") - response1 = llm(messages=messages, max_tokens=300) + # For image generation models, pass the prompt directly instead of messages + prompt = user_turn1.content.to_text() + response1 = llm(prompt=prompt, max_tokens=300) at = AssistantTurn(response1) print("\n🤖 Turn 1 - Assistant:") From 9ea88c9079abc16558c139b0262f224620ffa751 Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 9 Jan 2026 00:14:28 -0500 Subject: [PATCH 49/51] making the JSON extraction more robust. Adding a notebook for the demo. --- .../multimodal_html_example/example.ipynb | 891 ++++++++++++++++++ opto/optimizers/optoprime_v3.py | 19 +- 2 files changed, 907 insertions(+), 3 deletions(-) create mode 100644 examples/multimodal_html_example/example.ipynb diff --git a/examples/multimodal_html_example/example.ipynb b/examples/multimodal_html_example/example.ipynb new file mode 100644 index 00000000..9f6a7949 --- /dev/null +++ b/examples/multimodal_html_example/example.ipynb @@ -0,0 +1,891 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f206f3a7-faee-4fe2-adb4-72cc95e75c05", + "metadata": {}, + "source": [ + "# Multi-turn Multi-modal Optimizer Example\n", + "\n", + "On an ubuntu/linux machine, install the following:\n", + "\n", + "```\n", + "# Install Chrome\n", + "wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb\n", + "sudo apt install -y ./google-chrome-stable_current_amd64.deb\n", + "\n", + "# Install dependencies\n", + "sudo apt update\n", + "sudo apt install -y unzip xvfb libxi6 libgconf-2-4 libnss3\n", + "```\n", + "\n", + "Install the following packages as well:\n", + "```\n", + "pip install selenium\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "24dd2a9b-c091-484f-83a3-496926d86a28", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from opto.trace import bundle, node, GRAPH\n", + "import opto.optimizers\n", + "import importlib\n", + "import inspect\n", + "import json\n", + "import pickle\n", + "from opto.utils.llm import LLM\n", + "\n", + "from opto import trace\n", + "from opto.trace import node, bundle\n", + "from opto.optimizers.optoprime_v3 import (\n", + " OptoPrimeV3, ProblemInstance,\n", + " Content, OptimizerPromptSymbolSetJSON\n", + ")\n", + "from opto.utils.backbone import 
TextContent, ImageContent, ContentBlock, ContentBlockList, UserTurn, AssistantTurn, ConversationHistory"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3b87ad16-1d56-458e-90de-6ddafe0b2b17",
   "metadata": {},
   "source": [
    "We first create some reference material for the optimization:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ab1fa301-a1b9-4b75-acb5-bfaa61064ad1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import math\n",
    "from PIL import Image, ImageDraw, ImageFont\n",
    "\n",
    "spec = \"\"\"{\n",
    "  \"elements\": [\n",
    "    {\n",
    "      \"id\": \"title\",\n",
    "      \"type\": \"text\",\n",
    "      \"content\": \"Welcome\",\n",
    "      \"x\": 100,\n",
    "      \"y\": 50,\n",
    "      \"fontSize\": 24\n",
    "    },\n",
    "    {\n",
    "      \"id\": \"button\",\n",
    "      \"type\": \"button\",\n",
    "      \"content\": \"Submit\",\n",
    "      \"x\": 120,\n",
    "      \"y\": 150,\n",
    "      \"width\": 100,\n",
    "      \"height\": 40\n",
    "    }\n",
    "  ]\n",
    "}\"\"\"\n",
    "\n",
    "HTML_TEMPLATE = \"\"\"\n",
    "<!DOCTYPE html>\n",
    "<html>\n",
    "<head>\n",
    "</head>\n",
    "<body>\n",
    "{body}\n",
    "</body>\n",
    "</html>\n",
    "\"\"\"\n",
    "\n",
    "def spec_to_html(spec, filename=\"layout.html\"):\n",
    "    body = \"\"\n",
    "    for e in spec[\"elements\"]:\n",
    "        if e[\"type\"] == \"text\":\n",
    "            body += f'<div id=\"{e[\"id\"]}\" style=\"position:absolute; left:{e[\"x\"]}px; top:{e[\"y\"]}px; font-size:{e[\"fontSize\"]}px\">{e[\"content\"]}</div>\\n'\n",
    "        elif e[\"type\"] == \"button\":\n",
    "            body += f'<button id=\"{e[\"id\"]}\" style=\"position:absolute; left:{e[\"x\"]}px; top:{e[\"y\"]}px; width:{e[\"width\"]}px; height:{e[\"height\"]}px\">{e[\"content\"]}</button>\\n'\n",
    "\n",
    "    with open(filename, \"w\") as f:\n",
    "        f.write(HTML_TEMPLATE.format(body=body))"
   ]
  },
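  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick sanity check (illustrative only, not part of the original demo), we can render the spec and inspect the generated markup directly; `layout.html` is the same file name the cells below use:\n",
    "\n",
    "```python\n",
    "spec_to_html(json.loads(spec), filename=\"layout.html\")\n",
    "with open(\"layout.html\") as f:\n",
    "    print(f.read())  # the absolutely positioned div and button, wrapped in HTML_TEMPLATE\n",
    "```"
   ]
  },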
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "776d29e6-c4a6-4a62-8251-66f44545c7ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "from selenium import webdriver\n",
    "from selenium.webdriver.chrome.options import Options\n",
    "\n",
    "def take_screenshot(html_path, output_img):\n",
    "    options = Options()\n",
    "    options.add_argument(\"--headless=new\")  # headless mode for automation\n",
    "    options.add_argument(\"--no-sandbox\")\n",
    "    options.add_argument(\"--disable-dev-shm-usage\")\n",
    "    options.add_argument(\"--disable-gpu\")\n",
    "    options.add_argument(\"--allow-file-access-from-files\")\n",
    "    options.add_argument(\"--enable-local-file-accesses\")\n",
    "\n",
    "    driver = webdriver.Chrome(options=options)\n",
    "    driver.get(\"file:///home/ubuntu/Trace/tests/\" + html_path)\n",
    "    driver.save_screenshot(output_img)\n",
    "    driver.quit()"
   ]
  },
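  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note that `take_screenshot` hard-codes the author's checkout path in the `file://` URL. A minimal variant (a sketch, not part of the demo; `take_screenshot_portable` is an invented name) builds the URL from the absolute path so the notebook can run from any directory:\n",
    "\n",
    "```python\n",
    "import os\n",
    "from selenium import webdriver\n",
    "from selenium.webdriver.chrome.options import Options\n",
    "\n",
    "def take_screenshot_portable(html_path, output_img):\n",
    "    options = Options()\n",
    "    options.add_argument(\"--headless=new\")\n",
    "    options.add_argument(\"--no-sandbox\")\n",
    "    driver = webdriver.Chrome(options=options)\n",
    "    # Derive the file:// URL from the absolute path instead of a fixed prefix.\n",
    "    driver.get(\"file://\" + os.path.abspath(html_path))\n",
    "    driver.save_screenshot(output_img)\n",
    "    driver.quit()\n",
    "```"
   ]
  },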
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "5528becd-9e68-413f-8398-a8207f4eed73",
   "metadata": {},
   "outputs": [],
   "source": [
    "spec_to_html(spec=json.loads(spec))\n",
    "take_screenshot(\"layout.html\", \"layout.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "026caff0-f1d8-4353-887f-5a18db921058",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/jpeg": "[base64 JPEG data omitted: screenshot of layout.html]",
      "image/png": "[base64 PNG data omitted: screenshot of layout.html]",
      "text/plain": [
       ""
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ImageContent(\"layout.png\").show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45d2a69f-a28f-40a6-a619-427f133359f6",
   "metadata": {},
   "source": [
    "Now that we have successfully taken a screenshot of the HTML page, we can start our optimization.\n",
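    "\n",
    "As an aside, the save-and-capture pattern can be folded into one helper so that any candidate HTML string produced during optimization can be re-rendered the same way. This is a minimal sketch (the helper name and default paths are invented here, not part of the Trace API):\n",
    "\n",
    "```python\n",
    "def render_html_to_png(html, html_path=\"candidate.html\", png_path=\"candidate.png\"):\n",
    "    # Write the candidate HTML to disk, then rasterize it with the\n",
    "    # headless-Chrome take_screenshot() helper defined earlier.\n",
    "    with open(html_path, \"w\") as f:\n",
    "        f.write(html)\n",
    "    take_screenshot(html_path, png_path)\n",
    "    return png_path\n",
    "```"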
+ ] + }, + { + "cell_type": "markdown", + "id": "67cb3612-b7ce-467d-b8b3-b7259a313378", + "metadata": {}, + "source": [ + "### Step 1: Using the multi-modal context block to provide a reference image \n", + "\n", + "The task is for the model to implement an HTML page according to the image" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "7ab8f0d7-9f08-4bc1-aad8-065334b14e88", + "metadata": {}, + "outputs": [], + "source": [ + "html_param = node(\"\", trainable=True)\n", + "optimizer = OptoPrimeV3([html_param], use_json_object_format=True,\n", + " memory_size=5,\n", + " ignore_extraction_error=False,\n", + " include_example=False,\n", + " optimizer_prompt_symbol_set=OptimizerPromptSymbolSetJSON())\n", + "\n", + "optimizer.add_context(\"The reference image looks like this: \", ImageContent(\"./layout.png\"))\n", + "optimizer.zero_feedback()\n", + "\n", + "feedback_text = \"\"\"Please fill in the empty HTML page such that it looks like the target image.\"\"\"\n", + "\n", + "optimizer.backward(html_param, feedback=feedback_text)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d5be0db1-9561-4b9d-9268-7a77706cf0f2", + "metadata": {}, + "outputs": [], + "source": [ + "summary = optimizer.summarize()\n", + "system_prompt, user_content_blocks = optimizer.construct_prompt(summary)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "36c5c516-702d-41f0-8bb4-781c991df55a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TextContent(type='text', text='\\nNow you see problem instance:\\n\\n================================\\n \\n# Instruction\\nYou need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\\n\\n# Code\\n\\n\\n# Documentation\\n\\n\\n# Variables\\n \\n\\n\\n\\n\\n\\n \\n\\n# Inputs\\n \\n\\n# Others\\n \\n\\n# Outputs\\n \\n\\n# Context\\n The reference image looks like this: ')\n", + "ImageContent(image_url=None, image_data=iVBORw0KGg..., image_bytes=None, media_type=image/png)\n", + "TextContent(type='text', text='\\n\\n# Feedback\\n Please fill in the empty HTML page such that it looks like the target image. 
\\n================================\\n\\n \\nWhat are your suggestions on variables str0?\\n\\nYour response:\\n')\n" + ] + } + ], + "source": [ + "for block in user_content_blocks:\n", + " print(block)" + ] + }, + { + "cell_type": "markdown", + "id": "3b3ea5fe-9a3b-4273-8280-5fd626356e4c", + "metadata": {}, + "source": [ + "The above information might be hard to read, but we can also directly print this.\n", + "\n", + "Note that the image becomes `[IMAGE]`, a placeholder in the text rendering (we are **not** sending this placeholder to the LLM)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "0db82fa4-91b6-4d94-b77b-1ead0489c034", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Now you see problem instance:\n", + "\n", + "================================\n", + " \n", + "# Instruction\n", + "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n", + "\n", + "# Code\n", + "\n", + "\n", + "# Documentation\n", + "\n", + "\n", + "# Variables\n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + " \n", + "\n", + "# Inputs\n", + " \n", + "\n", + "# Others\n", + " \n", + "\n", + "# Outputs\n", + " \n", + "\n", + "# Context\n", + " The reference image looks like this: \n", + "[IMAGE]\n", + " \n", + "\n", + "# Feedback\n", + " Please fill in the empty HTML page such that it looks like the target image. \n", + "================================\n", + "\n", + " \n", + "What are your suggestions on variables str0?\n", + "\n", + "Your response:\n", + "\n" + ] + } + ], + "source": [ + "print(user_content_blocks)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "161f1f09-e385-4bf5-ae28-d3c477873f12", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt\n", + " You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n", + "\n", + "Specifically, a problem will be composed of the following parts:\n", + "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n", + "- #Code: the code defined in the problem.\n", + "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. 
You can use the values in #Others to help infer how those functions work.\n", + "- #Variables: the input variables that you can change/tweak (trainable).\n", + "- #Inputs: the values of fixed inputs to the code, which CANNOT be changed (fixed).\n", + "- #Others: the intermediate values created through the code execution.\n", + "- #Outputs: the result of the code output.\n", + "- #Feedback: the feedback about the code's execution result.\n", + "- #Context: the context information that might be useful to solve the problem.\n", + "\n", + "In `#Variables`, `#Inputs`, `#Outputs`, and `#Others`, the format is:\n", + "\n", + "For variables we express as this:\n", + "\n", + "\n", + "\n", + "value\n", + "\n", + "\n", + "constraint_expression\n", + "\n", + "\n", + "\n", + "\n", + "If `data_type` is `code`, it means `value` is the source code of a python code, which may include docstring and definitions.\n", + "Output_format: Your output should be in the following XML or JSON format:\n", + "\n", + "{\n", + "\"reasoning\": ,\n", + "\"suggestion\": {\n", + ": ,\n", + ": ,\n", + "}\n", + "}\n", + "\n", + "In , explain the problem: 1. what the #Instruction means 2. what the #Feedback on #Outputs means to #Variables considering how #Variables are used in #Code and other values in #Documentation, #Inputs, #Others. 3. Reasoning about the suggested changes in #Variables (if needed) and the expected result.\n", + "\n", + "If you need to suggest a change in the values of #Variables, write down the suggested values in . Remember you can change only the values in #Variables, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.\n", + "\n", + "If no changes are needed, just output TERMINATE.\n", + "\n", + "Now you see problem instance:\n", + "\n", + "================================\n", + " \n", + "# Instruction\n", + "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n", + "\n", + "# Code\n", + "\n", + "\n", + "# Documentation\n", + "\n", + "\n", + "# Variables\n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + " \n", + "\n", + "# Inputs\n", + " \n", + "\n", + "# Others\n", + " \n", + "\n", + "# Outputs\n", + " \n", + "\n", + "# Context\n", + " The reference image looks like this: \n", + "\n", + "# Feedback\n", + " Please fill in the empty HTML page such that it looks like the target image. \n", + "================================\n", + "\n", + " \n", + "What are your suggestions on variables str0?\n", + "\n", + "Your response:\n", + " [+ \n", + "[IMAGE]\n", + "]\n", + "LLM response:\n", + " AssistantTurn(role='assistant', content={\n", + "\"reasoning\": \"The instruction requires modifying the HTML code represented by the variable `str0` so that the output matches the reference image. The feedback indicates that the current HTML is empty. The reference image displays a webpage with the text 'Welcome' and a button labeled 'Submit'. To recreate this, the HTML structure should include these elements.\",\n", + "\"suggestion\": {\n", + "\"str0\": \"

Welcome

\"\n", + "}\n", + "}, tool_calls=[], tool_results=[], reasoning=None, finish_reason='completed', prompt_tokens=1086, completion_tokens=105, model='gpt-4o-2024-08-06', timestamp='1767935267', metadata={'response_id': 'resp_bGl0ZWxsbTpjdXN0b21fbGxtX3Byb3ZpZGVyOm9wZW5haTttb2RlbF9pZDpOb25lO3Jlc3BvbnNlX2lkOnJlc3BfMDBlZjY2NWFjOGJlMWE3YzAwNjk2MDhkMjMyM2Y4ODE5MmFlZTJlYjUxZjRkMDVkODA='})\n" + ] + }, + { + "data": { + "text/plain": [ + "{: '

Welcome

'}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "optimizer.step(verbose=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "b99be6f4-6cd8-41d9-843a-71e0a0050c25", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "

Welcome

\n" + ] + } + ], + "source": [ + "print(html_param.data)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "640c2a4e-c371-4891-bb77-ccd91ddcba33", + "metadata": {}, + "outputs": [], + "source": [ + "def save_html_to_file(html, filename=\"new_layout.html\"):\n", + " with open(filename, \"w\") as f:\n", + " f.write(html)\n", + "\n", + "save_html_to_file(html_param.data)" + ] + }, + { + "cell_type": "markdown", + "id": "4dc5e13a-ab04-4ffe-bbae-3e0b49828ff2", + "metadata": {}, + "source": [ + "## Step 2: Using visual feedback to guide optimization / HTML generation\n", + "\n", + "In order to test if the LLM optimizer has true visual understanding, we want to test if it understands **visual feedback**. We use the following function to annotate the generated HTML" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "3f37431a-7976-4596-be78-b1995c7736ae", + "metadata": {}, + "outputs": [], + "source": [ + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.common.by import By\n", + "from PIL import Image, ImageDraw, ImageFont\n", + "\n", + "def annotate_screenshot(html_path, output_img):\n", + " options = Options()\n", + " options.add_argument(\"--headless=new\")\n", + " # ... your other options ...\n", + "\n", + " driver = webdriver.Chrome(options=options)\n", + " # Ensure you use the absolute path for the file URL\n", + " driver.get(\"file:///home/ubuntu/Trace/tests/\" + html_path)\n", + "\n", + " # 1. Find the button and get its location/size\n", + " button = driver.find_element(By.TAG_NAME, \"button\")\n", + " location = button.location # {'x': 100, 'y': 200}\n", + " size = button.size # {'height': 30, 'width': 100}\n", + "\n", + " # 2. Save the initial screenshot\n", + " driver.save_screenshot(output_img)\n", + " driver.quit()\n", + "\n", + " # 3. 
Use PIL to draw the annotation\n", + " img = Image.open(output_img)\n", + " draw = ImageDraw.Draw(img)\n", + " \n", + " # Calculate positions\n", + " # We'll point to the right side of the button\n", + " arrow_start = (location['x'] + size['width'] + 50, location['y'] + (size['height'] // 2))\n", + " arrow_end = (location['x'] + size['width'] + 5, location['y'] + (size['height'] // 2))\n", + " text_pos = (arrow_start[0] + 5, arrow_start[1] - 10)\n", + "\n", + " # Draw a red arrow (line)\n", + " draw.line([arrow_start, arrow_end], fill=\"red\", width=3)\n", + " # Draw a simple arrowhead\n", + " draw.polygon([arrow_end, (arrow_end[0]+10, arrow_end[1]-5), (arrow_end[0]+10, arrow_end[1]+5)], fill=\"red\")\n", + "\n", + " # Draw text\n", + " # Note: To use a specific font size, you'll need to point to a .ttf file on your system\n", + " try:\n", + " font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\", 20)\n", + " except:\n", + " font = ImageFont.load_default()\n", + "\n", + " draw.text(text_pos, \"Make this bigger\", fill=\"red\", font=font)\n", + "\n", + " # Save the final annotated image\n", + " img.save(output_img)\n", + "\n", + "annotate_screenshot(\"new_layout.html\", \"new_layout_feedback.png\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c37eb39b-830b-4c47-a335-bc48dfd7004c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG5AwwDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiqmo6pp+kW32nU7+1soCwXzbmZY1yegyxAzTdN1jS9ZhebS9Ss76JG2u9rOsoU9cEqTg0AXaKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK+Yfjz4StfD3iWx1jTrOKG0vlPmRomE85Tk8f7QI/I19PVwPxk8Of8JF8Ob8RpuubLF3Fgc/L94fipaonpaXYuGvu9zS0jXtF034ZQa/ZW8FppkVj9pEMKhVU4yVAHfdkfWuI+CvxJ1Hxbe6rput3Ilu1P2m2O0LiMnDIMAcA4/OvOvAGsXnivQtN+HAWQ28mo/aLiTsLVfnZPxYfrT/ABPC3wm+NcepWkJXT5HFwkaDAML5DoPod2B9K1bSqXez2/P+vRmSvyWW6/4b+vVHoll4W0LX/jlqsyaRZ/YtHt081ViAWW6c7tzDoSBn8RXb6t8SPB+h30llf65AlzHxJHGjymP/AHtgO38cVzvw6iubb4dat4llUjUNXe41I56gYOwfkM/jXkPwlfXLy18VQaXoEesS39uIZ5Zb1ITFv38/MDuyTnt0qNV7nZX+b6ffoXo/e7u3y/rU918b6jp2u/CfXb3T7mC8tHsZSksbBlyB+hB/EVxX7Nv/ACKWr/8AX8P/AEBao+HPCXiLwd8HvGtlr0CwLLbvLAizLIPuEMflJx0FY/wx1G60r4IeNL2zZkuI3bY69VJRRkfTOabai5tdl+YrOSgn3f5HsmpfEvwdpOovp93rcX2pM744IpJimOu7YpAx3zWvp/iXRdU0Rtas9SgfTV3Frlm2Iu3rktjGPevKv2cLG3Xwtquo7FN1LeeU0h+9tVQQM/Viai+OFjH4a+HEen6Z5kdtfaq006k9S25yPpuwce1E/cWvl+Ngj7z08/wO9T4r+B5LhYR4ggG5tokeORYyfTzCu39a1/EPi7Q/CtlDe6zem3tpjtSVYJJVJ69UU4/GvG/E1jbL+y9pDCNAYxDKpx/GznJ/8eNQXF9cX/7KytcuztFKsKs3XYs4C/kOPwolpzeTSCOvL5np7fF/wCllHdnxJb+W5ICiKQuMHHKBdw/EVcm+JfguDS49Rk8R2Itpc
7MPlzjr8gG79K8p8J6Tp9z+zXq081lbvOEuHEpjG/cpypz14pfgdpWn6j8OfEwvLK3n3SOhaSMMceUOMnn3on7vN5K4R15fNtHtHh7xToniuye80PUI7yFG2OVVlKn0KsAR+IrN1b4k+D9DvpLK/wBcgS5j4kjjR5TH/vbAdv44rxb4A3U1pZ+MZYSd0VkkiD/aAkxWR8JX1y8tfFUGl6BHrEt/biGeWW9SExb9/PzA7sk57dKJLWy7XBba97H0/pmqWGs2Ed9pt3Dd2sn3ZYWDKfb6+1W68v8Agr4S8ReDtH1Oy16BYFlnWWBFmWQfdwx+UnHQV6hTkknoKLbWoUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZI1ljaN1DI4KsD3Bp1FD1A8z+GnwsPgXXNa1Gea3m+0uY7PyySY4dxOGyBhj8vTPTrVn4r/DiXx/p+niymt4L60m/wBZOSAYm+8OATngEV6HRQ9bX6AnZt9yrZafBY6Vb6dEg+zwwrCq9toGMflXkemfCvxb4H8U3eo+CdW0prG5BDWuqCQDGchTsBzjscg17NRR9rm6gl7vL0OJ1nR/GWteBNR0q8m0KTU75DCTF5sMEKEYJBO9nP4LWF8NfhvrHhbQtX0HXxpd1p2o5LNbTyF8lQpUqyAYx3z+Fep0Ud/PQO3keN+Hfhv47+H9/ep4U1jRLrTLlg3k6osoII6HCDqBxkHn0rr9f8GX/jXwPJo/iW7s11B3Esc9hCyxwuOmA7EsOueRnPau1ooeqswWjujxK4+GHjzUfBVh4Nu9S0GPSbWYMbmJpmmZASQNpULxnpkdBzXS+KPh3ez/AAwtvBfhxrNYk2CSe9lZD8rbiwCo2SWz6Yr0iih6pp9dQWjTXQ8p0TwD4q0n4T6j4PY6NJc3G9Ipxcy7Ar/eLfus5HYDr7Unw88A+K/BHhrWtKlOi3LXgLwOt1KAHKhcN+66Y5yMntjuPV6KHre/VWBaW8tTyL4U/DPxF4D1PUDqcmk3NlfRBJDBPIXXbnGFaMAg555FVtM+Ffi3wP4pu9R8E6tpTWNyCGtdUEgGM5CnYDnHY5Br2ainfW4dGjK0FNfSxJ8RT6dJeM2QunxOkaD0y7Et9ePpWrRRSYBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc9aPfahY294dUuYfPjWXy4o4tq7hnA3ITxnHWuhrA0P/kX9N/69Yv/AEAUASfZr7/oNXv/AH7g/wDjdH2a+/6DV7/37g/+N153qWm2tmur65JZabrdmlzJLLeC6aG9tsHmNGwfukYADJ/jqX/ia/g0jxLcR3Zje0vII7Xei5RHWI4wRznc3XJ5oWoPRnYfZr7/AKDV7/37g/8AjdH2a+/6DV7/AN+4P/jded6fe6jpqXl9a6t+7/4SJ7VtP8pCrh5QDk437sHIwQMDoatWepeILt9IkbXpkW/1C6tZEW3hwiIZCpUlM7vkAycjnpQtUn/XT/MHpf8Arv8A5HdfZr7/AKDV7/37g/8AjdH2e+GSNZuyccbo4SM++Ix/OvMNR1nU7vSrsy3mLmLS79ftUcMayt5dwEB3beMgcgYHfAOMep2SPHYwI8zzMI1zJJjc3HU4AH5Chaq/9df8gejt/XT/ADL2m3RvdLtLtgA08KSEDoNyg/1qzXOpNqlv4CspdFtornUFtIPKilOFbhc55H8OT17V5B441nxjPcfZPECS2UD/AHbaIbYHwFJwQSHwdp5LYJ7dK56+IVJapv8AruevlWUSzCVozjH1evyju/y8z36KWOeFJoZFkikUMjocqwPIII6in15douq/EePQtPSx0DTpbNbaMQSPINzR7RtJ/ejkjHYVe/tf4o/9C5pf/fxf/j1CxCavyv7mOpk8ozcVVhp/fieh0V55/a/xR/6FzS/+/i//AB6j+1/ij/0Lml/9/F/+PU/rC/lf3Mj+yZ/8/af/AIHH/M9DrCuPGWgWutf2PPqAS/8AMWPyjE/3mxgbtu3uO9ReFrzxRd/a/wDhJdNtbLbs8jyGB353bs4duny+nWvLfFGkSaz8SNfhgLC4ig8+Lb1LJGhx+IyPrUVq8owUoLd9TfLsro1sRUo4iekY3vFprp11utdT2LWde0zw/apc6pdC3id9inYzEnBPRQT2qbTNUs9Z0+K/sJvOtpc7H2lc4JB4IB6g14trWrXHju2NywZbfSNN8ybsGnbAP5nB+imux8K3BtfhHZyrrEeksGkxdvEsgX983G09SelKGJc5tJe7a5tickjQwsJSb9q5KLW6V02tEm72s9L77HolZk3iHSoNdh0WS626jMu5IfLY5GCfvYx2PevMJviFe6TqFo0HieHXrZnxPE2n/Zyi8cg4Ge/f8Kl8YatBofxcstSuQzRW9qGKoMkna4AH4kUSxUbXj3V/n6CpZBV9pyVdbxk42uruPS0kn17ejPXaK4bw/q2v3GmXninWboRaYIXnt9PhRDlACclsbu3HPX06Vx4+I2q3lpc37eIrWwnQkw6aLAyCQDoDJtOCenX8qqWKhFJvqYUsjxFWcowafLZNq7V300T26vZdz2mq97qFlp0ImvruC1iZtoeeQIpPXGSevB/Ksnwf4gbxN4cg1GSJY5iSkqr03A9vY8H8a5D4yQXX9i2dx9s/0P7QqG18ocybXO/f16ZGOnOaupW5aXtI6nPhMvdXHLB1nyu9n8v69Dv9S1nT9I006jfXIjtBt/ehS456fdBJqezu4L+yhu7Z/MgmQSRvgjcpGQcHmvM/FNjqtp8M531HWf7QjkFsYY/sqxeSMjjK/e6jr6VVudY8U+HfBGh6xBf2y2ZSKEWXkA5XaSGZzychegxjNZPEuMveWlr/ANanbDJo1aKdKonJzcVvZ6J6e7e/roela14g0vw9bxz6pdfZ4pH2I3ls+TjOPlBrSVgyhgcg
jIrzj4m6vKnhXSb2CK3P2iRX2z28cwAKE9HUgH3qHxJ4w1Oz8WLpJ1aPRLFLdHW6Nn55kJUHpjpnI49DVSxCjJp7afiZ0cnnXowlT+J8173fwtLRJN9el7+R6dVLVdWsdEsHvtRn8m2QgM+xmwScDhQTWd4Uv7jUNNkkn1ay1QCTCT2qbOMDh17N/TFZPxU/5EO6/wCusX/oQrSdS1NzXY48Pg1LGww1R7ySdvP1X5o62zu4L+yhu7Z/MgmQSRvgjcpGQcHmqeteINL8PW8c+qXX2eKR9iN5bPk4zj5Qa86fUvFeheA9J1u3v7VbKKKJDZeQDlMABi55JPHAxjNWviF4ge48FaLqltFb/wClurlLi3jmC5QkjDqRkHuKyeJtBu2qVzvp5K5YmEOZOEpOOj1TXR6b+iaPTVYMoYHIIyKWvPNU8Sa3qHja38MaJdRWIjiV57hoVkP3A5wDxjBAx6nrTNJ8Q+Iv+FnvoGo3kT28UZykUSqr/uwwbONwznOM8HIq/rEb2s97fM5/7HrcjnzRXu89ru/L91tfX1sejUV51H4h8Q+JfG+oaVo99Dp9lpxZXZ4BKZCrbTnPqc9COBUXhrxjrD674lTWp1eDTIZZPIiRQFKNyFOMkcYGSaX1mN7WG8mrqDlzK6SbV3fXbpa/zPSqK8WHxG1W8tLm/bxFa2E6EmHTRYGQSAdAZNpwT06/lXS3/ja/u/hf/b9ky2l+sqxOVUMA24A4DA8EflmlHF05Xt0VzSrkGLpOKlb3pKPXRv1Wq81dHolFcl4TPiTU7TTtX1HVohbSwBjZx2y5kyvDM/Yk/NgDHautreEuZXseXiKHsKjpuSbW9r79tUvw08woooqjAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuasLkWGnWtnPb3olgiWJttnK4yoxwVUgjj1rpaKAORltdAnvxfy6CZL0HIuH0eQyA/wC95eadcW+hXd011c6I01yyhWmk0iRnIHQEmPOK6yigDk1t9CS9F6mhst2rFhONIkEgJ5J3eXnJyfzqZJNLj8rZpk6+S7SR7dLlGxmzuYfJwTk5I65NdNRQByuzRtrL/Y8m10dGH9ky/MrnLg/u+jHkjuetWLa6s7S2S2s7C7ihjXEcMWnyoqgdgNgA/SuiooAp6TbvaaPY20oxJDbxxsPcKAamurW3vbd7e7t4p4HxujlQOrYORkHjqAamooGm07ohtbWGys4LS3TZBBGsca5J2qowBk89BU1FFANtu7CiiigQVxtn4Sv7f4lXniN5rY2c8exUDN5gO1RyMY/hPeuyoqJQUrX6anRQxNSgpqH2lyv0ZzF94NsY/DeraboltDay36/MXdipb36kDrwKxZvh9eXfw5svD013BHeWszTK6FmjYlnIB4B6P6da9BoqHQg+nSx0U80xVNJKV2pKV3q7pW6+R5jrfgTxRrul6dbXN9pSmy+VIoldE24AznaSWOPQAfjWtrXgebWvHltq9x9kk0xYPKmhdm3t8rDgYx1Yd+1dxRS+rw6+X4Gv9s4rTlsrKSVlb4tzhtB8Hatok19pTXdvd+G7pXURSSMJowwwcfLjvg889eOlRaP4X8W+F4pbDR9R0uewaQun2yN96Z9AvH6131FNUIK1uhMs2xEnLnSfNa90rNrr6+fUp6ZDfQafHHqV0lzdjJeWOPYpySQAPYYH4Vj+OPDMvivQBYwTpDNHMsyNIDtJAIwcezGukoq5QUo8r2OSlialKsq8NJJ320+7Y4TUPDfijWfBs+i6hNpAmBhFu8LSAFVPO8kdeB0HrT9f8G6jqngHS9BgmtVurXyt7uzBDtQqcEKT1PpXcUVDoRd79VY6o5rXi4uNlyy5lp1tY4fxf4N1HX/C+laZazWqT2mzzGlZgpwm3jCk9far2taR4hupsWjaJdWm1QtvqVszeWQADgjrk8811VFN0Y3b7/oRHMqyjGOjUb2uv5tX+RyfgbwjN4Vtr03NxFLcXcgdlhUiNAM4C5+p/SrnjTQrrxH4Zn02zkhSaR0YNMSF4YE9AT+ldBRTVKKh7NbESx9aWK+tyfv3T+7Y83u/BHii88PWPh6TVdPGmxBPNYI/mgj+EdmUHp909M1oeL/A9xrHhnTNH0mWCNbJlwbhiMqFK9gea7iio+rws13Oj+2MSpxmrLlbeiVrvdvuebeJdMTS/GlrrNhrmm2GpmAebDqDFY5FA2ZB78DGPbNZfhHz9V+LF3qSXK30cURM11Gm2MsUC4X2zwPULmvVLzTrHUFC3tnb3Kr0E0SuB+Yp9taW1lCIbS3igiHOyJAq/kKh4e8730vc6Y5wo4Z03G8nHku7aL7r6dF+Z5tfWa+HfHWoahpPiHSLOS4Gbq21FipUthiVA+9zyMEdcVT+HOn/ANra34nuJWkubC6V4GuGXb529iSfYkc47ZFen3mk6bqDK17p9rcsvQzQq5H5irEMEVvEsUESRRrwqIoUD6AULD+/foEs5vhnTSfO0k3p09Fd+V9vM4XR/C/i3wvFLYaPqOlz2DSF0+2RvvTPoF4/WtLxD4d1fXPBLaTNe202ou6u0zKY4zh84wATwOPfFdZRWioRUXHocc8zrSqxrNLmTTvZXbXcz9BsJdL0DT7CdkaW3t0icoSVJAAOM44rQoorVKysjhqTc5uct3qFFFFMgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK
[... remainder of base64 image/jpeg data and full image/png data omitted: rendering of new_layout_feedback.png, the screenshot annotated with an arrow and red text ...]", + "text/plain": [ + "" + ] + }, + "execution_count": 11, + 
"metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ImageContent(\"new_layout_feedback.png\").show()" + ] + }, + { + "cell_type": "markdown", + "id": "dc95795b-914f-426f-9779-6b28bb092a92", + "metadata": {}, + "source": [ + "We essentially use an arrow and red text to indicate the feedback! Now let's see how to pass this in!" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "5ddddcbb-e6bf-41e0-91db-45486c3cdaa5", + "metadata": {}, + "outputs": [], + "source": [ + "# 2nd optimization step!\n", + "\n", + "optimizer.zero_feedback()\n", + "\n", + "feedback_text = \"\"\"Here’s the annotated visual feedback, please update the spec according to the annotation on the image.\"\"\"\n", + "\n", + "optimizer.backward(html_param, feedback=Content(feedback_text, ImageContent(\"./new_layout_feedback.png\")))" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "9652055f-97e8-4c50-a602-b5f19e8e2f17", + "metadata": {}, + "outputs": [], + "source": [ + "summary = optimizer.summarize()\n", + "system_prompt, user_content_blocks = optimizer.construct_prompt(summary)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "d1491b35-e9e5-4205-afaa-b1857c74d2ab", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Now you see problem instance:\n", + "\n", + "================================\n", + " \n", + "# Instruction\n", + "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n", + "\n", + "# Code\n", + "\n", + "\n", + "# Documentation\n", + "\n", + "\n", + "# Variables\n", + " \n", + "\n", + "

Welcome

\n", + "
\n", + "
\n", + "\n", + " \n", + "\n", + "# Inputs\n", + " \n", + "\n", + "# Others\n", + " \n", + "\n", + "# Outputs\n", + " \n", + "\n", + "# Context\n", + " The reference image looks like this: \n", + "[IMAGE]\n", + " \n", + "\n", + "# Feedback\n", + " Here’s the annotated visual feedback, please update the spec according to the annotation on the image. \n", + "[IMAGE]\n", + " \n", + "================================\n", + "\n", + " \n", + "What are your suggestions on variables str0?\n", + "\n", + "Your response:\n", + "\n" + ] + } + ], + "source": [ + "print(user_content_blocks)" + ] + }, + { + "cell_type": "markdown", + "id": "ba6c3673-872d-4b50-bb2e-e6b7ac2f254d", + "metadata": {}, + "source": [ + "Now you can see we have two `[IMAGE]` placeholders because we have two images. Note that with `memory_size=5`, it means we are sending in ALL past history to the LLM. So since this is the 2nd optimization step, we are sending in **3** images in total." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "d3bc8e3b-6465-4ad9-81f6-bc5c7e286528", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt\n", + " You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n", + "\n", + "Specifically, a problem will be composed of the following parts:\n", + "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n", + "- #Code: the code defined in the problem.\n", + "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. You can use the values in #Others to help infer how those functions work.\n", + "- #Variables: the input variables that you can change/tweak (trainable).\n", + "- #Inputs: the values of fixed inputs to the code, which CANNOT be changed (fixed).\n", + "- #Others: the intermediate values created through the code execution.\n", + "- #Outputs: the result of the code output.\n", + "- #Feedback: the feedback about the code's execution result.\n", + "- #Context: the context information that might be useful to solve the problem.\n", + "\n", + "In `#Variables`, `#Inputs`, `#Outputs`, and `#Others`, the format is:\n", + "\n", + "For variables we express as this:\n", + "\n", + "\n", + "\n", + "value\n", + "\n", + "\n", + "constraint_expression\n", + "\n", + "\n", + "\n", + "\n", + "If `data_type` is `code`, it means `value` is the source code of a python code, which may include docstring and definitions.\n", + "Output_format: Your output should be in the following XML or JSON format:\n", + "\n", + "{\n", + "\"reasoning\": ,\n", + "\"suggestion\": {\n", + ": ,\n", + ": ,\n", + "}\n", + "}\n", + "\n", + "In , explain the problem: 1. what the #Instruction means 2. what the #Feedback on #Outputs means to #Variables considering how #Variables are used in #Code and other values in #Documentation, #Inputs, #Others. 3. Reasoning about the suggested changes in #Variables (if needed) and the expected result.\n", + "\n", + "If you need to suggest a change in the values of #Variables, write down the suggested values in . Remember you can change only the values in #Variables, not others. 
When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.\n", + "\n", + "If no changes are needed, just output TERMINATE.\n", + "\n", + "Now you see problem instance:\n", + "\n", + "================================\n", + " \n", + "# Instruction\n", + "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n", + "\n", + "# Code\n", + "\n", + "\n", + "# Documentation\n", + "\n", + "\n", + "# Variables\n", + " \n", + "\n", + "

Welcome

\n", + "
\n", + "
\n", + "\n", + " \n", + "\n", + "# Inputs\n", + " \n", + "\n", + "# Others\n", + " \n", + "\n", + "# Outputs\n", + " \n", + "\n", + "# Context\n", + " The reference image looks like this: \n", + "\n", + "# Feedback\n", + " Here’s the annotated visual feedback, please update the spec according to the annotation on the image.\n", + "================================\n", + "\n", + " \n", + "What are your suggestions on variables str0?\n", + "\n", + "Your response:\n", + " [+ \n", + "[IMAGE]\n", + "]\n", + "LLM response:\n", + " AssistantTurn(role='assistant', content={\n", + "\"reasoning\": \"The feedback suggests that the 'Submit' button should be made larger. To achieve this, we can apply CSS styling to increase the button's size. This can be done using inline CSS styles to specify a larger width and height for the button.\",\n", + "\"suggestion\": {\n", + "\"str0\": \"

Welcome

\"\n", + "}\n", + "}, tool_calls=[], tool_results=[], reasoning=None, finish_reason='completed', prompt_tokens=2196, completion_tokens=102, model='gpt-4o-2024-08-06', timestamp='1767935521', metadata={'response_id': 'resp_bGl0ZWxsbTpjdXN0b21fbGxtX3Byb3ZpZGVyOm9wZW5haTttb2RlbF9pZDpOb25lO3Jlc3BvbnNlX2lkOnJlc3BfMGYwMWJjOWY0ZGE2NTg3NjAwNjk2MDhlMjE1ZDU0ODE5M2IyYjdmOWU0ODEzOWJkYmQ='})\n" + ] + }, + { + "data": { + "text/plain": [ + "{: \"

Welcome

\"}" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "optimizer.step(verbose=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "22e3d487-8b69-4588-b64e-59715de9dc18", + "metadata": {}, + "outputs": [], + "source": [ + "save_html_to_file(html_param.data, \"new_layout_rev2.html\")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "12b71a5b-3c50-4a4b-9b9b-401e4ac9b3fe", + "metadata": {}, + "outputs": [], + "source": [ + "take_screenshot(\"new_layout_rev2.html\", \"new_layout_rev2.png\")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "a75f561e-4b2f-4d2d-acbf-a68aed3df410", + "metadata": {}, + "outputs": [ + { + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG5AwwDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiqmo6pp+kW32nU7+1soCwXzbmZY1yegyxAzTdN1jS9ZhebS9Ss76JG2u9rOsoU9cEqTg0AXaKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK+Yfjz4StfD3iWx1jTrOKG0vlPmRomE85Tk8f7QI/I19PVwPxk8Of8JF8Ob8RpuubLF3Fgc/L94fipaonpaXYuGvu9zS0jXtF034ZQa/ZW8FppkVj9pEMKhVU4yVAHfdkfWuI+CvxJ1Hxbe6rput3Ilu1P2m2O0LiMnDIMAcA4/OvOvAGsXnivQtN+HAWQ28mo/aLiTsLVfnZPxYfrT/ABPC3wm+NcepWkJXT5HFwkaDAML5DoPod2B9K1bSqXez2/P+vRmSvyWW6/4b+vVHoll4W0LX/jlqsyaRZ/YtHt081ViAWW6c7tzDoSBn8RXb6t8SPB+h30llf65AlzHxJHGjymP/AHtgO38cVzvw6iubb4dat4llUjUNXe41I56gYOwfkM/jXkPwlfXLy18VQaXoEesS39uIZ5Zb1ITFv38/MDuyTnt0qNV7nZX+b6ffoXo/e7u3y/rU918b6jp2u/CfXb3T7mC8tHsZSksbBlyB+hB/EVxX7Nv/ACKWr/8AX8P/AEBao+HPCXiLwd8HvGtlr0CwLLbvLAizLIPuEMflJx0FY/wx1G60r4IeNL2zZkuI3bY69VJRRkfTOabai5tdl+YrOSgn3f5HsmpfEvwdpOovp93rcX2pM744IpJimOu7YpAx3zWvp/iXRdU0Rtas9SgfTV3Frlm2Iu3rktjGPevKv2cLG3Xwtquo7FN1LeeU0h+9tVQQM/Viai+OFjH4a+HEen6Z5kdtfaq006k9S25yPpuwce1E/cWvl+Ngj7z08/wO9T4r+B5LhYR4ggG5tokeORYyfTzCu39a1/EPi7Q/CtlDe6zem3tpjtSVYJJVJ69UU4/GvG/E1jbL+y9pDCNAYxDKpx/GznJ/8eNQXF9cX/7KytcuztFKsKs3XYs4C/kOPwolpzeTSCOvL5np7fF/wCllHdnxJb+W5ICiKQuMHHKBdw/EVcm+JfguDS49Rk8R2Itpc7MPlzjr8gG79K8p8J6Tp9z+zXq081lbvOEuHEpjG/cpypz14pfgdpWn6j8OfEwvLK3n3SOhaSMMceUOMnn3on7vN5K4R15fNtHtHh7xToniuye80PUI7yFG2OVVlKn0KsAR+IrN1b4k+D9DvpLK/wBcgS5j4kjjR5TH/vbAdv44rxb4A3U1pZ+MZYSd0VkkiD/aAkxWR8JX1y8tfFUGl6BHrEt/biGeWW9SExb9/PzA7sk57dKJLWy7XBba97H0/pmqWGs2Ed9pt3Dd2sn3ZYWDKfb6+1W68v8Agr4S8ReDtH1Oy16BYFlnWWBFmWQfdwx+UnHQV6hTkknoKLbWoUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZI1ljaN1DI4KsD3Bp1FD1A8z+GnwsPgXXNa1Gea3m+0uY7PyySY4dxOGyBhj8vTPTrVn4r/DiXx/p+niymt4L60m/wBZO
SAYm+8OATngEV6HRQ9bX6AnZt9yrZafBY6Vb6dEg+zwwrCq9toGMflXkemfCvxb4H8U3eo+CdW0prG5BDWuqCQDGchTsBzjscg17NRR9rm6gl7vL0OJ1nR/GWteBNR0q8m0KTU75DCTF5sMEKEYJBO9nP4LWF8NfhvrHhbQtX0HXxpd1p2o5LNbTyF8lQpUqyAYx3z+Fep0Ud/PQO3keN+Hfhv47+H9/ep4U1jRLrTLlg3k6osoII6HCDqBxkHn0rr9f8GX/jXwPJo/iW7s11B3Esc9hCyxwuOmA7EsOueRnPau1ooeqswWjujxK4+GHjzUfBVh4Nu9S0GPSbWYMbmJpmmZASQNpULxnpkdBzXS+KPh3ez/AAwtvBfhxrNYk2CSe9lZD8rbiwCo2SWz6Yr0iih6pp9dQWjTXQ8p0TwD4q0n4T6j4PY6NJc3G9Ipxcy7Ar/eLfus5HYDr7Unw88A+K/BHhrWtKlOi3LXgLwOt1KAHKhcN+66Y5yMntjuPV6KHre/VWBaW8tTyL4U/DPxF4D1PUDqcmk3NlfRBJDBPIXXbnGFaMAg555FVtM+Ffi3wP4pu9R8E6tpTWNyCGtdUEgGM5CnYDnHY5Br2ainfW4dGjK0FNfSxJ8RT6dJeM2QunxOkaD0y7Et9ePpWrRRSYBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc9aPfahY294dUuYfPjWXy4o4tq7hnA3ITxnHWuhrA0P/kX9N/69Yv/AEAUASfZr7/oNXv/AH7g/wDjdH2a+/6DV7/37g/+N1booAqfZr7/AKDV7/37g/8AjdH2a+/6DV7/AN+4P/jdW6KAKn2a+/6DV7/37g/+N0fZr7/oNXv/AH7g/wDjdW6KAKn2a+/6DV7/AN+4P/jdH2a+/wCg1e/9+4P/AI3VuigCp9mvv+g1e/8AfuD/AON0fZr7/oNXv/fuD/43VuigCp9mvv8AoNXv/fuD/wCN0fZr7/oNXv8A37g/+N1booAqfZr7/oNXv/fuD/43UN299p9jcXg1S5m8iNpfLlji2ttGcHagPOMda0aoa5/yL+pf9esv/oBoA36KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArA0P/kX9N/69Yv/AEAVv1gaH/yL+m/9esX/AKAKAL9FFFABRRQehoAz017R5NQOnpq1i16Dg2y3CGQH/dzmtCvLY7uy02CC00y903VYFvA66JeWuL2NjJkkYOcqSTlk6D73ercmvyRxNYtqcv8AaC+IxG0PnHzFhMvAIzkIVI9ucULW39dv8wel/wCu/wDkeh29xBdwrNbzRzRNna8bBlODg8j3qSvI/Dd0kEGhrp2r3cmoSXU6Xdl9pYokH705MWcKAQpDYBJPU1d0S2uLweGvP1fV3GoaXNNc/wDEwlG912bSMN8pG4/dxnvmjz/ra/6B1t/XY9JN3bCd4DcRCZFVnjLjcoJwCR2BIIH0qavGrnULnUfDzPqOoXLRJYabNIxuHQAm4YO5IIxwOT7A9q9hgaNreNoXEkRUFHD7twxwc9/rTsHUkooopAFUNc/5F/Uv+vWX/wBANX6oa5/yL+pf9esv/oBoA36KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArA0P/kX9N/69Yv/AEAVv1gaH/yL+m/9esX/AKAKAL9FFFABRRRQAUUUUAFFFFABRRRQAUUUUAFUNc/5F/Uv+vWX/wBANX6oa5/yL+pf9esv/oBoA36KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArGj0m/toUgtb+2WCNdkYltCzBR0BIcA4GB0rZooAyPsGr/9BCy/8An/APjtH2DV/wDoIWX/AIBP/wDHa16KAMj7Bq//AEELL/wCf/47R9g1f/oIWX/gE/8A8drXooAyPsGr/wDQQsv/AACf/wCO0fYNX/6CFl/4BP8A/Ha16KAMj7Bq/wD0ELL/AMAn/wDjtH2DV/8AoIWX/gE//wAdrXooAyPsGr/9BCy/8An/APjtH2DV/wDoIWX/AIBP/wDHa16KAMj7Bq//AEELL/wCf/47R9g1f/oIWX/gE/8A8drXooAyPsGr/wDQQsv/AACf/wCO02TSb+5heC6v7ZoJF2SCK0KsVPUAlyBkZHStmigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigA
ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//Z", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAG5CAIAAACshSTDAAAlrklEQVR4Ae3dT2ycdZon8MpqJFeLg90X7DlMHA7gHBbiQ5N4RDN41GlhCVp4p5Hw0EIxwyGGS6JcQvqycJmES4c9LOQCyYhtkpaaISt1lKyGFp4lK0LTUsLkQGgOMVzi9CX2SijOKfOWXX8TO3HF9TR+yp86xPXnfZ/3+X0eS/nqrddVm27evFlyI0CAAAECBAgQaBX4L60PPSJAgAABAgQIEKgICEl+DwgQIECAAAECywgIScugeIoAAQIECBAgICT5HSBAgAABAgQILCMgJC2D4ikCBAgQIECAgJDkd4AAAQIECBAgsIyAkLQMiqcIECBAgAABAkKS3wECBAgQIECAwDICQtIyKJ4iQIAAAQIECAhJfgcIECBAgAABAssICEnLoHiKAAECBAgQICAk+R0gQIAAAQIECCwjICQtg+IpAgQIECBAgICQ5HeAAAECBAgQILCMwPoOSQsXDo1sar1tmZqeW2YdniJAgAABAgQIdFRgpZA0N713a2s6aXq0de/qg8rsycmBpl2X7g6/em5hNcsoD+898enR3dt6VrOxbQgQIECAAAECnRNYKST1jR6aPv/h4V2t+aT/iT1HT396+dyh0b7bWpibOTc9fe7C7C3pZ2D8yIUvPz19/OAzg5Vderbtevv0+TOvjZRvK7DsE+UtI2Njw33LvuZJAgQIECBAgECYwEohqVQqDwyPT722d7S36dhzpS3DoyNb+m5POHPTr4397d///d+OTp64dEtMKg9sHRkb3bq4T+/Y3temxoYHbi/QdBR3CRAgQIAAAQLfu8DKIanSWnlgdHK0v9HkjXMnztyagRZfnT137ORXlXvz00dOLrPF7IUz574plXpHxkcHGuXcI0CAAAECBAisV4E7h6RSecvoREtK+uzEiQtzty1m5syRk0UGqtxufHbs2Lm5xbuNf+YunZmuZSQnkRou7hEgQIAAAQLrVuAuIalUGmhNSaUvTt6WkhYunTx2Zr6+xK9OHpuerT+q3Jm7dHK6OM/UM1ycR5KRWmg8IECAAAECBNapwF1DUmlgZGJs8Zrr6gq+Onmi9UzR3IVjx/79RtP6viky08xC44lKRvqiyEgjE6NbWjPSwsz0sVenJsZGtg6UN23qKy54Gp86dOJCa8RqFFrp3sLchZOHJseGt1SqbNpUHtiydWR0bHxi6tVbAt3C7LkTh/ZOjI8Ob+krNuwrthubmDp0bLq53dLsiYlb/iCv+Gu8mUvTx4pdq61uGtg6Or73SHW/hdkLJ4+8Ojm+tIzKNVgTh1oEao13aL21cn4SIECAAAECgQI37367cnp3c0oqDe46faWx17XTu4du7a/nicPnr9c2uX7+4I7ib/iL576sP1e8du3828/Vy/bveOaZJ+qPenfs/7hxhCsf7mq6LGpw98fXapUrP6+dP9r0J3g9g088t2vXc08M9S621PvM8cu1ja9/eXzXUNHH0gtDTy5uVnvc03zE61c+fnvPk/Vuqrss+2PwmT17ntuxdLDWDVoIlhpd5XprDftJgAABAgQIfJ8CpdUc/NYcNPjch/UMc/n4c5UI0ztUzSXVqLBt/6fXlkpf//LwE0UW6dnRlJtu3rxyes9QLVX07DhYiVTXvzz6ZD1uNPa/eYeQdP38241dSkO7P7xci2HXz1eO2ghJ1z7dX0lqS7fBXUv9X798/JlK84u3/iJQ1fYustfHjf6Kl3u3Pblrz+Gjx48f3v1EfY/F3XqGnnhuz8G3jx8/enBXa1oa2tMU59pY72omYhsCBAgQIEAgWmBVIenWyFDqf656hub6l4sppTgRc/r0/m1LaWPp38HdS6eb6lssBqHqclryUDUjVVLS4eYgUztdtWJIqsSveqoqzm81kltxmMr5q95K8Fk8ZDXKVVtrbHllKeItPT+0+/S1xa2Lf1pDUrHeeios2hlsWuhQPQ3evH65KeW1nDpra721FvwkQIAAAQIEvk+Bu1+TVAkEfcMTY80R6OqZE4uX41Qu2Z6eL642mpw
cHZ2YrJwxqt2KC5OmZ4oHs+dOnisu6h4eH1v6pKTF16tPLm1b7qt+8FK53Nf4BKZvLrReKLS0bfO/i9dC1a8XHxybGBloerm8ZWxyfHRk6ULx4pqh6au1F3sGiiuSqg/6tg5vqTf91ZkT5+5+OdTA8OhwI5qVFuYWaoWLT5Ya21qvVroxt1B7qSPrrR3GTwIECBAgQOAvIbC6kFTq29qakuanTxRXJs+dO3bssxul/rGpIgGVt45PjjWlh6tnjpy5VHwM98kLRZDZNlr9NMnFNc3NXLhUTzelcl+5XF1quX6v+JO4uUb8WE6iuAi6cjl49da7tfiMy9qDxZ99w1PHTry6+LFMRReX6hmpOF5fY8vmXFaavXTbB4a3lFx60LJL6+tF5dpSFl+ohqSOrLf1SB4RIECAAAECwQJ/tcr6xbmkiR1vflFEoqVbJSVN912ofILk4NjU6Jbi2cVzN4P/+1++qW1y7six6fLMuSKebBsbbzqPVJx9ac4/V3/z3/76N9V9mn4Up2hqJ2Kanm3cnZu9NNt4VEk+Lfmk8VJxrzhg0+MiidU2LU5e1e4WG9yYnak01vRM015ruNuR9a7h+HYlQIAAAQIE2hdYbUgqlbeOjQ+/9lk9Jd3490N7Z+aKQLRtYrL2Plfx+dzjQ//yP4rgtHi78cWRva8tVDLS6PjWvuqTiz8WWvJP7+C24iMAml8v7hfvXTXnl1terTxsCVF3OL1TZKSW4xWnqxoHW3pQO63VuuEyx7zHp1qOX7rH9d7jse1GgAABAgQI3JPAqkNSJSVNjLz2WeMTka5+VUSknh0TEyN9tUP3FRcn7ThyoJ6k5r+pnFYaujUjlRpvsBUvD469eebE+ECtxmp/Nr1LV9mlNYe0FmmJRa2RqfV8VeuGrUXW8qgT613L8e1LgAABAgQItC2wymuSKnUXU1LjuuTFQ/WOTrW8kVa5MKnlK3GLrYaKU1B9i1vX/unrG2h6ouWMUG2Tu/8sPrOxKVjNzc62nJ1q2b+4NHygqfFiw4Xq65VTR7X7ReIb2HqXs1ctZVf9oCPrXfXRbEiAAAECBAh0QqCNkLT0RW5Nl2aXqpdsN/dRJKnJsf7mZ4rzSLdkpKLQyHBjm/mZCzONoNK86x3vV4oM1reYvzC9zHfKVV8u3rprClTzs7Nz9f3mZmZv1B8Um20p1x917k5H1tu5dlQiQIAAAQIEViHQTkiqXJo9MdKUkoaK00YDtx5kS+XCpMaTg7dnpOIL4UbGm74294sTb568h5jUNzw51fhcpasnj5y8tFLWGii+Na4RqGYvXJqrNjh76dJMvddlW62/upY7HVnvWhqwLwECBAgQINCuQFshqbiaenSykW62jU82LkdqHHhgZHJ8W+3h4G3vtS2+MjA6NVnfpvTNbybHp441vrJtYW525tLMXK3GSj+LK6Bem6znsfn/8/Lo+Ksn6/mn8jdtMzPVKkXfE/VAdfXcmaXPQ1ooPqCg8iFOi7eeHVNTtSvQVzriPT/fkfXe89HtSIAAAQIECNyDQLufZFn/7Opbv4utqVD1m0iKblq/5q1pk8rnax/fva1nxY6rn9h97fKnR1s269mx5/inje8PqXwD3NAdqjQ+XLv41Otd9UTVu/TdbYO1PXu27T7e9M1y1778cH/Th3kXf5+3+/j5K4tfWnLt/PE99bhVdF/pZ+ml61c+bTpC8VL/kwdPf3mttujVrbe2tZ8ECBAgQIDA9yywuq8laW7yyodLX9b2zNHLzU+33C++oGPxS9Fu+aqQlm0qD65fPn3wuR31qFJJTD39257cffjDpXDR+uUglZeXbk3f7LZY5ePDu2rfaVvdoijzzJ6jHy8Fm/pxKwnnydYvmatsuP/DpoBUfK9c83eV1A5ZKr7i5Mr1K6dbvpOk+mLvk0cvX2v9TpLabq0Cd1tvvVF3CBAgQIAAge9bYFPRQO0/dD8JECBAgAABAgSqAu1dk4SNAAECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaE/irO2z+3nvvfffdd5cvX77DNl66N4EHHnjgvvvue+GFF+5td3sRIECAAAEC0QKbbt68uewxioT0gx/8YOfOncu+6sm1C3z00UfXr1+Xk9YuqQIBAgQIEIgQWPHttuIckoQUIV6vWfAWyPWH7hAgQIAAAQLrSmDFkORdtr/AnCD/BZAdggABAgQI3JvAiiHp3srZiwABAgQIECDQHQJCUnfM0SoIECBAgACBDgsISR0GVY4AAQIECBDoDgEhqTvmaBUECBAgQIBAhwWEpA6DKkeAAAECBAh0h4CQ1B1ztAoCBAgQIECgwwJCUodBlSNAgAABAgS6Q0BI6o45WgUBAgQIECDQYQEhqcOgyhEgQIAAAQLdISAkdcccrYIAAQIECBDosICQ1GFQ5QgQIECAAIHuEBCSumOOVkGAAAECBAh0WEBI6jCocgQIECBAgEB3CAhJ3TFHqyBAgAABAgQ6LCAkdRhUOQIECBAgQKA7BISk7pijVRAgQIAAAQIdFhCSOgyqHAECBAgQINAdAusuJC1cPfvuvl88/fgjgz/84cAjjz/90oF3z367sCL21VOvbH9839n5FTdY/oWFi28//ciz7/6pKLzwp48+OHt15SMsX8CzBAgQIECAQJcLrLeQNH/2jX2vf977/K9+94eZK1/97q2X77/4+i9eOXqxwyGm/PDLv/uP3/7TQ+XSwten3jzy+69vdPmcLY8AAQIECBBoU2DdhaQ/X13of/Qfnnp0c39vuXfzw08d/PUnn7zz4sPl0vzZfY9vf+nU1cUFNp0JKpVuzP/+0LPbizNPg488ve/9i8VZpYU/vf309qf3HXjp6Z9uHxp85KevvPvB4VeK+48MbX/2wKnixFR1/4tff7DvlTf+3x+Lc1evvF85reRGgAABAgQIEFgSWG8h6f4f/8OPe0798pXX3//o7OcXK++z9W7e3F++07gWvj77eekf/+cnlz5758XyqdcPfFBJOz2lqxf/dP/Ur//tD//3nZ3z//rLI///+eL+J+88NX
/q7VNf1+JQz9/8/L/vferB//r8r3791vPFaSU3AgQIECBAgEBVYL2FpPLmn7/1u/811X/x6Ov7nv27bX89+PgvDiyeHLrDxP7mRy++/PPi1NNDO59/8cc93/7x28UrlHof+smPH+4tlfofeuzB+zf/6CeV+70P/ujB8vy3895cuwOnlwgQIECAAIGKwHoLSZWeeh/9p1/99t8++cM312Y+e2vnjVMHfnGg+i5b5dXbbuVyf28RgCq3cm9xb76agXrKS6eGeirPl3uKH8Vt6d/Fu/4hQIAAAQIECKwssM5C0sK3Zz849Xn9b816H3pq/z/vffjGxT9W3nhrDjgLf56/UXvTbGF+vnq3uDffiEwrr9orBAgQIECAAIG7CKyzkHRj/vOj+17a934tJxWh6fjxr3sffWxzuef+B/sXqu+lzV88+3n9cwEW/nz2+L8uXa79+/c/Lz342EPNaeou6y/OMhVXfnv77a5MNiBAgAABAhtNYJ2FpN6HX37rnZf7f//Ln20b+GHx52rbXzq68I/v/Pafd/aXyg899eLOhXd/9vjjP332wNneRx/tLy1lm/KDP3nsz4ee3v7I9mffnN/56qtPbS5yz6pvvQ8+tnn+6M8e+enrbX/Y0qqPYUMCBAgQIEAgn8CmmzdvLtv1/v37Dxw4sOxLnuyUwMGDB994441OVVOHAAECBAgQ6KDAOjuT1MGVKUWAAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SuwYkh64IEHunfV62VlkNfLJPRBgAABAgRuE1gxJN13330fffTRbdt7omMCBW+B3LFyChEgQIAAAQIdFdh08+bNlQq+995733333eXLl1fawPP3LFCcQyoS0gsvvHDPFexIgAABAgQIhArcKSSFHlhxAgQIECBAgMB6Fljx7bb13LTeCBAgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPg
AABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl
2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikF/hPfMETLfKxqDQAAAABJRU5ErkJggg==", + "text/plain": [ + "" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ImageContent(\"new_layout_rev2.png\").show()" + ] + }, + { + "cell_type": "markdown", + "id": "316fd8d5-cef7-47ec-a129-24e5df6b55e7", + "metadata": {}, + "source": [ + "We can see now the button has become bigger!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b92dfb7-6dd0-44ee-9961-2f50fc16490d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index 900b1f6c..fc9f49d2 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -219,9 +219,22 @@ def output_response_extractor(self, response: str) -> Dict[str, Any]: reasoning = "(Unable to extract, possibly due to parsing failure)" if "```" in response: - match = re.findall(r"```(.*?)```", response, re.DOTALL) - if len(match) > 0: - response = match[0] + # First try to extract from ```json ... ``` blocks + json_match = re.findall(r"```json\s*(.*?)```", response, re.DOTALL) + if len(json_match) > 0: + response = json_match[0].strip() + else: + # Fall back to regular ``` ... 
``` blocks + match = re.findall(r"```(.*?)```", response, re.DOTALL) + if len(match) > 0: + # Remove language identifier if present (e.g., "json", "python") + content = match[0].strip() + # Check if first line is a language identifier + lines = content.split('\n', 1) + if len(lines) > 1 and lines[0].strip().isalpha() and len(lines[0].strip()) < 20: + response = lines[1].strip() + else: + response = content json_extracted = {} suggestion = {} From a72b4461ec9ab23f8df3b96e8b69e428ef83934c Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 9 Jan 2026 01:42:09 -0500 Subject: [PATCH 50/51] update oprov3 --- opto/optimizers/opro_v3.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py index a52fcc2a..39aab1fe 100644 --- a/opto/optimizers/opro_v3.py +++ b/opto/optimizers/opro_v3.py @@ -61,7 +61,7 @@ class OPROPromptSymbolSet(OptimizerPromptSymbolSet): """ instruction_section_title = "# Instruction" - variable_section_title = "# Solution" + variables_section_title = "# Solution" feedback_section_title = "# Feedback" context_section_title = "# Context" @@ -129,7 +129,7 @@ class ProblemInstance: instruction: str variables: Union[str, List[ContentBlock]] feedback: str - context: Optional[str] + context: Optional[ContentBlockList] optimizer_prompt_symbol_set: OPROPromptSymbolSet @@ -172,8 +172,8 @@ def __repr__(self) -> str: {context} """) - if self.context is not None and self.context.strip() != "": - context_section = context_section.format(context=self.context) + if self.context is not None and self.context.to_text().strip() != "": + context_section = context_section.format(context=self.context.to_text()) optimization_query += context_section return optimization_query @@ -200,8 +200,9 @@ def to_content_blocks(self) -> ContentBlockList: blocks.append(f"\n\n# Feedback\n{self.feedback}") # Context section (optional) - if self.context is not None and self.context.strip() != "": - blocks.append(f"\n\n# Context\n{self.context}") + if self.context is not None and self.context.to_text().strip() != "": + blocks.append(f"\n\n# Context\n") + blocks.extend(self.context) return blocks @@ -340,7 +341,7 @@ def __init__(self, *args, optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = None, include_example=False, # default example in OptoPrimeV2 does not work in OPRO memory_size=5, - problem_context: Optional[str] = None, + problem_context: Optional[ContentBlockList] = None, **kwargs): """Initialize the OPROv2 optimizer. From 125d9a27c358b9b8d253bde009621d3a29203905 Mon Sep 17 00:00:00 2001 From: windweller Date: Fri, 9 Jan 2026 10:59:24 -0500 Subject: [PATCH 51/51] move Content to backbone for better import to other optimziers --- opto/optimizers/optoprime_v3.py | 159 +------------------------------ opto/utils/backbone.py | 160 ++++++++++++++++++++++++++++++++ 2 files changed, 161 insertions(+), 158 deletions(-) diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py index fc9f49d2..ae464497 100644 --- a/opto/optimizers/optoprime_v3.py +++ b/opto/optimizers/optoprime_v3.py @@ -20,7 +20,7 @@ from opto.utils.backbone import ( ConversationHistory, UserTurn, AssistantTurn, PromptTemplate, TextContent, ImageContent, ContentBlockList, - DEFAULT_IMAGE_PLACEHOLDER + DEFAULT_IMAGE_PLACEHOLDER, Content ) import copy import pickle @@ -482,164 +482,7 @@ def has_images(self) -> bool: ) -class Content(ContentBlockList): - """Semantic wrapper providing multi-modal content for the optimizer agent. 
- - Inherits all ContentBlockList functionality (append, extend, has_images, - to_text, __bool__, __repr__, etc.) with a flexible constructor that - supports multiple input patterns. - - The goal is to provide a flexible interface for user to add mixed text and image content to the optimizer agent. - - Primary use cases: - - Building problem context for the optimizer agent - - Providing user feedback - - Creation patterns: - - Variadic: Content("text", image, "more text") - - Template: Content("See [IMAGE] here", images=[img]) - - Empty: Content() - - Examples: - # Text-only content - ctx = Content("Important background information") - - # Image content - ctx = Content(ImageContent.build("diagram.png")) - - # Mixed content (variadic mode) - ctx = Content( - "Here's the diagram:", - "diagram.png", # auto-detected as image file - "And the analysis." - ) - - # Template mode with placeholders - ctx = Content( - "Compare [IMAGE] with [IMAGE]:", - images=[img1, img2] - ) - - # Manual building - ctx = Content() - ctx.append("Here's the relevant diagram:") - ctx.append(ImageContent.build("diagram.png")) - """ - - def __init__( - self, - *args, - images: Optional[List[Any]] = None, - format: str = "PNG" - ): - """Initialize a Content from various input patterns. - - Supports two usage modes: - - **Mode 1: Variadic (images=None)** - Pass any mix of text and image sources as arguments. - Strings are auto-detected as text or image paths/URLs. - - Content("Hello", some_image, "World") - Content("Check this:", "path/to/image.png") - - **Mode 2: Template (images provided)** - Pass a template string with [IMAGE] placeholders and a list of images. - - Content( - "Compare [IMAGE] with [IMAGE]", - images=[img1, img2] - ) - - Args: - *args: Variable arguments - text strings and/or image sources (Mode 1), - or a single template string (Mode 2) - images: Optional list of images for template mode. When provided, - expects exactly one template string in args. - format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG - - Raises: - ValueError: In template mode, if placeholder count doesn't match image count, - or if args is not a single template string. - """ - # Initialize empty list first - super().__init__() - - # Build content based on mode - if images is not None: - if len(args) != 1 or not isinstance(args[0], str): - raise ValueError( - "Template mode requires exactly one template string as the first argument. " - f"Got {len(args)} arguments." - ) - self._build_from_template(args[0], images=images, format=format) - elif args: - self._build_from_variadic(*args) - - def _build_from_variadic(self, *args) -> None: - """Populate self from variadic arguments. - - Each argument is either text (str) or an image source. - Strings are auto-detected: if they look like image paths/URLs, - they're converted to ImageContent; otherwise treated as text. - - Args: - *args: Alternating text and image sources - format: Image format for numpy arrays - """ - for arg in args: - # for Future expansion, we can check if the string is any special content type - # by is_empty() on special ContentBlock subclasses - image_content = ImageContent.build(arg) - if not image_content.is_empty(): - self.append(image_content) - else: - self.append(arg) - - def _build_from_template( - self, - template: str, - images: List[Any], - format: str = "PNG" - ) -> None: - """Populate self from template with [IMAGE] placeholders. 
- - The template string contains [IMAGE] placeholders that are replaced - by images from the images list in order. - - Args: - template: Template string containing [IMAGE] placeholders - images: List of image sources to insert at placeholders - format: Image format for numpy arrays - - Raises: - ValueError: If placeholder count doesn't match the number of images. - """ - placeholder = DEFAULT_IMAGE_PLACEHOLDER - - # Count placeholders - placeholder_count = template.count(placeholder) - if placeholder_count != len(images): - raise ValueError( - f"Number of {placeholder} placeholders ({placeholder_count}) " - f"does not match number of images ({len(images)})" - ) - # Split template by placeholder and interleave with images - parts = template.split(placeholder) - - for i, part in enumerate(parts): - if part: # Add text part if non-empty - self.append(part) - - # Add image after each part except the last - if i < len(images): - image_content = ImageContent.build(images[i], format=format) - if image_content is None: - raise ValueError( - f"Could not convert image at index {i} to ImageContent: {type(images[i])}" - ) - self.append(image_content) # we provide two aliases for the Content class for semantic convenience diff --git a/opto/utils/backbone.py b/opto/utils/backbone.py index 3143f60c..95885773 100644 --- a/opto/utils/backbone.py +++ b/opto/utils/backbone.py @@ -390,6 +390,166 @@ def to_litellm_format(self, role: Optional[str] = None) -> List[Dict[str, Any]]: return content +class Content(ContentBlockList): + """Semantic wrapper providing multi-modal content for the optimizer agent. + + The goal is to provide a flexible interface for users to add mixed text and image content to the optimizer agent. + + Inherits all ContentBlockList functionality (append, extend, has_images, + to_text, __bool__, __repr__, etc.) with a flexible constructor that + supports multiple input patterns. + + Primary use cases: + - Building problem context for the optimizer agent + - Providing user feedback + + Creation patterns: + - Variadic: Content("text", image, "more text") + - Template: Content("See [IMAGE] here", images=[img]) + - Empty: Content() + + Examples: + # Text-only content + ctx = Content("Important background information") + + # Image content + ctx = Content(ImageContent.build("diagram.png")) + + # Mixed content (variadic mode) + ctx = Content( + "Here's the diagram:", + "diagram.png", # auto-detected as image file + "And the analysis." + ) + + # Template mode with placeholders + ctx = Content( + "Compare [IMAGE] with [IMAGE]:", + images=[img1, img2] + ) + + # Manual building + ctx = Content() + ctx.append("Here's the relevant diagram:") + ctx.append(ImageContent.build("diagram.png")) + """ + + def __init__( + self, + *args, + images: Optional[List[Any]] = None, + format: str = "PNG" + ): + """Initialize a Content from various input patterns. + + Supports two usage modes: + + **Mode 1: Variadic (images=None)** + Pass any mix of text and image sources as arguments. + Strings are auto-detected as text or image paths/URLs. + + Content("Hello", some_image, "World") + Content("Check this:", "path/to/image.png") + + **Mode 2: Template (images provided)** + Pass a template string with [IMAGE] placeholders and a list of images. + + Content( + "Compare [IMAGE] with [IMAGE]", + images=[img1, img2] + ) + + Args: + *args: Variable arguments - text strings and/or image sources (Mode 1), + or a single template string (Mode 2) + images: Optional list of images for template mode. When provided, + expects exactly one template string in args. + format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG + + Raises: + ValueError: In template mode, if placeholder count doesn't match image count, + or if args is not a single template string. + """ + # Initialize empty list first + super().__init__() + + # Build content based on mode + if images is not None: + if len(args) != 1 or not isinstance(args[0], str): + raise ValueError( + "Template mode requires exactly one template string as the first argument. " + f"Got {len(args)} arguments." + ) + self._build_from_template(args[0], images=images, format=format) + elif args: + self._build_from_variadic(*args) + + def _build_from_variadic(self, *args) -> None: + """Populate self from variadic arguments. + + Each argument is either text (str) or an image source. + Strings are auto-detected: if they look like image paths/URLs, + they're converted to ImageContent; otherwise treated as text. + + Args: + *args: Alternating text and image sources + """ + for arg in args: + # For future expansion, we can check if the string is any special content type + # by is_empty() on special ContentBlock subclasses + image_content = ImageContent.build(arg) + if not image_content.is_empty(): + self.append(image_content) + else: + self.append(arg) + + def _build_from_template( + self, + template: str, + images: List[Any], + format: str = "PNG" + ) -> None: + """Populate self from template with [IMAGE] placeholders. + + The template string contains [IMAGE] placeholders that are replaced + by images from the images list in order. + + Args: + template: Template string containing [IMAGE] placeholders + images: List of image sources to insert at placeholders + format: Image format for numpy arrays + + Raises: + ValueError: If placeholder count doesn't match the number of images. + """ + placeholder = DEFAULT_IMAGE_PLACEHOLDER + + # Count placeholders + placeholder_count = template.count(placeholder) + if placeholder_count != len(images): + raise ValueError( + f"Number of {placeholder} placeholders ({placeholder_count}) " + f"does not match number of images ({len(images)})" + ) + + # Split template by placeholder and interleave with images + parts = template.split(placeholder) + + for i, part in enumerate(parts): + if part: # Add text part if non-empty + self.append(part) + + # Add image after each part except the last + if i < len(images): + image_content = ImageContent.build(images[i], format=format) + if image_content is None: + raise ValueError( + f"Could not convert image at index {i} to ImageContent: {type(images[i])}" + ) + self.append(image_content) + + class PromptTemplate: + """Template for building ContentBlockLists with {placeholder} support.
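
As a quick sanity check of the relocated class, here is a minimal usage sketch. It assumes only the interface shown in this patch (Content, ImageContent.build, and the inherited ContentBlockList helpers); the image file names are hypothetical.

from opto.utils.backbone import Content, ImageContent

# Variadic mode: strings that look like image paths/URLs are auto-detected
# via ImageContent.build(); everything else is kept as a text block.
ctx = Content(
    "Here is the reference layout:",
    "reference_layout.png",  # hypothetical image file
    "Match the button placement.",
)

# Template mode: each [IMAGE] placeholder is replaced, in order, by an
# entry of `images`; a placeholder/image count mismatch raises ValueError.
feedback = Content(
    "Please update the spec according to the annotation on [IMAGE].",
    images=["annotated_feedback.png"],  # hypothetical image file
)

# Inherited ContentBlockList helpers remain available on both objects.
assert feedback.has_images()
print(ctx.to_text())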