From 8fb2c3056426ebe20d0fc08d8173dc509d19698e Mon Sep 17 00:00:00 2001
From: chinganc
Date: Wed, 19 Feb 2025 16:18:15 -0800
Subject: [PATCH 1/5] Add the ability to switch backend.

---
 README.md                          | 34 ++++++++++++++++++++++++------
 opto/utils/llm.py                  | 19 +++++++++++++----
 tests/unit_tests/test_llm.py       |  2 +-
 tests/unit_tests/test_optimizer.py |  4 ++--
 4 files changed, 46 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index accd2f8a..0aac0869 100644
--- a/README.md
+++ b/README.md
@@ -274,17 +274,38 @@ with TraceGraph coming soon).
 
 ## LLM API Setup
 
 Currently we rely on [LiteLLM](https://github.com/BerriAI/litellm) or [AutoGen v0.2](https://github.com/microsoft/autogen/tree/0.2) for LLM caching and API-Key management.
-By default, LiteLLM is used. To use it, set the keys as the environment variables, e.g.
+
+By default, LiteLLM is used. To change the default backend, set the environment variable `TRACE_DEFAULT_LLM_BACKEND` in the terminal
+```bash
+export TRACE_DEFAULT_LLM_BACKEND="" # 'LiteLLM' or 'AutoGenLLM'
+```
+or in Python before importing `opto`
+```python
+import os
+os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "" # 'LiteLLM' or 'AutoGenLLM'
+import opto
+```
+
+### Using LiteLLM as Backend
+
+Set the keys as environment variables, following the [documentation of LiteLLM](https://docs.litellm.ai/docs/providers). For example,
 ```python
 import os
-os.environ["OPENAI_API_KEY"] = "your-openai-key"
-os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-key"
+os.environ["OPENAI_API_KEY"] = ""
+os.environ["ANTHROPIC_API_KEY"] = ""
 ```
+For convenience, Trace adds another environment variable, `TRACE_LITELLM_MODEL`, to set the default model name used by LiteLLM, e.g.,
+```bash
+export TRACE_LITELLM_MODEL='gpt-4o'
+```
+will set all LLM instances in Trace to use `gpt-4o` by default.
 
-Please see the [documentation of LiteLLM](https://docs.litellm.ai/docs/providers) for more details on setting keys and end-point urls.
-On the other hand, to use AutoGen, install Trace with autogen flag, `pip install trace-opt[autogen]`. AutoGen relies on `OAI_CONFIG_LIST`, which is a file you put in your working directory. It has the format of:
+### Using AutoGen as Backend
+First install Trace with the autogen flag: `pip install trace-opt[autogen]`. AutoGen relies on `OAI_CONFIG_LIST`, a file you put in your working directory. It has the following format:
 
 ```json lines
 [
@@ -298,7 +319,8 @@ On the other hand, to use AutoGen, install Trace with autogen flag, `pip install
     }
 ]
 ```
-You switch between different LLM models by changing the `model` field in this configuration file.
+You can switch between different LLM models by changing the `model` field in this configuration file.
+Note that AutoGen will use the first model available in this config file by default.
 You can also set an `os.environ` variable `OAI_CONFIG_LIST` to point to the location of this file or directly set a JSON string as the value of this variable.
diff --git a/opto/utils/llm.py b/opto/utils/llm.py
index c0bf0cd3..fd5e0a6e 100644
--- a/opto/utils/llm.py
+++ b/opto/utils/llm.py
@@ -153,7 +153,7 @@ class LiteLLM(AbstractModel):
 
     To use this, set the credentials through the environment variable as
    instructed in the LiteLLM documentation. For convenience, you can set the
-    default model name through the environment variable DEFAULT_LITELLM_MODEL.
+    default model name through the environment variable TRACE_LITELLM_MODEL.
    When using Azure models via token provider, you can set the Azure token
    provider scope through the environment variable AZURE_TOKEN_PROVIDER_SCOPE.
     """
@@ -161,7 +161,7 @@ class LiteLLM(AbstractModel):
 
     def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, cache=True) -> None:
         if model is None:
-            model = os.environ.get('DEFAULT_LITELLM_MODEL', 'gpt-4o')
+            model = os.environ.get('TRACE_LITELLM_MODEL', 'gpt-4o')
         self.model_name = model
         self.cache = cache
         factory = lambda: self._factory(self.model_name)  # an LLM instance uses a fixed model
@@ -224,5 +224,16 @@ def create(self, **config: Any):
         return self._model.chat.completions.create(**config)
 
 
-# Set Default LLM class
-LLM = LiteLLM  # synonym
+
+TRACE_DEFAULT_LLM_BACKEND = os.getenv('TRACE_DEFAULT_LLM_BACKEND', 'LiteLLM')
+if TRACE_DEFAULT_LLM_BACKEND == 'AutoGenLLM':
+    print("Using AutoGenLLM as the default LLM backend.")
+    LLM = AutoGenLLM
+elif TRACE_DEFAULT_LLM_BACKEND == 'CustomLLM':
+    print("Using CustomLLM as the default LLM backend.")
+    LLM = CustomLLM
+elif TRACE_DEFAULT_LLM_BACKEND == 'LiteLLM':
+    print("Using LiteLLM as the default LLM backend.")
+    LLM = LiteLLM
+else:
+    raise ValueError(f"Unknown LLM backend: {TRACE_DEFAULT_LLM_BACKEND}")
diff --git a/tests/unit_tests/test_llm.py b/tests/unit_tests/test_llm.py
index d6606020..4b61e0ed 100644
--- a/tests/unit_tests/test_llm.py
+++ b/tests/unit_tests/test_llm.py
@@ -2,7 +2,7 @@
 from opto.optimizers.utils import print_color
 import os
 
-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
     llm = LLM()
     system_prompt = 'You are a helpful assistant.'
     user_prompt = "Hello world."
diff --git a/tests/unit_tests/test_optimizer.py b/tests/unit_tests/test_optimizer.py
index d77d38be..dc111b53 100644
--- a/tests/unit_tests/test_optimizer.py
+++ b/tests/unit_tests/test_optimizer.py
@@ -34,7 +34,7 @@ def user(x):
     else:
         return "Success."
 
-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
     # One-step optimization example
     x = node(-1.0, trainable=True)
     optimizer = OptoPrime([x])
@@ -124,7 +124,7 @@ def foobar_text(x):
 GRAPH.clear()
 x = node("negative point one", trainable=True)
 
-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
     optimizer = OptoPrime([x])
     output = foobar_text(x)
     feedback = user(output.data)

From 928865fa9aebaed6a8d3ee461726c1f082fa8e44 Mon Sep 17 00:00:00 2001
From: windweller
Date: Wed, 19 Feb 2025 17:01:26 -0800
Subject: [PATCH 2/5] rename AutoGenLLM to AutoGen? Happy to change back.
 Rewrote the CustomLLM env names.
---
 opto/utils/llm.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/opto/utils/llm.py b/opto/utils/llm.py
index fd5e0a6e..d2b5728e 100644
--- a/opto/utils/llm.py
+++ b/opto/utils/llm.py
@@ -198,9 +198,9 @@ class CustomLLM(AbstractModel):
 
     def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, cache=True) -> None:
         if model is None:
-            model = os.environ.get('DEFAULT_LITELLM_CUSTOM_MODEL', 'gpt-4o')
-            base_url = os.environ.get('DEFAULT_LITELLM_CUSTOM_URL', 'http://xx.xx.xxx.xx:4000')
-            server_api_key = os.environ.get('DEFAULT_LITELLM_CUSTOM_API',
+            model = os.environ.get('TRACE_CUSTOMLLM_MODEL', 'gpt-4o')
+            base_url = os.environ.get('TRACE_CUSTOMLLM_URL', 'http://xx.xx.xxx.xx:4000')
+            server_api_key = os.environ.get('TRACE_CUSTOMLLM_API_KEY',
                                             'sk-Xhg...')
             # we assume the server has an API key;
             # the server API key is set through `master_key` in `config.yaml` for the LiteLLM proxy server
@@ -226,8 +226,8 @@ def create(self, **config: Any):
 
 TRACE_DEFAULT_LLM_BACKEND = os.getenv('TRACE_DEFAULT_LLM_BACKEND', 'LiteLLM')
-if TRACE_DEFAULT_LLM_BACKEND == 'AutoGenLLM':
-    print("Using AutoGenLLM as the default LLM backend.")
+if TRACE_DEFAULT_LLM_BACKEND == 'AutoGen':
+    print("Using AutoGen as the default LLM backend.")
     LLM = AutoGenLLM
 elif TRACE_DEFAULT_LLM_BACKEND == 'CustomLLM':
     print("Using CustomLLM as the default LLM backend.")

From 9e6525503cdbba0e94d68a509988e88200526a79 Mon Sep 17 00:00:00 2001
From: chinganc
Date: Fri, 21 Feb 2025 09:02:32 -0800
Subject: [PATCH 3/5] Make LiteLLM backend backward compatible. Update README.

---
 README.md         | 4 ++--
 opto/utils/llm.py | 7 ++++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 0aac0869..d3b24ec9 100644
--- a/README.md
+++ b/README.md
@@ -277,12 +277,12 @@ Currently we rely on [LiteLLM](https://github.com/BerriAI/litellm) or [AutoGen v
 
 By default, LiteLLM is used. To change the default backend, set the environment variable `TRACE_DEFAULT_LLM_BACKEND` in the terminal
 ```bash
-export TRACE_DEFAULT_LLM_BACKEND="" # 'LiteLLM' or 'AutoGenLLM'
+export TRACE_DEFAULT_LLM_BACKEND="" # 'LiteLLM' or 'AutoGen'
 ```
 or in Python before importing `opto`
 ```python
 import os
-os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "" # 'LiteLLM' or 'AutoGenLLM'
+os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "" # 'LiteLLM' or 'AutoGen'
 import opto
 ```
diff --git a/opto/utils/llm.py b/opto/utils/llm.py
index d2b5728e..df2edb56 100644
--- a/opto/utils/llm.py
+++ b/opto/utils/llm.py
@@ -5,6 +5,7 @@
 import litellm
 import os
 import openai
+import warnings
 
 try:
     import autogen  # We import autogen here to avoid the need of installing autogen
@@ -161,7 +162,11 @@ class LiteLLM(AbstractModel):
 
     def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None, cache=True) -> None:
         if model is None:
-            model = os.environ.get('TRACE_LITELLM_MODEL', 'gpt-4o')
+            model = os.environ.get('TRACE_LITELLM_MODEL')
+            if model is None:
+                warnings.warn("The TRACE_LITELLM_MODEL environment variable was not found when loading the default model for LiteLLM; falling back to the DEFAULT_LITELLM_MODEL environment variable, whose usage will be deprecated. Please use TRACE_LITELLM_MODEL to set the default model name for LiteLLM.")
+                model = os.environ.get('DEFAULT_LITELLM_MODEL', 'gpt-4o')
+
         self.model_name = model
         self.cache = cache
         factory = lambda: self._factory(self.model_name)  # an LLM instance uses a fixed model

From 9187dac7e46533195e2c81256a75b9697179910b Mon Sep 17 00:00:00 2001
From: chinganc
Date: Fri, 21 Feb 2025 09:07:14 -0800
Subject: [PATCH 4/5] Make Node.__iter__ work with numpy.ndarray.

---
 opto/trace/iterators.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/opto/trace/iterators.py b/opto/trace/iterators.py
index 8bc28779..207e7bb9 100644
--- a/opto/trace/iterators.py
+++ b/opto/trace/iterators.py
@@ -4,14 +4,14 @@
 from opto.trace.bundle import bundle
 import opto.trace.operators as ops
 from opto.trace.errors import ExecutionError
-
+import numpy as np
 
 # List[Nodes], Node[List]
 def iterate(x: Any):
     """Return an iterator object for node of list, tuple, set, or dict."""
     if not isinstance(x, Node):
         x = node(x)
-    if issubclass(x.type, list) or issubclass(x.type, tuple) or issubclass(x.type, str):
+    if issubclass(x.type, list) or issubclass(x.type, tuple) or issubclass(x.type, str) or issubclass(x.type, np.ndarray):
         return SeqIterable(x)
     elif issubclass(x.type, set):
         converted_list = ops.to_list(x)

From 3c3478f6f3c00b634d310f230df034607ad3adfd Mon Sep 17 00:00:00 2001
From: chinganc
Date: Fri, 21 Feb 2025 09:10:47 -0800
Subject: [PATCH 5/5] Add test for iterating over np.ndarray.

---
 tests/unit_tests/test_nodes.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/tests/unit_tests/test_nodes.py b/tests/unit_tests/test_nodes.py
index 7c7990f2..3d9969ca 100644
--- a/tests/unit_tests/test_nodes.py
+++ b/tests/unit_tests/test_nodes.py
@@ -2,7 +2,7 @@
 from opto.trace import node
 from opto.trace import operators as ops
 from opto.trace.utils import contain
-
+import numpy as np
 
 # Sum of str
 x = node("NodeX")
@@ -151,4 +151,11 @@ def fun(x):
 assert x.description == "[ParameterNode] x"
 
 x = node(1, trainable=True)
-assert x.description == "[ParameterNode] This is a ParameterNode in a computational graph."
\ No newline at end of file
+assert x.description == "[ParameterNode] This is a ParameterNode in a computational graph."
+
+
+# Test iterating numpy array
+x = node(np.array([1, 2, 3]))
+for i, v in enumerate(x):
+    assert isinstance(v, type(x))
+    assert v.data == x.data[i]
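
Putting the series together, here is a minimal usage sketch of the new environment switches. This is an illustration for reviewers, not part of the patches: the import path `opto.utils.llm` is inferred from the file paths in the diffs, and the key and model values below are placeholders.

```python
import os

# The backend dispatch added in opto/utils/llm.py runs at import time,
# so the backend must be chosen before `opto` is imported.
os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "LiteLLM"  # or 'AutoGen', 'CustomLLM'
os.environ["TRACE_LITELLM_MODEL"] = "gpt-4o"         # default model for new LLM instances
os.environ["OPENAI_API_KEY"] = "<your-openai-key>"   # placeholder credential

from opto.utils.llm import LLM  # resolves to the LiteLLM class under this setting

llm = LLM()  # with no model argument, picks up TRACE_LITELLM_MODEL, i.e. 'gpt-4o'
```

Note the fallback added in PATCH 3/5: if `TRACE_LITELLM_MODEL` is unset, `LiteLLM` still reads the deprecated `DEFAULT_LITELLM_MODEL` variable (with a warning) before defaulting to `gpt-4o`.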