
Unable to connect to my serverless RunPod vLLM pod that I created; it should be the same base URL #58

@TheMindExpansionNetwork

Description

I am not sure what I am missing; I am getting this error:

{'result': "Error running crew: litellm.BadRequestError: LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model=TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bitgpt-4-turbo\n Pass model as E.g. For 'Huggingface' inference endpoints pass in completion(model='huggingface/starcoder',..) Learn more: https://docs.litellm.ai/docs/providers", 'stack_trace': 'Traceback (most recent call last):\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\app\pg_crew_run.py", line 62, in run_crew\n result = crewai_crew.kickoff(inputs=inputs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\crew.py", line 551, in kickoff\n result = self._run_sequential_process()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\crew.py", line 658, in _run_sequential_process\n return self._execute_tasks(self.tasks)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\crew.py", line 760, in _execute_tasks\n task_output = task.execute_sync(\n ^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\task.py", line 302, in execute_sync\n return self._execute_core(agent, context, tools)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\task.py", line 366, in _execute_core\n result = agent.execute_task(\n ^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agent.py", line 264, in execute_task\n raise e\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agent.py", line 253, in execute_task\n result = self.agent_executor.invoke(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 106, in invoke\n raise e\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 102, in invoke\n formatted_answer = self._invoke_loop()\n ^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 154, in _invoke_loop\n raise e\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 134, in _invoke_loop\n answer = self._get_llm_response()\n ^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 199, in _get_llm_response\n raise e\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 190, in _get_llm_response\n answer = self.llm.call(\n ^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\crewai\llm.py", line 246, in call\n response = litellm.completion(**params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\litellm\utils.py", 
line 1022, in wrapper\n raise e\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\litellm\utils.py", line 900, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\litellm\main.py", line 2955, in completion\n raise exception_type(\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\litellm\main.py", line 927, in completion\n model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(\n ^^^^^^^^^^^^^^^^^\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\litellm\litellm_core_utils\get_llm_provider_logic.py", line 351, in get_llm_provider\n raise e\n File "Z:\GIT\CR3AT10N-ST4T1ON\Massumis\CrewAI-Studio\miniconda\envs\crewai_env\Lib\site-packages\litellm\litellm_core_utils\get_llm_provider_logic.py", line 328, in get_llm_provider\n raise litellm.exceptions.BadRequestError( # type: ignore\nlitellm.exceptions.BadRequestError: litellm.BadRequestError: LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model=TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bitgpt-4-turbo\n Pass model as E.g. For 'Huggingface' inference endpoints pass in completion(model='huggingface/starcoder',..) Learn more: https://docs.litellm.ai/docs/providers\n'}
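Reading the error, it looks like litellm wants the provider encoded as a prefix on the model name. Based on the litellm docs for OpenAI-compatible endpoints, I think a direct call would look roughly like this (a sketch I have not verified; <ENDPOINT_ID> stands in for my real endpoint ID):

import os
import litellm

# Sketch: the "openai/" prefix tells litellm to treat the endpoint as a
# generic OpenAI-compatible server and route the request to api_base.
# <ENDPOINT_ID> is a placeholder, not my real endpoint ID.
response = litellm.completion(
    model="openai/TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bit",
    api_base="https://api.runpod.ai/v2/<ENDPOINT_ID>/openai/v1",
    api_key=os.environ.get("RUNPOD_API_KEY"),
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)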

The model is TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bit.

https://github.com/runpod-workers/worker-vllm

This should be the right client setup, going by the worker-vllm README:

from openai import OpenAI
import os

# Initialize the OpenAI client with your RunPod API key and endpoint URL
# (my endpoint ID between the two slashes is redacted here)
client = OpenAI(
    api_key=os.environ.get("RUNPOD_API_KEY"),
    base_url="https://api.runpod.ai/v2//openai/v1",
)
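If I translate that into the CrewAI LLM wrapper that CrewAI-Studio uses, I believe it would be roughly this (a sketch; <ENDPOINT_ID> is again a placeholder, and the "openai/" prefix is my assumption about what litellm needs to pick a provider):

import os
from crewai import LLM

# Sketch: CrewAI's LLM hands these parameters through to litellm, so the
# model name carries the "openai/" provider prefix and base_url points at
# the RunPod OpenAI-compatible route. <ENDPOINT_ID> is a placeholder.
llm = LLM(
    model="openai/TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bit",
    base_url="https://api.runpod.ai/v2/<ENDPOINT_ID>/openai/v1",
    api_key=os.environ.get("RUNPOD_API_KEY"),
    temperature=0.15,
)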

I have the API key changed accordingly.

Then I attempted the .env route, using OPENAI_PROXY_MODELS.
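Since the code I paste below splits OPENAI_PROXY_MODELS on commas, I assume the .env entries would look something like this (a sketch; <ENDPOINT_ID> is a placeholder and the "openai/" prefix is my guess at what litellm wants):

# .env (sketch; comma-separated list, no spaces around commas)
OPENAI_API_BASE=https://api.runpod.ai/v2/<ENDPOINT_ID>/openai/v1
OPENAI_API_KEY=<RUNPOD_API_KEY>
OPENAI_PROXY_MODELS=openai/TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bit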

Any help is appreciated.

This is also what I attempted; it gives the same error:

import os
from dotenv import load_dotenv
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_groq import ChatGroq
from langchain_anthropic import ChatAnthropic
from crewai import LLM

def load_secrets_from_env():
    load_dotenv(override=True)
    if "env_vars" not in st.session_state:
        st.session_state.env_vars = {
            "OPENAI_API_KEY": os.getenv("OPENAI_API_KEY"),
            "OPENAI_API_BASE": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1/"),
            "GROQ_API_KEY": os.getenv("GROQ_API_KEY"),
            "LMSTUDIO_API_BASE": os.getenv("LMSTUDIO_API_BASE"),
            "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"),
            "OLLAMA_HOST": os.getenv("OLLAMA_HOST"),
        }

def switch_environment(new_env_vars):
    for key, value in new_env_vars.items():
        if value is not None:
            os.environ[key] = value
            st.session_state.env_vars[key] = value

def restore_environment():
    for key, value in st.session_state.env_vars.items():
        if value is not None:
            os.environ[key] = value
        elif key in os.environ:
            del os.environ[key]

def safe_pop_env_var(key):
    os.environ.pop(key, None)

def create_openai_llm(model, temperature):
    switch_environment({
        "OPENAI_API_KEY": st.session_state.env_vars["OPENAI_API_KEY"],
        "OPENAI_API_BASE": st.session_state.env_vars["OPENAI_API_BASE"],
    })
    api_key = os.getenv("OPENAI_API_KEY")
    api_base = os.getenv("OPENAI_API_BASE")

    if api_key:
        return LLM(model=model, temperature=temperature, base_url=api_base)
    else:
        raise ValueError("OpenAI API key not set in .env file")

def create_anthropic_llm(model, temperature):
    switch_environment({
        "ANTHROPIC_API_KEY": st.session_state.env_vars["ANTHROPIC_API_KEY"],
    })
    api_key = os.getenv("ANTHROPIC_API_KEY")

    if api_key:
        return ChatAnthropic(
            anthropic_api_key=api_key,
            model_name=model,
            temperature=temperature,
            max_tokens=4095,
        )
    else:
        raise ValueError("Anthropic API key not set in .env file")

def create_groq_llm(model, temperature):
    switch_environment({
        "GROQ_API_KEY": st.session_state.env_vars["GROQ_API_KEY"],
    })
    api_key = os.getenv("GROQ_API_KEY")

    if api_key:
        return ChatGroq(groq_api_key=api_key, model_name=model, temperature=temperature, max_tokens=4095)
    else:
        raise ValueError("Groq API key not set in .env file")

def create_ollama_llm(model, temperature):
    host = st.session_state.env_vars["OLLAMA_HOST"]
    if host:
        switch_environment({
            "OPENAI_API_KEY": "ollama",  # sets the OpenAI API key to "ollama"
            "OPENAI_API_BASE": host,  # sets the OpenAI API base to the OLLAMA_HOST value
        })
        return LLM(model=model, temperature=temperature, base_url=host)
    else:
        raise ValueError("Ollama Host is not set in .env file")

def create_lmstudio_llm(model, temperature):
    switch_environment({
        "OPENAI_API_KEY": "lm-studio",
        "OPENAI_API_BASE": st.session_state.env_vars["LMSTUDIO_API_BASE"],
    })
    api_base = os.getenv("OPENAI_API_BASE")

    if api_base:
        return ChatOpenAI(
            openai_api_key="lm-studio",
            openai_api_base=api_base,
            temperature=temperature,
            max_tokens=4095,
        )
    else:
        raise ValueError("LM Studio API base not set in .env file")

LLM_CONFIG = {
    "OpenAI": {
        # Note: there is no comma between the last two string literals in the
        # fallback list, so Python implicitly concatenates them into
        # "TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bitgpt-4-turbo",
        # which is exactly the fused model name shown in the error above.
        "models": os.getenv("OPENAI_PROXY_MODELS", "").split(",") if os.getenv("OPENAI_PROXY_MODELS") else ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo", "TheMindExpansionNetwork/Torque_14B_MED_0.1-AWQ-4bit" "gpt-4-turbo"],
        "create_llm": create_openai_llm,
    },
    "Groq": {
        "models": ["groq/llama3-8b-8192", "groq/llama3-70b-8192", "groq/mixtral-8x7b-32768"],
        "create_llm": create_groq_llm,
    },
    "Ollama": {
        "models": os.getenv("OLLAMA_MODELS", "").split(",") if os.getenv("OLLAMA_MODELS") else [],
        "create_llm": create_ollama_llm,
    },
    "Anthropic": {
        "models": ["claude-3-5-sonnet-20240620"],
        "create_llm": create_anthropic_llm,
    },
    "LM Studio": {
        "models": ["lms-default"],
        "create_llm": create_lmstudio_llm,
    },
}

def llm_providers_and_models():
    return [f"{provider}: {model}" for provider in LLM_CONFIG.keys() for model in LLM_CONFIG[provider]["models"]]

def create_llm(provider_and_model, temperature=0.15):
    provider, model = provider_and_model.split(": ")
    create_llm_func = LLM_CONFIG.get(provider, {}).get("create_llm")

    if create_llm_func:
        llm = create_llm_func(model, temperature)
        restore_environment()  # restores the original environment after creating the LLM
        return llm
    else:
        raise ValueError(f"LLM provider {provider} is not recognized or not supported")
