diff --git a/apps/copilot/client.py b/apps/copilot/client.py
index ac5d789e..d4dce286 100644
--- a/apps/copilot/client.py
+++ b/apps/copilot/client.py
@@ -4,11 +4,21 @@ import dotenv
 dotenv.load_dotenv()
 
 PROVIDER_BASE_URL = os.getenv('PROVIDER_BASE_URL', '')
-PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY', os.getenv('OPENAI_API_KEY', ''))
-PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL', 'gpt-4.1')
+PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY')
+PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL')
+PROVIDER_COPILOT_MODEL = os.getenv('PROVIDER_COPILOT_MODEL')
+
+if not PROVIDER_COPILOT_MODEL:
+    PROVIDER_COPILOT_MODEL = 'gpt-4.1'
 
 if not PROVIDER_API_KEY:
-    raise ValueError("No LLM Provider API key found")
+    PROVIDER_API_KEY = os.getenv('OPENAI_API_KEY')
+
+if not PROVIDER_API_KEY:
+    raise ValueError("No LLM Provider API key found")
+
+if not PROVIDER_DEFAULT_MODEL:
+    PROVIDER_DEFAULT_MODEL = 'gpt-4.1'
 
 completions_client = None
 if PROVIDER_BASE_URL:
diff --git a/apps/copilot/copilot.py b/apps/copilot/copilot.py
index 4d5d31f9..3f4697d0 100644
--- a/apps/copilot/copilot.py
+++ b/apps/copilot/copilot.py
@@ -4,7 +4,7 @@ from pydantic import BaseModel, ValidationError
 from typing import List, Dict, Any, Literal
 import json
 from lib import AgentContext, PromptContext, ToolContext, ChatContext
-from client import PROVIDER_DEFAULT_MODEL
+from client import PROVIDER_COPILOT_MODEL
 from client import completions_client
 
 class UserMessage(BaseModel):
@@ -75,7 +75,7 @@ User: {last_message.content}
 ]
 
     response = completions_client.chat.completions.create(
-        model=PROVIDER_DEFAULT_MODEL,
+        model=PROVIDER_COPILOT_MODEL,
         messages=updated_msgs,
         temperature=0.0,
         response_format={"type": "json_object"}
diff --git a/apps/copilot/copilot_multi_agent.md b/apps/copilot/copilot_multi_agent.md
index 1bdda648..6f283ad8 100644
--- a/apps/copilot/copilot_multi_agent.md
+++ b/apps/copilot/copilot_multi_agent.md
@@ -153,7 +153,7 @@ You are responsible for providing delivery information to the user.
 - Do not leave the user with partial information. Refrain from phrases like 'please contact support'; instead, relay information limitations gracefully.
 '''
 
-use GPT-4o as the default model for new agents.
+use {agent_model} as the default model for new agents.
 
 ## Section 9: General Guidelines
 
diff --git a/apps/copilot/streaming.py b/apps/copilot/streaming.py
index 2e7d65d5..faf97ab0 100644
--- a/apps/copilot/streaming.py
+++ b/apps/copilot/streaming.py
@@ -4,7 +4,7 @@ from pydantic import BaseModel, ValidationError
 from typing import List, Dict, Any, Literal
 import json
 from lib import AgentContext, PromptContext, ToolContext, ChatContext
-from client import PROVIDER_DEFAULT_MODEL
+from client import PROVIDER_COPILOT_MODEL, PROVIDER_DEFAULT_MODEL
 from client import completions_client
 
 class UserMessage(BaseModel):
@@ -71,6 +71,9 @@ def get_streaming_response(
     # add the workflow schema to the system prompt
     sys_prompt = streaming_instructions.replace("{workflow_schema}", workflow_schema)
 
+    # add the agent model to the system prompt
+    sys_prompt = sys_prompt.replace("{agent_model}", PROVIDER_DEFAULT_MODEL)
+
     # add the current workflow config to the last user message
     last_message = messages[-1]
     last_message.content = f"""
@@ -90,7 +93,7 @@ User: {last_message.content}
 ]
 
     return completions_client.chat.completions.create(
-        model=PROVIDER_DEFAULT_MODEL,
+        model=PROVIDER_COPILOT_MODEL,
         messages=updated_msgs,
         temperature=0.0,
         stream=True
diff --git a/apps/rowboat_agents/src/graph/core.py b/apps/rowboat_agents/src/graph/core.py
index 480d33be..f16196af 100644
--- a/apps/rowboat_agents/src/graph/core.py
+++ b/apps/rowboat_agents/src/graph/core.py
@@ -49,15 +49,7 @@ def set_sys_message(messages):
     """
     If the system message is empty, set it to the default message: "You are a helplful assistant."
     """
-    if not any(msg.get("role") == "system" for msg in messages):
-        messages.insert(0, {
-            "role": "system",
-            "content": "You are a helpful assistant."
-        })
-        print("Inserted system message: ", messages[0])
-        logger.info("Inserted system message: ", messages[0])
-
-    elif messages[0].get("role") == "system" and messages[0].get("content") == "":
+    if messages[0].get("role") == "system" and messages[0].get("content") == "":
         messages[0]["content"] = "You are a helpful assistant."
         print("Updated system message: ", messages[0])
         logger.info("Updated system message: ", messages[0])
diff --git a/apps/rowboat_agents/src/graph/swarm_wrapper.py b/apps/rowboat_agents/src/graph/swarm_wrapper.py
index deb77f56..e3c9859e 100644
--- a/apps/rowboat_agents/src/graph/swarm_wrapper.py
+++ b/apps/rowboat_agents/src/graph/swarm_wrapper.py
@@ -52,7 +52,6 @@ async def mock_tool(tool_name: str, args: str, description: str, mock_instructio
         print(f"Generating simulated response for tool: {tool_name}")
         response_content = None
         response_content = generate_openai_output(messages, output_type='text', model=PROVIDER_DEFAULT_MODEL)
-        print("Custom provider client not found, using default model: gpt-4o")
         return response_content
     except Exception as e:
         logger.error(f"Error in mock_tool: {str(e)}")
diff --git a/apps/rowboat_agents/src/utils/client.py b/apps/rowboat_agents/src/utils/client.py
index 06de9088..44af3e97 100644
--- a/apps/rowboat_agents/src/utils/client.py
+++ b/apps/rowboat_agents/src/utils/client.py
@@ -5,15 +5,21 @@ import dotenv
 dotenv.load_dotenv()
 
 PROVIDER_BASE_URL = os.getenv('PROVIDER_BASE_URL', '')
-PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY', os.getenv('OPENAI_API_KEY', ''))
-PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL', 'gpt-4.1')
+PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY')
+PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL')
 
 client = None
 if not PROVIDER_API_KEY:
-    raise ValueError("No LLM Provider API key found")
+    PROVIDER_API_KEY = os.getenv('OPENAI_API_KEY')
+
+if not PROVIDER_API_KEY:
+    raise ValueError("No LLM Provider API key found")
+
+if not PROVIDER_DEFAULT_MODEL:
+    PROVIDER_DEFAULT_MODEL = 'gpt-4.1'
 
 if PROVIDER_BASE_URL:
-    print(f"Using provider {PROVIDER_BASE_URL} with API key {PROVIDER_API_KEY}")
+    print(f"Using provider {PROVIDER_BASE_URL}")
     client = AsyncOpenAI(base_url=PROVIDER_BASE_URL, api_key=PROVIDER_API_KEY)
 else:
     print("No provider base URL configured, using OpenAI directly")
diff --git a/docker-compose.yml b/docker-compose.yml
index 44558a99..4db22aee 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -70,6 +70,7 @@ services:
       - PROVIDER_BASE_URL=${PROVIDER_BASE_URL}
       - PROVIDER_API_KEY=${PROVIDER_API_KEY}
       - PROVIDER_DEFAULT_MODEL=${PROVIDER_DEFAULT_MODEL}
+      - PROVIDER_COPILOT_MODEL=${PROVIDER_COPILOT_MODEL}
     restart: unless-stopped
 
   # tools_webhook:
@@ -157,7 +158,6 @@ services:
       - MONGODB_CONNECTION_STRING=mongodb://mongo:27017/rowboat
       - QDRANT_URL=${QDRANT_URL}
       - QDRANT_API_KEY=${QDRANT_API_KEY}
-      - REDIS_URL=redis://redis:6379
     restart: unless-stopped
 
   # chat_widget: