Add support for other providers - litellm, openrouter

This commit is contained in:
akhisud3195 2025-04-25 23:50:26 +05:30 committed by Ramnique Singh
parent 8c2c21a239
commit 14eee3e0c3
24 changed files with 398 additions and 95 deletions

View file

@ -31,6 +31,8 @@ Powered by OpenAI's Agents SDK, Rowboat is the fastest way to build multi-agents
3. Access the app at [http://localhost:3000](http://localhost:3000). 3. Access the app at [http://localhost:3000](http://localhost:3000).
Note: See the [Using custom LLM providers](https://docs.rowboatlabs.com/setup/#using-custom-llm-providers) section of our docs for using custom providers like OpenRouter and LiteLLM.
## Demo ## Demo
#### Create a multi-agent assistant with MCP tools by chatting with Rowboat #### Create a multi-agent assistant with MCP tools by chatting with Rowboat

View file

@ -1,7 +1,7 @@
from flask import Flask, request, jsonify, Response, stream_with_context from flask import Flask, request, jsonify, Response, stream_with_context
from pydantic import BaseModel, ValidationError from pydantic import BaseModel, ValidationError
from typing import List from typing import List
from copilot import UserMessage, AssistantMessage, get_response, openai_client from copilot import UserMessage, AssistantMessage, get_response
from streaming import get_streaming_response from streaming import get_streaming_response
from lib import AgentContext, PromptContext, ToolContext, ChatContext from lib import AgentContext, PromptContext, ToolContext, ChatContext
import os import os

24
apps/copilot/client.py Normal file
View file

@ -0,0 +1,24 @@
import os
from openai import OpenAI
import dotenv

# Load environment variables from a local .env file, if present.
dotenv.load_dotenv()

# Base URL of an OpenAI-compatible provider (e.g. LiteLLM, OpenRouter).
# When empty, the official OpenAI endpoint is used.
PROVIDER_BASE_URL = os.getenv('PROVIDER_BASE_URL', '')
# Provider API key; falls back to OPENAI_API_KEY for the default OpenAI setup.
PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY', os.getenv('OPENAI_API_KEY', ''))
# Default model used for completions unless overridden by the caller.
PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL', 'gpt-4.1')

# Fail fast at import time: nothing downstream works without a key.
if not PROVIDER_API_KEY:
    raise ValueError("No LLM Provider API key found")

# Shared chat-completions client; both branches assign it, so no None
# pre-initialization is needed.
if PROVIDER_BASE_URL:
    print(f"Using provider {PROVIDER_BASE_URL} for completions")
    completions_client = OpenAI(
        base_url=PROVIDER_BASE_URL,
        api_key=PROVIDER_API_KEY,
    )
else:
    print("Using OpenAI directly for completions")
    completions_client = OpenAI(api_key=PROVIDER_API_KEY)

View file

@ -4,9 +4,8 @@ from pydantic import BaseModel, ValidationError
from typing import List, Dict, Any, Literal from typing import List, Dict, Any, Literal
import json import json
from lib import AgentContext, PromptContext, ToolContext, ChatContext from lib import AgentContext, PromptContext, ToolContext, ChatContext
from client import PROVIDER_DEFAULT_MODEL
openai_client = OpenAI() from client import completions_client
MODEL_NAME = "gpt-4.1" # OpenAI model name
class UserMessage(BaseModel): class UserMessage(BaseModel):
role: Literal["user"] role: Literal["user"]
@ -75,8 +74,8 @@ User: {last_message.content}
message.model_dump() for message in messages message.model_dump() for message in messages
] ]
response = openai_client.chat.completions.create( response = completions_client.chat.completions.create(
model=MODEL_NAME, model=PROVIDER_DEFAULT_MODEL,
messages=updated_msgs, messages=updated_msgs,
temperature=0.0, temperature=0.0,
response_format={"type": "json_object"} response_format={"type": "json_object"}

View file

@ -18,6 +18,7 @@ openai==1.61.0
packaging==24.2 packaging==24.2
pydantic==2.10.3 pydantic==2.10.3
pydantic_core==2.27.1 pydantic_core==2.27.1
python-dotenv
sniffio==1.3.1 sniffio==1.3.1
tqdm==4.67.1 tqdm==4.67.1
typing_extensions==4.12.2 typing_extensions==4.12.2

View file

@ -4,9 +4,8 @@ from pydantic import BaseModel, ValidationError
from typing import List, Dict, Any, Literal from typing import List, Dict, Any, Literal
import json import json
from lib import AgentContext, PromptContext, ToolContext, ChatContext from lib import AgentContext, PromptContext, ToolContext, ChatContext
from client import PROVIDER_DEFAULT_MODEL
openai_client = OpenAI() from client import completions_client
MODEL_NAME = "gpt-4.1" # OpenAI model name
class UserMessage(BaseModel): class UserMessage(BaseModel):
role: Literal["user"] role: Literal["user"]
@ -90,8 +89,8 @@ User: {last_message.content}
message.model_dump() for message in messages message.model_dump() for message in messages
] ]
return openai_client.chat.completions.create( return completions_client.chat.completions.create(
model=MODEL_NAME, model=PROVIDER_DEFAULT_MODEL,
messages=updated_msgs, messages=updated_msgs,
temperature=0.0, temperature=0.0,
stream=True stream=True

127
apps/docs/docs/setup.md Normal file
View file

@ -0,0 +1,127 @@
## Getting started
- ✨ **Start from an idea → Copilot builds your multi-agent workflows**
E.g. "Build me an assistant for a food delivery company to handle delivery status and missing items. Include the necessary tools."
- 🌐 **Connect MCP servers**
Add the MCP servers in Settings → import the tools into Rowboat.
- 📞 **Integrate into your app using the HTTP API or Python SDK**
Grab the Project ID and generated API Key from Settings and use the API.
Powered by OpenAI's Agents SDK, Rowboat is the fastest way to build multi-agent assistants!
## Quick start
Step 1. Set your OpenAI key:
```bash
export OPENAI_API_KEY=your-openai-api-key
```
Step 2. Clone the repository and start Rowboat docker
```bash
git clone git@github.com:rowboatlabs/rowboat.git
cd rowboat
docker-compose up --build
```
Step 3. Access the app at [http://localhost:3000](http://localhost:3000).
Note: See the [Using custom LLM providers](#using-custom-llm-providers) section below for using custom providers like OpenRouter and LiteLLM.
## Demo
#### Create a multi-agent assistant with MCP tools by chatting with Rowboat
[![Screenshot 2025-04-23 at 00 25 31](https://github.com/user-attachments/assets/c8a41622-8e0e-459f-becb-767503489866)](https://youtu.be/YRTCw9UHRbU)
## Integrate with Rowboat agents
There are 2 ways to integrate with the agents you create in Rowboat
**Option #1: HTTP API**
You can use the API directly at [http://localhost:3000/api/v1/](http://localhost:3000/api/v1/). See [API Docs](https://docs.rowboatlabs.com/using_the_api/) for details.
```bash
curl --location 'http://localhost:3000/api/v1/<PROJECT_ID>/chat' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer <API_KEY>' \
--data '{
"messages": [
{
"role": "user",
"content": "tell me the weather in london in metric units"
}
],
"state": null
}'
```
**Option #2: Python SDK**
You can use the included Python SDK to interact with the Agents
```python
from rowboat import Client, StatefulChat
from rowboat.schema import UserMessage, SystemMessage
# Initialize the client
client = Client(
host="http://localhost:3000",
project_id="<PROJECT_ID>",
api_key="<API_KEY>"
)
# Create a stateful chat session (recommended)
chat = StatefulChat(client)
response = chat.run("What's the weather in London?")
print(response)
# Or use the low-level client API
messages = [
SystemMessage(role='system', content="You are a helpful assistant"),
UserMessage(role='user', content="Hello, how are you?")
]
# Get response
response = client.chat(messages=messages)
print(response.messages[-1].content)
```
## Using custom LLM providers
By default, Rowboat uses OpenAI LLMs (gpt-4o, gpt-4.1, etc.) for both agents and copilot when you export your OPENAI_API_KEY.
However, you can also configure custom LLM providers (e.g. LiteLLM, OpenRouter) to use any of the hundreds of available LLMs beyond OpenAI, such as Claude, DeepSeek, Ollama LLMs and so on.
**Step 1:** Set up your custom LLM provider using the variables below, for example (assuming LiteLLM):
```bash
export PROVIDER_BASE_URL=http://host.docker.internal:4000/
export PROVIDER_API_KEY=sk-1234
```
Rowboat uses "gpt-4.1" as the default model for agents and copilot but this can be overridden as follows, for example (assuming LiteLLM):
```bash
export PROVIDER_DEFAULT_MODEL=claude-3-7-sonnet-latest
export PROVIDER_COPILOT_MODEL=gpt-4o
```
**Notes:**
- Copilot is optimized for gpt-4o/gpt-4.1. We strongly recommend using these models for best performance.
- You can specify different models for the copilot and each agent, but all of them must belong to the same provider (e.g. LiteLLM)
- The integration is provider-agnostic and should work with any service that implements the OpenAI messages format.
- OpenAI-specific tools (e.g., web_search) will not work with non-OpenAI providers. If you get an error, remove these tools.
**Step 2 (No change):** Clone the repository and start Rowboat docker
```bash
git clone git@github.com:rowboatlabs/rowboat.git
cd rowboat
docker-compose up --build
```
**Step 3 (No change):** Access the app at [http://localhost:3000](http://localhost:3000).

View file

@ -4,6 +4,7 @@ theme:
name: material name: material
favicon: img/favicon.ico favicon: img/favicon.ico
nav: nav:
- Getting Started: setup.md
- Overview: - Overview:
- Introduction: index.md - Introduction: index.md
- Open Source License: license.md - Open Source License: license.md

View file

@ -1,6 +1,8 @@
import { WorkflowTemplate } from "./types/workflow_types"; import { WorkflowTemplate } from "./types/workflow_types";
import { z } from 'zod'; import { z } from 'zod';
const DEFAULT_MODEL = process.env.PROVIDER_DEFAULT_MODEL || "gpt-4.1";
export const templates: { [key: string]: z.infer<typeof WorkflowTemplate> } = { export const templates: { [key: string]: z.infer<typeof WorkflowTemplate> } = {
// Default template // Default template
'default': { 'default': {
@ -37,7 +39,7 @@ You are an helpful customer support assistant
Don'ts: Don'ts:
- don't ask user any other detail than email`, - don't ask user any other detail than email`,
model: "gpt-4o", model: DEFAULT_MODEL,
toggleAble: true, toggleAble: true,
ragReturnType: "chunks", ragReturnType: "chunks",
ragK: 3, ragK: 3,

View file

@ -10,12 +10,7 @@ export const WorkflowAgent = z.object({
disabled: z.boolean().default(false).optional(), disabled: z.boolean().default(false).optional(),
instructions: z.string(), instructions: z.string(),
examples: z.string().optional(), examples: z.string().optional(),
model: z.union([ model: z.string(),
z.literal('gpt-4.1'),
z.literal('gpt-4o'),
z.literal('gpt-4.1-mini'),
z.literal('gpt-4o-mini'),
]),
locked: z.boolean().default(false).describe('Whether this agent is locked and cannot be deleted').optional(), locked: z.boolean().default(false).describe('Whether this agent is locked and cannot be deleted').optional(),
toggleAble: z.boolean().default(true).describe('Whether this agent can be enabled or disabled').optional(), toggleAble: z.boolean().default(true).describe('Whether this agent can be enabled or disabled').optional(),
global: z.boolean().default(false).describe('Whether this agent is a global agent, in which case it cannot be connected to other agents').optional(), global: z.boolean().default(false).describe('Whether this agent is a global agent, in which case it cannot be connected to other agents').optional(),

View file

@ -20,6 +20,8 @@ import { Button as CustomButton } from "@/components/ui/button";
import clsx from "clsx"; import clsx from "clsx";
import { EditableField } from "@/app/lib/components/editable-field"; import { EditableField } from "@/app/lib/components/editable-field";
import { USE_TRANSFER_CONTROL_OPTIONS } from "@/app/lib/feature_flags"; import { USE_TRANSFER_CONTROL_OPTIONS } from "@/app/lib/feature_flags";
import { Input } from "@/components/ui/input";
import { Info } from "lucide-react";
// Common section header styles // Common section header styles
const sectionHeaderStyles = "text-xs font-medium uppercase tracking-wider text-gray-500 dark:text-gray-400"; const sectionHeaderStyles = "text-xs font-medium uppercase tracking-wider text-gray-500 dark:text-gray-400";
@ -405,20 +407,33 @@ export function AgentConfig({
)} )}
<div className="space-y-4"> <div className="space-y-4">
<label className={sectionHeaderStyles}> <div className="flex items-center">
Model <label className={sectionHeaderStyles}>
</label> Model
<CustomDropdown </label>
<div className="relative ml-2 group">
<Info
className="w-4 h-4 text-gray-500 dark:text-gray-400 hover:text-gray-700 dark:hover:text-gray-300 cursor-pointer transition-colors"
/>
<div className="absolute bottom-full left-0 mb-2 p-3 w-80 rounded-lg shadow-lg border border-gray-200 dark:border-gray-700 bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-300 text-xs invisible group-hover:visible z-50">
<div className="mb-1 font-medium">Model Configuration</div>
Set this according to the PROVIDER_BASE_URL you have set in your .env file (such as your LiteLLM, gateway).
<br />
<br />
E.g. LiteLLM&apos;s naming convention is like: &apos;claude-3-7-sonnet-latest&apos;, but you may have set alias model names or might be using a different provider like openrouter, openai etc.
<br />
<br />
By default, the model is set to gpt-4.1, assuming your OpenAI API key is set in PROVIDER_API_KEY and PROVIDER_BASE_URL is not set.
<div className="absolute h-2 w-2 bg-white dark:bg-gray-800 transform rotate-45 -bottom-1 left-4 border-r border-b border-gray-200 dark:border-gray-700"></div>
</div>
</div>
</div>
<Input
value={agent.model} value={agent.model}
options={WorkflowAgent.shape.model.options.map((model) => ({ onChange={(e) => handleUpdate({
key: model.value,
label: model.value
}))}
onChange={(value) => handleUpdate({
...agent, ...agent,
model: value as z.infer<typeof WorkflowAgent>['model'] model: e.target.value as z.infer<typeof WorkflowAgent>['model']
})} })}
className="w-40"
/> />
</div> </div>

View file

@ -202,6 +202,19 @@ export function Chat({
setLoadingAssistantResponse(false); setLoadingAssistantResponse(false);
}); });
eventSource.addEventListener('stream_error', (event) => {
if (eventSource) {
eventSource.close();
}
console.error('SSE Error:', event);
if (!ignore) {
setLoadingAssistantResponse(false);
setFetchResponseError('Error: ' + JSON.parse(event.data).error);
setOptimisticMessages(messages);
}
});
eventSource.onerror = (error) => { eventSource.onerror = (error) => {
console.error('SSE Error:', error); console.error('SSE Error:', error);
if (!ignore) { if (!ignore) {

View file

@ -15,9 +15,11 @@ import { getProjectConfig } from "@/app/actions/project_actions";
export function App({ export function App({
projectId, projectId,
useRag, useRag,
defaultModel,
}: { }: {
projectId: string; projectId: string;
useRag: boolean; useRag: boolean;
defaultModel: string;
}) { }) {
const [selectorKey, setSelectorKey] = useState(0); const [selectorKey, setSelectorKey] = useState(0);
const [workflow, setWorkflow] = useState<WithStringId<z.infer<typeof Workflow>> | null>(null); const [workflow, setWorkflow] = useState<WithStringId<z.infer<typeof Workflow>> | null>(null);
@ -118,6 +120,7 @@ export function App({
useRag={useRag} useRag={useRag}
mcpServerUrls={mcpServerUrls} mcpServerUrls={mcpServerUrls}
toolWebhookUrl={toolWebhookUrl} toolWebhookUrl={toolWebhookUrl}
defaultModel={defaultModel}
/>} />}
</> </>
} }

View file

@ -3,6 +3,7 @@ import { App } from "./app";
import { USE_RAG } from "@/app/lib/feature_flags"; import { USE_RAG } from "@/app/lib/feature_flags";
import { projectsCollection } from "@/app/lib/mongodb"; import { projectsCollection } from "@/app/lib/mongodb";
import { notFound } from "next/navigation"; import { notFound } from "next/navigation";
const DEFAULT_MODEL = process.env.PROVIDER_DEFAULT_MODEL || "gpt-4.1";
export const metadata: Metadata = { export const metadata: Metadata = {
title: "Workflow" title: "Workflow"
@ -25,6 +26,7 @@ export default async function Page({
<App <App
projectId={params.projectId} projectId={params.projectId}
useRag={USE_RAG} useRag={USE_RAG}
defaultModel={DEFAULT_MODEL}
/> />
); );
} }

View file

@ -263,7 +263,7 @@ function reducer(state: State, action: Action): State {
description: "", description: "",
disabled: false, disabled: false,
instructions: "", instructions: "",
model: "gpt-4o", model: "",
locked: false, locked: false,
toggleAble: true, toggleAble: true,
ragReturnType: "chunks", ragReturnType: "chunks",
@ -552,7 +552,6 @@ function reducer(state: State, action: Action): State {
draft.currentIndex++; draft.currentIndex++;
draft.present = nextState; draft.present = nextState;
}); });
} }
} }
@ -568,6 +567,7 @@ export function WorkflowEditor({
useRag, useRag,
mcpServerUrls, mcpServerUrls,
toolWebhookUrl, toolWebhookUrl,
defaultModel,
}: { }: {
dataSources: WithStringId<z.infer<typeof DataSource>>[]; dataSources: WithStringId<z.infer<typeof DataSource>>[];
workflow: WithStringId<z.infer<typeof Workflow>>; workflow: WithStringId<z.infer<typeof Workflow>>;
@ -577,6 +577,7 @@ export function WorkflowEditor({
useRag: boolean; useRag: boolean;
mcpServerUrls: Array<z.infer<typeof MCPServer>>; mcpServerUrls: Array<z.infer<typeof MCPServer>>;
toolWebhookUrl: string; toolWebhookUrl: string;
defaultModel: string;
}) { }) {
const [state, dispatch] = useReducer<Reducer<State, Action>>(reducer, { const [state, dispatch] = useReducer<Reducer<State, Action>>(reducer, {
patches: [], patches: [],
@ -659,7 +660,11 @@ export function WorkflowEditor({
} }
function handleAddAgent(agent: Partial<z.infer<typeof WorkflowAgent>> = {}) { function handleAddAgent(agent: Partial<z.infer<typeof WorkflowAgent>> = {}) {
dispatch({ type: "add_agent", agent }); const agentWithModel = {
...agent,
model: agent.model || defaultModel || "gpt-4o"
};
dispatch({ type: "add_agent", agent: agentWithModel });
} }
function handleAddTool(tool: Partial<z.infer<typeof WorkflowTool>> = {}) { function handleAddTool(tool: Partial<z.infer<typeof WorkflowTool>> = {}) {

View file

@ -984,14 +984,14 @@ files = [
[[package]] [[package]]
name = "griffe" name = "griffe"
version = "1.6.2" version = "1.7.3"
description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API."
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "griffe-1.6.2-py3-none-any.whl", hash = "sha256:6399f7e663150e4278a312a8e8a14d2f3d7bd86e2ef2f8056a1058e38579c2ee"}, {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"},
{file = "griffe-1.6.2.tar.gz", hash = "sha256:3a46fa7bd83280909b63c12b9a975732a927dd97809efe5b7972290b606c5d91"}, {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"},
] ]
[package.dependencies] [package.dependencies]
@ -1819,14 +1819,14 @@ files = [
[[package]] [[package]]
name = "mcp" name = "mcp"
version = "1.5.0" version = "1.6.0"
description = "Model Context Protocol SDK" description = "Model Context Protocol SDK"
optional = false optional = false
python-versions = ">=3.10" python-versions = ">=3.10"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "mcp-1.5.0-py3-none-any.whl", hash = "sha256:51c3f35ce93cb702f7513c12406bbea9665ef75a08db909200b07da9db641527"}, {file = "mcp-1.6.0-py3-none-any.whl", hash = "sha256:7bd24c6ea042dbec44c754f100984d186620d8b841ec30f1b19eda9b93a634d0"},
{file = "mcp-1.5.0.tar.gz", hash = "sha256:5b2766c05e68e01a2034875e250139839498c61792163a7b221fc170c12f5aa9"}, {file = "mcp-1.6.0.tar.gz", hash = "sha256:d9324876de2c5637369f43161cd71eebfd803df5a95e46225cab8d280e366723"},
] ]
[package.dependencies] [package.dependencies]
@ -2151,14 +2151,14 @@ files = [
[[package]] [[package]]
name = "openai" name = "openai"
version = "1.68.0" version = "1.76.0"
description = "The official Python library for the openai API" description = "The official Python library for the openai API"
optional = false optional = false
python-versions = ">=3.8" python-versions = ">=3.8"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "openai-1.68.0-py3-none-any.whl", hash = "sha256:20e279b0f3a78cb4a95f3eab2a180f3ee30c6a196aeebd6bf642a4f88ab85ee1"}, {file = "openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a"},
{file = "openai-1.68.0.tar.gz", hash = "sha256:c570c06c9ba10f98b891ac30a3dd7b5c89ed48094c711c7a3f35fb5ade6c0757"}, {file = "openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2"},
] ]
[package.dependencies] [package.dependencies]
@ -2166,37 +2166,42 @@ anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2" distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1" httpx = ">=0.23.0,<1"
jiter = ">=0.4.0,<1" jiter = ">=0.4.0,<1"
numpy = ">=2.0.2"
pydantic = ">=1.9.0,<3" pydantic = ">=1.9.0,<3"
sniffio = "*" sniffio = "*"
sounddevice = ">=0.5.1"
tqdm = ">4" tqdm = ">4"
typing-extensions = ">=4.11,<5" typing-extensions = ">=4.11,<5"
[package.extras] [package.extras]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<15)"] realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]] [[package]]
name = "openai-agents" name = "openai-agents"
version = "0.0.4" version = "0.0.13"
description = "OpenAI Agents SDK" description = "OpenAI Agents SDK"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "openai_agents-0.0.4-py3-none-any.whl", hash = "sha256:5577c3ee994fe0bd200d7283e4f7a614b3af19afeebcfb07b6ca6039a8a50a5c"}, {file = "openai_agents-0.0.13-py3-none-any.whl", hash = "sha256:e11910679e74803e8a4237ce52a21ee6f9ef0848d866e8198f5c4fb8c6310204"},
{file = "openai_agents-0.0.4.tar.gz", hash = "sha256:297e8d5faeca753e1b303d860b7ac94d03a7e10382be738163dc6a10a3b7cc1c"}, {file = "openai_agents-0.0.13.tar.gz", hash = "sha256:6b80315e75c06b5302c5f2adba2f9ea3845f94615daed4706bfb871740f561a5"},
] ]
[package.dependencies] [package.dependencies]
griffe = ">=1.5.6,<2" griffe = ">=1.5.6,<2"
openai = ">=1.66.2" mcp = {version = ">=1.6.0,<2", markers = "python_version >= \"3.10\""}
openai = ">=1.76.0"
pydantic = ">=2.10,<3" pydantic = ">=2.10,<3"
requests = ">=2.0,<3" requests = ">=2.0,<3"
types-requests = ">=2.0,<3" types-requests = ">=2.0,<3"
typing-extensions = ">=4.12.2,<5" typing-extensions = ">=4.12.2,<5"
[package.extras]
litellm = ["litellm (>=1.65.0,<2)"]
viz = ["graphviz (>=0.17)"]
voice = ["numpy (>=2.2.0,<3) ; python_version >= \"3.10\"", "websockets (>=15.0,<16)"]
[[package]] [[package]]
name = "openpyxl" name = "openpyxl"
version = "3.1.5" version = "3.1.5"
@ -3223,27 +3228,6 @@ files = [
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
] ]
[[package]]
name = "sounddevice"
version = "0.5.1"
description = "Play and Record Sound with Python"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "sounddevice-0.5.1-py3-none-any.whl", hash = "sha256:e2017f182888c3f3c280d9fbac92e5dbddac024a7e3442f6e6116bd79dab8a9c"},
{file = "sounddevice-0.5.1-py3-none-macosx_10_6_x86_64.macosx_10_6_universal2.whl", hash = "sha256:d16cb23d92322526a86a9490c427bf8d49e273d9ccc0bd096feecd229cde6031"},
{file = "sounddevice-0.5.1-py3-none-win32.whl", hash = "sha256:d84cc6231526e7a08e89beff229c37f762baefe5e0cc2747cbe8e3a565470055"},
{file = "sounddevice-0.5.1-py3-none-win_amd64.whl", hash = "sha256:4313b63f2076552b23ac3e0abd3bcfc0c1c6a696fc356759a13bd113c9df90f1"},
{file = "sounddevice-0.5.1.tar.gz", hash = "sha256:09ca991daeda8ce4be9ac91e15a9a81c8f81efa6b695a348c9171ea0c16cb041"},
]
[package.dependencies]
CFFI = ">=1.0"
[package.extras]
numpy = ["NumPy"]
[[package]] [[package]]
name = "soupsieve" name = "soupsieve"
version = "2.6" version = "2.6"
@ -3417,14 +3401,14 @@ files = [
[[package]] [[package]]
name = "types-requests" name = "types-requests"
version = "2.32.0.20250306" version = "2.32.0.20250328"
description = "Typing stubs for requests" description = "Typing stubs for requests"
optional = false optional = false
python-versions = ">=3.9" python-versions = ">=3.9"
groups = ["main"] groups = ["main"]
files = [ files = [
{file = "types_requests-2.32.0.20250306-py3-none-any.whl", hash = "sha256:25f2cbb5c8710b2022f8bbee7b2b66f319ef14aeea2f35d80f18c9dbf3b60a0b"}, {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
{file = "types_requests-2.32.0.20250306.tar.gz", hash = "sha256:0962352694ec5b2f95fda877ee60a159abdf84a0fc6fdace599f20acb41a03d1"}, {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
] ]
[package.dependencies] [package.dependencies]
@ -4009,4 +3993,4 @@ cffi = ["cffi (>=1.11)"]
[metadata] [metadata]
lock-version = "2.1" lock-version = "2.1"
python-versions = ">=3.10,<4.0" python-versions = ">=3.10,<4.0"
content-hash = "4b3828121cfa1e7657d9f4ecd4635d0a952c81ad77c7ec2d71c8415c90506f2c" content-hash = "9b132012b1e894f31b66796668c874f0c81ca3077c67e12878b00ccc3e8242ac"

View file

@ -62,7 +62,6 @@ mypy-extensions = "^1.0.0"
nest-asyncio = "^1.6.0" nest-asyncio = "^1.6.0"
numpy = "^2.2.1" numpy = "^2.2.1"
openai = "*" openai = "*"
openai-agents = "*"
openpyxl = "^3.1.5" openpyxl = "^3.1.5"
packaging = "^24.2" packaging = "^24.2"
pandas = "^2.2.3" pandas = "^2.2.3"
@ -104,6 +103,7 @@ websockets = "^13.1"
Werkzeug = "^3.1.3" Werkzeug = "^3.1.3"
wheel = "^0.44.0" wheel = "^0.44.0"
xattr = "^1.1.4" xattr = "^1.1.4"
openai-agents = "^0.0.13"
[build-system] [build-system]
requires = ["poetry-core"] requires = ["poetry-core"]

View file

@ -6,6 +6,7 @@ annotated-types==0.7.0
anyio==4.8.0 anyio==4.8.0
asgiref==3.8.1 asgiref==3.8.1
attrs==25.3.0 attrs==25.3.0
babel==2.16.0
beautifulsoup4==4.12.3 beautifulsoup4==4.12.3
blinker==1.9.0 blinker==1.9.0
build==1.2.2.post1 build==1.2.2.post1
@ -29,7 +30,9 @@ findpython==0.6.3
firecrawl==1.9.0 firecrawl==1.9.0
Flask==3.1.0 Flask==3.1.0
frozenlist==1.5.0 frozenlist==1.5.0
griffe==1.6.2 fsspec==2025.3.2
ghp-import==2.1.0
griffe==1.7.3
grpcio==1.71.0 grpcio==1.71.0
grpcio-tools==1.71.0 grpcio-tools==1.71.0
gunicorn==23.0.0 gunicorn==23.0.0
@ -39,9 +42,11 @@ hpack==4.1.0
httpcore==1.0.7 httpcore==1.0.7
httpx==0.27.2 httpx==0.27.2
httpx-sse==0.4.0 httpx-sse==0.4.0
huggingface-hub==0.30.2
Hypercorn==0.17.3 Hypercorn==0.17.3
hyperframe==6.1.0 hyperframe==6.1.0
idna==3.10 idna==3.10
importlib_metadata==8.6.1
installer==0.7.0 installer==0.7.0
itsdangerous==2.2.0 itsdangerous==2.2.0
jaraco.classes==3.4.0 jaraco.classes==3.4.0
@ -50,11 +55,21 @@ jaraco.functools==4.1.0
Jinja2==3.1.5 Jinja2==3.1.5
jiter==0.6.1 jiter==0.6.1
jsonpath-python==1.0.6 jsonpath-python==1.0.6
jsonschema==4.23.0
jsonschema-specifications==2025.4.1
keyring==25.6.0 keyring==25.6.0
litellm==1.67.2
lxml==5.3.0 lxml==5.3.0
Markdown==3.7
markdownify==0.13.1 markdownify==0.13.1
MarkupSafe==3.0.2 MarkupSafe==3.0.2
mcp==1.5.0 mcp==1.6.0
mergedeep==1.3.4
mistralai==1.2.3
mkdocs==1.6.1
mkdocs-get-deps==0.2.0
mkdocs-material==9.5.50
mkdocs-material-extensions==1.3.1
more-itertools==10.6.0 more-itertools==10.6.0
motor==3.7.0 motor==3.7.0
msgpack==1.1.0 msgpack==1.1.0
@ -62,11 +77,13 @@ multidict==6.2.0
mypy-extensions==1.0.0 mypy-extensions==1.0.0
nest-asyncio==1.6.0 nest-asyncio==1.6.0
numpy==2.2.1 numpy==2.2.1
openai==1.68.0 openai==1.76.0
openai-agents==0.0.4 openai-agents==0.0.13
openpyxl==3.1.5 openpyxl==3.1.5
packaging==24.2 packaging==24.2
paginate==0.5.7
pandas==2.2.3 pandas==2.2.3
pathspec==0.12.1
pbs-installer==2025.3.17 pbs-installer==2025.3.17
pkginfo==1.12.1.2 pkginfo==1.12.1.2
platformdirs==4.3.7 platformdirs==4.3.7
@ -80,18 +97,26 @@ pycparser==2.22
pydantic==2.10.5 pydantic==2.10.5
pydantic-settings==2.8.1 pydantic-settings==2.8.1
pydantic_core==2.27.2 pydantic_core==2.27.2
Pygments==2.19.1
PyJWT==2.10.1 PyJWT==2.10.1
pymdown-extensions==10.14.1
pymongo==4.10.1 pymongo==4.10.1
pyproject_hooks==1.2.0 pyproject_hooks==1.2.0
python-dateutil==2.9.0.post0 python-dateutil==2.9.0.post0
python-docx==1.1.2 python-docx==1.1.2
python-dotenv==1.0.1 python-dotenv==1.0.1
pytz==2024.2 pytz==2024.2
PyYAML==6.0.2
pyyaml_env_tag==0.1
qdrant-client==1.13.3 qdrant-client==1.13.3
Quart==0.20.0 Quart==0.20.0
RapidFuzz==3.12.2 RapidFuzz==3.12.2
redis==5.2.1
referencing==0.36.2
regex==2024.11.6
requests==2.32.3 requests==2.32.3
requests-toolbelt==1.0.0 requests-toolbelt==1.0.0
rpds-py==0.24.0
setuptools==75.8.0 setuptools==75.8.0
shellingham==1.5.4 shellingham==1.5.4
six==1.17.0 six==1.17.0
@ -99,23 +124,30 @@ sniffio==1.3.1
sounddevice==0.5.1 sounddevice==0.5.1
soupsieve==2.6 soupsieve==2.6
sse-starlette==2.2.1 sse-starlette==2.2.1
sseclient==0.0.27
sseclient-py==1.8.0
starlette==0.46.1 starlette==0.46.1
tabulate==0.9.0 tabulate==0.9.0
tiktoken==0.9.0
tokenizers==0.21.1
tomlkit==0.13.2 tomlkit==0.13.2
tqdm==4.67.1 tqdm==4.67.1
trove-classifiers==2025.3.19.19 trove-classifiers==2025.3.19.19
types-requests==2.32.0.20250306 types-requests==2.32.0.20250328
typing-inspect==0.9.0 typing-inspect==0.9.0
typing-inspection==0.4.0
typing_extensions==4.12.2 typing_extensions==4.12.2
tzdata==2024.2 tzdata==2024.2
urllib3==2.3.0 urllib3==2.3.0
uvicorn==0.34.0 uvicorn==0.34.0
virtualenv==20.29.3 virtualenv==20.29.3
waitress==2.1.2 waitress==2.1.2
watchdog==6.0.0
websockets==13.1 websockets==13.1
Werkzeug==3.1.3 Werkzeug==3.1.3
wheel==0.44.0 wheel==0.44.0
wsproto==1.2.0 wsproto==1.2.0
xattr==1.1.4 xattr==1.1.4
yarl==1.18.3 yarl==1.18.3
zipp==3.21.0
zstandard==0.23.0 zstandard==0.23.0

View file

@ -91,6 +91,7 @@ async def chat():
start_agent_name=data.get("startAgent", ""), start_agent_name=data.get("startAgent", ""),
agent_configs=data.get("agents", []), agent_configs=data.get("agents", []),
tool_configs=data.get("tools", []), tool_configs=data.get("tools", []),
prompt_configs=data.get("prompts", []),
start_turn_with_start_agent=config.get("start_turn_with_start_agent", False), start_turn_with_start_agent=config.get("start_turn_with_start_agent", False),
state=data.get("state", {}), state=data.get("state", {}),
additional_tool_configs=[RAG_TOOL, CLOSE_CHAT_TOOL], additional_tool_configs=[RAG_TOOL, CLOSE_CHAT_TOOL],
@ -157,6 +158,7 @@ async def chat_stream():
start_agent_name=request_data.get("startAgent", ""), start_agent_name=request_data.get("startAgent", ""),
agent_configs=request_data.get("agents", []), agent_configs=request_data.get("agents", []),
tool_configs=request_data.get("tools", []), tool_configs=request_data.get("tools", []),
prompt_configs=request_data.get("prompts", []),
start_turn_with_start_agent=config.get("start_turn_with_start_agent", False), start_turn_with_start_agent=config.get("start_turn_with_start_agent", False),
state=request_data.get("state", {}), state=request_data.get("state", {}),
additional_tool_configs=[RAG_TOOL, CLOSE_CHAT_TOOL], additional_tool_configs=[RAG_TOOL, CLOSE_CHAT_TOOL],
@ -168,6 +170,9 @@ async def chat_stream():
elif event_type == 'done': elif event_type == 'done':
print("Yielding done:") print("Yielding done:")
yield format_sse(event_data, "done") yield format_sse(event_data, "done")
elif event_type == 'error':
print("Yielding error:")
yield format_sse(event_data, "stream_error")
except Exception as e: except Exception as e:
logger.error(f"Streaming error: {str(e)}") logger.error(f"Streaming error: {str(e)}")

View file

@ -7,6 +7,7 @@ import logging
from .helpers.access import ( from .helpers.access import (
get_agent_by_name, get_agent_by_name,
get_external_tools, get_external_tools,
get_prompt_by_type
) )
from .helpers.state import ( from .helpers.state import (
construct_state_from_response construct_state_from_response
@ -14,7 +15,8 @@ from .helpers.state import (
from .helpers.control import get_latest_assistant_msg, get_latest_non_assistant_messages, get_last_agent_name from .helpers.control import get_latest_assistant_msg, get_latest_non_assistant_messages, get_last_agent_name
from .swarm_wrapper import run as swarm_run, run_streamed as swarm_run_streamed, create_response, get_agents from .swarm_wrapper import run as swarm_run, run_streamed as swarm_run_streamed, create_response, get_agents
from src.utils.common import common_logger as logger from src.utils.common import common_logger as logger
import asyncio
from .types import PromptType
# Create a dedicated logger for swarm wrapper # Create a dedicated logger for swarm wrapper
logger.setLevel(logging.INFO) logger.setLevel(logging.INFO)
@ -43,6 +45,26 @@ def order_messages(messages):
ordered_messages.append(ordered) ordered_messages.append(ordered)
return ordered_messages return ordered_messages
def set_sys_message(messages):
    """
    Ensure the conversation has a non-empty system message.

    If no system message is present, insert a default one at the front of
    the list. If the first message is a system message whose content is the
    empty string, fill it with the default content. Mutates *messages* in
    place and returns it.

    Args:
        messages: list of chat message dicts (each with "role"/"content").

    Returns:
        The same list, guaranteed to start with a usable system message
        when one was missing or empty.
    """
    default_content = "You are a helpful assistant."
    if not any(msg.get("role") == "system" for msg in messages):
        messages.insert(0, {
            "role": "system",
            "content": default_content
        })
        print("Inserted system message: ", messages[0])
        # Use lazy %-style args so the logging module formats correctly
        # (passing the dict as a bare extra arg is a formatting error).
        logger.info("Inserted system message: %s", messages[0])
    elif messages[0].get("role") == "system" and messages[0].get("content") == "":
        messages[0]["content"] = default_content
        print("Updated system message: ", messages[0])
        logger.info("Updated system message: %s", messages[0])
    print("Messages: ", messages)
    return messages
def clean_up_history(agent_data): def clean_up_history(agent_data):
""" """
@ -197,7 +219,6 @@ async def run_turn(
logger.info(f"Completed run of agent: {last_new_agent.name}") logger.info(f"Completed run of agent: {last_new_agent.name}")
print(f"Completed run of agent: {last_new_agent.name}") print(f"Completed run of agent: {last_new_agent.name}")
# Otherwise, duplicate the last response as external # Otherwise, duplicate the last response as external
logger.info("No post-processing agent found. Duplicating last response and setting to external.") logger.info("No post-processing agent found. Duplicating last response and setting to external.")
print("No post-processing agent found. Duplicating last response and setting to external.") print("No post-processing agent found. Duplicating last response and setting to external.")
@ -236,13 +257,41 @@ async def run_turn_streamed(
start_agent_name, start_agent_name,
agent_configs, agent_configs,
tool_configs, tool_configs,
prompt_configs,
start_turn_with_start_agent, start_turn_with_start_agent,
state={}, state={},
additional_tool_configs=[], additional_tool_configs=[],
complete_request={} complete_request={}
): ):
messages = set_sys_message(messages)
is_greeting_turn = not any(msg.get("role") != "system" for msg in messages)
final_state = None # Initialize outside try block final_state = None # Initialize outside try block
try: try:
greeting_prompt = get_prompt_by_type(prompt_configs, PromptType.GREETING)
if is_greeting_turn:
if not greeting_prompt:
greeting_prompt = "How can I help you today?"
print("Greeting prompt not found. Using default: ", greeting_prompt)
message = {
'content': greeting_prompt,
'role': 'assistant',
'sender': start_agent_name,
'tool_calls': None,
'tool_call_id': None,
'tool_name': None,
'response_type': 'external'
}
print("Yielding greeting message: ", message)
yield ('message', message)
final_state = {
"last_agent_name": start_agent_name if start_agent_name else None,
"tokens": {"total": 0, "prompt": 0, "completion": 0}
}
print("Yielding done message")
yield ('done', {'state': final_state})
return
# Initialize agents and get external tools # Initialize agents and get external tools
new_agents = get_agents(agent_configs=agent_configs, tool_configs=tool_configs, complete_request=complete_request) new_agents = get_agents(agent_configs=agent_configs, tool_configs=tool_configs, complete_request=complete_request)
last_agent_name = get_last_agent_name( last_agent_name = get_last_agent_name(
@ -274,7 +323,7 @@ async def run_turn_streamed(
# Handle raw response events and accumulate tokens # Handle raw response events and accumulate tokens
if event.type == "raw_response_event": if event.type == "raw_response_event":
if hasattr(event.data, 'type') and event.data.type == "response.completed": if hasattr(event.data, 'type') and event.data.type == "response.completed" and event.data.response.usage:
if hasattr(event.data.response, 'usage'): if hasattr(event.data.response, 'usage'):
tokens_used["total"] += event.data.response.usage.total_tokens tokens_used["total"] += event.data.response.usage.total_tokens
tokens_used["prompt"] += event.data.response.usage.input_tokens tokens_used["prompt"] += event.data.response.usage.input_tokens
@ -616,4 +665,5 @@ async def run_turn_streamed(
except Exception as e: except Exception as e:
print(traceback.format_exc()) print(traceback.format_exc())
print(f"Error in stream processing: {str(e)}") print(f"Error in stream processing: {str(e)}")
print("Yielding error event:", {'error': str(e), 'state': final_state})
yield ('error', {'error': str(e), 'state': final_state}) # Include final_state in error response yield ('error', {'error': str(e), 'state': final_state}) # Include final_state in error response

View file

@ -3,6 +3,7 @@ import json
import aiohttp import aiohttp
import jwt import jwt
import hashlib import hashlib
from agents import OpenAIChatCompletionsModel
# Import helper functions needed for get_agents # Import helper functions needed for get_agents
from .helpers.access import ( from .helpers.access import (
@ -31,6 +32,8 @@ MONGO_URI = os.environ.get("MONGODB_URI", "mongodb://localhost:27017/rowboat").s
mongo_client = MongoClient(MONGO_URI) mongo_client = MongoClient(MONGO_URI)
db = mongo_client["rowboat"] db = mongo_client["rowboat"]
from src.utils.client import client, PROVIDER_DEFAULT_MODEL
class NewResponse(BaseModel): class NewResponse(BaseModel):
messages: List[Dict] messages: List[Dict]
agent: Optional[Any] = None agent: Optional[Any] = None
@ -47,7 +50,9 @@ async def mock_tool(tool_name: str, args: str, description: str, mock_instructio
] ]
print(f"Generating simulated response for tool: {tool_name}") print(f"Generating simulated response for tool: {tool_name}")
response_content = generate_openai_output(messages, output_type='text', model="gpt-4o") response_content = None
response_content = generate_openai_output(messages, output_type='text', model=PROVIDER_DEFAULT_MODEL)
print("Custom provider client not found, using default model: gpt-4o")
return response_content return response_content
except Exception as e: except Exception as e:
logger.error(f"Error in mock_tool: {str(e)}") logger.error(f"Error in mock_tool: {str(e)}")
@ -173,8 +178,6 @@ def get_rag_tool(config: dict, complete_request: dict) -> FunctionTool:
else: else:
return None return None
def get_agents(agent_configs, tool_configs, complete_request): def get_agents(agent_configs, tool_configs, complete_request):
""" """
Creates and initializes Agent objects based on their configurations and connections. Creates and initializes Agent objects based on their configurations and connections.
@ -246,12 +249,15 @@ def get_agents(agent_configs, tool_configs, complete_request):
# add the name and description to the agent instructions # add the name and description to the agent instructions
agent_instructions = f"## Your Name\n{agent_config['name']}\n\n## Description\n{agent_config['description']}\n\n## Instructions\n{agent_config['instructions']}" agent_instructions = f"## Your Name\n{agent_config['name']}\n\n## Description\n{agent_config['description']}\n\n## Instructions\n{agent_config['instructions']}"
try: try:
model_name = agent_config["model"] if agent_config["model"] else PROVIDER_DEFAULT_MODEL
print(f"Using model: {model_name}")
model=OpenAIChatCompletionsModel(model=model_name, openai_client=client) if client else agent_config["model"]
new_agent = NewAgent( new_agent = NewAgent(
name=agent_config["name"], name=agent_config["name"],
instructions=agent_instructions, instructions=agent_instructions,
handoff_description=agent_config["description"], handoff_description=agent_config["description"],
tools=new_tools, tools=new_tools,
model=agent_config["model"], model = model,
model_settings=ModelSettings(temperature=0.0) model_settings=ModelSettings(temperature=0.0)
) )

View file

@ -0,0 +1,32 @@
import os
import logging
from openai import AsyncOpenAI, OpenAI
import dotenv

dotenv.load_dotenv()

# Provider configuration for OpenAI-compatible endpoints (LiteLLM,
# OpenRouter, ...). When PROVIDER_BASE_URL is unset, plain OpenAI is used.
PROVIDER_BASE_URL = os.getenv('PROVIDER_BASE_URL', '')
# Prefer an explicit provider key; fall back to OPENAI_API_KEY.
PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY', os.getenv('OPENAI_API_KEY', ''))
PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL', 'gpt-4.1')

if not PROVIDER_API_KEY:
    raise ValueError("No LLM Provider API key found")

# Async client used by the agents SDK. None means "use the SDK's default
# OpenAI client" rather than a custom provider.
client = None
if PROVIDER_BASE_URL:
    # Never print the API key itself; only confirm that one is configured.
    print(f"Using provider {PROVIDER_BASE_URL} (API key configured)")
    client = AsyncOpenAI(base_url=PROVIDER_BASE_URL, api_key=PROVIDER_API_KEY)
else:
    print("No provider base URL configured, using OpenAI directly")

# Synchronous client for plain chat-completions calls.
if PROVIDER_BASE_URL:
    print(f"Using provider {PROVIDER_BASE_URL} for completions")
    completions_client = OpenAI(
        base_url=PROVIDER_BASE_URL,
        api_key=PROVIDER_API_KEY
    )
else:
    print("Using OpenAI directly for completions")
    completions_client = OpenAI(
        api_key=PROVIDER_API_KEY
    )

View file

@ -7,6 +7,7 @@ import time
from dotenv import load_dotenv from dotenv import load_dotenv
from openai import OpenAI from openai import OpenAI
from src.utils.client import completions_client
load_dotenv() load_dotenv()
def setup_logger(name, log_file='./run.log', level=logging.INFO, log_to_file=False): def setup_logger(name, log_file='./run.log', level=logging.INFO, log_to_file=False):
@ -53,25 +54,22 @@ def get_api_key(key_name):
raise ValueError(f"{key_name} not found. Did you set it in the .env file?") raise ValueError(f"{key_name} not found. Did you set it in the .env file?")
return api_key return api_key
openai_client = OpenAI(
api_key=get_api_key("OPENAI_API_KEY")
)
def generate_gpt4o_output_from_multi_turn_conv(messages, output_type='json', model="gpt-4o"): def generate_gpt4o_output_from_multi_turn_conv(messages, output_type='json', model="gpt-4o"):
return generate_openai_output(messages, output_type, model) return generate_openai_output(messages, output_type, model)
def generate_openai_output(messages, output_type='not_json', model="gpt-4o", return_completion=False): def generate_openai_output(messages, output_type='not_json', model="gpt-4o", return_completion=False):
print(f"In generate_openai_output, using client: {completions_client} and model: {model}")
try: try:
if output_type == 'json': if output_type == 'json':
chat_completion = openai_client.chat.completions.create( chat_completion = completions_client.chat.completions.create(
messages=messages,
model=model, model=model,
messages=messages,
response_format={"type": "json_object"} response_format={"type": "json_object"}
) )
else: else:
chat_completion = openai_client.chat.completions.create( chat_completion = completions_client.chat.completions.create(
messages=messages,
model=model, model=model,
messages=messages,
) )
if return_completion: if return_completion:

View file

@ -37,6 +37,7 @@ services:
- MAX_QUERIES_PER_MINUTE=${MAX_QUERIES_PER_MINUTE} - MAX_QUERIES_PER_MINUTE=${MAX_QUERIES_PER_MINUTE}
- MAX_PROJECTS_PER_USER=${MAX_PROJECTS_PER_USER} - MAX_PROJECTS_PER_USER=${MAX_PROJECTS_PER_USER}
- VOICE_API_URL=${VOICE_API_URL} - VOICE_API_URL=${VOICE_API_URL}
- PROVIDER_DEFAULT_MODEL=${PROVIDER_DEFAULT_MODEL}
restart: unless-stopped restart: unless-stopped
rowboat_agents: rowboat_agents:
@ -52,6 +53,9 @@ services:
- MONGODB_URI=mongodb://mongo:27017/rowboat - MONGODB_URI=mongodb://mongo:27017/rowboat
- QDRANT_URL=${QDRANT_URL} - QDRANT_URL=${QDRANT_URL}
- QDRANT_API_KEY=${QDRANT_API_KEY} - QDRANT_API_KEY=${QDRANT_API_KEY}
- PROVIDER_BASE_URL=${PROVIDER_BASE_URL}
- PROVIDER_API_KEY=${PROVIDER_API_KEY}
- PROVIDER_DEFAULT_MODEL=${PROVIDER_DEFAULT_MODEL}
restart: unless-stopped restart: unless-stopped
copilot: copilot:
@ -63,6 +67,9 @@ services:
environment: environment:
- OPENAI_API_KEY=${OPENAI_API_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY}
- API_KEY=${COPILOT_API_KEY} - API_KEY=${COPILOT_API_KEY}
- PROVIDER_BASE_URL=${PROVIDER_BASE_URL}
- PROVIDER_API_KEY=${PROVIDER_API_KEY}
- PROVIDER_DEFAULT_MODEL=${PROVIDER_DEFAULT_MODEL}
restart: unless-stopped restart: unless-stopped
# tools_webhook: # tools_webhook:
@ -150,6 +157,7 @@ services:
- MONGODB_CONNECTION_STRING=mongodb://mongo:27017/rowboat - MONGODB_CONNECTION_STRING=mongodb://mongo:27017/rowboat
- QDRANT_URL=${QDRANT_URL} - QDRANT_URL=${QDRANT_URL}
- QDRANT_API_KEY=${QDRANT_API_KEY} - QDRANT_API_KEY=${QDRANT_API_KEY}
- REDIS_URL=redis://redis:6379
restart: unless-stopped restart: unless-stopped
# chat_widget: # chat_widget: