mv experimental apps

This commit is contained in:
Ramnique Singh 2025-04-07 23:53:17 +05:30
parent 7f6ece90f8
commit f722591ccd
53 changed files with 31 additions and 31 deletions

View file

@ -0,0 +1,8 @@
Dockerfile
.dockerignore
node_modules
npm-debug.log
README.md
.next
.git
.env*

View file

@ -0,0 +1,3 @@
{
"extends": ["next/core-web-vitals", "next/typescript"]
}

View file

@ -0,0 +1,40 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.*
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/versions
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# env files (can opt-in for committing if needed)
.env*
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts

View file

@ -0,0 +1,68 @@
# syntax=docker.io/docker/dockerfile:1
FROM node:18-alpine AS base

# Install dependencies only when needed
FROM base AS deps
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
RUN apk add --no-cache libc6-compat
WORKDIR /app

# Install dependencies based on the preferred package manager
COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* .npmrc* ./
RUN \
  if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
  elif [ -f package-lock.json ]; then npm ci; \
  elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm i --frozen-lockfile; \
  else echo "Lockfile not found." && exit 1; \
  fi

# Rebuild the source code only when needed
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .

# Next.js collects completely anonymous telemetry data about general usage.
# Learn more here: https://nextjs.org/telemetry
# Uncomment the following line in case you want to disable telemetry during the build.
# ENV NEXT_TELEMETRY_DISABLED=1

RUN \
  if [ -f yarn.lock ]; then yarn run build; \
  elif [ -f package-lock.json ]; then npm run build; \
  elif [ -f pnpm-lock.yaml ]; then corepack enable pnpm && pnpm run build; \
  else echo "Lockfile not found." && exit 1; \
  fi

# Production image, copy all the files and run next
FROM base AS runner
WORKDIR /app

ENV NODE_ENV=production
# Uncomment the following line in case you want to disable telemetry during runtime.
# ENV NEXT_TELEMETRY_DISABLED=1

# Run as an unprivileged user inside the container
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs

COPY --from=builder /app/public ./public

# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static

USER nextjs
EXPOSE 3000

# server.js is created by next build from the standalone output
# https://nextjs.org/docs/pages/api-reference/config/next-config-js/output
ENV HOSTNAME="0.0.0.0"
# NOTE: PORT was previously declared twice; a single declaration is kept.
ENV PORT=3000

# Shell-form CMD so the startup echo can expand the host env vars for logging.
CMD echo "Starting server $CHAT_WIDGET_HOST, $ROWBOAT_HOST" && node server.js
#CMD ["node", "server.js"]

View file

@ -0,0 +1,36 @@
This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app).
## Getting Started
First, run the development server:
```bash
npm run dev
# or
yarn dev
# or
pnpm dev
# or
bun dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
## Learn More
To learn more about Next.js, take a look at the following resources:
- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome!
## Deploy on Vercel
The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.

View file

@ -0,0 +1,27 @@
export const dynamic = 'force-dynamic'
// Fetch template once when module loads
const templatePromise = fetch(process.env.CHAT_WIDGET_HOST + '/bootstrap.template.js')
.then(res => res.text());
export async function GET() {
try {
// Reuse the cached content
const template = await templatePromise;
// Replace placeholder values with actual URLs
const contents = template
.replace('__CHAT_WIDGET_HOST__', process.env.CHAT_WIDGET_HOST || '')
.replace('__ROWBOAT_HOST__', process.env.ROWBOAT_HOST || '');
return new Response(contents, {
headers: {
'Content-Type': 'application/javascript',
'Cache-Control': 'no-cache, no-store, must-revalidate',
},
});
} catch (error) {
console.error('Error serving bootstrap.js:', error);
return new Response('Error loading script', { status: 500 });
}
}

View file

@ -0,0 +1,466 @@
"use client";
import { useEffect, useRef, useState, useCallback } from "react";
import { useSearchParams } from "next/navigation";
import { apiV1 } from "rowboat-shared";
import { z } from "zod";
import { Button, Dropdown, DropdownItem, DropdownMenu, DropdownTrigger, Textarea } from "@nextui-org/react";
import MarkdownContent from "./markdown-content";
// One chat transcript entry. Mirrors the wire format of the widget API:
// 'tool' messages additionally carry the originating tool call id and name.
type Message = {
    role: "user" | "assistant" | "system" | "tool";
    content: string;
    tool_call_id?: string;
    tool_name?: string;
}
// Title bar of the chat window: a "Chat" label, an overflow menu with a
// "Close chat" action (only shown while a chat exists and is not closed),
// and a chevron button that minimizes the widget.
function ChatWindowHeader({
    chatId,
    closeChat,
    closed,
    setMinimized,
}: {
    chatId: string | null;
    closeChat: () => Promise<void>;
    closed: boolean;
    setMinimized: (minimized: boolean) => void;
}) {
    return <div className="shrink-0 flex justify-between items-center gap-2 bg-gray-400 px-2 py-1 rounded-t-lg dark:bg-gray-800">
        <div className="text-gray-800 dark:text-white">Chat</div>
        <div className="flex gap-1 items-center">
            {/* overflow menu — only offered when there is an open chat to close */}
            {(chatId && !closed) && <Dropdown>
                <DropdownTrigger>
                    <button>
                        <svg className="w-6 h-6 text-gray-800 dark:text-white" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="none" viewBox="0 0 24 24">
                            <path stroke="currentColor" strokeLinecap="round" strokeWidth="2" d="M6 12h.01m6 0h.01m5.99 0h.01" />
                        </svg>
                    </button>
                </DropdownTrigger>
                <DropdownMenu onAction={(key) => {
                    if (key === "close") {
                        closeChat();
                    }
                }}>
                    <DropdownItem key="close">
                        Close chat
                    </DropdownItem>
                </DropdownMenu>
            </Dropdown>}
            {/* minimize (chevron-down) button */}
            <button onClick={() => setMinimized(true)}>
                <svg className="w-6 h-6 text-gray-800 dark:text-white" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="none" viewBox="0 0 24 24">
                    <path stroke="currentColor" strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="m19 9-7 7-7-7" />
                </svg>
            </button>
        </div>
    </div>
}
// Typing indicator: an avatar placeholder next to three staggered
// bouncing dots, shown while waiting for the assistant's reply.
function LoadingAssistantResponse() {
    return <div className="flex gap-2 items-end">
        <div className="shrink-0 w-10 h-10 bg-gray-400 rounded-full dark:bg-gray-800"></div>
        <div className="bg-white rounded-md dark:bg-gray-800 text-gray-800 dark:text-white mr-[20%] rounded-bl-none p-2">
            <div className="flex gap-1">
                <div className="w-2 h-2 rounded-full bg-gray-400 dark:bg-gray-600 animate-bounce"></div>
                <div className="w-2 h-2 rounded-full bg-gray-400 dark:bg-gray-600 animate-bounce [animation-delay:0.2s]"></div>
                <div className="w-2 h-2 rounded-full bg-gray-400 dark:bg-gray-600 animate-bounce [animation-delay:0.4s]"></div>
            </div>
        </div>
    </div>
}
// Left-aligned assistant bubble with an "Assistant" caption. String children
// are rendered as markdown; anything else is rendered as-is.
function AssistantMessage({
    children,
}: {
    children: React.ReactNode;
}) {
    return <div className="flex flex-col gap-1 items-start">
        <div className="text-gray-800 dark:text-white text-xs pl-2">Assistant</div>
        <div className="bg-gray-200 rounded-md dark:bg-gray-800 text-gray-800 dark:text-white mr-[20%] rounded-bl-none p-2">
            {typeof children === 'string' ? <MarkdownContent content={children} /> : children}
        </div>
    </div>
}
// Right-aligned user bubble. String children are rendered as markdown;
// anything else is rendered as-is.
function UserMessage({
    children,
}: {
    children: React.ReactNode;
}) {
    return <div className="flex flex-col gap-1 items-end">
        <div className="bg-gray-200 rounded-md dark:bg-gray-800 text-gray-800 dark:text-white ml-[20%] rounded-br-none p-2">
            {typeof children === 'string' ? <MarkdownContent content={children} /> : children}
        </div>
    </div>
}
// Scrollable transcript: a static greeting, then each message rendered by
// role (system messages are hidden), a typing indicator while a response is
// pending, and a sentinel div used to auto-scroll to the newest message.
function ChatWindowMessages({
    messages,
    loadingAssistantResponse,
}: {
    messages: Message[];
    loadingAssistantResponse: boolean;
}) {
    const messagesEndRef = useRef<HTMLDivElement>(null);

    // Keep the newest message in view whenever the transcript changes.
    useEffect(() => {
        messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
    }, [messages]);

    return <div className="flex flex-col grow p-2 gap-4 overflow-auto">
        <AssistantMessage>
            Hello! I&apos;m Rowboat, your personal assistant. How can I help you today?
        </AssistantMessage>
        {messages.map((message, index) => {
            switch (message.role) {
                case "user":
                    return <UserMessage key={index}>{message.content}</UserMessage>;
                case "assistant":
                    return <AssistantMessage key={index}>{message.content}</AssistantMessage>;
                case "system":
                    return null; // Hide system messages from the UI
                case "tool":
                    return <AssistantMessage key={index}>
                        Tool response ({message.tool_name}): {message.content}
                    </AssistantMessage>;
                default:
                    return null;
            }
        })}
        {loadingAssistantResponse && <LoadingAssistantResponse />}
        {/* scroll anchor */}
        <div ref={messagesEndRef} />
    </div>
}
// Message composer. Plain Enter submits; Shift+Enter inserts a newline.
function ChatWindowInput({
    handleUserMessage,
}: {
    handleUserMessage: (message: string) => Promise<void>;
}) {
    const [prompt, setPrompt] = useState<string>("");

    function handleInputKeyDown(event: React.KeyboardEvent<HTMLInputElement>) {
        if (event.key === 'Enter' && !event.shiftKey) {
            event.preventDefault();
            const input = prompt.trim();
            setPrompt('');
            // Previously an empty/whitespace-only prompt was still submitted,
            // sending a blank user message to the API. Ignore it instead.
            if (!input) {
                return;
            }
            // Fire-and-forget: errors are handled by the caller's flow.
            void handleUserMessage(input);
        }
    }

    return <div className="bg-white rounded-md dark:bg-gray-900 shrink-0 p-2">
        <Textarea
            placeholder="Ask me anything..."
            minRows={1}
            maxRows={3}
            variant="flat"
            className="w-full"
            onKeyDown={handleInputKeyDown}
            value={prompt}
            onValueChange={setPrompt}
        />
    </div>
}
// Transcript + composer. Owns the "assistant is typing" flag and drives one
// user turn: append the user message, lazily create a chat on first message,
// then fetch and append the assistant response.
function ChatWindowBody({
    chatId,
    createChat,
    getAssistantResponse,
    closed,
    resetState,
    messages,
    setMessages,
}: {
    chatId: string | null;
    createChat: () => Promise<string>;
    getAssistantResponse: (chatId: string, message: string) => Promise<Message>;
    closed: boolean;
    resetState: () => Promise<void>;
    messages: Message[];
    setMessages: (messages: Message[]) => void;
}) {
    const [loadingAssistantResponse, setLoadingAssistantResponse] = useState<boolean>(false);

    async function handleUserMessage(message: string) {
        const userMessage: Message = { role: "user", content: message };
        setMessages([...messages, userMessage]);
        setLoadingAssistantResponse(true);
        // try/finally so a failed createChat/getAssistantResponse doesn't leave
        // the typing indicator spinning forever (previously the flag was only
        // cleared on the success path).
        try {
            let availableChatId = chatId;
            if (!availableChatId) {
                availableChatId = await createChat();
            }
            const response = await getAssistantResponse(availableChatId, message);
            setMessages([...messages, userMessage, response]);
        } finally {
            setLoadingAssistantResponse(false);
        }
    }

    return <div className="flex flex-col grow bg-white rounded-b-lg dark:bg-gray-900 overflow-auto">
        <ChatWindowMessages messages={messages} loadingAssistantResponse={loadingAssistantResponse} />
        {!closed && <ChatWindowInput
            handleUserMessage={handleUserMessage}
        />}
        {closed && <div className="flex flex-col items-center py-4 gap-2">
            <div className="text-gray-800 dark:text-white">This chat is closed</div>
            <Button
                onPress={resetState}
            >
                Start new chat
            </Button>
        </div>}
    </div>
}
// Composition wrapper: header bar on top, transcript/composer body below.
// All state and API callbacks are owned by the parent (App) and threaded
// through as props.
function ChatWindow({
    chatId,
    closed,
    closeChat,
    createChat,
    getAssistantResponse,
    resetState,
    messages,
    setMessages,
    setMinimized,
}: {
    chatId: string | null;
    closed: boolean;
    closeChat: () => Promise<void>;
    createChat: () => Promise<string>;
    getAssistantResponse: (chatId: string, message: string) => Promise<Message>;
    resetState: () => Promise<void>;
    messages: Message[];
    setMessages: (messages: Message[]) => void;
    setMinimized: (minimized: boolean) => void;
}) {
    return <div className="h-full flex flex-col rounded-lg overflow-hidden">
        <ChatWindowHeader
            chatId={chatId}
            closeChat={closeChat}
            closed={closed}
            setMinimized={setMinimized}
        />
        <ChatWindowBody
            chatId={chatId}
            createChat={createChat}
            getAssistantResponse={getAssistantResponse}
            closed={closed}
            resetState={resetState}
            messages={messages}
            setMessages={setMessages}
        />
    </div>
}
// Root widget component. Owns all chat state (chat id, messages, closed and
// minimized flags) and implements the API calls against the widget backend.
// Communicates with the embedding page via window.parent.postMessage
// ('chatLoaded', 'chatStateChange', 'sessionExpired'). Renders either the
// minimized launcher button or the full chat window.
export function App({
    apiUrl,
}: {
    apiUrl: string;
}) {
    const searchParams = useSearchParams();
    const sessionId = searchParams.get("session_id");
    const [minimized, setMinimized] = useState(searchParams.get("minimized") === 'true');
    const [chatId, setChatId] = useState<string | null>(null);
    const [closed, setClosed] = useState(false);
    const [messages, setMessages] = useState<Message[]>([]);

    // Fetch the most recent chat (if any) plus all of its messages, following
    // the pagination cursor. Returns null when there is no prior chat or the
    // session has expired (in which case the parent page is notified).
    const fetchLastChat = useCallback(async (): Promise<{
        chat: z.infer<typeof apiV1.ApiGetChatsResponse.shape.chats.element>;
        messages: Message[];
    } | null> => {
        const response = await fetch(`${apiUrl}/chats`, {
            headers: {
                "Authorization": `Bearer ${sessionId}`,
            },
        });
        if (response.status === 403) {
            window.parent.postMessage({
                type: 'sessionExpired'
            }, '*');
            return null;
        }
        if (!response.ok) {
            throw new Error("Failed to fetch last chat");
        }
        const { chats }: z.infer<typeof apiV1.ApiGetChatsResponse> = await response.json();
        if (chats.length === 0) {
            return null;
        }
        const chat = chats[0];
        // fetch all chat messages, page by page
        let allMessages: Message[] = [];
        let nextCursor: string | undefined = undefined;
        do {
            const url = new URL(`${apiUrl}/chats/${chat.id}/messages`);
            if (nextCursor) {
                url.searchParams.set('next', nextCursor);
            }
            const messagesResponse = await fetch(url, {
                headers: {
                    "Authorization": `Bearer ${sessionId}`,
                },
            });
            if (!messagesResponse.ok) {
                throw new Error("Failed to fetch chat messages");
            }
            const { messages, next }: z.infer<typeof apiV1.ApiGetChatMessagesResponse> = await messagesResponse.json();
            const formattedMessages = messages.map(m => ({
                role: m.role,
                content: m.role === "assistant" ? (m.content || '') : m.content,
                ...(m.role === "tool" ? {
                    tool_call_id: m.tool_call_id,
                    tool_name: m.tool_name,
                } : {})
            }));
            allMessages = [...allMessages, ...formattedMessages];
            nextCursor = next;
        } while (nextCursor);
        return {
            chat,
            messages: allMessages,
        };
    }, [sessionId, apiUrl]);

    // Clear all local chat state so a fresh chat can be started.
    async function resetState() {
        setChatId(null);
        setClosed(false);
        setMessages([]);
    }

    // Close the current chat on the server, then mark it closed locally.
    async function closeChat() {
        const response = await fetch(`${apiUrl}/chats/${chatId}/close`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${sessionId}`,
            },
        });
        if (response.status === 403) {
            window.parent.postMessage({
                type: 'sessionExpired'
            }, '*');
            return;
        }
        if (!response.ok) {
            throw new Error("Failed to close chat");
        }
        setClosed(true);
    }

    // Create a new chat and remember its id.
    async function createChat(): Promise<string> {
        const response = await fetch(`${apiUrl}/chats`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${sessionId}`,
            },
            body: JSON.stringify({}),
        });
        if (response.status === 403) {
            window.parent.postMessage({
                type: 'sessionExpired'
            }, '*');
            throw new Error("Session expired");
        }
        // Previously only 403 was checked here; other failures fell through to
        // response.json() and surfaced as confusing parse errors. Check ok
        // explicitly, consistent with closeChat/getAssistantResponse.
        if (!response.ok) {
            throw new Error("Failed to create chat");
        }
        const { id }: z.infer<typeof apiV1.ApiCreateChatResponse> = await response.json();
        setChatId(id);
        return id;
    }

    // Send one user turn and return the assistant's reply as a Message.
    async function getAssistantResponse(chatId: string, message: string): Promise<Message> {
        const response = await fetch(`${apiUrl}/chats/${chatId}/turn`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${sessionId}`,
            },
            body: JSON.stringify({
                message: message,
            }),
        });
        if (response.status === 403) {
            window.parent.postMessage({
                type: 'sessionExpired'
            }, '*');
            throw new Error("Session expired");
        }
        if (!response.ok) {
            throw new Error("Failed to get assistant response");
        }
        const { content }: z.infer<typeof apiV1.ApiChatTurnResponse> = await response.json();
        return {
            role: "assistant",
            content: content || '',
        };
    }

    // Tell the embedding page whenever the minimized state changes so it can
    // resize the iframe.
    useEffect(() => {
        window.parent.postMessage({
            type: 'chatStateChange',
            isMinimized: minimized
        }, '*');
    }, [minimized]);

    // On mount (and when the session changes): restore the last chat, then
    // signal 'chatLoaded' so the parent un-hides the iframe. The abort flag
    // guards against state updates after unmount.
    useEffect(() => {
        let abort = false;
        async function process(){
            const lastChat = await fetchLastChat();
            if (abort) {
                return;
            }
            if (lastChat) {
                setChatId(lastChat.chat.id);
                setClosed(lastChat.chat.closed || false);
                setMessages(lastChat.messages);
            }
        }
        process()
            .finally(() => {
                if (!abort) {
                    window.parent.postMessage({
                        type: 'chatLoaded',
                    }, '*');
                }
            });
        return () => {
            abort = true;
        }
    }, [sessionId, fetchLastChat]);

    // Without a session id there is nothing to render.
    if (!sessionId) {
        return <></>;
    }

    return <>
        {minimized && <div className="fixed bottom-0 right-0">
            <button
                onClick={() => setMinimized(false)}
                className="w-12 h-12 bg-gray-200 dark:bg-gray-800 hover:bg-gray-300 dark:hover:bg-gray-700 rounded-full flex items-center justify-center shadow-lg transition-colors"
            >
                <svg className="w-6 h-6 text-gray-800 dark:text-white" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="none" viewBox="0 0 24 24">
                    <path stroke="currentColor" strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="M9 17h6l3 3v-3h2V9h-2M4 4h11v8H9l-3 3v-3H4V4Z" />
                </svg>
            </button>
        </div>}
        {!minimized && <div className="fixed h-full">
            <ChatWindow
                key={sessionId}
                chatId={chatId}
                closed={closed}
                closeChat={closeChat}
                createChat={createChat}
                getAssistantResponse={getAssistantResponse}
                resetState={resetState}
                messages={messages}
                setMessages={setMessages}
                setMinimized={setMinimized}
            />
        </div>}
    </>
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

View file

@ -0,0 +1,7 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
body {
font-family: Arial, Helvetica, sans-serif;
}

View file

@ -0,0 +1,35 @@
import type { Metadata } from "next";
import localFont from "next/font/local";
import "./globals.css";
// Local Geist font files exposed as CSS variables (consumed by Tailwind /
// global styles via --font-geist-sans / --font-geist-mono).
const geistSans = localFont({
    src: "./fonts/GeistVF.woff",
    variable: "--font-geist-sans",
    weight: "100 900",
});
const geistMono = localFont({
    src: "./fonts/GeistMonoVF.woff",
    variable: "--font-geist-mono",
    weight: "100 900",
});

export const metadata: Metadata = {
    title: "RowBoat Chat",
    description: "RowBoat Chat",
};

// Root layout: attaches the font variables and keeps html/body full-height
// with a transparent background so the widget iframe blends into the host page.
export default function RootLayout({
    children,
}: Readonly<{
    children: React.ReactNode;
}>) {
    return (
        <html lang="en" className="h-full bg-transparent">
            <body
                className={`${geistSans.variable} ${geistMono.variable} antialiased h-full`}
            >
                {children}
            </body>
        </html>
    );
}

View file

@ -0,0 +1,51 @@
import Markdown from 'react-markdown'
import remarkGfm from 'remark-gfm'
// Renders chat message markdown (GFM) with Tailwind-styled element renderers.
// Links open in a new tab with an external-link icon.
export default function MarkdownContent({ content }: { content: string }) {
    return <Markdown
        className="overflow-auto break-words"
        remarkPlugins={[remarkGfm]}
        components={{
            strong({ children }) {
                return <span className="font-semibold">{children}</span>
            },
            p({ children }) {
                return <p className="py-1">{children}</p>
            },
            ul({ children }) {
                return <ul className="py-1 pl-5 list-disc">{children}</ul>
            },
            ol({ children }) {
                // Fixed: this renderer previously emitted a <ul>, so ordered
                // lists lost their semantics (and numbering for non-CSS
                // consumers such as screen readers).
                return <ol className="py-1 pl-5 list-decimal">{children}</ol>
            },
            h3({ children }) {
                return <h3 className="font-semibold">{children}</h3>
            },
            table({ children }) {
                return <table className="my-1 border-collapse border border-gray-400 rounded">{children}</table>
            },
            th({ children }) {
                return <th className="px-2 py-1 border-collapse border border-gray-300 rounded">{children}</th>
            },
            td({ children }) {
                return <td className="px-2 py-1 border-collapse border border-gray-300 rounded">{children}</td>
            },
            blockquote({ children }) {
                return <blockquote className='bg-gray-200 px-1'>{children}</blockquote>;
            },
            a(props) {
                const { children, ...rest } = props
                // rel added: markdown content is untrusted, and target="_blank"
                // links without noopener allow reverse tabnabbing.
                return <a className="inline-flex items-center gap-1" target="_blank" rel="noopener noreferrer" {...rest} >
                    <span className='underline'>
                        {children}
                    </span>
                    <svg className="w-[16px] h-[16px]" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="none" viewBox="0 0 24 24">
                        <path stroke="currentColor" strokeLinecap="round" strokeLinejoin="round" strokeWidth="1" d="M18 14v4.833A1.166 1.166 0 0 1 16.833 20H5.167A1.167 1.167 0 0 1 4 18.833V7.167A1.166 1.166 0 0 1 5.167 6h4.618m4.447-2H20v5.768m-7.889 2.121 7.778-7.778" />
                    </svg>
                </a>
            },
        }}
    >
        {content}
    </Markdown>;
}

View file

@ -0,0 +1,10 @@
import { Suspense } from 'react';
import { App } from './app';
export const dynamic = 'force-dynamic';
export default function Page() {
return <Suspense>
<App apiUrl={`${process.env.ROWBOAT_HOST}/api/widget/v1`} />
</Suspense>
}

View file

@ -0,0 +1,16 @@
import * as React from "react";
// 1. import `NextUIProvider` component
import {NextUIProvider} from "@nextui-org/react";
// Wraps the app in NextUIProvider so NextUI components get their context.
export default function Providers({
    children,
}: {
    children: React.ReactNode;
}) {
    return <NextUIProvider>{children}</NextUIProvider>;
}

View file

@ -0,0 +1,6 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
    // 'standalone' makes `next build` emit a self-contained server bundle
    // (.next/standalone with server.js) that the Dockerfile runner stage copies.
    output: 'standalone',
};
export default nextConfig;

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,32 @@
{
"name": "chat-widget",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint"
},
"dependencies": {
"@nextui-org/react": "^2.4.8",
"framer-motion": "^11.11.11",
"next": "^14.2.25",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-markdown": "^9.0.1",
"remark-gfm": "^4.0.0",
"rowboat-shared": "github:rowboatlabs/shared",
"zod": "^3.23.8"
},
"devDependencies": {
"@types/node": "^20",
"@types/react": "^18",
"@types/react-dom": "^18",
"eslint": "^8",
"eslint-config-next": "15.0.2",
"postcss": "^8",
"tailwindcss": "^3.4.1",
"typescript": "^5"
}
}

View file

@ -0,0 +1,8 @@
/** @type {import('postcss-load-config').Config} */
// PostCSS pipeline for the widget: Tailwind only.
const config = {
    plugins: {
        tailwindcss: {},
    },
};
export default config;

View file

@ -0,0 +1,183 @@
// Widget configuration. The __CHAT_WIDGET_HOST__ / __ROWBOAT_HOST__
// placeholders are substituted server-side before this script is served.
const CONFIG = {
    CHAT_URL: '__CHAT_WIDGET_HOST__',
    API_URL: '__ROWBOAT_HOST__/api/widget/v1',
    // localStorage / cookie keys used to persist widget state across loads.
    STORAGE_KEYS: {
        MINIMIZED: 'rowboat_chat_minimized',
        SESSION: 'rowboat_session_id'
    },
    // Iframe style sets: BASE is always applied; MINIMIZED/MAXIMIZED are
    // swapped in depending on the widget state.
    IFRAME_STYLES: {
        MINIMIZED: {
            width: '48px',
            height: '48px',
            borderRadius: '50%'
        },
        MAXIMIZED: {
            width: '400px',
            height: 'min(calc(100vh - 32px), 600px)',
            borderRadius: '10px'
        },
        BASE: {
            position: 'fixed',
            bottom: '20px',
            right: '20px',
            border: 'none',
            boxShadow: '0 4px 12px rgba(0, 0, 0, 0.15)',
            zIndex: '999999',
            transition: 'all 0.1s ease-in-out'
        }
    }
};
// Session lifecycle against the widget API.
class SessionManager {
    // Create a guest session and persist its id in a cookie.
    // Returns true on success, false on any failure (network error or non-2xx).
    static async createGuestSession() {
        try {
            const response = await fetch(`${CONFIG.API_URL}/session/guest`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    // The embedding page is expected to define
                    // window.ROWBOAT_CONFIG.clientId before loading this script.
                    'x-client-id': window.ROWBOAT_CONFIG.clientId
                },
            });
            if (!response.ok) throw new Error('Failed to create session');
            const data = await response.json();
            CookieManager.setCookie(CONFIG.STORAGE_KEYS.SESSION, data.sessionId);
            return true;
        } catch (error) {
            console.error('Failed to create chat session:', error);
            return false;
        }
    }
}
// Thin wrapper over document.cookie for persisting the widget session id.
class CookieManager {
    // Return the value of the named cookie, or null when absent.
    static getCookie(name) {
        const segments = `; ${document.cookie}`.split(`; ${name}=`);
        if (segments.length !== 2) return null;
        return segments[1].split(';')[0];
    }

    // Set a site-wide session cookie (no explicit expiry).
    static setCookie(name, value) {
        document.cookie = `${name}=${value}; path=/`;
    }

    // Expire the cookie immediately by backdating it.
    static deleteCookie(name) {
        document.cookie = `${name}=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT`;
    }
}
// New IframeManager class to handle iframe-specific operations
class IframeManager {
static createIframe(url, isMinimized) {
const iframe = document.createElement('iframe');
iframe.hidden = true;
iframe.src = url.toString();
Object.assign(iframe.style, CONFIG.IFRAME_STYLES.BASE);
IframeManager.updateSize(iframe, isMinimized);
return iframe;
}
static updateSize(iframe, isMinimized) {
const styles = isMinimized ? CONFIG.IFRAME_STYLES.MINIMIZED : CONFIG.IFRAME_STYLES.MAXIMIZED;
Object.assign(iframe.style, styles);
}
static removeIframe(iframe) {
if (iframe && iframe.parentNode) {
iframe.parentNode.removeChild(iframe);
}
}
}
// Orchestrates the widget lifecycle: session bootstrap, iframe mount, and
// postMessage handling from the embedded chat app.
class ChatWidget {
    constructor() {
        this.iframe = null;
        // Message types posted by the iframe, mapped to their handlers.
        this.messageHandlers = {
            chatLoaded: () => this.iframe.hidden = false,
            chatStateChange: (data) => this.handleStateChange(data),
            sessionExpired: () => this.handleSessionExpired()
        };
        this.init();
    }

    // Ensure a session cookie exists (creating a guest session if needed),
    // then mount the iframe and start listening for its messages.
    async init() {
        const sessionId = CookieManager.getCookie(CONFIG.STORAGE_KEYS.SESSION);
        if (!sessionId && !(await SessionManager.createGuestSession())) {
            console.error('Chat widget initialization failed: Could not create session');
            return;
        }
        this.createAndMountIframe();
        this.setupEventListeners();
    }

    // Create the iframe for the current session/state and append it to <body>.
    createAndMountIframe() {
        const url = this.buildUrl();
        const isMinimized = this.getStoredMinimizedState();
        this.iframe = IframeManager.createIframe(url, isMinimized);
        document.body.appendChild(this.iframe);
    }

    // Build the iframe URL, propagating session id and minimized state.
    buildUrl() {
        const sessionId = CookieManager.getCookie(CONFIG.STORAGE_KEYS.SESSION);
        const isMinimized = this.getStoredMinimizedState();
        const url = new URL(`${CONFIG.CHAT_URL}/`);
        url.searchParams.append('session_id', sessionId);
        url.searchParams.append('minimized', isMinimized);
        return url;
    }

    setupEventListeners() {
        window.addEventListener('message', (event) => this.handleMessage(event));
    }

    // Dispatch messages from the iframe, rejecting other origins.
    handleMessage(event) {
        if (event.origin !== CONFIG.CHAT_URL) return;
        if (this.messageHandlers[event.data.type]) {
            this.messageHandlers[event.data.type](event.data);
        }
    }

    // Drop the stale session, create a fresh guest session, and remount.
    async handleSessionExpired() {
        console.log("Session expired");
        IframeManager.removeIframe(this.iframe);
        CookieManager.deleteCookie(CONFIG.STORAGE_KEYS.SESSION);
        const sessionCreated = await SessionManager.createGuestSession();
        if (!sessionCreated) {
            console.error('Failed to recreate session after expiry');
            return;
        }
        // createAndMountIframe() already appends the new iframe to the body.
        // The previous extra document.body.appendChild(this.iframe) here was a
        // redundant double-append and has been removed.
        this.createAndMountIframe();
    }

    // Persist the minimized state and resize the iframe to match.
    handleStateChange(data) {
        localStorage.setItem(CONFIG.STORAGE_KEYS.MINIMIZED, data.isMinimized);
        IframeManager.updateSize(this.iframe, data.isMinimized);
    }

    // Default to minimized unless the user explicitly maximized previously.
    getStoredMinimizedState() {
        return localStorage.getItem(CONFIG.STORAGE_KEYS.MINIMIZED) !== 'false';
    }
}
// Initialize once the page has loaded: construct immediately if loading has
// already finished, otherwise wait for the window 'load' event.
if (document.readyState === 'complete') {
    new ChatWidget();
} else {
    window.addEventListener('load', () => new ChatWidget());
}

View file

@ -0,0 +1 @@
<svg fill="none" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg"><path d="M14.5 13.5V5.41a1 1 0 0 0-.3-.7L9.8.29A1 1 0 0 0 9.08 0H1.5v13.5A2.5 2.5 0 0 0 4 16h8a2.5 2.5 0 0 0 2.5-2.5m-1.5 0v-7H8v-5H3v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1M9.5 5V2.12L12.38 5zM5.13 5h-.62v1.25h2.12V5zm-.62 3h7.12v1.25H4.5zm.62 3h-.62v1.25h7.12V11z" clip-rule="evenodd" fill="#666" fill-rule="evenodd"/></svg>

After

Width:  |  Height:  |  Size: 391 B

View file

@ -0,0 +1 @@
<svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><g clip-path="url(#a)"><path fill-rule="evenodd" clip-rule="evenodd" d="M10.27 14.1a6.5 6.5 0 0 0 3.67-3.45q-1.24.21-2.7.34-.31 1.83-.97 3.1M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16m.48-1.52a7 7 0 0 1-.96 0H7.5a4 4 0 0 1-.84-1.32q-.38-.89-.63-2.08a40 40 0 0 0 3.92 0q-.25 1.2-.63 2.08a4 4 0 0 1-.84 1.31zm2.94-4.76q1.66-.15 2.95-.43a7 7 0 0 0 0-2.58q-1.3-.27-2.95-.43a18 18 0 0 1 0 3.44m-1.27-3.54a17 17 0 0 1 0 3.64 39 39 0 0 1-4.3 0 17 17 0 0 1 0-3.64 39 39 0 0 1 4.3 0m1.1-1.17q1.45.13 2.69.34a6.5 6.5 0 0 0-3.67-3.44q.65 1.26.98 3.1M8.48 1.5l.01.02q.41.37.84 1.31.38.89.63 2.08a40 40 0 0 0-3.92 0q.25-1.2.63-2.08a4 4 0 0 1 .85-1.32 7 7 0 0 1 .96 0m-2.75.4a6.5 6.5 0 0 0-3.67 3.44 29 29 0 0 1 2.7-.34q.31-1.83.97-3.1M4.58 6.28q-1.66.16-2.95.43a7 7 0 0 0 0 2.58q1.3.27 2.95.43a18 18 0 0 1 0-3.44m.17 4.71q-1.45-.12-2.69-.34a6.5 6.5 0 0 0 3.67 3.44q-.65-1.27-.98-3.1" fill="#666"/></g><defs><clipPath id="a"><path fill="#fff" d="M0 0h16v16H0z"/></clipPath></defs></svg>

After

Width:  |  Height:  |  Size: 1 KiB

View file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg>

After

Width:  |  Height:  |  Size: 1.3 KiB

View file

@ -0,0 +1 @@
<svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1155 1000"><path d="m577.3 0 577.4 1000H0z" fill="#fff"/></svg>

After

Width:  |  Height:  |  Size: 128 B

View file

@ -0,0 +1 @@
<svg fill="none" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16"><path fill-rule="evenodd" clip-rule="evenodd" d="M1.5 2.5h13v10a1 1 0 0 1-1 1h-11a1 1 0 0 1-1-1zM0 1h16v11.5a2.5 2.5 0 0 1-2.5 2.5h-11A2.5 2.5 0 0 1 0 12.5zm3.75 4.5a.75.75 0 1 0 0-1.5.75.75 0 0 0 0 1.5M7 4.75a.75.75 0 1 1-1.5 0 .75.75 0 0 1 1.5 0m1.75.75a.75.75 0 1 0 0-1.5.75.75 0 0 0 0 1.5" fill="#666"/></svg>

After

Width:  |  Height:  |  Size: 385 B

View file

@ -0,0 +1,16 @@
import { nextui } from "@nextui-org/react";
import type { Config } from "tailwindcss";
// Tailwind scans the app/pages/components sources plus the NextUI theme dist
// files, so NextUI's generated classes are not purged from the build.
const config: Config = {
    content: [
        "./pages/**/*.{js,ts,jsx,tsx,mdx}",
        "./components/**/*.{js,ts,jsx,tsx,mdx}",
        "./app/**/*.{js,ts,jsx,tsx,mdx}",
        "./node_modules/@nextui-org/theme/dist/**/*.{js,ts,jsx,tsx}",
    ],
    theme: {
        extend: {},
    },
    plugins: [nextui()],
};
export default config;

View file

@ -0,0 +1,27 @@
{
"compilerOptions": {
"target": "ES2017",
"lib": ["dom", "dom.iterable", "esnext"],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [
{
"name": "next"
}
],
"paths": {
"@/*": ["./*"]
}
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"exclude": ["node_modules"]
}

View file

@ -0,0 +1,20 @@
# Use official Python runtime as base image
FROM python:3.11-slim
# Set working directory in container
WORKDIR /app
# Copy requirements file
COPY requirements.txt .
# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Copy project files
COPY . .
# Send Python output straight to the container logs (disable stdout buffering)
ENV PYTHONUNBUFFERED=1
# Command to run the simulation service
CMD ["python", "service.py"]

View file

@ -0,0 +1,171 @@
from pymongo import MongoClient
from bson import ObjectId
import os
from datetime import datetime, timedelta, timezone
from typing import Optional
from scenario_types import (
TestRun,
TestScenario,
TestSimulation,
TestResult,
AggregateResults
)
# Mongo connection string; .strip() tolerates stray whitespace in the env var.
MONGO_URI = os.environ.get("MONGODB_URI", "mongodb://localhost:27017/rowboat").strip()

# Collection names used by the simulation service.
TEST_SCENARIOS_COLLECTION = "test_scenarios"
TEST_SIMULATIONS_COLLECTION = "test_simulations"
TEST_RUNS_COLLECTION = "test_runs"
TEST_RESULTS_COLLECTION = "test_results"
API_KEYS_COLLECTION = "api_keys"
# Cached client: MongoClient maintains its own connection pool, so creating a
# new client on every call (as before) leaks connections and defeats pooling.
_mongo_client = None

def get_db():
    """Return a handle to the 'rowboat' database, reusing one MongoClient."""
    global _mongo_client
    if _mongo_client is None:
        _mongo_client = MongoClient(MONGO_URI)
    return _mongo_client["rowboat"]
def get_collection(collection_name: str):
    """Return the named collection from the rowboat database."""
    return get_db()[collection_name]
def get_api_key(project_id: str):
    """
    If you still use an API key pattern, adapt as needed.

    Looks up the API key document for `project_id`; returns the stored
    key string, or None when no document matches.
    """
    doc = get_collection(API_KEYS_COLLECTION).find_one({"projectId": project_id})
    return doc["key"] if doc else None
#
# TestRun helpers
#
def get_pending_run() -> Optional[TestRun]:
    """
    Finds a run with 'pending' status, marks it 'running', and returns it.

    The find-and-update is a single atomic operation, so two workers
    polling concurrently cannot claim the same run.
    """
    collection = get_collection(TEST_RUNS_COLLECTION)
    # return_document=True is pymongo's ReturnDocument.AFTER, i.e. the
    # document is returned with the status already flipped to 'running'.
    doc = collection.find_one_and_update(
        {"status": "pending"},
        {"$set": {"status": "running"}},
        return_document=True
    )
    if doc:
        # Rehydrate the Mongo document into the pydantic model; optional
        # fields use .get() because they may be absent on a fresh run.
        return TestRun(
            id=str(doc["_id"]),
            projectId=doc["projectId"],
            name=doc["name"],
            simulationIds=doc["simulationIds"],
            workflowId=doc["workflowId"],
            status="running",
            startedAt=doc["startedAt"],
            completedAt=doc.get("completedAt"),
            aggregateResults=doc.get("aggregateResults"),
            lastHeartbeat=doc.get("lastHeartbeat")
        )
    return None
def set_run_to_completed(test_run: TestRun, aggregate: AggregateResults):
    """
    Marks a test run 'completed' and sets the aggregate results.
    """
    # Serialize with camelCase aliases to match the stored document
    # schema, and stamp the completion time in UTC.
    update_fields = {
        "status": "completed",
        "aggregateResults": aggregate.model_dump(by_alias=True),
        "completedAt": datetime.now(timezone.utc),
    }
    get_collection(TEST_RUNS_COLLECTION).update_one(
        {"_id": ObjectId(test_run.id)},
        {"$set": update_fields},
    )
def update_run_heartbeat(run_id: str):
    """
    Updates the 'lastHeartbeat' timestamp for a TestRun.
    """
    now_utc = datetime.now(timezone.utc)
    get_collection(TEST_RUNS_COLLECTION).update_one(
        {"_id": ObjectId(run_id)},
        {"$set": {"lastHeartbeat": now_utc}},
    )
def mark_stale_jobs_as_failed(threshold_minutes: int = 20) -> int:
    """
    Finds any run in 'running' status whose lastHeartbeat is older than
    `threshold_minutes`, and sets it to 'failed'. Returns the count.
    """
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=threshold_minutes)
    stale_filter = {"status": "running", "lastHeartbeat": {"$lt": cutoff}}
    outcome = get_collection(TEST_RUNS_COLLECTION).update_many(
        stale_filter,
        {"$set": {"status": "failed"}},
    )
    return outcome.modified_count
#
# TestSimulation helpers
#
def get_simulations_for_run(test_run: TestRun) -> list[TestSimulation]:
    """
    Returns all simulations specified by a particular run.

    Guards against a None run (returns an empty list); simulations whose
    IDs are not found in the collection are simply absent from the result.
    """
    if test_run is None:
        return []
    wanted_ids = [ObjectId(sim_id) for sim_id in test_run.simulationIds]
    cursor = get_collection(TEST_SIMULATIONS_COLLECTION).find(
        {"_id": {"$in": wanted_ids}}
    )
    return [
        TestSimulation(
            id=str(doc["_id"]),
            projectId=doc["projectId"],
            name=doc["name"],
            scenarioId=doc["scenarioId"],
            profileId=doc["profileId"],
            passCriteria=doc["passCriteria"],
            createdAt=doc["createdAt"],
            lastUpdatedAt=doc["lastUpdatedAt"],
        )
        for doc in cursor
    ]
def get_scenario_by_id(scenario_id: str) -> Optional[TestScenario]:
    """
    Returns a TestScenario by its ID, or None when no document matches.

    Fix: the return annotation was `TestScenario`, but the function
    explicitly returns None on a miss — callers must handle the Optional.
    """
    collection = get_collection(TEST_SCENARIOS_COLLECTION)
    doc = collection.find_one({"_id": ObjectId(scenario_id)})
    if doc:
        return TestScenario(
            id=str(doc["_id"]),
            projectId=doc["projectId"],
            name=doc["name"],
            description=doc["description"],
            createdAt=doc["createdAt"],
            lastUpdatedAt=doc["lastUpdatedAt"]
        )
    return None
#
# TestResult helpers
#
def write_test_result(result: TestResult):
    """
    Writes a test result into the `test_results` collection.
    """
    collection = get_collection(TEST_RESULTS_COLLECTION)
    # model_dump() without aliases — fields are stored exactly as declared
    # on the TestResult model (projectId, runId, simulationId, ...).
    collection.insert_one(result.model_dump())

View file

@ -0,0 +1,29 @@
annotated-types==0.7.0
anyio==4.8.0
certifi==2025.1.31
charset-normalizer==3.4.1
distro==1.9.0
dnspython==2.7.0
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
iniconfig==2.0.0
jiter==0.8.2
motor==3.7.0
openai==1.63.0
packaging==24.2
pluggy==1.5.0
pydantic==2.10.6
pydantic_core==2.27.2
pymongo==4.11.1
pytest==8.3.4
pytest-asyncio==0.25.3
python-dateutil==2.9.0.post0
requests==2.32.3
rowboat==2.1.0
six==1.17.0
sniffio==1.3.1
tqdm==4.67.1
typing_extensions==4.12.2
urllib3==2.3.0

View file

@ -0,0 +1,50 @@
from datetime import datetime
from typing import Optional, List, Literal
from pydantic import BaseModel, Field
# Define run statuses to include the new "error" status
RunStatus = Literal["pending", "running", "completed", "cancelled", "failed", "error"]
class TestScenario(BaseModel):
    """A conversation scenario under test, as stored in `test_scenarios`."""
    # `_id` in Mongo will be stored as ObjectId; we return it as a string
    id: str
    projectId: str
    name: str
    description: str
    createdAt: datetime
    lastUpdatedAt: datetime
class TestSimulation(BaseModel):
    """Binds a scenario, a user profile and pass criteria into one runnable simulation."""
    id: str
    projectId: str
    name: str
    # String references into the scenario / profile collections.
    scenarioId: str
    profileId: str
    passCriteria: str
    createdAt: datetime
    lastUpdatedAt: datetime
class AggregateResults(BaseModel):
    """Pass/fail tallies for one completed run."""
    total: int
    passCount: int
    failCount: int
class TestRun(BaseModel):
    """One queued/executing batch of simulations, claimed and processed by the job service."""
    id: str
    projectId: str
    name: str
    simulationIds: List[str]
    workflowId: str
    status: RunStatus
    startedAt: datetime
    # Populated only once the run completes / reports progress.
    completedAt: Optional[datetime] = None
    aggregateResults: Optional[AggregateResults] = None
    lastHeartbeat: Optional[datetime] = None
class TestResult(BaseModel):
    """Verdict for a single simulation within a run, persisted to `test_results`."""
    projectId: str
    runId: str
    simulationId: str
    result: Literal["pass", "fail"]
    details: str
    transcript: str

View file

@ -0,0 +1,120 @@
import asyncio
import logging
from typing import List, Optional
# Updated imports from your new db module and scenario_types
from db import (
get_pending_run,
get_simulations_for_run,
set_run_to_completed,
get_api_key,
mark_stale_jobs_as_failed,
update_run_heartbeat
)
from scenario_types import TestRun, TestSimulation
# If you have a new simulation function, import it here.
# Otherwise, adapt the name as needed:
from simulation import simulate_simulations # or simulate_scenarios, if unchanged
logging.basicConfig(level=logging.INFO)
class JobService:
    """Polls MongoDB for pending test runs and executes them concurrently.

    - poll_and_process_jobs: main loop; claims pending runs and schedules them.
    - process_run: runs the simulations for one claimed run (bounded to 5
      concurrent runs by a semaphore) while emitting heartbeats.
    - fail_stale_runs_loop / heartbeat_loop: liveness bookkeeping so runs
      owned by a crashed worker are eventually marked 'failed'.
    """
    def __init__(self):
        # How long to wait between polls of the runs collection.
        self.poll_interval = 5 # seconds
        # Control concurrency of run processing
        self.semaphore = asyncio.Semaphore(5)
    async def poll_and_process_jobs(self, max_iterations: Optional[int] = None):
        """
        Periodically checks for new runs in MongoDB and processes them.

        `max_iterations` bounds the number of poll cycles (useful in tests);
        None means poll forever.
        """
        # Start the stale-run check in the background
        asyncio.create_task(self.fail_stale_runs_loop())
        iterations = 0
        while True:
            # get_pending_run() atomically flips 'pending' -> 'running', so
            # two workers cannot claim the same run.
            run = get_pending_run() # <--- changed to match new DB function
            if run:
                logging.info(f"Found new run: {run}. Processing...")
                asyncio.create_task(self.process_run(run))
            iterations += 1
            if max_iterations is not None and iterations >= max_iterations:
                break
            # Sleep for the polling interval
            await asyncio.sleep(self.poll_interval)
    async def process_run(self, run: TestRun):
        """
        Calls the simulation function and updates run status upon completion.
        """
        async with self.semaphore:
            # Start heartbeat in background
            stop_heartbeat_event = asyncio.Event()
            heartbeat_task = asyncio.create_task(self.heartbeat_loop(run.id, stop_heartbeat_event))
            try:
                # Fetch the simulations associated with this run
                simulations = get_simulations_for_run(run)
                if not simulations:
                    logging.info(f"No simulations found for run {run.id}")
                    return
                # Fetch API key if needed
                api_key = get_api_key(run.projectId)
                # Perform your simulation logic
                # adapt this call to your actual simulation functions signature
                aggregate_result = await simulate_simulations(
                    simulations=simulations,
                    run_id=run.id,
                    workflow_id=run.workflowId,
                    api_key=api_key
                )
                # Mark run as completed with the aggregated result
                set_run_to_completed(run, aggregate_result)
                logging.info(f"Run {run.id} completed.")
            except Exception as exc:
                # NOTE(review): on failure the run is left in 'running' until
                # the stale sweep marks it 'failed' (heartbeats stop below) —
                # confirm that delay is acceptable, or mark it failed here.
                logging.error(f"Run {run.id} failed: {exc}")
            finally:
                # Always stop the heartbeat so a dead run can go stale.
                stop_heartbeat_event.set()
                await heartbeat_task
    async def fail_stale_runs_loop(self):
        """
        Periodically checks for stale runs (no heartbeat) and marks them as 'failed'.
        """
        while True:
            count = mark_stale_jobs_as_failed()
            if count > 0:
                logging.warning(f"Marked {count} stale runs as failed.")
            await asyncio.sleep(60) # Check every 60 seconds
    async def heartbeat_loop(self, run_id: str, stop_event: asyncio.Event):
        """
        Periodically updates 'lastHeartbeat' for the given run until 'stop_event' is set.
        """
        try:
            while not stop_event.is_set():
                update_run_heartbeat(run_id)
                await asyncio.sleep(10) # Heartbeat interval in seconds
        except asyncio.CancelledError:
            # Cancellation is treated as a normal shutdown signal.
            pass
    def start(self):
        """
        Entry point to start the service event loop.
        """
        # NOTE(review): asyncio.get_event_loop() is deprecated for this use on
        # Python 3.10+; asyncio.run(...) is the modern equivalent.
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(self.poll_and_process_jobs())
        except KeyboardInterrupt:
            logging.info("Service stopped by user.")
        finally:
            loop.close()
if __name__ == "__main__":
    # Run the poller as a standalone process (e.g. inside the Docker image).
    service = JobService()
    service.start()

View file

@ -0,0 +1,198 @@
import asyncio
import logging
from typing import List
import json
import os
from openai import OpenAI
from scenario_types import TestSimulation, TestResult, AggregateResults, TestScenario
from db import write_test_result, get_scenario_by_id
from rowboat import Client, StatefulChat
openai_client = OpenAI()
MODEL_NAME = "gpt-4o"
ROWBOAT_API_HOST = os.environ.get("ROWBOAT_API_HOST", "http://127.0.0.1:3000").strip()
async def simulate_simulation(
    scenario: TestScenario,
    profile_id: str,
    pass_criteria: str,
    rowboat_client: Client,
    workflow_id: str,
    max_iterations: int = 5
) -> tuple[str, str, str]:
    """
    Runs a mock simulation for a given TestSimulation asynchronously.
    After simulating several turns of conversation, it evaluates the conversation.
    Returns a tuple of (evaluation_result, details, transcript_str):
    - evaluation_result: "pass" or "fail" verdict from the evaluator model.
    - details: the evaluator's brief explanation.
    - transcript_str: the message list serialized as a JSON string (roles
      are swapped before serialization — see note below).

    Raises:
        Exception: if the evaluator returns no choices or no 'verdict' field.

    Fix: removed a no-op `pass_criteria = pass_criteria` self-assignment.
    """
    loop = asyncio.get_running_loop()
    # Todo: add profile_id
    support_chat = StatefulChat(
        rowboat_client,
        workflow_id=workflow_id,
        test_profile_id=profile_id
    )
    messages = [
        {
            "role": "system",
            "content": (
                f"You are role playing a customer talking to a chatbot (the user is role playing the chatbot). Have the following chat with the chatbot. Scenario:\n{scenario.description}. You are provided no other information. If the chatbot asks you for information that is not in context, go ahead and provide one unless stated otherwise in the scenario. Directly have the chat with the chatbot. Start now with your first message."
            )
        }
    ]
    # -------------------------
    # (1) MAIN SIMULATION LOOP
    # -------------------------
    for _ in range(max_iterations):
        openai_input = messages
        # Run OpenAI API call in a separate thread (non-blocking)
        simulated_user_response = await loop.run_in_executor(
            None, # default ThreadPool
            lambda: openai_client.chat.completions.create(
                model=MODEL_NAME,
                messages=openai_input,
                temperature=0.0,
            )
        )
        simulated_content = simulated_user_response.choices[0].message.content.strip()
        messages.append({"role": "assistant", "content": simulated_content})
        # Run Rowboat chat in a thread if it's synchronous
        rowboat_response = await loop.run_in_executor(
            None,
            lambda: support_chat.run(simulated_content)
        )
        messages.append({"role": "user", "content": rowboat_response})
    # -------------------------
    # (2) EVALUATION STEP
    # -------------------------
    # swap the roles of the assistant and the user so the transcript reads
    # from the support bot's point of view for the evaluator
    transcript_str = ""
    for m in messages:
        if m.get("role") == "assistant":
            m["role"] = "user"
        elif m.get("role") == "user":
            m["role"] = "assistant"
        role = m.get("role", "unknown")
        content = m.get("content", "")
        transcript_str += f"{role.upper()}: {content}\n"
    # Store the transcript as a JSON string.
    # NOTE(review): `messages` was mutated above, so the persisted transcript
    # has the swapped roles — presumably intentional; confirm with consumers.
    transcript = json.dumps(messages)
    # We use passCriteria as the evaluation "criteria."
    evaluation_prompt = [
        {
            "role": "system",
            "content": (
                f"You are a neutral evaluator. Evaluate based on these criteria:\n"
                f"{pass_criteria}\n\n"
                "Return ONLY a JSON object in this format:\n"
                '{"verdict": "pass", "details": <reason>} or '
                '{"verdict": "fail", "details": <reason>}.'
            )
        },
        {
            "role": "user",
            "content": (
                f"Here is the conversation transcript:\n\n{transcript_str}\n\n"
                "Did the support bot answer correctly or not? "
                "Return only 'pass' or 'fail' for verdict, and a brief explanation for details."
            )
        }
    ]
    # Run evaluation in a separate thread
    eval_response = await loop.run_in_executor(
        None,
        lambda: openai_client.chat.completions.create(
            model=MODEL_NAME,
            messages=evaluation_prompt,
            temperature=0.0,
            response_format={"type": "json_object"}
        )
    )
    if not eval_response.choices:
        raise Exception("No evaluation response received from model")
    response_json_str = eval_response.choices[0].message.content
    # Attempt to parse the JSON
    response_json = json.loads(response_json_str)
    evaluation_result = response_json.get("verdict")
    details = response_json.get("details")
    if evaluation_result is None:
        raise Exception("No 'verdict' field found in evaluation response")
    return (evaluation_result, details, transcript)
async def simulate_simulations(
    simulations: List[TestSimulation],
    run_id: str,
    workflow_id: str,
    api_key: str,
    max_iterations: int = 5
) -> AggregateResults:
    """
    Simulates a list of TestSimulations asynchronously and aggregates the results.

    Each simulation's verdict is persisted as a TestResult before the
    aggregate is computed.

    Fix: the empty-input early return used `pass_=0, fail=0`, which are not
    fields of AggregateResults (total/passCount/failCount) — the missing
    required fields made pydantic raise a validation error on empty runs.
    """
    if not simulations:
        # Return an empty result if there's nothing to simulate
        return AggregateResults(total=0, passCount=0, failCount=0)
    project_id = simulations[0].projectId
    client = Client(
        host=ROWBOAT_API_HOST,
        project_id=project_id,
        api_key=api_key
    )
    # Store results here
    results: List[TestResult] = []
    for simulation in simulations:
        # Simulations run sequentially; each awaits its own chat turns.
        verdict, details, transcript = await simulate_simulation(
            scenario=get_scenario_by_id(simulation.scenarioId),
            profile_id=simulation.profileId,
            pass_criteria=simulation.passCriteria,
            rowboat_client=client,
            workflow_id=workflow_id,
            max_iterations=max_iterations
        )
        # Create a new TestResult
        test_result = TestResult(
            projectId=project_id,
            runId=run_id,
            simulationId=simulation.id,
            result=verdict,
            details=details,
            transcript=transcript
        )
        results.append(test_result)
        # Persist the test result
        write_test_result(test_result)
    # Aggregate pass/fail
    total_count = len(results)
    pass_count = sum(1 for r in results if r.result == "pass")
    fail_count = sum(1 for r in results if r.result == "fail")
    return AggregateResults(
        total=total_count,
        passCount=pass_count,
        failCount=fail_count
    )

View file

@ -0,0 +1,21 @@
# Use official Python runtime as base image
FROM python:3.11-slim
# Set working directory in container
WORKDIR /app
# Copy requirements file
COPY requirements.txt .
# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Copy project files
COPY . .
# Flask app module and unbuffered output (so logs show up in `docker logs`)
ENV FLASK_APP=app
ENV PYTHONUNBUFFERED=1
# Command to run Flask development server
CMD ["flask", "run", "--host=0.0.0.0", "--port=3005"]

View file

@ -0,0 +1,127 @@
# app.py
import hashlib
import json
import logging
import os
from functools import wraps
import jwt
from flask import Flask, jsonify, request
from jwt import InvalidTokenError
from .function_map import FUNCTIONS_MAP
from .tool_caller import call_tool
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def require_signed_request(f):
    """
    If SIGNING_SECRET is set, verifies the request content's SHA256 hash
    matches 'bodyHash' in the 'X-Signature-Jwt' header using HS256.
    If no SIGNING_SECRET is configured, skip the validation entirely.

    NOTE(review): only the JSON 'content' field is covered by the hash,
    not the full request body — confirm this matches the signer's side.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        signing_secret = os.environ.get("SIGNING_SECRET", "").strip()
        # 1) If no signing secret is set, skip validation
        if not signing_secret:
            return f(*args, **kwargs)
        # 2) Attempt to retrieve the JWT from the header
        signature_jwt = request.headers.get("X-Signature-Jwt")
        if not signature_jwt:
            logger.error("Missing X-Signature-Jwt header")
            return jsonify({"error": "Missing X-Signature-Jwt header"}), 401
        # 3) Decode/verify the token with PyJWT, ignoring audience/issuer
        try:
            decoded = jwt.decode(
                signature_jwt,
                signing_secret,
                algorithms=["HS256"],
                options={
                    "require": ["bodyHash"], # must have bodyHash
                    "verify_aud": False, # disable audience check
                    "verify_iss": False, # disable issuer check
                }
            )
        except InvalidTokenError as e:
            logger.error("Invalid token: %s", e)
            return jsonify({"error": f"Invalid token: {str(e)}"}), 401
        # 4) Compare bodyHash to SHA256(content)
        request_data = request.get_json() or {}
        content_str = request_data.get("content", "")
        actual_hash = hashlib.sha256(content_str.encode("utf-8")).hexdigest()
        if decoded["bodyHash"] != actual_hash:
            logger.error("bodyHash mismatch")
            return jsonify({"error": "bodyHash mismatch"}), 403
        return f(*args, **kwargs)
    return decorated
@app.route("/tool_call", methods=["POST"])
@require_signed_request
def tool_call():
    """
    1) Parse the incoming JSON (including 'content' as a JSON string).
    2) Extract function name and arguments.
    3) Use call_tool(...) to invoke the function.
    4) Return JSON response with result or error.

    Responses: 200 with {"result": ...} on success; 400 for malformed
    input or call_tool validation errors; 500 for unexpected failures.
    """
    req_data = request.get_json()
    if not req_data:
        logger.warning("No JSON data provided in request body.")
        return jsonify({"error": "No JSON data provided"}), 400
    content_str = req_data.get("content")
    if not content_str:
        logger.warning("Missing 'content' in request data.")
        return jsonify({"error": "Missing 'content' in request data"}), 400
    # Parse the JSON string in "content"
    try:
        parsed_content = json.loads(content_str)
    except json.JSONDecodeError as e:
        logger.error("Unable to parse 'content' as JSON: %s", e)
        return jsonify({"error": f"Unable to parse 'content' as JSON: {str(e)}"}), 400
    # Extract function info
    tool_call_data = parsed_content.get("toolCall", {})
    function_data = tool_call_data.get("function", {})
    function_name = function_data.get("name")
    arguments_str = function_data.get("arguments")
    if not function_name:
        logger.warning("No function name provided.")
        return jsonify({"error": "No function name provided"}), 400
    if not arguments_str:
        logger.warning("No arguments string provided.")
        return jsonify({"error": "No arguments string provided"}), 400
    # Parse the arguments, which is also a JSON string
    try:
        parameters = json.loads(arguments_str)
    except json.JSONDecodeError as e:
        logger.error("Unable to parse 'arguments' as JSON: %s", e)
        return jsonify({"error": f"Unable to parse 'arguments' as JSON: {str(e)}"}), 400
    try:
        result = call_tool(function_name, parameters, FUNCTIONS_MAP)
        return jsonify({"result": result}), 200
    except ValueError as val_err:
        # Validation errors from call_tool map to client errors (400).
        logger.warning("ValueError in call_tool: %s", val_err)
        return jsonify({"error": str(val_err)}), 400
    except Exception as e:
        logger.exception("Unexpected error in /tool_call route")
        return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
    # Development entry point only — debug=True enables the Werkzeug
    # debugger and must not be used when the app faces real traffic.
    app.run(debug=True)

View file

@ -0,0 +1,26 @@
"""
function_map.py
Defines all the callable functions and a mapping from
string names to these functions.
"""
def greet(name: str, message: str):
    """Compose the greeting string '<message>, <name>!'."""
    return "{}, {}!".format(message, name)
def add(a: int, b: int):
    """Return the sum of two integers."""
    total = a + b
    return total
def get_account_balance(user_id: str):
    """Return a mock account balance message for the given user_id."""
    balance_message = "User {} has a balance of $123.45.".format(user_id)
    return balance_message
# A configurable mapping from function identifiers to actual Python functions.
# call_tool looks tools up here by name — register new tools in this dict.
FUNCTIONS_MAP = {
    "greet": greet,
    "add": add,
    "get_account_balance": get_account_balance
}

View file

@ -0,0 +1,12 @@
blinker==1.9.0
click==8.1.8
Flask==3.1.0
iniconfig==2.0.0
itsdangerous==2.2.0
Jinja2==3.1.5
MarkupSafe==3.0.2
packaging==24.2
pluggy==1.5.0
PyJWT==2.10.1
pytest==8.3.4
Werkzeug==3.1.3

View file

@ -0,0 +1,95 @@
# tests/test_app.py
import json
import pytest
from tools_webhook.app import app # If "sidecar" is recognized as a package
@pytest.fixture
def client():
    """
    A pytest fixture that provides a Flask test client.
    The `app.test_client()` allows us to make requests to our Flask app
    without running the server.
    """
    with app.test_client() as client:
        yield client
def test_tool_call_greet(client):
    # Happy path: a well-formed request invoking `greet` returns 200.
    # This matches the structure of the request in our code:
    # {
    #     "content": "...a JSON string..."
    # }
    # The content we pass is another JSON, so we have to double-escape quotes.
    request_data = {
        "content": json.dumps({
            "toolCall": {
                "function": {
                    "name": "greet",
                    "arguments": json.dumps({
                        "name": "Alice",
                        "message": "Hello"
                    })
                }
            }
        })
    }
    response = client.post(
        "/tool_call",
        data=json.dumps(request_data),
        content_type="application/json"
    )
    assert response.status_code == 200
    data = response.get_json()
    assert data["result"] == "Hello, Alice!"
def test_tool_call_missing_params(client):
    # Omitting a required argument should surface call_tool's ValueError
    # as an HTTP 400 with a descriptive message.
    request_data = {
        "content": json.dumps({
            "toolCall": {
                "function": {
                    "name": "greet",
                    "arguments": json.dumps({
                        "name": "Alice"
                        # Missing "message"
                    })
                }
            }
        })
    }
    response = client.post(
        "/tool_call",
        data=json.dumps(request_data),
        content_type="application/json"
    )
    assert response.status_code == 400
    data = response.get_json()
    assert "Missing required parameter: message" in data["error"]
def test_tool_call_invalid_func(client):
    # Unknown function names are rejected with HTTP 400.
    request_data = {
        "content": json.dumps({
            "toolCall": {
                "function": {
                    "name": "does_not_exist",
                    "arguments": json.dumps({})
                }
            }
        })
    }
    response = client.post(
        "/tool_call",
        data=json.dumps(request_data),
        content_type="application/json"
    )
    assert response.status_code == 400
    data = response.get_json()
    assert "Function 'does_not_exist' not found" in data["error"]

View file

@ -0,0 +1,40 @@
# tests/test_tool_caller.py
import pytest
from tools_webhook.tool_caller import call_tool
from tools_webhook.function_map import FUNCTIONS_MAP
def test_call_tool_greet():
    # Normal case
    result = call_tool("greet", {"name": "Alice", "message": "Hello"}, FUNCTIONS_MAP)
    assert result == "Hello, Alice!"
def test_call_tool_add():
    # Normal case
    result = call_tool("add", {"a": 2, "b": 5}, FUNCTIONS_MAP)
    assert result == 7
def test_call_tool_missing_func():
    # Should raise ValueError if function is not in FUNCTIONS_MAP
    with pytest.raises(ValueError) as exc_info:
        call_tool("non_existent_func", {}, FUNCTIONS_MAP)
    assert "Function 'non_existent_func' not found" in str(exc_info.value)
def test_call_tool_missing_param():
    # greet requires `name` and `message`
    with pytest.raises(ValueError) as exc_info:
        call_tool("greet", {"name": "Alice"}, FUNCTIONS_MAP)
    assert "Missing required parameter: message" in str(exc_info.value)
def test_call_tool_unexpected_param():
    # `greet` only expects name and message
    with pytest.raises(ValueError) as exc_info:
        call_tool("greet", {"name": "Alice", "message": "Hello", "extra": "???"},
                  FUNCTIONS_MAP)
    assert "Unexpected parameter: extra" in str(exc_info.value)
def test_call_tool_type_conversion_error():
    # `add` expects integers `a` and `b`, so passing a string should fail
    # (int("not_an_int") raises ValueError inside the conversion step).
    with pytest.raises(ValueError) as exc_info:
        call_tool("add", {"a": "not_an_int", "b": 3}, FUNCTIONS_MAP)
    assert "Parameter 'a' must be of type int" in str(exc_info.value)

View file

@ -0,0 +1,69 @@
# tool_caller.py
import inspect
import logging
logger = logging.getLogger(__name__)

def call_tool(function_name: str, parameters: dict, functions_map: dict):
    """
    1) Lookup a function in functions_map by name.
    2) Validate parameters against the function signature.
    3) Call the function with converted parameters.
    4) Return the result or raise an Exception on error.

    Fixes over the previous version:
    - `*args` / `**kwargs` parameters are no longer treated as required
      named parameters (they have no default, so the old check demanded
      literal "args"/"kwargs" keys for any variadic function).
    - Extra keys are accepted when the function declares `**kwargs`.
    - Type conversion only happens when the annotation is a concrete
      class; typing constructs (e.g. Optional[int], which has no
      `__name__`) are passed through instead of crashing while the error
      message is being formatted.

    Raises:
        ValueError: unknown function, missing/unexpected parameter, or a
            value that cannot be converted to the annotated type.
    """
    logger.debug("call_tool invoked with function_name=%s, parameters=%s", function_name, parameters)
    # 1) Check if function exists
    if function_name not in functions_map:
        error_msg = f"Function '{function_name}' not found."
        logger.error(error_msg)
        raise ValueError(error_msg)
    func = functions_map[function_name]
    signature = inspect.signature(func)
    variadic_kinds = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
    # 2) Identify required parameters: no default, and not *args/**kwargs
    required_params = [
        pname for pname, p in signature.parameters.items()
        if p.default == inspect.Parameter.empty and p.kind not in variadic_kinds
    ]
    # Check required params
    for rp in required_params:
        if rp not in parameters:
            error_msg = f"Missing required parameter: {rp}"
            logger.error(error_msg)
            raise ValueError(error_msg)
    # Check unexpected params (skipped when the function accepts **kwargs)
    accepts_var_keyword = any(
        p.kind == inspect.Parameter.VAR_KEYWORD for p in signature.parameters.values()
    )
    valid_param_names = signature.parameters.keys()
    if not accepts_var_keyword:
        for p in parameters.keys():
            if p not in valid_param_names:
                error_msg = f"Unexpected parameter: {p}"
                logger.error(error_msg)
                raise ValueError(error_msg)
    # 3) Convert types based on annotations (if any)
    converted_params = {}
    for param_name, param_value in parameters.items():
        param_obj = signature.parameters.get(param_name)
        annotation = param_obj.annotation if param_obj is not None else inspect.Parameter.empty
        # Only call the annotation as a converter when it is a real class;
        # generic aliases and other typing constructs are not callable
        # converters and are left as-is.
        if annotation != inspect.Parameter.empty and isinstance(annotation, type):
            try:
                converted_params[param_name] = annotation(param_value)
            except (ValueError, TypeError) as e:
                error_msg = f"Parameter '{param_name}' must be of type {annotation.__name__}: {e}"
                logger.error(error_msg)
                raise ValueError(error_msg)
        else:
            converted_params[param_name] = param_value
    # 4) Invoke the function
    try:
        result = func(**converted_params)
        logger.debug("Function '%s' returned: %s", function_name, result)
        return result
    except Exception:
        logger.exception("Unexpected error calling '%s'", function_name) # logs stack trace
        raise

View file

@ -0,0 +1,2 @@
__pycache__
.venv/

View file

@ -0,0 +1,24 @@
# Environment variables for the Voice API application
# Twilio configuration
TWILIO_ACCOUNT_SID=your_account_sid_here
TWILIO_AUTH_TOKEN=your_auth_token_here
BASE_URL=https://your-public-url-here.ngrok.io
# RowBoat API configuration
ROWBOAT_API_HOST=http://localhost:3000
ROWBOAT_PROJECT_ID=your_project_id_here
ROWBOAT_API_KEY=your_api_key_here
# Speech processing APIs
DEEPGRAM_API_KEY=your_deepgram_api_key_here
ELEVENLABS_API_KEY=your_elevenlabs_api_key_here
# Server configuration
PORT=3009
WHATSAPP_PORT=3010
# Redis configuration for persistent state
REDIS_URL=redis://localhost:6379/0
REDIS_EXPIRY_SECONDS=86400
SERVICE_NAME=rowboat-voice

View file

@ -0,0 +1,2 @@
__pycache__
.venv

View file

@ -0,0 +1,18 @@
FROM python:3.12-slim
WORKDIR /app
# Copy requirements first to leverage Docker cache
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Set environment variables: Flask app module, unbuffered logs for
# `docker logs`, and PYTHONPATH so intra-package imports resolve.
ENV FLASK_APP=app
ENV PYTHONUNBUFFERED=1
ENV PYTHONPATH=/app
# Command to run Flask development server
# NOTE(review): the Flask dev server is single-threaded and not meant for
# production traffic — confirm a WSGI server fronts this in deployment.
CMD ["flask", "run", "--host=0.0.0.0", "--port=4010"]

View file

@ -0,0 +1,633 @@
from flask import Flask, request, jsonify, Response
from twilio.twiml.voice_response import VoiceResponse, Gather
import os
import logging
import uuid
from typing import Dict, Any, Optional
import json
from time import time
from rowboat.schema import SystemMessage, UserMessage, ApiMessage
import elevenlabs
# Load environment variables
from load_env import load_environment
load_environment()
from twilio_api import process_conversation_turn
# Import MongoDB utility functions
from util import (
get_call_state,
save_call_state,
delete_call_state,
get_mongodb_status,
get_twilio_config,
CallState
)
# Narrow alias for the message types this service constructs.
Message = SystemMessage | UserMessage
# ElevenLabs TTS client — API key comes from the environment and may be
# None, in which case TTS requests will fail at call time.
ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
elevenlabs_client = elevenlabs.ElevenLabs(api_key=ELEVENLABS_API_KEY)
app = Flask(__name__)
# Configure logging to stdout for Docker compatibility
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()] # Send logs to stdout
)
logger = logging.getLogger(__name__)
# Local in-memory cache of call state (temporary cache only - not primary storage)
# MongoDB is the primary storage for state across multiple instances
active_calls = {}
# TTS configuration
TTS_VOICE = "Markus - Mature and Chill"
TTS_MODEL = "eleven_flash_v2_5"
@app.route('/inbound', methods=['POST'])
def handle_inbound_call():
    """Handle incoming calls to Twilio numbers configured for RowBoat.

    Flow: look up the MongoDB config for the dialed number, build the
    initial CallState, persist it (MongoDB is the source of truth across
    instances), cache it locally, then answer via handle_call. Any
    failure returns spoken error TwiML so Twilio never sees a 500.
    """
    try:
        # Log the entire request for debugging
        logger.info(f"Received inbound call request: {request.values}")
        # Get the Twilio phone number that received the call
        to_number = request.values.get('To')
        call_sid = request.values.get('CallSid')
        from_number = request.values.get('From')
        logger.info(f"Inbound call from {from_number} to {to_number}, CallSid: {call_sid}")
        logger.info(f"Raw To number value: '{to_number}', Type: {type(to_number)}")
        # Get configuration ONLY from MongoDB
        system_prompt = "You are a helpful assistant. Provide concise and clear answers."
        workflow_id = None
        project_id = None
        # Look up configuration in MongoDB
        twilio_config = get_twilio_config(to_number)
        if twilio_config:
            workflow_id = twilio_config['workflow_id']
            project_id = twilio_config['project_id']
            system_prompt = twilio_config.get('system_prompt', system_prompt)
            logger.info(f"Found MongoDB configuration for {to_number}: project_id={project_id}, workflow_id={workflow_id}")
        else:
            logger.warning(f"No active configuration found in MongoDB for phone number {to_number}")
        if not workflow_id:
            # No workflow found - provide error message
            logger.error(f"No workflow_id found for inbound call to {to_number}")
            response = VoiceResponse()
            response.say("I'm sorry, this phone number is not properly configured in our system. Please contact support.", voice='alice')
            # Include additional information in TwiML for debugging
            response.say(f"Received call to number {to_number}", voice='alice')
            response.hangup()
            return str(response)
        # Initialize call state with stateless API fields
        call_state = CallState(
            workflow_id=workflow_id,
            project_id=project_id,
            system_prompt=system_prompt,
            conversation_history=[],
            messages=[], # For stateless API
            state=None, # For stateless API state
            turn_count=0,
            inbound=True,
            to_number=to_number,
            created_at=int(time()) # Add timestamp for expiration tracking
        )
        # Save to MongoDB (primary source of truth)
        try:
            save_call_state(call_sid, call_state)
            logger.info(f"Saved initial call state to MongoDB for inbound call {call_sid}")
        except Exception as e:
            # Persisting must succeed: a different instance may serve the
            # next webhook for this call, so memory-only state is not enough.
            logger.error(f"Error saving inbound call state to MongoDB: {str(e)}")
            raise RuntimeError(f"Failed to save call state to MongoDB: {str(e)}")
        # Only use memory storage as a temporary cache
        # The service that handles the next request might be different
        active_calls[call_sid] = call_state
        logger.info(f"Initialized call state for {call_sid}, proceeding to handle_call")
        # Create a direct response instead of redirecting
        return handle_call(call_sid, workflow_id, project_id)
    except Exception as e:
        # Log the full error with traceback
        import traceback
        logger.error(f"Error in handle_inbound_call: {str(e)}")
        logger.error(traceback.format_exc())
        # Return a basic TwiML response so Twilio doesn't get a 500 error
        response = VoiceResponse()
        response.say("I'm sorry, we encountered an error processing your call. Please try again later.", voice='alice')
        response.hangup()
        return str(response)
@app.route('/twiml', methods=['POST'])
def handle_twiml_call():
    """TwiML endpoint for outbound call handling.

    Loads the persisted call state for this CallSid and hands off to
    handle_call; unknown/expired sessions get a spoken error and hangup.
    """
    sid = request.values.get('CallSid')
    # Get call state to retrieve workflow_id and project_id
    state = get_call_state(sid)
    if not state:
        # No call state found - error response
        twiml = VoiceResponse()
        twiml.say("I'm sorry, your call session has expired. Please try again.", voice='alice')
        twiml.hangup()
        return str(twiml)
    return handle_call(sid, state.get('workflow_id'), state.get('project_id'))
def handle_call(call_sid, workflow_id, project_id=None):
    """Common handler for both inbound and outbound calls.

    Loads the call state (MongoDB first, in-memory cache as fallback),
    creating and persisting a fresh state when none exists. On the first
    turn it generates the AI greeting and returns TwiML that streams the
    greeting, then gathers the caller's speech.

    Args:
        call_sid: Twilio call SID for this call.
        workflow_id: RowBoat workflow ID (required to initialize new state).
        project_id: Optional RowBoat project ID.

    Returns:
        A TwiML response string.
    """
    try:
        logger.info(f"handle_call: processing call {call_sid} with workflow {workflow_id}, project_id {project_id}")
        # Get or initialize call state, first from MongoDB
        call_state = None
        try:
            # Query MongoDB for the call state
            call_state = get_call_state(call_sid)
            if call_state:
                logger.info(f"Loaded and restored call state from MongoDB for {call_sid}")
        except Exception as e:
            logger.error(f"Error retrieving MongoDB state for {call_sid}: {str(e)}")
            call_state = None
        # Try in-memory cache as fallback (temporary local cache)
        if call_state is None and call_sid in active_calls:
            call_state = active_calls.get(call_sid)
            logger.info(f"Using in-memory cache for call state of {call_sid}")
        # Initialize new state if needed
        if call_state is None and workflow_id:
            call_state = CallState(
                workflow_id=workflow_id,
                project_id=project_id,
                system_prompt="You are a helpful assistant. Provide concise and clear answers.",
                conversation_history=[],
                messages=[],  # For stateless API
                state=None,  # For stateless API state
                turn_count=0,
                inbound=False,  # Default for outbound calls
                to_number="",  # This will be set properly for inbound calls
                created_at=int(time()),  # Add timestamp for expiration tracking
                last_transcription=""
            )
            # Save to MongoDB (primary source of truth)
            try:
                save_call_state(call_sid, call_state)
                logger.info(f"Initialized and saved new call state to MongoDB for {call_sid}")
            except Exception as e:
                logger.error(f"Error saving new call state to MongoDB: {str(e)}")
                raise RuntimeError(f"Failed to save call state to MongoDB: {str(e)}")
            # Only use memory as temporary cache for this request
            active_calls[call_sid] = call_state
            logger.info(f"Initialized new call state for {call_sid}")
        # FIX: with no stored state and no workflow_id, the previous code fell
        # through and raised AttributeError on call_state.get(...), which was
        # then masked by the generic error handler below. Fail explicitly.
        if call_state is None:
            logger.warning(f"No call state and no workflow_id for {call_sid}; cannot proceed")
            response = VoiceResponse()
            response.say("I'm sorry, your call session has expired. Please try again.", voice='alice')
            response.hangup()
            return str(response)
        logger.info(f"Using call state: {call_state}")
        # Create TwiML response
        response = VoiceResponse()
        # Check if this is a new call (no turns yet)
        if call_state.get('turn_count', 0) == 0:
            logger.info("First turn: generating AI greeting using an empty user input...")
            # Generate greeting by calling process_conversation_turn with empty user input
            try:
                ai_greeting, updated_messages, updated_state = process_conversation_turn(
                    user_input="",  # empty to signal "give me your greeting"
                    workflow_id=call_state['workflow_id'],
                    system_prompt=call_state['system_prompt'],
                    previous_messages=[],
                    previous_state=None,
                    project_id=call_state.get('project_id')
                )
            except Exception as e:
                logger.error(f"Error generating AI greeting: {str(e)}")
                ai_greeting = "Hello, I encountered an issue creating a greeting. How can I help you?"
                # Fallback: no changes to updated_messages/updated_state
                updated_messages = []
                updated_state = None
            # Update call_state with AI greeting
            call_state['messages'] = updated_messages
            call_state['state'] = updated_state
            call_state['conversation_history'].append({
                'user': "",  # empty user
                'assistant': ai_greeting
            })
            call_state['turn_count'] = 1
            # Save changes to MongoDB
            try:
                save_call_state(call_sid, call_state)
                logger.info(f"Saved greeting state to MongoDB for {call_sid}")
            except Exception as e:
                logger.error(f"Error saving greeting state to MongoDB: {str(e)}")
                raise RuntimeError(f"Failed to save greeting state to MongoDB: {str(e)}")
            active_calls[call_sid] = call_state
            # Play the greeting via streaming audio
            unique_id = str(uuid.uuid4())
            audio_url = f"/stream-audio/{call_sid}/greeting/{unique_id}"
            logger.info(f"Will stream greeting from {audio_url}")
            response.play(audio_url)
        # Gather user input next (applies to first and subsequent turns)
        gather = Gather(
            input='speech',
            action=f'/process_speech?call_sid={call_sid}',
            speech_timeout='auto',
            language='en-US',
            enhanced=True,
            speechModel='phone_call'
        )
        response.append(gather)
        # If no input was gathered, loop back through /twiml
        response.redirect('/twiml')
        logger.info(f"Returning response: {str(response)}")
        return str(response)
    except Exception as e:
        # Log the full error with traceback
        import traceback
        logger.error(f"Error in handle_call: {str(e)}")
        logger.error(traceback.format_exc())
        # Return a basic TwiML response
        response = VoiceResponse()
        response.say("I'm sorry, we encountered an error processing your call. Please try again later.", voice='alice')
        response.hangup()
        return str(response)
@app.route('/process_speech', methods=['POST'])
def process_speech():
    """Process user speech input and generate AI response.

    Reads Twilio's speech recognition result (optionally overridden by a
    stored Deepgram transcription), runs one conversation turn through the
    RowBoat stateless API, persists the updated call state to MongoDB, and
    returns TwiML that streams the spoken reply and gathers the next input.
    """
    try:
        logger.info(f"Processing speech: {request.values}")
        call_sid = request.args.get('call_sid')
        # Log all request values for debugging
        logger.info(f"FULL REQUEST VALUES: {dict(request.values)}")
        logger.info(f"FULL REQUEST ARGS: {dict(request.args)}")
        # Get the speech result directly from Twilio
        # We're now relying on Twilio's enhanced speech recognition instead of Deepgram
        speech_result = request.values.get('SpeechResult')
        confidence = request.values.get('Confidence')
        logger.info(f"Twilio SpeechResult: {speech_result}")
        logger.info(f"Twilio Confidence: {confidence}")
        if not call_sid:
            logger.warning(f"Missing call_sid: {call_sid}")
            response = VoiceResponse()
            response.say("I'm sorry, I couldn't process that request.", voice='alice')
            response.hangup()
            return str(response)
        if not speech_result:
            logger.warning("No speech result after transcription attempts")
            response = VoiceResponse()
            response.say("I'm sorry, I didn't catch what you said. Could you please try again?", voice='alice')
            # Gather user input again
            gather = Gather(
                input='speech',
                action=f'/process_speech?call_sid={call_sid}',
                speech_timeout='auto',
                language='en-US',
                enhanced=True,
                speechModel='phone_call'
            )
            response.append(gather)
            # Redirect to twiml endpoint which will get call state from MongoDB
            response.redirect('/twiml')
            return str(response)
        # Load call state from MongoDB (primary source of truth)
        call_state = None
        try:
            call_state = get_call_state(call_sid)
            if call_state:
                logger.info(f"Loaded call state from MongoDB for speech processing: {call_sid}")
        except Exception as e:
            logger.error(f"Error retrieving MongoDB state for speech processing: {str(e)}")
            call_state = None
        # Try memory cache as fallback
        if call_state is None and call_sid in active_calls:
            call_state = active_calls[call_sid]
            logger.info(f"Using in-memory state for speech processing: {call_sid}")
        # Check if we have valid state
        if not call_state:
            logger.warning(f"No call state found for speech processing: {call_sid}")
            response = VoiceResponse()
            response.say("I'm sorry, your call session has expired. Please call back.", voice='alice')
            response.hangup()
            return str(response)
        # Extract key information
        workflow_id = call_state.get('workflow_id')
        project_id = call_state.get('project_id')
        system_prompt = call_state.get('system_prompt', "You are a helpful assistant.")
        # Check if we have a Deepgram transcription stored in the call state.
        # FIX: call_state is a pydantic CallState, not a dict. The previous
        # code used `'last_transcription' in call_state` (unreliable membership
        # test on a BaseModel) and `del call_state['last_transcription']`,
        # which raises TypeError because CallState defines no __delitem__.
        # Use .get() and clear by assigning None instead.
        deepgram_transcription = call_state.get('last_transcription')
        if deepgram_transcription:
            logger.info(f"Found stored Deepgram transcription: {deepgram_transcription}")
            logger.info(f"Comparing with Twilio transcription: {speech_result}")
            # Use the Deepgram transcription instead of Twilio's
            speech_result = deepgram_transcription
            # Clear it so we don't use it again
            call_state['last_transcription'] = None
            logger.info(f"Using Deepgram transcription instead")
        # Log final user input that will be used
        logger.info(f"Final user input: {speech_result}")
        # Process with RowBoat agent
        try:
            # Clean up the speech result if needed
            if speech_result:
                # Remove any common filler words or fix typical transcription issues
                import re
                # Convert to lowercase for easier pattern matching
                cleaned_input = speech_result.lower()
                # Remove filler words that might be at the beginning
                cleaned_input = re.sub(r'^(um|uh|like|so|okay|well)\s+', '', cleaned_input)
                # Capitalize first letter for better appearance
                if cleaned_input:
                    speech_result = cleaned_input[0].upper() + cleaned_input[1:]
            logger.info(f"Sending to RowBoat: '{speech_result}'")
            # Get previous messages and state from call state
            previous_messages = call_state.get('messages', [])
            previous_state = call_state.get('state')
            # Process with stateless API
            ai_response, updated_messages, updated_state = process_conversation_turn(
                user_input=speech_result,
                workflow_id=workflow_id,
                system_prompt=system_prompt,
                previous_messages=previous_messages,
                previous_state=previous_state,
                project_id=project_id
            )
            # Update the messages and state in call state
            call_state['messages'] = updated_messages
            call_state['state'] = updated_state
            logger.info(f"RowBoat response: {ai_response}")
        except Exception as e:
            logger.error(f"Error processing with RowBoat: {str(e)}")
            ai_response = "I'm sorry, I encountered an issue processing your request. Could you please try again?"
        # Create TwiML response
        response = VoiceResponse()
        # Use streaming audio for the response
        logger.info("Setting up response streaming with ElevenLabs")
        try:
            # Store the AI response in conversation history first
            # (The stream-audio endpoint will read it from here)
            call_state['conversation_history'].append({
                'user': speech_result,
                'assistant': ai_response
            })
            call_state['turn_count'] += 1
            # Save to MongoDB (primary source of truth)
            try:
                save_call_state(call_sid, call_state)
                logger.info(f"Saved response state to MongoDB for {call_sid}")
            except Exception as e:
                logger.error(f"Error saving response state to MongoDB: {str(e)}")
                raise RuntimeError(f"Failed to save response state to MongoDB: {str(e)}")
            # Update local memory cache
            active_calls[call_sid] = call_state
            # Generate a unique ID to prevent caching
            unique_id = str(uuid.uuid4())
            # Use a relative URL - Twilio will use the same host as the webhook
            audio_url = f"/stream-audio/{call_sid}/response/{unique_id}"
            logger.info(f"Streaming response from relative URL: {audio_url}")
            # Play the response via streaming
            response.play(audio_url)
        except Exception as e:
            logger.error(f"Error with audio streaming for response: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            # Fallback to Twilio TTS
            response.say(ai_response, voice='alice')
        # Gather next user input with enhanced speech recognition
        gather = Gather(
            input='speech',
            action=f'/process_speech?call_sid={call_sid}',
            speech_timeout='auto',
            language='en-US',
            enhanced=True,  # Enable enhanced speech recognition
            speechModel='phone_call'  # Optimize for phone calls
        )
        response.append(gather)
        # If no input detected, redirect to twiml endpoint
        # Call state will be retrieved from MongoDB
        response.redirect('/twiml')
        logger.info(f"Returning TwiML response for speech processing")
        return str(response)
    except Exception as e:
        # Log the full error with traceback
        import traceback
        logger.error(f"Error in process_speech: {str(e)}")
        logger.error(traceback.format_exc())
        # Return a basic TwiML response
        response = VoiceResponse()
        response.say("I'm sorry, we encountered an error processing your speech. Please try again.", voice='alice')
        response.gather(
            input='speech',
            action=f'/process_speech?call_sid={request.args.get("call_sid")}',
            speech_timeout='auto'
        )
        return str(response)
@app.route('/stream-audio/<call_sid>/<text_type>/<unique_id>', methods=['GET'])
def stream_audio(call_sid, text_type, unique_id):
    """Stream TTS audio from ElevenLabs straight to Twilio (no disk writes).

    Args:
        call_sid: Twilio call SID whose latest assistant reply is spoken.
        text_type: 'greeting' or 'response' to read from conversation
            history, or literal text to synthesize directly (for testing).
        unique_id: Cache-busting token from the URL; not used otherwise.
    """
    try:
        logger.info(f"Audio streaming requested for call {call_sid}, type {text_type}")
        text_to_speak = ""
        if text_type in ("greeting", "response"):
            # Resolve call state: MongoDB is authoritative, memory is fallback.
            state = None
            try:
                state = get_call_state(call_sid)
                if state:
                    logger.info(f"Loaded call state from MongoDB for streaming: {call_sid}")
            except Exception as e:
                logger.error(f"Error retrieving MongoDB state for streaming: {str(e)}")
                state = None
            if state is None:
                if call_sid not in active_calls:
                    logger.error(f"Call SID not found for streaming: {call_sid}")
                    return "Call not found", 404
                state = active_calls[call_sid]
                logger.info(f"Using in-memory state for streaming: {call_sid}")
            history = state.get('conversation_history')
            if history and len(history) > 0:
                # Speak the most recent assistant reply.
                text_to_speak = history[-1]['assistant']
            else:
                logger.warning(f"No conversation history found for call {call_sid}")
                text_to_speak = "I'm sorry, I don't have a response ready. Could you please repeat?"
        else:
            # Direct text may be passed as the text_type (for testing)
            text_to_speak = text_type
        if not text_to_speak:
            logger.error("No text to synthesize")
            return "No text to synthesize", 400
        logger.info(f"Streaming audio for text: {text_to_speak[:50]}...")

        def generate():
            try:
                # Generate and relay audio chunks as they arrive.
                audio_stream = elevenlabs_client.generate(
                    text=text_to_speak,
                    voice=TTS_VOICE,
                    model=TTS_MODEL,
                    output_format="mp3_44100_128"
                )
                for chunk in audio_stream:
                    yield chunk
                logger.info(f"Finished streaming audio for call {call_sid}")
            except Exception as e:
                logger.error(f"Error in audio stream generator: {str(e)}")
                import traceback
                logger.error(traceback.format_exc())

        # Return a streaming response
        return Response(generate(), mimetype='audio/mpeg')
    except Exception as e:
        logger.error(f"Error setting up audio stream: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return "Error streaming audio", 500
@app.route('/call-status', methods=['POST'])
def call_status_callback():
    """Handle call status callbacks from Twilio; clean up finished calls."""
    call_sid = request.values.get('CallSid')
    call_status = request.values.get('CallStatus')
    logger.info(f"Call {call_sid} status: {call_status}")
    # Clean up resources when the call reaches a terminal state.
    if call_status in ('completed', 'failed', 'busy', 'no-answer', 'canceled'):
        # Locate the final call state: MongoDB first, memory cache second.
        final_state = None
        try:
            final_state = get_call_state(call_sid)
            if final_state:
                logger.info(f"Loaded final state from MongoDB for {call_sid}")
        except Exception as e:
            logger.error(f"Error retrieving final state from MongoDB: {str(e)}")
            final_state = None
        if final_state is None and call_sid in active_calls:
            final_state = active_calls[call_sid]
            logger.info(f"Using in-memory state for final call state of {call_sid}")
        if final_state:
            # Drop the call from both the memory cache and MongoDB.
            if call_sid in active_calls:
                del active_calls[call_sid]
                logger.info(f"Removed call {call_sid} from active calls memory")
            try:
                delete_call_state(call_sid)
                logger.info(f"Removed call {call_sid} from MongoDB")
            except Exception as e:
                logger.error(f"Error removing call state from MongoDB: {str(e)}")
    return '', 204
@app.route('/health', methods=['GET'])
def health_check():
    """Simple health check endpoint reporting memory and MongoDB call counts."""
    payload = {
        "status": "healthy",
        "active_calls_memory": len(active_calls)
    }
    try:
        # Attach MongoDB connectivity details; degrade status on failure.
        mongo_info = get_mongodb_status()
        payload["mongodb"] = mongo_info
        payload["active_calls_mongodb"] = mongo_info.get("active_calls", 0)
    except Exception as e:
        payload["mongodb_error"] = str(e)
        payload["status"] = "degraded"
    return jsonify(payload)
if __name__ == '__main__':
    # Log startup information
    logger.info(f"Starting Twilio-RowBoat server")
    # Remove the explicit run configuration since Flask CLI will handle it;
    # app.run() with no arguments uses Flask's defaults (127.0.0.1:5000).
    app.run()

View file

@ -0,0 +1,6 @@
from dotenv import load_dotenv
import os
def load_environment(dotenv_path=None):
    """Load environment variables from a .env file.

    Args:
        dotenv_path: Optional explicit path to the .env file. When None
            (the default, preserving the original behavior), python-dotenv
            searches for a .env file starting from the current directory.
    """
    load_dotenv(dotenv_path)

View file

@ -0,0 +1,39 @@
aiohappyeyeballs==2.5.0
aiohttp==3.11.13
aiohttp-retry==2.9.1
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.8.0
attrs==25.1.0
blinker==1.9.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
dnspython==2.7.0
dotenv==0.9.9
elevenlabs==1.52.0
Flask==3.1.0
frozenlist==1.5.0
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
multidict==6.1.0
propcache==0.3.0
pydantic==2.10.6
pydantic_core==2.27.2
PyJWT==2.10.1
pymongo==4.11.2
python-dotenv==1.0.1
requests==2.32.3
rowboat==2.1.0
sniffio==1.3.1
twilio==9.4.6
typing_extensions==4.12.2
urllib3==2.3.0
websockets==15.0.1
Werkzeug==3.1.3
yarl==1.18.3

View file

@ -0,0 +1,96 @@
from twilio.rest import Client as TwilioClient
from rowboat.client import Client
from rowboat.schema import UserMessage, SystemMessage
import os
from typing import Dict, List, Optional, Tuple, Any
import logging
from util import get_api_key
import time
import json
# Load environment variables
from load_env import load_environment
load_environment()
# Configure logging to stdout for Docker compatibility
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]  # Send logs to stdout
)
logger = logging.getLogger(__name__)

# Environment variables and configuration.
# FIX: fail fast with a clear message when ROWBOAT_API_HOST is unset; the
# previous `os.environ.get("ROWBOAT_API_HOST").strip()` raised a cryptic
# AttributeError ('NoneType' object has no attribute 'strip') at import time.
_rowboat_api_host = os.environ.get("ROWBOAT_API_HOST")
if _rowboat_api_host is None:
    raise RuntimeError("ROWBOAT_API_HOST environment variable is not set")
ROWBOAT_API_HOST = _rowboat_api_host.strip()

# Union of message types accepted by the RowBoat API.
Message = UserMessage | SystemMessage
def process_conversation_turn(
    user_input: str,
    workflow_id: str,
    system_prompt: str = "You are a helpful assistant. Provide concise and clear answers.",
    previous_messages: Optional[List[Message]] = None,
    previous_state: Any = None,
    project_id: Optional[str] = None
) -> Tuple[str, List[Message], Any]:
    """
    Process a single conversation turn with the RowBoat agent using the stateless API.
    Args:
        user_input: User's transcribed input (empty string requests a greeting)
        workflow_id: RowBoat workflow ID
        system_prompt: System prompt for the agent
        previous_messages: Previous messages in the conversation
        previous_state: Previous state from RowBoat
        project_id: RowBoat project ID (if different from default)
    Returns:
        A tuple of (response_text, updated_messages, updated_state)
    """
    # Build the working message list up front so the error path below can
    # always return a valid (possibly empty) list.
    # FIX: the parameter annotation was `List[Message] = None` (now Optional),
    # and the error path returned previous_messages, which could be None and
    # violated the declared List[Message] return type.
    messages = [] if previous_messages is None else previous_messages.copy()
    try:
        # If we're starting a new conversation, add the system message
        if not messages or not any(msg.role == 'system' for msg in messages):
            messages.append(SystemMessage(role='system', content=system_prompt))
        # Add the user's new utterance (empty input signals a greeting request)
        if user_input:
            messages.append(UserMessage(role='user', content=user_input))
        # Process the conversation using the stateless API
        logger.info(f"Sending to RowBoat API with {len(messages)} messages")
        # Create client with custom project_id if provided
        client = Client(
            host=ROWBOAT_API_HOST,
            project_id=project_id,
            api_key=get_api_key(project_id)
        )
        response_messages, new_state = client.chat(
            messages=messages,
            workflow_id=workflow_id,
            state=previous_state
        )
        # Extract the assistant's response (last message)
        if response_messages and len(response_messages) > 0:
            assistant_response = response_messages[-1].content
        else:
            assistant_response = "I'm sorry, I didn't receive a proper response."
        # Update messages list with the new responses
        final_messages = messages + response_messages
        logger.info(f"Got response from RowBoat API: {assistant_response[:100]}...")
        return assistant_response, final_messages, new_state
    except Exception as e:
        logger.error(f"Error processing conversation turn: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return "I'm sorry, I encountered an error processing your request.", messages, previous_state

View file

@ -0,0 +1,423 @@
import os
import logging
import datetime
from typing import Dict, Any, Optional, List, Union
import copy
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, PyMongoError
from pymongo.collection import Collection
from bson import json_util
from pydantic import BaseModel
from rowboat.schema import ApiMessage
# Configure logging to stdout for Docker compatibility
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]  # Send logs to stdout
)
logger = logging.getLogger(__name__)

# MongoDB Configuration
# NOTE(review): MONGODB_URI is None when the env var is unset; MongoClient(None)
# then falls back to localhost defaults — confirm that is intended.
MONGODB_URI = os.environ.get('MONGODB_URI')
MONGODB_DB = 'rowboat'
CALL_STATE_COLLECTION = 'call-state'
MONGODB_EXPIRY_SECONDS = 86400  # Default 24 hours
API_KEYS_COLLECTION = "api_keys"

# MongoDB client singleton — populated lazily by init_mongodb()
_mongo_client = None
_db = None
_call_state_collection = None
_api_keys_collection = None
# Define chat state pydantic model
class CallState(BaseModel):
    """Per-call conversation state, persisted to MongoDB between webhooks."""
    # Stateless-API message history sent to/received from RowBoat
    messages: List[ApiMessage] = []
    # RowBoat workflow driving this call
    workflow_id: str
    # RowBoat project the workflow belongs to
    project_id: str
    system_prompt: str
    # Completed conversation turns; 0 means the greeting has not been played
    turn_count: int = 0
    # True for caller-initiated (inbound) calls
    inbound: bool = False
    conversation_history: List[Dict[str, str]] = []  # Using Dict instead of ApiMessage for chat history
    # Callee number (populated for inbound calls)
    to_number: str = ""
    # Unix timestamp of creation, used for expiration tracking
    created_at: int
    state: Any = None  # Allow any type since the API might return a complex state object
    # Most recent Deepgram transcription awaiting consumption, if any
    last_transcription: Optional[str] = None
    # Enable dictionary-style access for compatibility with existing code
    def __getitem__(self, key):
        return getattr(self, key)
    def __setitem__(self, key, value):
        setattr(self, key, value)
    def get(self, key, default=None):
        # Dict-like .get(). NOTE(review): there is no __delitem__ or
        # __contains__, so `del state[...]` and `'key' in state` do NOT
        # behave like they would on a dict.
        return getattr(self, key, default)
    model_config = {
        # Allow extra fields for flexibility
        "extra": "allow",
        # More lenient type validation
        "arbitrary_types_allowed": True,
        # Allow population by field name
        "populate_by_name": True
    }
def init_mongodb():
    """Initialize the MongoDB connection, collections, and TTL index.

    Returns:
        True on success.

    Raises:
        RuntimeError: if the server cannot be reached.
    """
    global _mongo_client, _db, _call_state_collection, _api_keys_collection
    try:
        _mongo_client = MongoClient(MONGODB_URI)
        # Issue a ping so a bad connection fails here instead of on first use.
        _mongo_client.admin.command('ping')
        # Bind the database and collection handles.
        _db = _mongo_client[MONGODB_DB]
        _call_state_collection = _db[CALL_STATE_COLLECTION]
        _api_keys_collection = _db[API_KEYS_COLLECTION]
        # Ensure the TTL index on expires_at exists (docs expire at that time).
        existing = _call_state_collection.index_information()
        if 'expires_at_1' not in existing:
            _call_state_collection.create_index('expires_at', expireAfterSeconds=0)
        logger.info(f"Connected to MongoDB at {MONGODB_URI}")
        return True
    except ConnectionFailure as e:
        logger.error(f"Failed to connect to MongoDB: {str(e)}")
        raise RuntimeError(f"Could not connect to MongoDB: {str(e)}")
def get_collection() -> Collection:
    """Return the call-state collection, lazily initializing MongoDB."""
    global _call_state_collection
    if _call_state_collection is None:
        # First access: establish the connection and bind the handles.
        init_mongodb()
    return _call_state_collection
def get_api_keys_collection() -> Collection:
    """Return the API-keys collection, lazily initializing MongoDB."""
    global _api_keys_collection
    if _api_keys_collection is None:
        # First access: establish the connection and bind the handles.
        init_mongodb()
    return _api_keys_collection
def get_api_key(project_id: str) -> Optional[str]:
    """Get the API key for a given project ID.

    Args:
        project_id: The project ID to look up.

    Returns:
        The API key string, or None when no document exists for the project
        or the document lacks a 'key' field.
    """
    collection = get_api_keys_collection()
    doc = collection.find_one({"projectId": project_id})
    # FIX: use .get() so a malformed document without a 'key' field returns
    # None (consistent with the declared Optional[str]) instead of raising
    # KeyError.
    return doc.get("key") if doc else None
def save_call_state(call_sid: str, call_state: CallState) -> bool:
    """
    Persist a CallState document to MongoDB, keyed by call SID.
    Args:
        call_sid: The call SID to use as document ID
        call_state: The CallState model to save
    Returns:
        True if the write succeeded
    Raises:
        RuntimeError: if validation fails or the write cannot be completed
    """
    try:
        # Validate call_state is a CallState object
        if not isinstance(call_state, CallState):
            raise ValueError(f"call_state must be a CallState object, got {type(call_state)}")
        coll = get_collection()
        # Upsert keyed on call_sid: first save creates, later saves update.
        coll.update_one(
            {'_id': call_sid},
            {'$set': call_state.model_dump()},
            upsert=True
        )
        logger.info(f"Saved call state to MongoDB for call {call_sid}")
        return True
    except PyMongoError as e:
        logger.error(f"Error saving call state to MongoDB for call {call_sid}: {str(e)}")
        raise RuntimeError(f"Failed to save call state to MongoDB: {str(e)}")
    except Exception as e:
        logger.error(f"Unexpected error in save_call_state: {str(e)}")
        raise RuntimeError(f"Failed to save call state: {str(e)}")
def get_call_state(call_sid: str) -> Optional[CallState]:
    """
    Retrieve call state from MongoDB.
    Args:
        call_sid: The call SID to retrieve
    Returns:
        A CallState model, or None when no document exists
    Raises:
        RuntimeError: on any MongoDB failure
    """
    try:
        doc = get_collection().find_one({'_id': call_sid})
        if not doc:
            logger.info(f"No call state found in MongoDB for call {call_sid}")
            return None
        # Re-validate the stored document into a CallState model.
        restored = CallState.model_validate(doc)
        logger.info(f"Retrieved call state from MongoDB for call {call_sid}")
        return restored
    except PyMongoError as e:
        logger.error(f"Error retrieving call state from MongoDB for call {call_sid}: {str(e)}")
        raise RuntimeError(f"Failed to retrieve call state from MongoDB: {str(e)}")
    except Exception as e:
        logger.error(f"Unexpected error in get_call_state: {str(e)}")
        raise RuntimeError(f"Failed to retrieve call state: {str(e)}")
def delete_call_state(call_sid: str) -> bool:
    """
    Delete call state from MongoDB.
    Args:
        call_sid: The call SID to delete
    Returns:
        True when a document was deleted, False when none existed
    Raises:
        RuntimeError: on any MongoDB failure
    """
    try:
        outcome = get_collection().delete_one({'_id': call_sid})
        if outcome.deleted_count > 0:
            logger.info(f"Deleted call state from MongoDB for call {call_sid}")
            return True
        logger.info(f"No call state found to delete in MongoDB for call {call_sid}")
        return False
    except PyMongoError as e:
        logger.error(f"Error deleting call state from MongoDB for call {call_sid}: {str(e)}")
        raise RuntimeError(f"Failed to delete call state from MongoDB: {str(e)}")
    except Exception as e:
        logger.error(f"Unexpected error in delete_call_state: {str(e)}")
        raise RuntimeError(f"Failed to delete call state: {str(e)}")
def count_active_calls() -> int:
    """
    Count active call documents in MongoDB.
    Returns:
        Number of documents currently in the call-state collection
    Raises:
        RuntimeError: on any MongoDB failure
    """
    try:
        # An empty filter counts every document in the collection.
        return get_collection().count_documents({})
    except PyMongoError as e:
        logger.error(f"Error counting active calls in MongoDB: {str(e)}")
        raise RuntimeError(f"Failed to count active calls in MongoDB: {str(e)}")
    except Exception as e:
        logger.error(f"Unexpected error in count_active_calls: {str(e)}")
        raise RuntimeError(f"Failed to count active calls: {str(e)}")
def get_mongodb_status() -> Dict[str, Any]:
    """
    Get MongoDB connection status information.
    Returns:
        Dictionary with status information (connection, call counts,
        collection stats, and a timestamp)
    """
    status = {
        "status": "connected",
        "uri": MONGODB_URI,
        "database": MONGODB_DB,
        "collection": CALL_STATE_COLLECTION
    }
    try:
        # First check connection with a simple command
        collection = get_collection()
        db = collection.database
        db.command('ping')
        status["connection"] = "ok"
        # Count active calls
        count = count_active_calls()
        status["active_calls"] = count
        # Get collection stats (best-effort: record failures, don't propagate)
        try:
            stats = db.command("collStats", CALL_STATE_COLLECTION)
            status["size_bytes"] = stats.get("size", 0)
            status["document_count"] = stats.get("count", 0)
            status["index_count"] = len(stats.get("indexSizes", {}))
        except Exception as stats_error:
            status["stats_error"] = str(stats_error)
    except Exception as e:
        status["status"] = "error"
        status["error"] = str(e)
    # Timestamp every report. NOTE(review): datetime.utcnow() is deprecated in
    # Python 3.12+; datetime.now(datetime.UTC) is the modern equivalent.
    status["timestamp"] = datetime.datetime.utcnow().isoformat()
    return status
# Twilio configuration functions
def get_twilio_config(phone_number: str) -> Optional[Dict[str, Any]]:
    """
    Get Twilio configuration for a specific phone number from MongoDB.

    Tries several normalized formats of the number (raw, cleaned, with/without
    '+' and US country-code prefixes) against the 'twilio_configs' collection,
    accepting only documents with status 'active'.
    Args:
        phone_number: The phone number to look up configuration for
    Returns:
        Configuration dictionary or None if not found/active (also returns
        None on any error, so callers can fall back to default behavior)
    """
    try:
        # Get MongoDB client and database
        client = get_collection().database.client
        db = client[MONGODB_DB]
        # Use the twilio_configs collection
        config_collection = db['twilio_configs']
        # Enhanced logging for phone number format
        logger.info(f"Looking up configuration for phone number: '{phone_number}'")
        # Try different formats of the phone number
        cleaned_number = phone_number.strip().replace(' ', '').replace('-', '').replace('(', '').replace(')', '')
        possible_formats = [
            phone_number,  # Original format from Twilio
            cleaned_number,  # Thoroughly cleaned number
            '+' + cleaned_number if not cleaned_number.startswith('+') else cleaned_number,  # Ensure + prefix
            # Try with different country code formats
            '+1' + cleaned_number[-10:] if len(cleaned_number) >= 10 else cleaned_number,  # US format with +1
            '1' + cleaned_number[-10:] if len(cleaned_number) >= 10 else cleaned_number,  # US format with 1
            cleaned_number[-10:] if len(cleaned_number) >= 10 else cleaned_number,  # US format without country code
        ]
        # Remove duplicates while preserving order
        unique_formats = []
        for fmt in possible_formats:
            if fmt not in unique_formats:
                unique_formats.append(fmt)
        possible_formats = unique_formats
        # Log the formats we're trying
        logger.info(f"Trying phone number formats: {possible_formats}")
        # Try each format; `config` is always bound because possible_formats
        # is never empty (it always contains at least phone_number itself)
        for phone_format in possible_formats:
            # Look up the configuration for this phone number format with status=active
            config = config_collection.find_one({'phone_number': phone_format, 'status': 'active'})
            if config:
                logger.info(f"Found active configuration for '{phone_format}': project_id={config.get('project_id')}, workflow_id={config.get('workflow_id')}")
                break  # Found a match, exit the loop
        # If we didn't find any match
        if not config:
            # Try a more generic query to see what configurations exist
            # (diagnostic only — result is just logged, never returned)
            try:
                all_configs = list(config_collection.find({'phone_number': {'$regex': phone_number[-10:] if len(phone_number) >= 10 else phone_number}}))
                if all_configs:
                    logger.warning(f"Found {len(all_configs)} configurations that match phone number {phone_number}, but none are active:")
                    for cfg in all_configs:
                        logger.warning(f" - Phone: {cfg.get('phone_number')}, Status: {cfg.get('status')}, Workflow: {cfg.get('workflow_id')}")
                else:
                    logger.warning(f"No configurations found at all for phone number {phone_number} or related formats")
            except Exception as e:
                logger.error(f"Error running regex query: {str(e)}")
            logger.warning(f"No active configuration found for any format of phone number {phone_number}")
            return None
        # Make sure required fields are present
        if 'project_id' not in config or 'workflow_id' not in config:
            logger.error(f"Configuration for {phone_number} is missing required fields")
            return None
        logger.info(f"Found active configuration for {phone_number}: project_id={config['project_id']}, workflow_id={config['workflow_id']}")
        return config
    except Exception as e:
        logger.error(f"Error retrieving Twilio configuration for {phone_number}: {str(e)}")
        # Return None instead of raising an exception to allow fallback to default behavior
        return None
def list_active_twilio_configs() -> List[Dict[str, Any]]:
    """
    List all active Twilio configurations from MongoDB.
    Returns:
        List of configuration documents with status 'active' (empty on error)
    """
    try:
        # Reach the twilio_configs collection via the shared client.
        db = get_collection().database.client[MONGODB_DB]
        active = list(db['twilio_configs'].find({'status': 'active'}))
        logger.info(f"Found {len(active)} active Twilio configurations")
        return active
    except Exception as e:
        logger.error(f"Error retrieving active Twilio configurations: {str(e)}")
        return []
def save_twilio_config(config: Dict[str, Any]) -> bool:
    """
    Save a Twilio configuration to MongoDB.
    Args:
        config: Configuration dictionary with at least phone_number, project_id, and workflow_id
    Returns:
        True if successful, False when validation fails or the write errors
    """
    # Reject configs missing any required field.
    for field in ('phone_number', 'project_id', 'workflow_id'):
        if field not in config:
            logger.error(f"Missing required field '{field}' in Twilio configuration")
            return False
    try:
        # Reach the twilio_configs collection via the shared client.
        db = get_collection().database.client[MONGODB_DB]
        config_collection = db['twilio_configs']
        # New configurations default to active.
        if 'status' not in config:
            config['status'] = 'active'
        # Stamp timestamps: updated_at always, created_at only on first save.
        config['updated_at'] = datetime.datetime.utcnow()
        if 'created_at' not in config:
            config['created_at'] = config['updated_at']
        phone_number = config['phone_number']
        # Upsert keyed on the phone number.
        outcome = config_collection.update_one(
            {'phone_number': phone_number},
            {'$set': config},
            upsert=True
        )
        if outcome.matched_count > 0:
            logger.info(f"Updated Twilio configuration for {phone_number}")
        else:
            logger.info(f"Created new Twilio configuration for {phone_number}")
        return True
    except Exception as e:
        logger.error(f"Error saving Twilio configuration: {str(e)}")
        return False
# Initialize MongoDB on module import so a bad connection fails fast at
# startup rather than on the first request.
init_mongodb()