feat: Add Search Space System Instructions

- Added `citations_enabled` and `qna_custom_instructions` fields to the SearchSpace model for better QnA configuration.
- Updated the creation and update schemas to handle new fields with appropriate defaults.
- Refactored QnA handling in the agent to utilize the new SearchSpace fields for improved response customization.
- Adjusted UI components to include settings for managing QnA configurations.
- Enhanced onboarding process to incorporate prompt setup as an optional step.
This commit is contained in:
DESKTOP-RTLN3BA\$punk 2025-11-19 15:04:46 -08:00
parent 1eb70e2734
commit 6648409237
18 changed files with 737 additions and 166 deletions

View file

@ -0,0 +1,42 @@
"""add_qna_configuration_to_searchspaces
Revision ID: 37
Revises: 36
Create Date: 2025-11-19 00:00:00.000000
"""
from collections.abc import Sequence
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "37"
down_revision: str | None = "36"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Add QnA configuration columns to the searchspaces table.

    Adds:
        citations_enabled: non-nullable boolean. A server_default of "true"
            is required so the NOT NULL constraint is satisfied for rows
            that already exist when the migration runs.
        qna_custom_instructions: nullable free-text column; NULL is treated
            as "no custom instructions set".
    """
    # server_default (not just an ORM-side default) backfills existing rows.
    op.add_column(
        "searchspaces",
        sa.Column(
            "citations_enabled", sa.Boolean(), nullable=False, server_default="true"
        ),
    )
    # Nullable text field; application code treats NULL/empty as "unset".
    op.add_column(
        "searchspaces",
        sa.Column("qna_custom_instructions", sa.Text(), nullable=True),
    )
def downgrade() -> None:
    """Remove QnA configuration columns from the searchspaces table.

    Columns are dropped in reverse order of their creation in upgrade().
    Note: any user-entered custom instructions are lost on downgrade.
    """
    op.drop_column("searchspaces", "qna_custom_instructions")
    op.drop_column("searchspaces", "citations_enabled")

View file

@ -1472,7 +1472,7 @@ async def handle_qna_workflow(
writer(
{
"yield_value": streaming_service.format_terminal_info_delta(
"✍️ Writing comprehensive answer with citations..."
"✍️ Writing comprehensive answer ..."
)
}
)

View file

@ -1,29 +1,18 @@
import datetime
"""Default system prompts for Q&A agent.
from ..prompts import _build_language_instruction
The prompt system is modular with 3 parts:
- Part 1 (Base): Core instructions for answering questions (no citations)
- Part 2 (Citations): Citation-specific instructions and formatting rules
- Part 3 (Custom): User's custom instructions (empty by default)
def get_qna_citation_system_prompt(
chat_history: str | None = None, language: str | None = None
):
chat_history_section = (
f"""
<chat_history>
{chat_history if chat_history else "NO CHAT HISTORY PROVIDED"}
</chat_history>
Combinations:
- Part 1 only: Answers without citations
- Part 1 + Part 2: Answers with citations
- Part 1 + Part 2 + Part 3: Answers with citations and custom instructions
"""
if chat_history is not None
else """
<chat_history>
NO CHAT HISTORY PROVIDED
</chat_history>
"""
)
# Add language instruction if specified
language_instruction = _build_language_instruction(language)
return f"""
Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")}
# Part 1: Base system prompt for answering without citations
DEFAULT_QNA_BASE_PROMPT = """Today's date: {date}
You are SurfSense, an advanced AI research assistant that provides detailed, well-researched answers to user questions by synthesizing information from multiple personal knowledge sources.{language_instruction}
{chat_history_section}
<knowledge_sources>
@ -53,131 +42,100 @@ You are SurfSense, an advanced AI research assistant that provides detailed, wel
2. Carefully analyze all provided documents in the <document> sections.
3. Extract relevant information that directly addresses the user's question.
4. Provide a comprehensive, detailed answer using information from the user's personal knowledge sources.
5. For EVERY piece of information you include from the documents, add a citation in the format [citation:knowledge_source_id] where knowledge_source_id is the source_id from the document's metadata.
6. Make sure ALL factual statements from the documents have proper citations.
7. If multiple documents support the same point, include all relevant citations [citation:source_id1], [citation:source_id2].
8. Structure your answer logically and conversationally, as if having a detailed discussion with the user.
9. Use your own words to synthesize and connect ideas, but cite ALL information from the documents.
10. If documents contain conflicting information, acknowledge this and present both perspectives with appropriate citations.
11. If the user's question cannot be fully answered with the provided documents, clearly state what information is missing.
12. Provide actionable insights and practical information when relevant to the user's question.
13. Use the chat history to maintain conversation continuity and refer to previous discussions when relevant.
14. CRITICAL: You MUST use the exact source_id value from each document's metadata for citations. Do not create your own citation numbers.
15. CRITICAL: Every citation MUST be in the format [citation:knowledge_source_id] where knowledge_source_id is the exact source_id value.
16. CRITICAL: Never modify or change the source_id - always use the original values exactly as provided in the metadata.
17. CRITICAL: Do not return citations as clickable links.
18. CRITICAL: Never format citations as markdown links like "([citation:5](https://example.com))". Always use plain square brackets only.
19. CRITICAL: Citations must ONLY appear as [citation:source_id] or [citation:source_id1], [citation:source_id2] format - never with parentheses, hyperlinks, or other formatting.
20. CRITICAL: Never make up source IDs. Only use source_id values that are explicitly provided in the document metadata.
21. CRITICAL: If you are unsure about a source_id, do not include a citation rather than guessing or making one up.
22. CRITICAL: Remember that all knowledge sources contain personal information - provide answers that reflect this personal context.
23. CRITICAL: Be conversational and engaging while maintaining accuracy and proper citations.
5. Structure your answer logically and conversationally, as if having a detailed discussion with the user.
6. Use your own words to synthesize and connect ideas from the documents.
7. If documents contain conflicting information, acknowledge this and present both perspectives.
8. If the user's question cannot be fully answered with the provided documents, clearly state what information is missing.
9. Provide actionable insights and practical information when relevant to the user's question.
10. Use the chat history to maintain conversation continuity and refer to previous discussions when relevant.
11. Remember that all knowledge sources contain personal information - provide answers that reflect this personal context.
12. Be conversational and engaging while maintaining accuracy.
</instructions>
<format>
- Write in a clear, conversational tone suitable for detailed Q&A discussions
- Provide comprehensive answers that thoroughly address the user's question
- Use appropriate paragraphs and structure for readability
- Every fact from the documents must have a citation in the format [citation:knowledge_source_id] where knowledge_source_id is the EXACT source_id from the document's metadata
- Citations should appear at the end of the sentence containing the information they support
- Multiple citations should be separated by commas: [citation:source_id1], [citation:source_id2], [citation:source_id3]
- No need to return references section. Just citations in answer.
- NEVER create your own citation format - use the exact source_id values from the documents in the [citation:source_id] format
- NEVER format citations as clickable links or as markdown links like "([citation:5](https://example.com))". Always use plain square brackets only
- NEVER make up source IDs if you are unsure about the source_id. It is better to omit the citation than to guess
- ALWAYS provide personalized answers that reflect the user's own knowledge and context
- Be thorough and detailed in your explanations while remaining focused on the user's specific question
- If asking follow-up questions would be helpful, suggest them at the end of your response
</format>
<input_example>
<documents>
<document>
<metadata>
<source_id>5</source_id>
<source_type>GITHUB_CONNECTOR</source_type>
</metadata>
<content>
Python's asyncio library provides tools for writing concurrent code using the async/await syntax. It's particularly useful for I/O-bound and high-level structured network code.
</content>
</document>
<document>
<metadata>
<source_id>12</source_id>
<source_type>YOUTUBE_VIDEO</source_type>
</metadata>
<content>
Asyncio can improve performance by allowing other code to run while waiting for I/O operations to complete. However, it's not suitable for CPU-bound tasks as it runs on a single thread.
</content>
</document>
</documents>
User Question: "How does Python asyncio work and when should I use it?"
</input_example>
<output_example>
Based on your GitHub repositories and video content, Python's asyncio library provides tools for writing concurrent code using the async/await syntax [citation:5]. It's particularly useful for I/O-bound and high-level structured network code [citation:5].
The key advantage of asyncio is that it can improve performance by allowing other code to run while waiting for I/O operations to complete [citation:12]. This makes it excellent for scenarios like web scraping, API calls, database operations, or any situation where your program spends time waiting for external resources.
However, from your video learning, it's important to note that asyncio is not suitable for CPU-bound tasks as it runs on a single thread [citation:12]. For computationally intensive work, you'd want to use multiprocessing instead.
Would you like me to explain more about specific asyncio patterns or help you determine if asyncio is right for a particular project you're working on?
</output_example>
<incorrect_citation_formats>
DO NOT use any of these incorrect citation formats:
- Using parentheses and markdown links: ([citation:5](https://github.com/MODSetter/SurfSense))
- Using parentheses around brackets: ([citation:5])
- Using hyperlinked text: [link to source 5](https://example.com)
- Using footnote style: ... library¹
- Making up source IDs when source_id is unknown
- Using old IEEE format: [1], [2], [3]
- Using source types instead of IDs: [citation:GITHUB_CONNECTOR] instead of [citation:5]
</incorrect_citation_formats>
<correct_citation_formats>
ONLY use the format [citation:source_id] or multiple citations [citation:source_id1], [citation:source_id2], [citation:source_id3]
</correct_citation_formats>
<user_query_instructions>
When you see a user query, focus exclusively on providing a detailed, comprehensive answer using information from the provided documents, which contain the user's personal knowledge and data.
Make sure your response:
1. Considers the chat history for context and conversation continuity
2. Directly and thoroughly answers the user's question with personalized information from their own knowledge sources
3. Uses proper citations for all information from documents
4. Is conversational, engaging, and detailed
5. Acknowledges the personal nature of the information being provided
6. Offers follow-up suggestions when appropriate
3. Is conversational, engaging, and detailed
4. Acknowledges the personal nature of the information being provided
5. Offers follow-up suggestions when appropriate
</user_query_instructions>
"""
# Part 2: Citation-specific instructions to add citation capabilities
DEFAULT_QNA_CITATION_INSTRUCTIONS = """
<citation_instructions>
CRITICAL CITATION REQUIREMENTS:
def get_qna_no_documents_system_prompt(
chat_history: str | None = None, language: str | None = None
):
chat_history_section = (
f"""
<chat_history>
{chat_history if chat_history else "NO CHAT HISTORY PROVIDED"}
</chat_history>
1. For EVERY piece of information you include from the documents, add a citation in the format [citation:knowledge_source_id] where knowledge_source_id is the source_id from the document's metadata.
2. Make sure ALL factual statements from the documents have proper citations.
3. If multiple documents support the same point, include all relevant citations [citation:source_id1], [citation:source_id2].
4. You MUST use the exact source_id value from each document's metadata for citations. Do not create your own citation numbers.
5. Every citation MUST be in the format [citation:knowledge_source_id] where knowledge_source_id is the exact source_id value.
6. Never modify or change the source_id - always use the original values exactly as provided in the metadata.
7. Do not return citations as clickable links.
8. Never format citations as markdown links like "([citation:5](https://example.com))". Always use plain square brackets only.
9. Citations must ONLY appear as [citation:source_id] or [citation:source_id1], [citation:source_id2] format - never with parentheses, hyperlinks, or other formatting.
10. Never make up source IDs. Only use source_id values that are explicitly provided in the document metadata.
11. If you are unsure about a source_id, do not include a citation rather than guessing or making one up.
<citation_format>
- Every fact from the documents must have a citation in the format [citation:knowledge_source_id] where knowledge_source_id is the EXACT source_id from the document's metadata
- Citations should appear at the end of the sentence containing the information they support
- Multiple citations should be separated by commas: [citation:source_id1], [citation:source_id2], [citation:source_id3]
- No need to return references section. Just citations in answer.
- NEVER create your own citation format - use the exact source_id values from the documents in the [citation:source_id] format
- NEVER format citations as clickable links or as markdown links like "([citation:5](https://example.com))". Always use plain square brackets only
- NEVER make up source IDs if you are unsure about the source_id. It is better to omit the citation than to guess
</citation_format>
<citation_examples>
CORRECT citation formats:
- [citation:5]
- [citation:source_id1], [citation:source_id2], [citation:source_id3]
INCORRECT citation formats (DO NOT use):
- Using parentheses and markdown links: ([citation:5](https://github.com/MODSetter/SurfSense))
- Using parentheses around brackets: ([citation:5])
- Using hyperlinked text: [link to source 5](https://example.com)
- Using footnote style: ... library¹
- Making up source IDs when source_id is unknown
- Using old IEEE format: [1], [2], [3]
- Using source types instead of IDs: [citation:GITHUB_CONNECTOR] instead of [citation:5]
</citation_examples>
<citation_output_example>
Based on your GitHub repositories and video content, Python's asyncio library provides tools for writing concurrent code using the async/await syntax [citation:5]. It's particularly useful for I/O-bound and high-level structured network code [citation:5].
The key advantage of asyncio is that it can improve performance by allowing other code to run while waiting for I/O operations to complete [citation:12]. This makes it excellent for scenarios like web scraping, API calls, database operations, or any situation where your program spends time waiting for external resources.
However, from your video learning, it's important to note that asyncio is not suitable for CPU-bound tasks as it runs on a single thread [citation:12]. For computationally intensive work, you'd want to use multiprocessing instead.
</citation_output_example>
</citation_instructions>
"""
if chat_history is not None
else """
<chat_history>
NO CHAT HISTORY PROVIDED
</chat_history>
"""
)
# Add language instruction if specified
language_instruction = _build_language_instruction(language)
# Part 3: User's custom instructions (empty by default, can be set by user from UI)
DEFAULT_QNA_CUSTOM_INSTRUCTIONS = ""
return f"""
Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")}
# Full prompt with all parts combined (for backward compatibility and migration)
DEFAULT_QNA_CITATION_PROMPT = (
DEFAULT_QNA_BASE_PROMPT
+ DEFAULT_QNA_CITATION_INSTRUCTIONS
+ DEFAULT_QNA_CUSTOM_INSTRUCTIONS
)
DEFAULT_QNA_NO_DOCUMENTS_PROMPT = """Today's date: {date}
You are SurfSense, an advanced AI research assistant that provides helpful, detailed answers to user questions in a conversational manner.{language_instruction}
{chat_history_section}
<context>

View file

@ -1,8 +1,11 @@
import datetime
from typing import Any
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables import RunnableConfig
from sqlalchemy import select
from app.db import SearchSpace
from app.services.reranker_service import RerankerService
from ..utils import (
@ -12,10 +15,53 @@ from ..utils import (
optimize_documents_for_token_limit,
)
from .configuration import Configuration
from .prompts import get_qna_citation_system_prompt, get_qna_no_documents_system_prompt
from .default_prompts import (
DEFAULT_QNA_BASE_PROMPT,
DEFAULT_QNA_CITATION_INSTRUCTIONS,
DEFAULT_QNA_NO_DOCUMENTS_PROMPT,
)
from .state import State
def _build_language_instruction(language: str | None = None):
"""Build language instruction for prompts."""
if language:
return f"\n\nIMPORTANT: Please respond in {language} language. All your responses, explanations, and analysis should be written in {language}."
return ""
def _build_chat_history_section(chat_history: str | None = None):
"""Build chat history section for prompts."""
if chat_history:
return f"""
<chat_history>
{chat_history if chat_history else "NO CHAT HISTORY PROVIDED"}
</chat_history>
"""
return """
<chat_history>
NO CHAT HISTORY PROVIDED
</chat_history>
"""
def _format_system_prompt(
prompt_template: str,
chat_history: str | None = None,
language: str | None = None,
):
"""Format a system prompt template with dynamic values."""
date = datetime.datetime.now().strftime("%Y-%m-%d")
language_instruction = _build_language_instruction(language)
chat_history_section = _build_chat_history_section(chat_history)
return prompt_template.format(
date=date,
language_instruction=language_instruction,
chat_history_section=chat_history_section,
)
async def rerank_documents(state: State, config: RunnableConfig) -> dict[str, Any]:
"""
Rerank the documents based on relevance to the user's question.
@ -105,6 +151,33 @@ async def answer_question(state: State, config: RunnableConfig) -> dict[str, Any
user_id = configuration.user_id
search_space_id = configuration.search_space_id
language = configuration.language
# Fetch search space to get QnA configuration
result = await state.db_session.execute(
select(SearchSpace).where(SearchSpace.id == search_space_id)
)
search_space = result.scalar_one_or_none()
if not search_space:
error_message = f"Search space {search_space_id} not found"
print(error_message)
raise RuntimeError(error_message)
# Get QnA configuration from search space
citations_enabled = search_space.citations_enabled
custom_instructions_text = search_space.qna_custom_instructions or ""
# Use constants for base prompt and citation instructions
qna_base_prompt = DEFAULT_QNA_BASE_PROMPT
qna_citation_instructions = (
DEFAULT_QNA_CITATION_INSTRUCTIONS if citations_enabled else ""
)
qna_custom_instructions = (
f"\n<special_important_custom_instructions>\n{custom_instructions_text}\n</special_important_custom_instructions>"
if custom_instructions_text
else ""
)
# Get user's fast LLM
llm = await get_user_fast_llm(state.db_session, user_id, search_space_id)
if not llm:
@ -117,6 +190,11 @@ async def answer_question(state: State, config: RunnableConfig) -> dict[str, Any
chat_history_str = langchain_chat_history_to_str(state.chat_history)
if has_documents_initially:
# Compose the full citation prompt: base + citation instructions + custom instructions
full_citation_prompt_template = (
qna_base_prompt + qna_citation_instructions + qna_custom_instructions
)
# Create base message template for token calculation (without documents)
base_human_message_template = f"""
@ -129,8 +207,8 @@ async def answer_question(state: State, config: RunnableConfig) -> dict[str, Any
"""
# Use initial system prompt for token calculation
initial_system_prompt = get_qna_citation_system_prompt(
chat_history_str, language
initial_system_prompt = _format_system_prompt(
full_citation_prompt_template, chat_history_str, language
)
base_messages = [
SystemMessage(content=initial_system_prompt),
@ -149,11 +227,21 @@ async def answer_question(state: State, config: RunnableConfig) -> dict[str, Any
has_documents = False
# Choose system prompt based on final document availability
system_prompt = (
get_qna_citation_system_prompt(chat_history_str, language)
if has_documents
else get_qna_no_documents_system_prompt(chat_history_str, language)
)
# With documents: use base + citation instructions + custom instructions
# Without documents: use the default no-documents prompt from constants
if has_documents:
full_citation_prompt_template = (
qna_base_prompt + qna_citation_instructions + qna_custom_instructions
)
system_prompt = _format_system_prompt(
full_citation_prompt_template, chat_history_str, language
)
else:
system_prompt = _format_system_prompt(
DEFAULT_QNA_NO_DOCUMENTS_PROMPT + qna_custom_instructions,
chat_history_str,
language,
)
# Generate documents section
documents_text = (

View file

@ -223,6 +223,12 @@ class SearchSpace(BaseModel, TimestampMixin):
name = Column(String(100), nullable=False, index=True)
description = Column(String(500), nullable=True)
citations_enabled = Column(
Boolean, nullable=False, default=True
) # Enable/disable citations
qna_custom_instructions = Column(
Text, nullable=True, default=""
) # User's custom instructions
user_id = Column(
UUID(as_uuid=True), ForeignKey("user.id", ondelete="CASCADE"), nullable=False
)

View file

@ -17,7 +17,12 @@ async def create_search_space(
user: User = Depends(current_active_user),
):
try:
db_search_space = SearchSpace(**search_space.model_dump(), user_id=user.id)
search_space_data = search_space.model_dump()
# citations_enabled defaults to True (handled by Pydantic schema)
# qna_custom_instructions defaults to None/empty (handled by DB)
db_search_space = SearchSpace(**search_space_data, user_id=user.id)
session.add(db_search_space)
await session.commit()
await session.refresh(db_search_space)

View file

@ -12,16 +12,25 @@ class SearchSpaceBase(BaseModel):
class SearchSpaceCreate(SearchSpaceBase):
pass
# Optional on create, will use defaults if not provided
citations_enabled: bool = True
qna_custom_instructions: str | None = None
class SearchSpaceUpdate(SearchSpaceBase):
pass
class SearchSpaceUpdate(BaseModel):
# All fields optional on update - only send what you want to change
name: str | None = None
description: str | None = None
citations_enabled: bool | None = None
qna_custom_instructions: str | None = None
class SearchSpaceRead(SearchSpaceBase, IDModel, TimestampModel):
id: int
created_at: datetime
user_id: uuid.UUID
# QnA configuration
citations_enabled: bool
qna_custom_instructions: str | None = None
model_config = ConfigDict(from_attributes=True)

View file

@ -33,13 +33,6 @@ export default function DashboardLayout({
icon: "SquareTerminal",
items: [],
},
{
title: "Manage LLMs",
url: `/dashboard/${search_space_id}/settings`,
icon: "Settings2",
items: [],
},
{
title: "Sources",
url: "#",
@ -59,6 +52,12 @@ export default function DashboardLayout({
},
],
},
{
title: "Settings",
url: `/dashboard/${search_space_id}/settings`,
icon: "Settings2",
items: [],
},
{
title: "Logs",
url: `/dashboard/${search_space_id}/logs`,

View file

@ -1,6 +1,6 @@
"use client";
import { ArrowLeft, ArrowRight, Bot, CheckCircle, Sparkles } from "lucide-react";
import { ArrowLeft, ArrowRight, Bot, CheckCircle, MessageSquare, Sparkles } from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { useParams, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
@ -8,12 +8,13 @@ import { useEffect, useRef, useState } from "react";
import { Logo } from "@/components/Logo";
import { CompletionStep } from "@/components/onboard/completion-step";
import { SetupLLMStep } from "@/components/onboard/setup-llm-step";
import { SetupPromptStep } from "@/components/onboard/setup-prompt-step";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Progress } from "@/components/ui/progress";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
const TOTAL_STEPS = 2;
const TOTAL_STEPS = 3;
const OnboardPage = () => {
const t = useTranslations("onboard");
@ -95,9 +96,13 @@ const OnboardPage = () => {
const progress = (currentStep / TOTAL_STEPS) * 100;
const stepTitles = [t("setup_llm_configuration"), t("setup_complete")];
const stepTitles = [t("setup_llm_configuration"), "Configure AI Responses", t("setup_complete")];
const stepDescriptions = [t("configure_providers_and_assign_roles"), t("all_set")];
const stepDescriptions = [
t("configure_providers_and_assign_roles"),
"Customize how the AI responds to your queries (Optional)",
t("all_set"),
];
// User can proceed to step 2 if all roles are assigned
const canProceedToStep2 =
@ -106,6 +111,9 @@ const OnboardPage = () => {
preferences.fast_llm_id &&
preferences.strategic_llm_id;
// User can always proceed from step 2 to step 3 (prompt config is optional)
const canProceedToStep3 = true;
const handleNext = () => {
if (currentStep < TOTAL_STEPS) {
setCurrentStep(currentStep + 1);
@ -200,7 +208,8 @@ const OnboardPage = () => {
<CardHeader className="text-center">
<CardTitle className="text-2xl flex items-center justify-center gap-2">
{currentStep === 1 && <Sparkles className="w-6 h-6" />}
{currentStep === 2 && <CheckCircle className="w-6 h-6" />}
{currentStep === 2 && <MessageSquare className="w-6 h-6" />}
{currentStep === 3 && <CheckCircle className="w-6 h-6" />}
{stepTitles[currentStep - 1]}
</CardTitle>
<CardDescription className="text-base">
@ -224,7 +233,10 @@ const OnboardPage = () => {
onPreferencesUpdated={refreshPreferences}
/>
)}
{currentStep === 2 && <CompletionStep searchSpaceId={searchSpaceId} />}
{currentStep === 2 && (
<SetupPromptStep searchSpaceId={searchSpaceId} onComplete={handleNext} />
)}
{currentStep === 3 && <CompletionStep searchSpaceId={searchSpaceId} />}
</motion.div>
</AnimatePresence>
</CardContent>
@ -244,11 +256,31 @@ const OnboardPage = () => {
<ArrowRight className="w-4 h-4" />
</Button>
</>
) : currentStep === 2 ? (
<>
<Button
variant="outline"
onClick={handlePrevious}
className="flex items-center gap-2"
>
<ArrowLeft className="w-4 h-4" />
{t("previous")}
</Button>
{/* Next button is handled by SetupPromptStep component */}
<div />
</>
) : (
<Button variant="outline" onClick={handlePrevious} className="flex items-center gap-2">
<ArrowLeft className="w-4 h-4" />
{t("previous")}
</Button>
<>
<Button
variant="outline"
onClick={handlePrevious}
className="flex items-center gap-2"
>
<ArrowLeft className="w-4 h-4" />
{t("previous")}
</Button>
<div />
</>
)}
</div>
</motion.div>

View file

@ -1,9 +1,10 @@
"use client";
import { ArrowLeft, Bot, Brain, Settings } from "lucide-react";
import { ArrowLeft, Bot, Brain, MessageSquare, Settings } from "lucide-react";
import { useParams, useRouter } from "next/navigation";
import { LLMRoleManager } from "@/components/settings/llm-role-manager";
import { ModelConfigManager } from "@/components/settings/model-config-manager";
import { PromptConfigManager } from "@/components/settings/prompt-config-manager";
import { Separator } from "@/components/ui/separator";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
@ -34,7 +35,7 @@ export default function SettingsPage() {
<div className="space-y-1">
<h1 className="text-3xl font-bold tracking-tight">Settings</h1>
<p className="text-lg text-muted-foreground">
Manage your LLM configurations and role assignments for this search space.
Manage your settings for this search space.
</p>
</div>
</div>
@ -44,7 +45,7 @@ export default function SettingsPage() {
{/* Settings Content */}
<Tabs defaultValue="models" className="space-y-8">
<div className="overflow-x-auto">
<TabsList className="grid w-full min-w-fit grid-cols-2 lg:w-auto lg:inline-grid">
<TabsList className="grid w-full min-w-fit grid-cols-3 lg:w-auto lg:inline-grid">
<TabsTrigger value="models" className="flex items-center gap-2 text-sm">
<Bot className="h-4 w-4" />
<span className="hidden sm:inline">Model Configs</span>
@ -55,6 +56,11 @@ export default function SettingsPage() {
<span className="hidden sm:inline">LLM Roles</span>
<span className="sm:hidden">Roles</span>
</TabsTrigger>
<TabsTrigger value="prompts" className="flex items-center gap-2 text-sm">
<MessageSquare className="h-4 w-4" />
<span className="hidden sm:inline">System Instructions</span>
<span className="sm:hidden">System Instructions</span>
</TabsTrigger>
</TabsList>
</div>
@ -65,6 +71,10 @@ export default function SettingsPage() {
<TabsContent value="roles" className="space-y-6">
<LLMRoleManager searchSpaceId={searchSpaceId} />
</TabsContent>
<TabsContent value="prompts" className="space-y-6">
<PromptConfigManager searchSpaceId={searchSpaceId} />
</TabsContent>
</Tabs>
</div>
</div>

View file

@ -624,10 +624,6 @@ export function SetupLLMStep({
</div>
</CardHeader>
<CardContent className="space-y-3">
<div className="text-xs text-muted-foreground">
<strong>{t("use_cases")}:</strong> {t(role.examplesKey)}
</div>
<div className="space-y-2">
<Label className="text-sm font-medium">{t("assign_llm_config")}:</Label>
<Select

View file

@ -0,0 +1,152 @@
"use client";
import { Info } from "lucide-react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Button } from "@/components/ui/button";
import { Label } from "@/components/ui/label";
import { Switch } from "@/components/ui/switch";
import { Textarea } from "@/components/ui/textarea";
interface SetupPromptStepProps {
searchSpaceId: number;
onComplete?: () => void;
}
/**
 * Onboarding step for configuring QnA behavior (citations toggle + custom
 * system instructions) of a search space. Optional: the user may skip and
 * keep the backend defaults.
 */
export function SetupPromptStep({ searchSpaceId, onComplete }: SetupPromptStepProps) {
	const [enableCitations, setEnableCitations] = useState(true);
	const [customInstructions, setCustomInstructions] = useState("");
	const [saving, setSaving] = useState(false);
	// Dirty flag, set only from the user-driven change handlers below.
	// NOTE: the previous implementation set this from a useEffect keyed on the
	// form values, which also fired on mount and marked a pristine form dirty.
	const [hasChanges, setHasChanges] = useState(false);

	const handleCitationsChange = (checked: boolean) => {
		setEnableCitations(checked);
		setHasChanges(true);
	};

	const handleInstructionsChange = (value: string) => {
		setCustomInstructions(value);
		setHasChanges(true);
	};

	/** Persist the QnA configuration for this search space, then notify the parent. */
	const handleSave = async () => {
		try {
			setSaving(true);
			const payload = {
				citations_enabled: enableCitations,
				// trim() already yields "" for whitespace-only input.
				qna_custom_instructions: customInstructions.trim(),
			};

			const response = await fetch(
				`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/searchspaces/${searchSpaceId}`,
				{
					method: "PUT",
					headers: {
						"Content-Type": "application/json",
						Authorization: `Bearer ${localStorage.getItem("surfsense_bearer_token")}`,
					},
					body: JSON.stringify(payload),
				}
			);

			if (!response.ok) {
				// Backend may return a non-JSON error body; fall back to a status-based message.
				const errorData = await response.json().catch(() => ({}));
				throw new Error(
					errorData.detail || `Failed to save prompt configuration (${response.status})`
				);
			}

			toast.success("Prompt configuration saved successfully");
			setHasChanges(false);
			onComplete?.();
		} catch (error) {
			console.error("Error saving prompt configuration:", error);
			toast.error(
				error instanceof Error ? error.message : "Failed to save prompt configuration"
			);
		} finally {
			setSaving(false);
		}
	};

	/** Skip without saving — backend defaults remain in effect. */
	const handleSkip = () => {
		onComplete?.();
	};

	return (
		<div className="space-y-6">
			<Alert>
				<Info className="h-4 w-4" />
				<AlertDescription>
					These settings are optional. You can skip this step and configure them later in settings.
				</AlertDescription>
			</Alert>
			{/* Citation Toggle */}
			<div className="space-y-4">
				<div className="flex items-center justify-between space-x-4 p-4 rounded-lg border bg-card">
					<div className="flex-1 space-y-1">
						<Label htmlFor="enable-citations" className="text-base font-medium">
							Enable Citations
						</Label>
						<p className="text-sm text-muted-foreground">
							When enabled, AI responses will include citations to source documents using
							[citation:id] format.
						</p>
					</div>
					<Switch
						id="enable-citations"
						checked={enableCitations}
						onCheckedChange={handleCitationsChange}
					/>
				</div>
				{!enableCitations && (
					<Alert
						variant="default"
						className="bg-yellow-50 dark:bg-yellow-950/20 border-yellow-200 dark:border-yellow-800"
					>
						<Info className="h-4 w-4 text-yellow-600 dark:text-yellow-500" />
						<AlertDescription className="text-yellow-800 dark:text-yellow-300">
							Disabling citations means AI responses won't include source references. You can
							re-enable this anytime in settings.
						</AlertDescription>
					</Alert>
				)}
			</div>
			{/* SearchSpace System Instructions */}
			<div className="space-y-4">
				<div className="space-y-2">
					<Label htmlFor="custom-instructions" className="text-base font-medium">
						SearchSpace System Instructions (Optional)
					</Label>
					<p className="text-sm text-muted-foreground">
						Add system instructions to guide how the AI should respond. For example: "Always provide
						code examples" or "Keep responses concise and technical".
					</p>
					<Textarea
						id="custom-instructions"
						placeholder="E.g., Always provide practical examples, be concise, focus on technical details..."
						value={customInstructions}
						onChange={(e) => handleInstructionsChange(e.target.value)}
						rows={6}
						className="resize-none"
					/>
					<p className="text-xs text-muted-foreground">{customInstructions.length} characters</p>
				</div>
			</div>
			{/* Action Buttons */}
			<div className="flex items-center justify-between pt-4 border-t">
				<Button variant="ghost" onClick={handleSkip} disabled={saving}>
					Skip for now
				</Button>
				<Button onClick={handleSave} disabled={saving || !hasChanges}>
					{saving ? "Saving..." : "Save Configuration"}
				</Button>
			</div>
		</div>
	);
}

View file

@ -33,7 +33,7 @@ const ROLE_DESCRIPTIONS = {
long_context: {
icon: Brain,
title: "Long Context LLM",
description: "Handles complex tasks requiring extensive context understanding and reasoning",
description: "Handles summarization of long documents and complex Q&A",
color: "bg-blue-100 text-blue-800 border-blue-200",
examples: "Document analysis, research synthesis, complex Q&A",
characteristics: ["Large context window", "Deep reasoning", "Complex analysis"],

View file

@ -0,0 +1,269 @@
"use client";
import { Info, RotateCcw, Save } from "lucide-react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Label } from "@/components/ui/label";
import { Separator } from "@/components/ui/separator";
import { Skeleton } from "@/components/ui/skeleton";
import { Switch } from "@/components/ui/switch";
import { Textarea } from "@/components/ui/textarea";
import { useSearchSpace } from "@/hooks/use-search-space";
interface PromptConfigManagerProps {
	/** ID of the search space whose prompt/QnA configuration is managed. */
	searchSpaceId: number;
}
/**
 * Settings panel for a search space's QnA configuration: the citations toggle
 * and the custom system instructions. Loads current values via
 * useSearchSpace, tracks unsaved edits, and persists via PUT to the backend.
 */
export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps) {
	const { searchSpace, loading, fetchSearchSpace } = useSearchSpace({
		searchSpaceId,
		autoFetch: true,
	});

	const [enableCitations, setEnableCitations] = useState(true);
	const [customInstructions, setCustomInstructions] = useState("");
	const [saving, setSaving] = useState(false);
	const [hasChanges, setHasChanges] = useState(false);

	// Initialize local form state from the fetched search space.
	useEffect(() => {
		if (searchSpace) {
			setEnableCitations(searchSpace.citations_enabled);
			setCustomInstructions(searchSpace.qna_custom_instructions || "");
			setHasChanges(false);
		}
	}, [searchSpace]);

	// Derive the dirty flag by comparing form state against the fetched values.
	useEffect(() => {
		if (searchSpace) {
			const currentCustom = searchSpace.qna_custom_instructions || "";
			const changed =
				searchSpace.citations_enabled !== enableCitations || currentCustom !== customInstructions;
			setHasChanges(changed);
		}
	}, [searchSpace, enableCitations, customInstructions]);

	/** Persist the configuration, then re-fetch so the dirty flag resets against fresh data. */
	const handleSave = async () => {
		try {
			setSaving(true);
			const payload = {
				citations_enabled: enableCitations,
				// trim() already yields "" for whitespace-only input.
				qna_custom_instructions: customInstructions.trim(),
			};

			const response = await fetch(
				`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/searchspaces/${searchSpaceId}`,
				{
					method: "PUT",
					headers: {
						"Content-Type": "application/json",
						Authorization: `Bearer ${localStorage.getItem("surfsense_bearer_token")}`,
					},
					body: JSON.stringify(payload),
				}
			);

			if (!response.ok) {
				// Backend may return a non-JSON error body; fall back to a generic message.
				const errorData = await response.json().catch(() => ({}));
				throw new Error(errorData.detail || "Failed to save prompt configuration");
			}

			toast.success("Prompt configuration saved successfully");
			setHasChanges(false);
			// Refresh to get updated data
			await fetchSearchSpace();
		} catch (error) {
			console.error("Error saving prompt configuration:", error);
			toast.error(
				error instanceof Error ? error.message : "Failed to save prompt configuration"
			);
		} finally {
			setSaving(false);
		}
	};

	/** Discard local edits, restoring the last-fetched values. */
	const handleReset = () => {
		if (searchSpace) {
			setEnableCitations(searchSpace.citations_enabled);
			setCustomInstructions(searchSpace.qna_custom_instructions || "");
			setHasChanges(false);
		}
	};

	if (loading) {
		return (
			<div className="space-y-6">
				<Card>
					<CardHeader>
						<Skeleton className="h-6 w-48" />
						<Skeleton className="h-4 w-full max-w-md" />
					</CardHeader>
					<CardContent className="space-y-4">
						<Skeleton className="h-20 w-full" />
						<Skeleton className="h-32 w-full" />
					</CardContent>
				</Card>
			</div>
		);
	}

	return (
		<div className="space-y-6">
			<Alert>
				<Info className="h-4 w-4" />
				<AlertDescription>
					Configure how the AI responds to your queries. Citations add source references, and the
					system instructions personalize the response style.
				</AlertDescription>
			</Alert>
			{/* Citations Card */}
			<Card>
				<CardHeader>
					<CardTitle>Citation Configuration</CardTitle>
					<CardDescription>
						Control whether AI responses include citations to source documents
					</CardDescription>
				</CardHeader>
				<CardContent className="space-y-4">
					<div className="flex items-center justify-between space-x-4 p-4 rounded-lg border bg-card">
						<div className="flex-1 space-y-1">
							<Label htmlFor="enable-citations-settings" className="text-base font-medium">
								Enable Citations
							</Label>
							<p className="text-sm text-muted-foreground">
								When enabled, AI responses will include citations in [citation:id] format linking to
								source documents.
							</p>
						</div>
						<Switch
							id="enable-citations-settings"
							checked={enableCitations}
							onCheckedChange={setEnableCitations}
						/>
					</div>
					{!enableCitations && (
						<Alert
							variant="default"
							className="bg-yellow-50 dark:bg-yellow-950/20 border-yellow-200 dark:border-yellow-800"
						>
							<Info className="h-4 w-4 text-yellow-600 dark:text-yellow-500" />
							<AlertDescription className="text-yellow-800 dark:text-yellow-300">
								Citations are currently disabled. AI responses will not include source references.
								You can re-enable this anytime.
							</AlertDescription>
						</Alert>
					)}
					{enableCitations && (
						<Alert>
							<Info className="h-4 w-4" />
							<AlertDescription>
								Citations are enabled. When answering questions, the AI will reference source
								documents using the [citation:id] format.
							</AlertDescription>
						</Alert>
					)}
				</CardContent>
			</Card>
			{/* SearchSpace System Instructions Card */}
			<Card>
				<CardHeader>
					<CardTitle>SearchSpace System Instructions</CardTitle>
					<CardDescription>
						Add system instructions to guide the AI's response style and behavior
					</CardDescription>
				</CardHeader>
				<CardContent className="space-y-4">
					<div className="space-y-2">
						<Label htmlFor="custom-instructions-settings" className="text-base font-medium">
							Your System Instructions
						</Label>
						<p className="text-sm text-muted-foreground">
							Provide specific guidelines for how you want the AI to respond. These instructions
							will be applied to all answers.
						</p>
						<Textarea
							id="custom-instructions-settings"
							placeholder="E.g., Always provide practical examples, be concise, focus on technical details, use simple language..."
							value={customInstructions}
							onChange={(e) => setCustomInstructions(e.target.value)}
							rows={8}
							className="resize-none font-mono text-sm"
						/>
						<div className="flex items-center justify-between">
							<p className="text-xs text-muted-foreground">
								{customInstructions.length} characters
							</p>
							{customInstructions.length > 0 && (
								<Button
									variant="ghost"
									size="sm"
									onClick={() => setCustomInstructions("")}
									className="h-auto py-1 px-2 text-xs"
								>
									Clear
								</Button>
							)}
						</div>
					</div>
					{customInstructions.trim().length === 0 && (
						<Alert>
							<Info className="h-4 w-4" />
							<AlertDescription>
								No system instructions are currently set. The AI will use default behavior.
							</AlertDescription>
						</Alert>
					)}
				</CardContent>
			</Card>
			{/* Action Buttons */}
			<div className="flex items-center justify-between pt-4">
				<Button
					variant="outline"
					onClick={handleReset}
					disabled={!hasChanges || saving}
					className="flex items-center gap-2"
				>
					<RotateCcw className="h-4 w-4" />
					Reset Changes
				</Button>
				<Button
					onClick={handleSave}
					disabled={!hasChanges || saving}
					className="flex items-center gap-2"
				>
					<Save className="h-4 w-4" />
					{saving ? "Saving..." : "Save Configuration"}
				</Button>
			</div>
			{hasChanges && (
				<Alert
					variant="default"
					className="bg-blue-50 dark:bg-blue-950/20 border-blue-200 dark:border-blue-800"
				>
					<Info className="h-4 w-4 text-blue-600 dark:text-blue-500" />
					<AlertDescription className="text-blue-800 dark:text-blue-300">
						You have unsaved changes. Click "Save Configuration" to apply them.
					</AlertDescription>
				</Alert>
			)}
		</div>
	);
}

View file

@ -9,6 +9,8 @@ interface SearchSpace {
name: string;
description: string;
user_id: string;
citations_enabled: boolean;
qna_custom_instructions: string | null;
}
interface UseSearchSpaceOptions {

View file

@ -8,7 +8,8 @@ interface SearchSpace {
name: string;
description: string;
created_at: string;
// Add other fields from your SearchSpaceRead model
citations_enabled: boolean;
qna_custom_instructions: string | null;
}
export function useSearchSpaces() {

View file

@ -131,6 +131,7 @@
"book_a_call": "Book a call"
},
"nav_menu": {
"settings": "Settings",
"platform": "Platform",
"researcher": "Researcher",
"manage_llms": "Manage LLMs",
@ -425,7 +426,7 @@
"long_context_llm": "Long Context LLM",
"fast_llm": "Fast LLM",
"strategic_llm": "Strategic LLM",
"long_context_desc": "Handles complex tasks requiring extensive context understanding and reasoning",
"long_context_desc": "Handles summarization of long documents and complex Q&A",
"long_context_examples": "Document analysis, research synthesis, complex Q&A",
"large_context_window": "Large context window",
"deep_reasoning": "Deep reasoning",
@ -572,7 +573,7 @@
"no_llm_configs_found": "No LLM Configurations Found",
"add_provider_before_roles": "Please add at least one LLM provider in the previous step before assigning roles.",
"long_context_llm_title": "Long Context LLM",
"long_context_llm_desc": "Handles complex tasks requiring extensive context understanding and reasoning",
"long_context_llm_desc": "Handles summarization of long documents and complex Q&A",
"long_context_llm_examples": "Document analysis, research synthesis, complex Q&A",
"fast_llm_title": "Fast LLM",
"fast_llm_desc": "Optimized for quick responses and real-time interactions",

View file

@ -131,6 +131,7 @@
"book_a_call": "预约咨询"
},
"nav_menu": {
"settings": "设置",
"platform": "平台",
"researcher": "AI 研究",
"manage_llms": "管理 LLM",
@ -425,7 +426,7 @@
"long_context_llm": "长上下文 LLM",
"fast_llm": "快速 LLM",
"strategic_llm": "战略 LLM",
"long_context_desc": "处理需要广泛上下文理解和推理的复杂任务",
"long_context_desc": "处理长文档摘要和复杂问答",
"long_context_examples": "文档分析、研究综合、复杂问答",
"large_context_window": "大型上下文窗口",
"deep_reasoning": "深度推理",
@ -572,7 +573,7 @@
"no_llm_configs_found": "未找到 LLM 配置",
"add_provider_before_roles": "在分配角色之前,请先在上一步中添加至少一个 LLM 提供商。",
"long_context_llm_title": "长上下文 LLM",
"long_context_llm_desc": "处理需要广泛上下文理解和推理的复杂任务",
"long_context_llm_desc": "处理长文档摘要和复杂问答",
"long_context_llm_examples": "文档分析、研究综合、复杂问答",
"fast_llm_title": "快速 LLM",
"fast_llm_desc": "针对快速响应和实时交互进行优化",