mirror of
https://github.com/VectifyAI/PageIndex.git
synced 2026-04-24 23:56:21 +02:00
Disable agent tracing and auto-add litellm/ prefix for retrieve_model
* Disable agent tracing and auto-add litellm/ prefix for retrieve_model
* Preserve supported retrieve_model prefixes
* Remove temporary retrieve_model tests
* Limit tracing disablement to demo execution
This commit is contained in:
parent
d50c293309
commit
a108c021ae
3 changed files with 15 additions and 3 deletions
|
|
@ -24,7 +24,7 @@ import requests
|
|||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from agents import Agent, Runner, function_tool
|
||||
from agents import Agent, Runner, function_tool, set_tracing_disabled
|
||||
from agents.model_settings import ModelSettings
|
||||
from agents.stream_events import RawResponsesStreamEvent, RunItemStreamEvent
|
||||
from openai.types.responses import ResponseTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent
|
||||
|
|
@ -135,6 +135,8 @@ def query_agent(client: PageIndexClient, doc_id: str, prompt: str, verbose: bool
|
|||
|
||||
if __name__ == "__main__":
|
||||
|
||||
set_tracing_disabled(True)
|
||||
|
||||
# Download PDF if needed
|
||||
if not os.path.exists(PDF_PATH):
|
||||
print(f"Downloading {PDF_URL} ...")
|
||||
|
|
|
|||
|
|
@ -15,6 +15,16 @@ from .utils import ConfigLoader, remove_fields
|
|||
META_INDEX = "_meta.json"
|
||||
|
||||
|
||||
def _normalize_retrieve_model(model: str) -> str:
|
||||
"""Preserve supported Agents SDK prefixes and route other provider paths via LiteLLM."""
|
||||
passthrough_prefixes = ("litellm/", "openai/")
|
||||
if not model or "/" not in model:
|
||||
return model
|
||||
if model.startswith(passthrough_prefixes):
|
||||
return model
|
||||
return f"litellm/{model}"
|
||||
|
||||
|
||||
class PageIndexClient:
|
||||
"""
|
||||
A client for indexing and retrieving document content.
|
||||
|
|
@ -35,7 +45,7 @@ class PageIndexClient:
|
|||
overrides["retrieve_model"] = retrieve_model
|
||||
opt = ConfigLoader().load(overrides or None)
|
||||
self.model = opt.model
|
||||
self.retrieve_model = opt.retrieve_model or self.model
|
||||
self.retrieve_model = _normalize_retrieve_model(opt.retrieve_model or self.model)
|
||||
if self.workspace:
|
||||
self.workspace.mkdir(parents=True, exist_ok=True)
|
||||
self.documents = {}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
model: "gpt-4o-2024-11-20"
|
||||
# model: "anthropic/claude-sonnet-4-6"
|
||||
retrieve_model: "gpt-5.4" # defaults to model if not set
|
||||
retrieve_model: "gpt-5.4" # defaults to `model` if not set
|
||||
toc_check_page_num: 20
|
||||
max_page_num_each_node: 10
|
||||
max_token_num_each_node: 20000
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue