chore: ran linting

Anish Sarkar 2026-03-17 04:40:46 +05:30
parent bc1f31b481
commit ac0f2fa2eb
33 changed files with 688 additions and 661 deletions


@@ -262,7 +262,11 @@ def _normalize_connectors(
     valid_set -= _LIVE_SEARCH_CONNECTORS
     if not connectors_to_search:
-        base = list(available_connectors) if available_connectors else list(_ALL_CONNECTORS)
+        base = (
+            list(available_connectors)
+            if available_connectors
+            else list(_ALL_CONNECTORS)
+        )
         return [c for c in base if c not in _LIVE_SEARCH_CONNECTORS]
     normalized: list[str] = []
@@ -291,7 +295,11 @@ def _normalize_connectors(
     # Fallback to all available if nothing matched
     if not out:
-        base = list(available_connectors) if available_connectors else list(_ALL_CONNECTORS)
+        base = (
+            list(available_connectors)
+            if available_connectors
+            else list(_ALL_CONNECTORS)
+        )
         return [c for c in base if c not in _LIVE_SEARCH_CONNECTORS]
     return out
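
Both hunks above are the same mechanical rewrite: the one-line conditional expression is longer than the formatter's line limit, so it is wrapped in parentheses with each branch on its own line. A minimal standalone sketch of the pattern, using placeholder constants rather than the project's real ones:

_ALL = ["github", "slack", "tavily"]
_LIVE = {"tavily"}

def fallback(available: list[str] | None) -> list[str]:
    # Long ternary expressions get wrapped in parentheses so each
    # branch sits on its own line -- the same rewrite the formatter
    # applied twice in _normalize_connectors above.
    base = (
        list(available)
        if available
        else list(_ALL)
    )
    return [c for c in base if c not in _LIVE]

print(fallback(None))                # ['github', 'slack']
print(fallback(["github", "jira"]))  # ['github', 'jira']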


@@ -68,12 +68,12 @@ from .podcast import create_generate_podcast_tool
 from .report import create_generate_report_tool
 from .scrape_webpage import create_scrape_webpage_tool
 from .search_surfsense_docs import create_search_surfsense_docs_tool
-from .web_search import create_web_search_tool
 from .shared_memory import (
     create_recall_shared_memory_tool,
     create_save_shared_memory_tool,
 )
 from .user_memory import create_recall_memory_tool, create_save_memory_tool
+from .web_search import create_web_search_tool
 
 # =============================================================================
 # Tool Definition
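
This hunk is import ordering rather than line wrapping: an import sorter (isort, or Ruff's `I` rules — the commit message does not name the tool) keeps same-package imports alphabetical, and `web_search` sorts after `user_memory`. A quick illustration:

modules = ["report", "scrape_webpage", "search_surfsense_docs",
           "web_search", "shared_memory", "user_memory"]
print(sorted(modules))
# ['report', 'scrape_webpage', 'search_surfsense_docs',
#  'shared_memory', 'user_memory', 'web_search']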


@@ -72,20 +72,22 @@ def _format_web_results(
             continue
         metadata_json = json.dumps(metadata, ensure_ascii=False)
-        doc_xml = "\n".join([
-            "<document>",
-            "<document_metadata>",
-            f" <document_type>{source}</document_type>",
-            f" <title><![CDATA[{title}]]></title>",
-            f" <url><![CDATA[{url}]]></url>",
-            f" <metadata_json><![CDATA[{metadata_json}]]></metadata_json>",
-            "</document_metadata>",
-            "<document_content>",
-            f" <chunk id='{url}'><![CDATA[{content}]]></chunk>",
-            "</document_content>",
-            "</document>",
-            "",
-        ])
+        doc_xml = "\n".join(
+            [
+                "<document>",
+                "<document_metadata>",
+                f" <document_type>{source}</document_type>",
+                f" <title><![CDATA[{title}]]></title>",
+                f" <url><![CDATA[{url}]]></url>",
+                f" <metadata_json><![CDATA[{metadata_json}]]></metadata_json>",
+                "</document_metadata>",
+                "<document_content>",
+                f" <chunk id='{url}'><![CDATA[{content}]]></chunk>",
+                "</document_content>",
+                "</document>",
+                "",
+            ]
+        )
         if total_chars + len(doc_xml) > max_chars:
             parts.append("<!-- Output truncated to fit context window -->")
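
The hunk itself only changes the black-style indentation of the join call, but the logic it preserves is worth a sketch: each search result becomes a small XML document with CDATA-wrapped fields, and appending stops once a character budget is exceeded. A hedged reconstruction of that shape — the function name and simplified fields are assumptions; only the join, the budget check, and the truncation comment appear in the hunk:

def format_results(results: list[dict], max_chars: int = 4000) -> str:
    # Build one XML blob per result and stop before the combined
    # output exceeds the character budget.
    parts: list[str] = []
    total_chars = 0
    for r in results:
        doc_xml = "\n".join(
            [
                "<document>",
                f" <title><![CDATA[{r['title']}]]></title>",
                f" <chunk id='{r['url']}'><![CDATA[{r['content']}]]></chunk>",
                "</document>",
                "",
            ]
        )
        if total_chars + len(doc_xml) > max_chars:
            parts.append("<!-- Output truncated to fit context window -->")
            break
        parts.append(doc_xml)
        total_chars += len(doc_xml)
    return "\n".join(parts)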
@@ -152,9 +154,7 @@ def create_web_search_tool(
     ]
     engine_names = ["SearXNG (platform default)"]
-    engine_names.extend(
-        _CONNECTOR_LABELS.get(c, c) for c in active_live_connectors
-    )
+    engine_names.extend(_CONNECTOR_LABELS.get(c, c) for c in active_live_connectors)
     engines_summary = ", ".join(engine_names)
     description = (
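
This hunk goes the other way: the generator expression now fits on one line, so the formatter collapses the call. A quick length check, assuming black/Ruff's default 88-character limit (the commit message does not name the tool):

line = "    engine_names.extend(_CONNECTOR_LABELS.get(c, c) for c in active_live_connectors)"
print(len(line))  # 84 -- within the default 88-column limit, so it stays on one line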
@@ -179,10 +179,12 @@ def create_web_search_tool(
         tasks: list[asyncio.Task[list[dict[str, Any]]]] = []
         if web_search_service.is_available():
             async def _searxng() -> list[dict[str, Any]]:
                 async with semaphore:
                     _result_obj, docs = await web_search_service.search(
-                        query=query, top_k=clamped_top_k,
+                        query=query,
+                        top_k=clamped_top_k,
                     )
                     return docs
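
The last hunk only splits the keyword arguments onto separate lines (black and Ruff treat a trailing comma as a request to keep one argument per line), but it sits inside a fan-out pattern: each available engine becomes an asyncio task, with a semaphore bounding concurrency. A runnable sketch of that shape — the service call here is a stand-in, not the project's API:

import asyncio
from typing import Any

async def main() -> None:
    semaphore = asyncio.Semaphore(3)  # cap concurrent searches

    async def search_engine(name: str) -> list[dict[str, Any]]:
        async with semaphore:
            await asyncio.sleep(0.1)  # stand-in for the real network call
            return [{"engine": name, "title": "example result"}]

    tasks = [asyncio.create_task(search_engine(e)) for e in ("searxng", "tavily")]
    results = await asyncio.gather(*tasks)
    print(results)

asyncio.run(main())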