mirror of
https://github.com/0xMassi/webclaw.git
synced 2026-04-25 00:06:21 +02:00
feat(noxa-9fw.2): make gemini cli the primary llm backend
- ProviderChain::default() order: Gemini CLI -> OpenAI -> Ollama -> Anthropic
- Add --llm-provider gemini arm to build_llm_provider() in noxa-cli
- Update unknown-provider error to mention gemini
- Update empty-chain error messages in CLI and MCP to mention gemini CLI
- Update MCP startup warn! to list gemini CLI as first option
This commit is contained in:
parent
d800c37bfd
commit
420a1d7522
3 changed files with 79 additions and 14 deletions
|
|
@@ -247,7 +247,7 @@ struct Cli {
|
||||||
#[arg(long, num_args = 0..=1, default_missing_value = "3")]
|
#[arg(long, num_args = 0..=1, default_missing_value = "3")]
|
||||||
summarize: Option<usize>,
|
summarize: Option<usize>,
|
||||||
|
|
||||||
/// Force a specific LLM provider (ollama, openai, anthropic)
|
/// Force a specific LLM provider (gemini, ollama, openai, anthropic)
|
||||||
#[arg(long, env = "NOXA_LLM_PROVIDER")]
|
#[arg(long, env = "NOXA_LLM_PROVIDER")]
|
||||||
llm_provider: Option<String>,
|
llm_provider: Option<String>,
|
||||||
|
|
||||||
|
|
@@ -1814,6 +1814,17 @@ async fn run_brand(cli: &Cli) -> Result<(), String> {
|
||||||
async fn build_llm_provider(cli: &Cli) -> Result<Box<dyn LlmProvider>, String> {
|
async fn build_llm_provider(cli: &Cli) -> Result<Box<dyn LlmProvider>, String> {
|
||||||
if let Some(ref name) = cli.llm_provider {
|
if let Some(ref name) = cli.llm_provider {
|
||||||
match name.as_str() {
|
match name.as_str() {
|
||||||
|
"gemini" => {
|
||||||
|
let provider = noxa_llm::providers::gemini_cli::GeminiCliProvider::new(
|
||||||
|
cli.llm_model.clone(),
|
||||||
|
);
|
||||||
|
if !provider.is_available().await {
|
||||||
|
return Err(
|
||||||
|
"gemini CLI not found on PATH -- install it or omit --llm-provider".into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(Box::new(provider))
|
||||||
|
}
|
||||||
"ollama" => {
|
"ollama" => {
|
||||||
let provider = noxa_llm::providers::ollama::OllamaProvider::new(
|
let provider = noxa_llm::providers::ollama::OllamaProvider::new(
|
||||||
cli.llm_base_url.clone(),
|
cli.llm_base_url.clone(),
|
||||||
|
|
@@ -1842,14 +1853,14 @@ async fn build_llm_provider(cli: &Cli) -> Result<Box<dyn LlmProvider>, String> {
|
||||||
Ok(Box::new(provider))
|
Ok(Box::new(provider))
|
||||||
}
|
}
|
||||||
other => Err(format!(
|
other => Err(format!(
|
||||||
"unknown LLM provider: {other} (use ollama, openai, or anthropic)"
|
"unknown LLM provider: {other} (use gemini, ollama, openai, or anthropic)"
|
||||||
)),
|
)),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let chain = noxa_llm::ProviderChain::default().await;
|
let chain = noxa_llm::ProviderChain::default().await;
|
||||||
if chain.is_empty() {
|
if chain.is_empty() {
|
||||||
return Err(
|
return Err(
|
||||||
"no LLM providers available -- start Ollama or set OPENAI_API_KEY / ANTHROPIC_API_KEY"
|
"no LLM providers available -- install the gemini CLI, start Ollama, or set OPENAI_API_KEY / ANTHROPIC_API_KEY"
|
||||||
.into(),
|
.into(),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -7,7 +7,10 @@ use tracing::{debug, warn};
|
||||||
use crate::error::LlmError;
|
use crate::error::LlmError;
|
||||||
use crate::provider::{CompletionRequest, LlmProvider};
|
use crate::provider::{CompletionRequest, LlmProvider};
|
||||||
use crate::providers::{
|
use crate::providers::{
|
||||||
anthropic::AnthropicProvider, ollama::OllamaProvider, openai::OpenAiProvider,
|
anthropic::AnthropicProvider,
|
||||||
|
gemini_cli::GeminiCliProvider,
|
||||||
|
ollama::OllamaProvider,
|
||||||
|
openai::OpenAiProvider,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub struct ProviderChain {
|
pub struct ProviderChain {
|
||||||
|
|
@@ -15,12 +18,26 @@ pub struct ProviderChain {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ProviderChain {
|
impl ProviderChain {
|
||||||
/// Build the default chain: Ollama -> OpenAI -> Anthropic.
|
/// Build the default chain: Gemini CLI -> OpenAI -> Ollama -> Anthropic.
|
||||||
/// Ollama is always added (availability checked at call time).
|
/// Gemini CLI is the primary backend (subprocess-based, requires `gemini` on PATH).
|
||||||
/// Cloud providers are only added if their API keys are configured.
|
/// Cloud providers are only added if their API keys are configured.
|
||||||
|
/// Ollama is added if reachable at call time.
|
||||||
pub async fn default() -> Self {
|
pub async fn default() -> Self {
|
||||||
let mut providers: Vec<Box<dyn LlmProvider>> = Vec::new();
|
let mut providers: Vec<Box<dyn LlmProvider>> = Vec::new();
|
||||||
|
|
||||||
|
let gemini = GeminiCliProvider::new(None);
|
||||||
|
if gemini.is_available().await {
|
||||||
|
debug!("gemini cli available, adding as primary provider");
|
||||||
|
providers.push(Box::new(gemini));
|
||||||
|
} else {
|
||||||
|
debug!("gemini cli not found on PATH, skipping");
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(openai) = OpenAiProvider::new(None, None, None) {
|
||||||
|
debug!("openai configured, adding to chain");
|
||||||
|
providers.push(Box::new(openai));
|
||||||
|
}
|
||||||
|
|
||||||
let ollama = OllamaProvider::new(None, None);
|
let ollama = OllamaProvider::new(None, None);
|
||||||
if ollama.is_available().await {
|
if ollama.is_available().await {
|
||||||
debug!("ollama is available, adding to chain");
|
debug!("ollama is available, adding to chain");
|
||||||
|
|
@@ -29,11 +46,6 @@ impl ProviderChain {
|
||||||
debug!("ollama not available, skipping");
|
debug!("ollama not available, skipping");
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(openai) = OpenAiProvider::new(None, None, None) {
|
|
||||||
debug!("openai configured, adding to chain");
|
|
||||||
providers.push(Box::new(openai));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(anthropic) = AnthropicProvider::new(None, None) {
|
if let Some(anthropic) = AnthropicProvider::new(None, None) {
|
||||||
debug!("anthropic configured, adding to chain");
|
debug!("anthropic configured, adding to chain");
|
||||||
providers.push(Box::new(anthropic));
|
providers.push(Box::new(anthropic));
|
||||||
|
|
@@ -202,4 +214,46 @@ mod tests {
|
||||||
assert_eq!(chain.len(), 2);
|
assert_eq!(chain.len(), 2);
|
||||||
assert!(!chain.is_empty());
|
assert!(!chain.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ── Gemini-first chain ordering ───────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn gemini_first_in_single_provider_chain() {
|
||||||
|
// When we build a chain with a mock "gemini" provider first, it should
|
||||||
|
// be used before any fallback.
|
||||||
|
let chain = ProviderChain::from_providers(vec![
|
||||||
|
Box::new(MockProvider {
|
||||||
|
name: "gemini",
|
||||||
|
response: Ok("from gemini".into()),
|
||||||
|
available: true,
|
||||||
|
}),
|
||||||
|
Box::new(MockProvider {
|
||||||
|
name: "openai",
|
||||||
|
response: Ok("from openai".into()),
|
||||||
|
available: true,
|
||||||
|
}),
|
||||||
|
]);
|
||||||
|
let result = chain.complete(&test_request()).await.unwrap();
|
||||||
|
assert_eq!(result, "from gemini");
|
||||||
|
// Confirm order: first provider name is "gemini"
|
||||||
|
assert_eq!(chain.providers[0].name(), "gemini");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn gemini_failure_falls_back_to_openai() {
|
||||||
|
let chain = ProviderChain::from_providers(vec![
|
||||||
|
Box::new(MockProvider {
|
||||||
|
name: "gemini",
|
||||||
|
response: Err("subprocess timed out".into()),
|
||||||
|
available: true,
|
||||||
|
}),
|
||||||
|
Box::new(MockProvider {
|
||||||
|
name: "openai",
|
||||||
|
response: Ok("from openai".into()),
|
||||||
|
available: true,
|
||||||
|
}),
|
||||||
|
]);
|
||||||
|
let result = chain.complete(&test_request()).await.unwrap();
|
||||||
|
assert_eq!(result, "from openai");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -89,7 +89,7 @@ impl NoxaMcp {
|
||||||
|
|
||||||
let chain = noxa_llm::ProviderChain::default().await;
|
let chain = noxa_llm::ProviderChain::default().await;
|
||||||
let llm_chain = if chain.is_empty() {
|
let llm_chain = if chain.is_empty() {
|
||||||
warn!("no LLM providers available -- extract/summarize tools will fail");
|
warn!("no LLM providers available (gemini CLI, OPENAI_API_KEY, ANTHROPIC_API_KEY) -- extract/summarize tools will fail");
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
info!(providers = chain.len(), "LLM provider chain ready");
|
info!(providers = chain.len(), "LLM provider chain ready");
|
||||||
|
|
@@ -334,7 +334,7 @@ impl NoxaMcp {
|
||||||
// No local LLM — fall back to cloud API directly
|
// No local LLM — fall back to cloud API directly
|
||||||
if self.llm_chain.is_none() {
|
if self.llm_chain.is_none() {
|
||||||
let cloud = self.cloud.as_ref().ok_or(
|
let cloud = self.cloud.as_ref().ok_or(
|
||||||
"No LLM providers available. Set OPENAI_API_KEY, ANTHROPIC_API_KEY, or NOXA_API_KEY for cloud fallback.",
|
"No LLM providers available. Install the gemini CLI, set OPENAI_API_KEY, ANTHROPIC_API_KEY, or NOXA_API_KEY for cloud fallback.",
|
||||||
)?;
|
)?;
|
||||||
let mut body = json!({"url": params.url});
|
let mut body = json!({"url": params.url});
|
||||||
if let Some(ref schema) = params.schema {
|
if let Some(ref schema) = params.schema {
|
||||||
|
|
@@ -387,7 +387,7 @@ impl NoxaMcp {
|
||||||
// No local LLM — fall back to cloud API directly
|
// No local LLM — fall back to cloud API directly
|
||||||
if self.llm_chain.is_none() {
|
if self.llm_chain.is_none() {
|
||||||
let cloud = self.cloud.as_ref().ok_or(
|
let cloud = self.cloud.as_ref().ok_or(
|
||||||
"No LLM providers available. Set OPENAI_API_KEY, ANTHROPIC_API_KEY, or NOXA_API_KEY for cloud fallback.",
|
"No LLM providers available. Install the gemini CLI, set OPENAI_API_KEY, ANTHROPIC_API_KEY, or NOXA_API_KEY for cloud fallback.",
|
||||||
)?;
|
)?;
|
||||||
let mut body = json!({"url": params.url});
|
let mut body = json!({"url": params.url});
|
||||||
if let Some(sentences) = params.max_sentences {
|
if let Some(sentences) = params.max_sentences {
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue