Add support for using a custom upstream LLM (#365)

This commit is contained in:
Adil Hafeez 2025-01-17 18:25:55 -08:00 committed by GitHub
parent 3fc21de60c
commit 07ef3149b8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 263 additions and 52 deletions

View file

@ -162,15 +162,34 @@ pub struct EmbeddingProviver {
pub model: String,
}
/// The API dialect an upstream LLM provider speaks.
///
/// The `serde` rename strings are the values accepted in user-facing
/// configuration and must stay in sync with the `Display` impl below.
/// `PartialEq`/`Eq` are derived so provider types can be compared
/// directly (e.g. when selecting a provider by interface).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum LlmProviderType {
    /// OpenAI-compatible API ("openai" in config).
    #[serde(rename = "openai")]
    OpenAI,
    /// Mistral API ("mistral" in config).
    #[serde(rename = "mistral")]
    Mistral,
}
impl Display for LlmProviderType {
    /// Renders the provider type as its lowercase config/wire name,
    /// matching the `serde(rename = ...)` values on the enum.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            LlmProviderType::OpenAI => "openai",
            LlmProviderType::Mistral => "mistral",
        };
        f.write_str(name)
    }
}
/// Configuration for a single upstream LLM provider entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
//TODO: use enum for model, but if there is a new model, we need to update the code
pub struct LlmProvider {
// Display/lookup name for this provider entry.
pub name: String,
// Free-form provider label — NOTE(review): appears to overlap with
// `provider_interface`; confirm the intended distinction between the two.
pub provider: String,
// Which API dialect to use when talking to the upstream (openai / mistral).
pub provider_interface: LlmProviderType,
// API key for the upstream; optional — presumably for keyless or
// local endpoints; TODO confirm.
pub access_key: Option<String>,
// Model identifier passed through to the upstream provider.
pub model: String,
// Marks this entry as the default provider — presumably at most one
// entry should set this; verify against the selection logic.
pub default: Option<bool>,
// Whether to request streaming responses — behavior when unset is
// decided elsewhere; TODO confirm the default.
pub stream: Option<bool>,
// Custom endpoint for self-hosted/alternative upstreams — exact
// format (host vs. full URL) not visible here; TODO confirm.
pub endpoint: Option<String>,
// Custom port to use with `endpoint`.
pub port: Option<u16>,
// Optional rate-limit settings applied to this provider.
pub rate_limits: Option<LlmRatelimit>,
}