Mirror of https://github.com/katanemo/plano.git (synced 2026-05-08 07:12:42 +02:00)
Introduce hermesllm library to handle llm message translation (#501)
parent 96b583c819
commit 6c53510f49

33 changed files with 1693 additions and 690 deletions

@@ -1,14 +1,13 @@
 use std::sync::Arc;

 use bytes::Bytes;
-use common::api::open_ai::ChatCompletionsRequest;
 use common::consts::ARCH_PROVIDER_HINT_HEADER;
+use hermesllm::providers::openai::types::ChatCompletionsRequest;
 use http_body_util::combinators::BoxBody;
 use http_body_util::{BodyExt, Full, StreamBody};
 use hyper::body::Frame;
 use hyper::header::{self};
 use hyper::{Request, Response, StatusCode};
-use serde_json::Value;
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
 use tokio_stream::StreamExt;

@@ -32,13 +31,15 @@ pub async fn chat_completions(
     let chat_request_bytes = request.collect().await?.to_bytes();

     let chat_completion_request: ChatCompletionsRequest =
-        match serde_json::from_slice(&chat_request_bytes) {
+        match ChatCompletionsRequest::try_from(chat_request_bytes.as_ref()) {
             Ok(request) => request,
             Err(err) => {
-                let v: Value = serde_json::from_slice(&chat_request_bytes).unwrap();
+                warn!(
+                    "arch-router request body string: {}",
+                    String::from_utf8_lossy(&chat_request_bytes)
+                );
                 let err_msg = format!("Failed to parse request body: {}", err);
                 warn!("{}", err_msg);
-                warn!("arch-router request body: {}", v.to_string());
                 let mut bad_request = Response::new(full(err_msg));
                 *bad_request.status_mut() = StatusCode::BAD_REQUEST;
                 return Ok(bad_request);
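The handler now delegates body parsing to hermesllm's `TryFrom<&[u8]>` implementation for `ChatCompletionsRequest` instead of calling `serde_json::from_slice` directly. A minimal sketch of that parsing path, assuming only what the hunk shows (a `TryFrom<&[u8]>` impl whose error type implements `Display`); the helper name is illustrative, not from the commit:

```rust
use hermesllm::providers::openai::types::ChatCompletionsRequest;

// Sketch only: mirrors the new error path, turning a parse failure into a
// message suitable for a 400 response. `body` is the collected request bytes.
fn parse_chat_request(body: &[u8]) -> Result<ChatCompletionsRequest, String> {
    ChatCompletionsRequest::try_from(body)
        .map_err(|err| format!("Failed to parse request body: {}", err))
}
```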
@@ -1,6 +1,6 @@
 use bytes::Bytes;
-use common::api::open_ai::Models;
-use common::configuration::LlmProvider;
+use common::configuration::{IntoModels, LlmProvider};
+use hermesllm::providers::openai::types::Models;
 use http_body_util::{combinators::BoxBody, BodyExt, Full};
 use hyper::{Response, StatusCode};
 use serde_json;

@@ -11,7 +11,7 @@ pub async fn list_models(
 ) -> Response<BoxBody<Bytes, hyper::Error>> {
     let prov = llm_providers.clone();
     let providers = (*prov).clone();
-    let openai_models = Models::from(providers);
+    let openai_models: Models = providers.into_models();

     match serde_json::to_string(&openai_models) {
         Ok(json) => {
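Only the `providers.into_models()` call site is visible in this diff; the `IntoModels` trait itself lives in common::configuration and its definition is not shown. As a rough, assumed sketch of what such a conversion trait could look like (not the repo's actual code):

```rust
use hermesllm::providers::openai::types::Models;

// Assumed shape: a small conversion trait so a configured provider collection
// can be turned into the OpenAI-style model list returned by the models endpoint.
trait IntoModels {
    fn into_models(self) -> Models;
}
```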
@@ -1,10 +1,10 @@
 use std::sync::Arc;

 use common::{
-    api::open_ai::{ChatCompletionsResponse, ContentType, Message},
     configuration::{LlmProvider, LlmRoute},
     consts::ARCH_PROVIDER_HINT_HEADER,
 };
+use hermesllm::providers::openai::types::{ChatCompletionsResponse, ContentType, Message};
 use hyper::header;
 use thiserror::Error;
 use tracing::{debug, info, warn};

@@ -136,6 +136,11 @@ impl RouterService {
             }
         };

+        if chat_completion_response.choices.is_empty() {
+            warn!("No choices in router response: {}", body);
+            return Ok(None);
+        }
+
         if let Some(ContentType::Text(content)) =
             &chat_completion_response.choices[0].message.content
         {
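The added guard avoids indexing into an empty `choices` vector, which would otherwise panic. A small sketch of the same extraction logic in isolation, assuming (as the hunk suggests) that `choices` is a `Vec`, `message.content` is an `Option<ContentType>`, and `ContentType::Text` wraps a `String`; the function name is illustrative:

```rust
use hermesllm::providers::openai::types::{ChatCompletionsResponse, ContentType};

// Returns the text of the first choice, or None when the router response
// carries no choices or no plain-text content (mirroring the guard above).
fn first_text_choice(response: &ChatCompletionsResponse) -> Option<&str> {
    if response.choices.is_empty() {
        return None;
    }
    match &response.choices[0].message.content {
        Some(ContentType::Text(text)) => Some(text.as_str()),
        _ => None,
    }
}
```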
@@ -1,4 +1,4 @@
-use common::api::open_ai::{ChatCompletionsRequest, Message};
+use hermesllm::providers::openai::types::{ChatCompletionsRequest, Message};
 use thiserror::Error;

 #[derive(Debug, Error)]
@@ -1,8 +1,8 @@
 use common::{
-    api::open_ai::{ChatCompletionsRequest, ContentType, Message},
     configuration::LlmRoute,
     consts::{SYSTEM_ROLE, TOOL_ROLE, USER_ROLE},
 };
+use hermesllm::providers::openai::types::{ChatCompletionsRequest, ContentType, Message};
 use serde::{Deserialize, Serialize};
 use tracing::{debug, warn};

@@ -121,11 +121,13 @@ impl RouterModel for RouterModelV1 {
             .iter()
             .rev()
             .map(|message| {
-                Message::new(
-                    message.role.clone(),
+                Message {
+                    role: message.role.clone(),
                     // we can unwrap here because we have already filtered out messages without content
-                    message.content.as_ref().unwrap().to_string(),
-                )
+                    content: Some(ContentType::Text(
+                        message.content.as_ref().unwrap().to_string(),
+                    )),
+                }
             })
             .collect::<Vec<Message>>();

@@ -141,14 +143,8 @@ impl RouterModel for RouterModelV1 {
             messages: vec![Message {
                 content: Some(ContentType::Text(messages_content)),
                 role: USER_ROLE.to_string(),
-                model: None,
-                tool_calls: None,
-                tool_call_id: None,
             }],
-            tools: None,
-            stream: false,
-            stream_options: None,
-            metadata: None,
+            ..Default::default()
         }
     }
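Taken together, the last two hunks drop the `Message::new` constructor in favor of plain struct literals and lean on `Default` for the request's remaining fields. A rough, self-contained sketch of building the router prompt this way; the literal "user" role stands in for the `USER_ROLE` constant from common::consts, and the function name is illustrative, while the fields used follow what the diff shows:

```rust
use hermesllm::providers::openai::types::{ChatCompletionsRequest, ContentType, Message};

// Sketch only: constructs the single-message routing request the way the new
// code does, letting Default fill in every field that is not listed.
fn build_router_request(messages_content: String) -> ChatCompletionsRequest {
    ChatCompletionsRequest {
        messages: vec![Message {
            role: "user".to_string(), // stand-in for common::consts::USER_ROLE
            content: Some(ContentType::Text(messages_content)),
        }],
        ..Default::default()
    }
}
```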