Use passed-in model name in chat completion request (#445)

This commit is contained in:
Adil Hafeez 2025-03-21 15:56:17 -07:00 committed by GitHub
parent bd8004d1ae
commit eb48f3d5bb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
20 changed files with 364 additions and 89 deletions

View file

@@ -427,7 +427,6 @@ impl StreamContext {
headers.insert(key.as_str(), value.as_str());
}
let call_args = CallArgs::new(
ARCH_INTERNAL_CLUSTER_NAME,
&path,
@@ -499,10 +498,7 @@ impl StreamContext {
}
};
if !prompt_target
.auto_llm_dispatch_on_response
.unwrap_or(true)
{
if !prompt_target.auto_llm_dispatch_on_response.unwrap_or(true) {
let tool_call_response = self.tool_call_response.as_ref().unwrap().clone();
let direct_response_str = if self.streaming_response {
@@ -655,10 +651,7 @@ impl StreamContext {
.clone();
// check if the default target should be dispatched to the LLM provider
if !prompt_target
.auto_llm_dispatch_on_response
.unwrap_or(true)
{
if !prompt_target.auto_llm_dispatch_on_response.unwrap_or(true) {
let default_target_response_str = if self.streaming_response {
let chat_completion_response =
match serde_json::from_slice::<ChatCompletionsResponse>(&body) {