Use passed-in model name in chat completion request (#445)

This commit is contained in:
Adil Hafeez 2025-03-21 15:56:17 -07:00 committed by GitHub
parent bd8004d1ae
commit eb48f3d5bb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
20 changed files with 364 additions and 89 deletions

View file

@ -1,19 +1,25 @@
use log::trace;
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
#[allow(dead_code)]
pub enum Error {
#[error("Unknown model: {model_name}")]
UnknownModel { model_name: String },
}
#[allow(dead_code)]
pub fn token_count(model_name: &str, text: &str) -> Result<usize, Error> {
pub fn token_count(model_name: &str, text: &str) -> Result<usize, String> {
trace!("getting token count model={}", model_name);
//HACK: add support for tokenizing mistral and other models
//filed issue https://github.com/katanemo/arch/issues/222
let updated_model = match model_name.starts_with("gpt") {
false => {
trace!(
"tiktoken_rs: unsupported model: {}, using gpt-4 to compute token count",
model_name
);
"gpt-4"
}
true => model_name,
};
// Consideration: is it more expensive to instantiate the BPE object every time, or to contend the singleton?
let bpe = tiktoken_rs::get_bpe_from_model(model_name).map_err(|_| Error::UnknownModel {
model_name: model_name.to_string(),
})?;
let bpe = tiktoken_rs::get_bpe_from_model(updated_model).map_err(|e| e.to_string())?;
Ok(bpe.encode_ordinary(text).len())
}
@ -30,14 +36,4 @@ mod test {
token_count(model_name, text).expect("correct tokenization")
);
}
#[test]
fn unrecognized_model() {
    // Unknown models no longer produce Error::UnknownModel: token_count
    // now falls back to the gpt-4 tokenizer for any non-"gpt" model name,
    // so an unrecognized model tokenizes successfully. Empty input must
    // encode to zero tokens.
    assert_eq!(
        token_count("unknown", "").expect("fallback to gpt-4 tokenizer"),
        0
    );
}
}