use log::debug;
#[allow(dead_code)]
|
2025-03-21 15:56:17 -07:00
|
|
|
pub fn token_count(model_name: &str, text: &str) -> Result<usize, String> {
|
2025-03-27 10:40:20 -07:00
|
|
|
debug!("getting token count model={}", model_name);
|
2025-03-21 15:56:17 -07:00
|
|
|
//HACK: add support for tokenizing mistral and other models
|
|
|
|
|
//filed issue https://github.com/katanemo/arch/issues/222
|
|
|
|
|
|
2025-06-13 17:02:20 -07:00
|
|
|
let updated_model = match model_name.starts_with("gpt-4") {
|
2025-03-21 15:56:17 -07:00
|
|
|
false => {
|
2025-03-27 10:40:20 -07:00
|
|
|
debug!(
|
2025-03-21 15:56:17 -07:00
|
|
|
"tiktoken_rs: unsupported model: {}, using gpt-4 to compute token count",
|
|
|
|
|
model_name
|
|
|
|
|
);
|
2025-06-13 17:02:20 -07:00
|
|
|
"gpt-4o"
|
|
|
|
|
}
|
|
|
|
|
true => {
|
|
|
|
|
if model_name.starts_with("gpt-4.1") {
|
|
|
|
|
"gpt-4o"
|
|
|
|
|
} else {
|
|
|
|
|
model_name
|
|
|
|
|
}
|
2025-03-21 15:56:17 -07:00
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2024-09-04 17:28:12 -07:00
|
|
|
// Consideration: is it more expensive to instantiate the BPE object every time, or to contend the singleton?
|
2025-03-21 15:56:17 -07:00
|
|
|
let bpe = tiktoken_rs::get_bpe_from_model(updated_model).map_err(|e| e.to_string())?;
|
2024-09-04 17:28:12 -07:00
|
|
|
Ok(bpe.encode_ordinary(text).len())
|
|
|
|
|
}
#[cfg(test)]
mod test {
    use super::*;

    /// `token_count` should report the known token count for a fixed
    /// sentence under the gpt-3.5-turbo model id.
    #[test]
    fn encode_ordinary() {
        let counted = token_count("gpt-3.5-turbo", "How many tokens does this sentence have?")
            .expect("correct tokenization");
        assert_eq!(counted, 8);
    }
}