Remove blanket unused imports from the common crate (#292)

* Remove blanket unused imports from the common crate

Signed-off-by: José Ulises Niño Rivera <junr03@users.noreply.github.com>

* update

Signed-off-by: José Ulises Niño Rivera <junr03@users.noreply.github.com>

---------

Signed-off-by: José Ulises Niño Rivera <junr03@users.noreply.github.com>
This commit is contained in:
José Ulises Niño Rivera 2024-11-25 19:19:06 -06:00 committed by GitHub
parent 9c6fcdb771
commit be8c3c9ea3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 11 additions and 31 deletions

View file

@ -445,7 +445,7 @@ pub struct PromptGuardResponse {
#[cfg(test)]
mod test {
use crate::common_types::open_ai::{ChatCompletionStreamResponseServerEvents, Message};
use pretty_assertions::{assert_eq, assert_ne};
use pretty_assertions::assert_eq;
use std::collections::HashMap;
const TOOL_SERIALIZED: &str = r#"{
@ -534,9 +534,7 @@ mod test {
#[test]
fn test_parameter_types() {
use super::open_ai::{
ChatCompletionsRequest, FunctionDefinition, FunctionParameter, ParameterType, ToolType,
};
use super::open_ai::{FunctionParameter, ParameterType};
const PARAMETER_SERIALZIED: &str = r#"{
"city": {
@ -582,8 +580,6 @@ mod test {
#[test]
fn stream_chunk_parse() {
use super::open_ai::{ChatCompletionStreamResponse, ChunkChoice, Delta};
const CHUNK_RESPONSE: &str = r#"data: {"id":"chatcmpl-ALmdmtKulBMEq3fRLbrnxJwcKOqvS","object":"chat.completion.chunk","created":1729755226,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-ALmdmtKulBMEq3fRLbrnxJwcKOqvS","object":"chat.completion.chunk","created":1729755226,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]}
@ -645,8 +641,6 @@ data: {"id":"chatcmpl-ALmdmtKulBMEq3fRLbrnxJwcKOqvS","object":"chat.completion.c
#[test]
fn stream_chunk_parse_done() {
use super::open_ai::{ChatCompletionStreamResponse, ChunkChoice, Delta};
const CHUNK_RESPONSE: &str = r#"data: {"id":"chatcmpl-ALn2KTfmrIpYd9N3Un4Kyg08WIIP6","object":"chat.completion.chunk","created":1729756748,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-ALn2KTfmrIpYd9N3Un4Kyg08WIIP6","object":"chat.completion.chunk","created":1729756748,"model":"gpt-3.5-turbo-0125","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" assist"},"logprobs":null,"finish_reason":null}]}
@ -712,8 +706,6 @@ data: [DONE]
#[test]
fn stream_chunk_parse_mistral() {
use super::open_ai::{ChatCompletionStreamResponse, ChunkChoice, Delta};
const CHUNK_RESPONSE: &str = r#"data: {"id":"e1ebce16de5443b79613512c2d757936","object":"chat.completion.chunk","created":1729805261,"model":"ministral-8b-latest","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}
data: {"id":"e1ebce16de5443b79613512c2d757936","object":"chat.completion.chunk","created":1729805261,"model":"ministral-8b-latest","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}

View file

@ -1,8 +1,6 @@
use duration_string::DurationString;
use serde::{Deserialize, Deserializer, Serialize};
use std::default;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Display;
use std::{collections::HashMap, time::Duration};
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct Overrides {

View file

@ -8,7 +8,6 @@
* Generated by: https://openapi-generator.tech
*/
use crate::embeddings;
use serde::{Deserialize, Serialize};
/// CreateEmbeddingRequestInput : Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. for counting tokens.

View file

@ -8,7 +8,6 @@
* Generated by: https://openapi-generator.tech
*/
use crate::embeddings;
use serde::{Deserialize, Serialize};
/// CreateEmbeddingResponseUsage : The usage information for the request.

View file

@ -8,7 +8,6 @@
* Generated by: https://openapi-generator.tech
*/
use crate::embeddings;
use serde::{Deserialize, Serialize};
/// Embedding : Represents an embedding vector returned by embedding endpoint.

View file

@ -1,5 +1,4 @@
use proxy_wasm::types::Status;
use serde_json::error;
use crate::{common_types::open_ai::ChatCompletionChunkResponseError, ratelimit};

View file

@ -3,8 +3,8 @@ use crate::{
stats::{Gauge, IncrementingMetric},
};
use derivative::Derivative;
use log::{debug, trace};
use proxy_wasm::{traits::Context, types::Status};
use log::trace;
use proxy_wasm::traits::Context;
use serde::Serialize;
use std::{cell::RefCell, collections::HashMap, fmt::Debug, time::Duration};

View file

@ -1,5 +1,3 @@
#![allow(unused_imports)]
pub mod common_types;
pub mod configuration;
pub mod consts;

View file

@ -1,5 +1,3 @@
use std::path::Display;
use rand::RngCore;
use serde::{Deserialize, Serialize};

View file

@ -1,8 +1,4 @@
use std::{
collections::HashMap,
time::{Duration, SystemTime, UNIX_EPOCH},
};
use crate::stream_context::{ResponseHandlerType, StreamCallContext, StreamContext};
use common::{
common_types::{
open_ai::{
@ -23,8 +19,10 @@ use http::StatusCode;
use log::{debug, trace, warn};
use proxy_wasm::{traits::HttpContext, types::Action};
use serde_json::Value;
use crate::stream_context::{ResponseHandlerType, StreamCallContext, StreamContext};
use std::{
collections::HashMap,
time::{Duration, SystemTime, UNIX_EPOCH},
};
// HttpContext is the trait that allows the Rust code to interact with HTTP objects.
impl HttpContext for StreamContext {