removing model_server. buh bye (#619)

This commit is contained in:
Salman Paracha 2025-11-22 15:04:41 -08:00 committed by GitHub
parent 88c2bd1851
commit d37af7605c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
50 changed files with 40 additions and 9597 deletions

View file

@ -1,46 +0,0 @@
name: e2e model server tests
on:
push:
branches:
- main
pull_request:
jobs:
e2e_model_server_tests:
runs-on: ubuntu-latest-m
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
defaults:
run:
working-directory: ./tests/modelserver
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: "pip" # auto-caches based on requirements files
- name: install poetry
run: |
export POETRY_VERSION=2.2.1
curl -sSL https://install.python-poetry.org | python3 -
export PATH="$HOME/.local/bin:$PATH"
- name: install model server and start it
run: |
cd ../../model_server/ && poetry install && poetry run archgw_modelserver start
- name: install test dependencies
run: |
poetry install
- name: run tests
run: |
poetry run pytest

View file

@ -40,11 +40,10 @@ jobs:
curl --location --remote-name https://github.com/Orange-OpenSource/hurl/releases/download/4.0.0/hurl_4.0.0_amd64.deb
sudo dpkg -i hurl_4.0.0_amd64.deb
- name: install model server, arch gateway and test dependencies
- name: install arch gateway and test dependencies
run: |
source venv/bin/activate
cd model_server/ && echo "installing model server" && poetry install
cd ../arch/tools && echo "installing archgw cli" && poetry install
cd arch/tools && echo "installing archgw cli" && poetry install
cd ../../demos/shared/test_runner && echo "installing test dependencies" && poetry install
- name: run demo tests

View file

@ -40,11 +40,10 @@ jobs:
curl --location --remote-name https://github.com/Orange-OpenSource/hurl/releases/download/4.0.0/hurl_4.0.0_amd64.deb
sudo dpkg -i hurl_4.0.0_amd64.deb
- name: install model server, arch gateway and test dependencies
- name: install arch gateway and test dependencies
run: |
source venv/bin/activate
cd model_server/ && echo "installing model server" && poetry install
cd ../arch/tools && echo "installing archgw cli" && poetry install
cd arch/tools && echo "installing archgw cli" && poetry install
cd ../../demos/shared/test_runner && echo "installing test dependencies" && poetry install
- name: run demo tests

View file

@ -1,45 +0,0 @@
name: model server tests
on:
push:
branches:
- main # Run tests on pushes to the main branch
pull_request:
branches:
- main # Run tests on pull requests to the main branch
jobs:
test:
runs-on: ubuntu-latest
steps:
# Step 1: Check out the code from your repository
- name: Checkout code
uses: actions/checkout@v3
# Step 2: Set up Python (specify the version)
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.12"
# Step 3: Install Poetry
- name: Install Poetry
run: |
export POETRY_VERSION=2.2.1
curl -sSL https://install.python-poetry.org | python3 -
export PATH="$HOME/.local/bin:$PATH"
# Step 4: Install dependencies using Poetry
- name: Install dependencies
run: |
cd model_server
poetry install
# Step 5: Set PYTHONPATH and run tests
- name: Run model server tests with pytest
env:
PYTHONPATH: model_server # Ensure the app's path is available
run: |
cd model_server
poetry run pytest

5
.gitignore vendored
View file

@ -113,11 +113,6 @@ venv.bak/
arch/tools/config
arch/tools/build
# Archgw - model_server
model_server/venv_model_server
model_server/build
model_server/dist
# Archgw - Docs
docs/build/

View file

@ -22,4 +22,3 @@ services:
- OPENAI_API_KEY=${OPENAI_API_KEY:?error}
- MISTRAL_API_KEY=${MISTRAL_API_KEY:?error}
- OTEL_TRACING_HTTP_ENDPOINT=http://host.docker.internal:4318/v1/traces
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-51000}

View file

@ -56,15 +56,8 @@ poetry install
archgw build
```
### Step 5: download models
This will help download models so model_server can load faster. This should be done once.
```bash
archgw download-models
```
### Logs
`archgw` command can also view logs from gateway and model_server. Use following command to view logs,
`archgw` command can also view logs from the gateway. Use following command to view logs,
```bash
archgw logs --follow

View file

@ -1,13 +1,5 @@
import os
KATANEMO_DOCKERHUB_REPO = "katanemo/archgw"
KATANEMO_LOCAL_MODEL_LIST = [
"katanemo/Arch-Guard",
]
SERVICE_NAME_ARCHGW = "archgw"
SERVICE_NAME_MODEL_SERVER = "model_server"
SERVICE_ALL = "all"
MODEL_SERVER_LOG_FILE = "~/archgw_logs/modelserver.log"
ARCHGW_DOCKER_NAME = "archgw"
ARCHGW_DOCKER_IMAGE = os.getenv("ARCHGW_DOCKER_IMAGE", "katanemo/archgw:0.3.18")

View file

@ -9,9 +9,7 @@ from cli.utils import convert_legacy_listeners, getLogger
from cli.consts import (
ARCHGW_DOCKER_IMAGE,
ARCHGW_DOCKER_NAME,
KATANEMO_LOCAL_MODEL_LIST,
)
from huggingface_hub import snapshot_download
import subprocess
from cli.docker_cli import (
docker_container_status,
@ -144,49 +142,6 @@ def stop_docker_container(service=ARCHGW_DOCKER_NAME):
log.info(f"Failed to shut down services: {str(e)}")
def download_models_from_hf():
for model in KATANEMO_LOCAL_MODEL_LIST:
log.info(f"Downloading model: {model}")
snapshot_download(repo_id=model)
def start_arch_modelserver(foreground):
"""
Start the model server. This assumes that the archgw_modelserver package is installed locally
"""
try:
log.info("archgw_modelserver restart")
if foreground:
subprocess.run(
["archgw_modelserver", "start", "--foreground"],
check=True,
)
else:
subprocess.run(
["archgw_modelserver", "start"],
check=True,
)
except subprocess.CalledProcessError as e:
log.info(f"Failed to start model_server. Please check archgw_modelserver logs")
sys.exit(1)
def stop_arch_modelserver():
"""
Stop the model server. This assumes that the archgw_modelserver package is installed locally
"""
try:
subprocess.run(
["archgw_modelserver", "stop"],
check=True,
)
except subprocess.CalledProcessError as e:
log.info(f"Failed to start model_server. Please check archgw_modelserver logs")
sys.exit(1)
def start_cli_agent(arch_config_file=None, settings_json="{}"):
"""Start a CLI client connected to Arch."""

View file

@ -20,20 +20,14 @@ from cli.utils import (
find_config_file,
)
from cli.core import (
start_arch_modelserver,
stop_arch_modelserver,
start_arch,
stop_docker_container,
download_models_from_hf,
start_cli_agent,
)
from cli.consts import (
ARCHGW_DOCKER_IMAGE,
ARCHGW_DOCKER_NAME,
KATANEMO_DOCKERHUB_REPO,
SERVICE_NAME_ARCHGW,
SERVICE_NAME_MODEL_SERVER,
SERVICE_ALL,
)
log = getLogger(__name__)
@ -47,9 +41,8 @@ logo = r"""
"""
# Command to build archgw and model_server Docker images
# Command to build archgw Docker images
ARCHGW_DOCKERFILE = "./arch/Dockerfile"
MODEL_SERVER_BUILD_FILE = "./model_server/pyproject.toml"
def get_version():
@ -60,18 +53,6 @@ def get_version():
return "version not found"
def verify_service_name(service):
"""Verify if the service name is valid."""
if service not in [
SERVICE_NAME_ARCHGW,
SERVICE_NAME_MODEL_SERVER,
SERVICE_ALL,
]:
print(f"Error: Invalid service {service}. Exiting")
sys.exit(1)
return True
@click.group(invoke_without_command=True)
@click.option("--version", is_flag=True, help="Show the archgw cli version and exit.")
@click.pass_context
@ -89,17 +70,11 @@ def main(ctx, version):
@click.command()
@click.option(
"--service",
default=SERVICE_ALL,
help="Optional parameter to specify which service to build. Options are model_server, archgw",
)
def build(service):
def build():
"""Build Arch from source. Must be in root of cloned repo."""
verify_service_name(service)
# Check if /arch/Dockerfile exists
if service == SERVICE_NAME_ARCHGW or service == SERVICE_ALL:
if os.path.exists(ARCHGW_DOCKERFILE):
if os.path.exists(ARCHGW_DOCKERFILE):
click.echo("Building archgw image...")
try:
@ -110,8 +85,6 @@ def build(service):
"-f",
ARCHGW_DOCKERFILE,
"-t",
f"{KATANEMO_DOCKERHUB_REPO}:latest",
"-t",
f"{ARCHGW_DOCKER_IMAGE}",
".",
"--add-host=host.docker.internal:host-gateway",
@ -128,57 +101,20 @@ def build(service):
click.echo("archgw image built successfully.")
"""Install the model server dependencies using Poetry."""
if service == SERVICE_NAME_MODEL_SERVER or service == SERVICE_ALL:
# Check if pyproject.toml exists
if os.path.exists(MODEL_SERVER_BUILD_FILE):
click.echo("Installing model server dependencies with Poetry...")
try:
subprocess.run(
["poetry", "install", "--no-cache"],
cwd=os.path.dirname(MODEL_SERVER_BUILD_FILE),
check=True,
)
click.echo("Model server dependencies installed successfully.")
except subprocess.CalledProcessError as e:
click.echo(f"Error installing model server dependencies: {e}")
sys.exit(1)
else:
click.echo(f"Error: pyproject.toml not found in {MODEL_SERVER_BUILD_FILE}")
sys.exit(1)
@click.command()
@click.argument("file", required=False) # Optional file argument
@click.option(
"--path", default=".", help="Path to the directory containing arch_config.yaml"
)
@click.option(
"--service",
default=SERVICE_ALL,
help="Service to start. Options are model_server, archgw.",
)
@click.option(
"--foreground",
default=False,
help="Run Arch in the foreground. Default is False",
is_flag=True,
)
def up(file, path, service, foreground):
def up(file, path, foreground):
"""Starts Arch."""
verify_service_name(service)
if service == SERVICE_ALL and foreground:
# foreground can only be specified when starting individual services
log.info("foreground flag is only supported for individual services. Exiting.")
sys.exit(1)
if service == SERVICE_NAME_MODEL_SERVER:
log.info("Download models from HuggingFace...")
download_models_from_hf()
start_arch_modelserver(foreground)
return
# Use the utility function to find config file
arch_config_file = find_config_file(path, file)
@ -202,7 +138,6 @@ def up(file, path, service, foreground):
# Set the ARCH_CONFIG_FILE environment variable
env_stage = {
"OTEL_TRACING_HTTP_ENDPOINT": "http://host.docker.internal:4318/v1/traces",
"MODEL_SERVER_PORT": os.getenv("MODEL_SERVER_PORT", "51000"),
}
env = os.environ.copy()
# Remove PATH variable if present
@ -242,40 +177,13 @@ def up(file, path, service, foreground):
env_stage[access_key] = env_file_dict[access_key]
env.update(env_stage)
if service == SERVICE_NAME_ARCHGW:
start_arch(arch_config_file, env, foreground=foreground)
else:
# Check if ingress_traffic listener is configured before starting model_server
if has_ingress_listener(arch_config_file):
download_models_from_hf()
start_arch_modelserver(foreground)
else:
log.info(
"Skipping model_server startup: no ingress_traffic listener configured in arch_config.yaml"
)
start_arch(arch_config_file, env, foreground=foreground)
start_arch(arch_config_file, env, foreground=foreground)
@click.command()
@click.option(
"--service",
default=SERVICE_ALL,
help="Service to down. Options are all, model_server, archgw. Default is all",
)
def down(service):
def down():
"""Stops Arch."""
verify_service_name(service)
if service == SERVICE_NAME_MODEL_SERVER:
stop_arch_modelserver()
elif service == SERVICE_NAME_ARCHGW:
stop_docker_container()
else:
stop_arch_modelserver()
stop_docker_container(SERVICE_NAME_ARCHGW)
stop_docker_container()
@click.command()
@ -303,7 +211,7 @@ def generate_prompt_targets(file):
@click.command()
@click.option(
"--debug",
help="For detailed debug logs to trace calls from archgw <> model_server <> api_server, etc",
help="For detailed debug logs to trace calls from archgw <> api_server, etc",
is_flag=True,
)
@click.option("--follow", help="Follow the logs", is_flag=True)

2481
arch/tools/poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,34 +1,28 @@
[project]
[tool.poetry]
name = "archgw"
version = "0.3.18"
description = "Python-based CLI tool to manage Arch Gateway."
authors = [{ name = "Katanemo Labs, Inc." }]
authors = ["Katanemo Labs, Inc."]
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"archgw_modelserver==0.3.18",
"click>=8.1.7,<9.0.0",
"jinja2>=3.1.4,<4.0.0",
"jsonschema>=4.23.0,<5.0.0",
"pyyaml>=6.0.2,<7.0.0",
]
[project.scripts]
archgw = "cli.main:main"
[dependency-groups]
dev = [
"pytest>=8.4.1,<9.0.0",
]
[tool.poetry]
packages = [{ include = "cli" }]
dependencies = { archgw_modelserver = { path = "../../model_server", develop = true } }
[tool.poetry.dependencies]
python = ">=3.10"
click = ">=8.1.7,<9.0.0"
jinja2 = ">=3.1.4,<4.0.0"
jsonschema = ">=4.23.0,<5.0.0"
pyyaml = ">=6.0.2,<7.0.0"
requests = ">=2.31.0,<3.0.0"
[tool.poetry.group.dev.dependencies]
pytest = ">=8.4.1,<9.0.0"
[tool.poetry.scripts]
archgw = "cli.main:main"
[build-system]
requires = ["poetry-core>=2.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
addopts = ["-v"]

View file

@ -12,10 +12,6 @@
"name": "archgw_cli",
"path": "arch/tools"
},
{
"name": "model_server",
"path": "model_server"
},
{
"name": "tests_e2e",
"path": "tests/e2e"
@ -24,10 +20,6 @@
"name": "tests_archgw",
"path": "tests/archgw"
},
{
"name": "tests_modelserver",
"path": "tests/modelserver"
},
{
"name": "chatbot_ui",
"path": "demos/shared/chatbot_ui"

View file

@ -79,7 +79,6 @@ files = [
]
[package.dependencies]
archgw_modelserver = ">=0.3.12,<0.4.0"
click = ">=8.1.7,<9.0.0"
jinja2 = ">=3.1.4,<4.0.0"
jsonschema = ">=4.23.0,<5.0.0"
@ -93,11 +92,6 @@ description = "A model server for serving models"
optional = false
python-versions = "<4.0,>=3.10"
groups = ["main"]
files = [
{file = "archgw_modelserver-0.3.12-py3-none-any.whl", hash = "sha256:ddc4536108e74bee637b41e35f80ad5cdf1708a32f670f33b2eef3526eb6d4a1"},
{file = "archgw_modelserver-0.3.12.tar.gz", hash = "sha256:b749edd0b3d4e6f01e94bcf6b363bf52b8ca515e8191a0951b99152f45745212"},
]
[package.dependencies]
accelerate = ">=1.0.0,<2.0.0"
dateparser = "*"

View file

@ -35,7 +35,6 @@ Create a ``docker-compose.yml`` file with the following configuration:
environment:
- OPENAI_API_KEY=${OPENAI_API_KEY:?error}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:?error}
- MODEL_SERVER_PORT=51000
Starting the Stack
~~~~~~~~~~~~~~~~~~

View file

@ -1,16 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "model server",
"type": "debugpy",
"request": "launch",
"module": "uvicorn",
"args": ["src.main:app","--reload", "--port", "51000"]
}
]
}

View file

@ -1,7 +0,0 @@
{
"python.testing.pytestArgs": [
"."
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
}

View file

@ -1,31 +0,0 @@
FROM python:3.12 AS builder
COPY requirements.txt .
RUN pip install --prefix=/runtime -r requirements.txt
FROM python:3.12-slim AS output
# curl is needed for health check in docker-compose
RUN apt-get update && apt-get install -y curl && apt-get clean && rm -rf /var/lib/apt/lists/*
COPY --from=builder /runtime /usr/local
WORKDIR /src
# specify list of models that will go into the image as a comma separated list
# following models have been tested to work with this image
# "sentence-transformers/all-MiniLM-L6-v2,sentence-transformers/all-mpnet-base-v2,thenlper/gte-base,thenlper/gte-large,thenlper/gte-small"
ENV MODELS=""
COPY ./app ./app
COPY ./app/guard_model_config.yaml .
COPY ./app/openai_params.yaml .
# comment it out for now as we don't want to download the model every time we build the image
# we will mount host cache to docker image to avoid downloading the model every time
# see docker-compose file for more details
# RUN python install.py && \
# find /root/.cache/torch/sentence_transformers/ -name onnx -exec rm -rf {} +
CMD ["uvicorn", "src.app.main:app", "--host", "0.0.0.0", "--port", "80"]

View file

@ -1,64 +0,0 @@
# Use NVIDIA CUDA base image to enable GPU support
FROM nvidia/cuda:12.1.0-cudnn8-runtime-ubuntu22.04 as base
ENV DEBIAN_FRONTEND=noninteractive
# Install Python 3.10
RUN apt-get update && \
apt-get install -y python3.10 python3-pip python3-dev python-is-python3 && \
rm -rf /var/lib/apt/lists/*
#
# builder
#
FROM base AS builder
WORKDIR /src
# Upgrade pip
RUN pip install --upgrade pip
# Install git for cloning repositories
RUN apt-get update && apt-get install -y git && apt-get clean
# Copy requirements.txt
COPY requirements.txt /src/
# Install Python dependencies
RUN pip install --force-reinstall -r requirements.txt
RUN apt-get update && \
apt-get install -y cuda-toolkit-12-2
# Check for NVIDIA GPU and CUDA support and install EETQ if detected
RUN if command -v nvcc >/dev/null 2>&1; then \
echo "CUDA and NVIDIA GPU detected, installing EETQ..." && \
git clone https://github.com/NetEase-FuXi/EETQ.git && \
cd EETQ && \
git submodule update --init --recursive && \
pip install .; \
else \
echo "CUDA or NVIDIA GPU not detected, skipping EETQ installation."; \
fi
COPY . /src
# Specify list of models that will go into the image as a comma separated list
ENV MODELS=""
ENV DEBIAN_FRONTEND=noninteractive
COPY /app /app
WORKDIR /app
# Install required tools
RUN apt-get update && apt-get install -y \
curl \
&& rm -rf /var/lib/apt/lists/*
# Uncomment if you want to install the model during the image build
# RUN python install.py && \
# find /root/.cache/torch/sentence_transformers/ -name onnx -exec rm -rf {} +
# Set the default command to run the application
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]

View file

@ -1,2 +0,0 @@
# Model Server Package #
This model server package is a dependency of the Arch intelligent prompt gateway. It should not be used alone. Please refer to the [quickstart-guide](https://github.com/katanemo/arch?tab=readme-ov-file#quickstart) for more details on how to get start with Arch.

3102
model_server/poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,49 +0,0 @@
[project]
name = "archgw_modelserver"
version = "0.3.18"
description = "A model server for serving models"
authors = [{name = "Katanemo Labs, Inc", email = "info@katanemo.com"}]
license = "Apache 2.0"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"fastapi==0.115.0",
"torch>=2.6.0",
"uvicorn==0.31.0",
"transformers>=4.37.0,<5.0.0",
"accelerate>=1.0.0,<2.0.0",
"pydantic>=2.10.1,<3.0.0",
"dateparser",
"openai>=1.50.2,<2.0.0",
"httpx==0.27.2", # https://community.openai.com/t/typeerror-asyncclient-init-got-an-unexpected-keyword-argument-proxies/1040287
"opentelemetry-api>=1.28.0,<2.0.0",
"opentelemetry-sdk>=1.28.0,<2.0.0",
"opentelemetry-exporter-otlp>=1.28.0,<2.0.0",
"opentelemetry-instrumentation-fastapi>=0.49b0,<1.0",
"overrides>=7.7.0,<8.0.0",
]
[project.scripts]
archgw_modelserver = "src.cli:main"
[dependency-groups]
dev = [
"pytest",
"pytest-asyncio",
"pytest-httpserver>=1.1.0,<2.0.0",
"pytest-retry>=1.6.3,<2.0.0",
]
[tool.poetry]
packages = [{ include = "src" }]
[build-system]
requires = ["poetry-core>=2.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
python_files = ["test*.py"]
addopts = ["-v", "-s"]
retries = 2
retry_delay = 0.5
cumulative_timing = false

View file

@ -1,196 +0,0 @@
import importlib
import os
import sys
import subprocess
import argparse
import signal
import tempfile
import time
import requests
import src.commons.utils as utils
logger = utils.get_model_server_logger()
def get_version():
try:
version = importlib.metadata.version("archgw_modelserver")
return version
except importlib.metadata.PackageNotFoundError:
return "version not found"
def wait_for_health_check(url, timeout=300):
"""Wait for the Uvicorn server to respond to health-check requests."""
start_time = time.time()
while time.time() - start_time < timeout:
try:
response = requests.get(url)
if response.status_code == 200:
return True
except requests.ConnectionError:
time.sleep(1)
return False
def get_pid_file():
temp_dir = tempfile.gettempdir()
return os.path.join(temp_dir, "model_server.pid")
def ensure_killed(process):
process.terminate()
# if the process is not terminated, kill it
now = time.time()
# wait for 5 seconds
while time.time() - now < 5:
if process.poll() is not None:
break
time.sleep(1)
if process.poll() is None:
logger.info("Killing model server")
process.kill()
def start_server(port=51000, foreground=False):
"""Start the Uvicorn server."""
logger.info("model server version: %s", get_version())
stop_server()
logger.info(
"starting model server, port: %s, foreground: %s. Please wait ...",
port,
foreground,
)
if foreground:
process = subprocess.Popen(
[
sys.executable,
"-m",
"uvicorn",
"src.main:app",
"--host",
"0.0.0.0",
"--port",
str(port),
],
)
else:
process = subprocess.Popen(
[
sys.executable,
"-m",
"uvicorn",
"src.main:app",
"--host",
"0.0.0.0",
"--port",
str(port),
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
try:
if wait_for_health_check(f"http://0.0.0.0:{port}/healthz"):
logger.info(
f"model server health check passed, port {port}, pid: {process.pid}"
)
else:
logger.error("health check failed, shutting it down.")
process.terminate()
except KeyboardInterrupt:
logger.info("model server stopped by user during initialization.")
ensure_killed(process)
# write process id to temp file in temp folder
pid_file = get_pid_file()
logger.info(f"writing pid {process.pid} to {pid_file}")
with open(pid_file, "w") as f:
f.write(str(process.pid))
if foreground:
try:
process.wait()
except KeyboardInterrupt:
logger.info("model server stopped by user.")
ensure_killed(process)
def stop_server():
"""Stop the Uvicorn server."""
pid_file = get_pid_file()
if os.path.exists(pid_file):
logger.info("PID file found, shutting down the server.")
# read pid from file
with open(pid_file, "r") as f:
pid = int(f.read())
logger.info(f"Killing model server {pid}")
try:
os.kill(pid, signal.SIGKILL)
except ProcessLookupError:
logger.info(f"Process {pid} not found")
os.remove(pid_file)
else:
logger.info("No PID file found, server is not running.")
def restart_server(port=51000, foreground=False):
"""Restart the Uvicorn server."""
stop_server()
start_server(port, foreground)
def parse_args():
parser = argparse.ArgumentParser(description="Manage the Uvicorn server.")
parser.add_argument(
"action",
choices=["start", "stop", "restart"],
default="start",
nargs="?",
help="Action to perform on the server (default: start).",
)
parser.add_argument(
"--port",
type=int,
default=51000,
help="Port number for the server (default: 51000).",
)
parser.add_argument(
"--foreground",
default=False,
action="store_true",
help="Run the server in the foreground (default: False).",
)
return parser.parse_args()
def main():
"""
Start, stop, or restart the Uvicorn server based on command-line arguments.
"""
args = parse_args()
if args.action == "start":
logger.info("[CLI] - Starting server")
start_server(args.port, args.foreground)
elif args.action == "stop":
logger.info("[CLI] - Stopping server")
stop_server()
elif args.action == "restart":
logger.info("[CLI] - Restarting server")
restart_server(args.port)
else:
logger.error(f"[CLI] - Unknown action: {args.action}")
sys.exit(1)

View file

@ -1,38 +0,0 @@
import os
from openai import OpenAI
from src.commons.utils import get_model_server_logger
from src.core.guardrails import get_guardrail_handler
from src.core.function_calling import (
ArchAgentConfig,
ArchAgentHandler,
ArchFunctionConfig,
ArchFunctionHandler,
)
# Define logger
logger = get_model_server_logger()
# Define the client
ARCH_ENDPOINT = os.getenv("ARCH_ENDPOINT", "https://archfc.katanemo.dev/v1")
ARCH_API_KEY = "EMPTY"
ARCH_CLIENT = OpenAI(base_url=ARCH_ENDPOINT, api_key=ARCH_API_KEY)
ARCH_AGENT_CLIENT = ARCH_CLIENT
# Define model names
ARCH_INTENT_MODEL_ALIAS = "Arch-Intent"
ARCH_FUNCTION_MODEL_ALIAS = "Arch-Function"
ARCH_AGENT_MODEL_ALIAS = ARCH_FUNCTION_MODEL_ALIAS
ARCH_GUARD_MODEL_ALIAS = "katanemo/Arch-Guard"
# Define model handlers
handler_map = {
"Arch-Function": ArchFunctionHandler(
ARCH_CLIENT, ARCH_FUNCTION_MODEL_ALIAS, ArchFunctionConfig
),
"Arch-Agent": ArchAgentHandler(
ARCH_AGENT_CLIENT, ARCH_AGENT_MODEL_ALIAS, ArchAgentConfig
),
"Arch-Guard": get_guardrail_handler(ARCH_GUARD_MODEL_ALIAS),
}

View file

@ -1,50 +0,0 @@
import torch
import logging
from datetime import datetime
def get_model_server_logger():
"""
Get or initialize the logger instance for the model server.
Returns:
- logging.Logger: Configured logger instance.
"""
# Check if the logger is already configured
logger = logging.getLogger("model_server")
# Return existing logger instance if already configured
if logger.hasHandlers():
return logger
# Configure logging to only log to console
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[logging.StreamHandler()],
)
return logger
def get_device():
if torch.cuda.is_available():
device = "cuda"
elif torch.backends.mps.is_available():
device = "mps"
else:
device = "cpu"
return device
def get_today_date():
# Get today's date
today = datetime.now()
# Get full date with day of week
full_date = today.strftime("%Y-%m-%d")
return full_date

View file

@ -1,520 +0,0 @@
import ast
import copy
import json
import random
import builtins
import src.commons.utils as utils
from openai import OpenAI
from typing import Any, Dict, List
from overrides import override
from src.core.utils.hallucination_utils import HallucinationState
from src.core.utils.model_utils import (
Message,
ChatMessage,
Choice,
ChatCompletionResponse,
ArchBaseHandler,
)
logger = utils.get_model_server_logger()
# ==============================================================================================================================================
class ArchFunctionConfig:
TASK_PROMPT = (
"You are a helpful assistant designed to assist with the user query by making one or more function calls if needed."
"\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>\n{tools}\n</tools>"
"\n\nYour task is to decide which functions are needed and collect missing parameters if necessary."
)
FORMAT_PROMPT = (
"\n\nBased on your analysis, provide your response in one of the following JSON formats:"
'\n1. If no functions are needed:\n```json\n{"response": "Your response text here"}\n```'
'\n2. If functions are needed but some required parameters are missing:\n```json\n{"required_functions": ["func_name1", "func_name2", ...], "clarification": "Text asking for missing parameters"}\n```'
'\n3. If functions are needed and all required parameters are available:\n```json\n{"tool_calls": [{"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},... (more tool calls as required)]}\n```'
)
GENERATION_PARAMS = {
"temperature": 0.1,
"top_p": 1.0,
"top_k": 10,
"max_tokens": 1024,
"stop_token_ids": [151645],
"logprobs": True,
"top_logprobs": 10,
}
SUPPORT_DATA_TYPES = ["int", "float", "bool", "str", "list", "tuple", "set", "dict"]
class ArchFunctionHandler(ArchBaseHandler):
def __init__(
self,
client: OpenAI,
model_name: str,
config: ArchFunctionConfig,
):
"""
Initializes the function handler.
Args:
client (OpenAI): An OpenAI client instance.
model_name (str): Name of the model to use.
config (ArchFunctionConfig): The configuration for Arch-Function
"""
super().__init__(
client,
model_name,
config.TASK_PROMPT,
config.FORMAT_PROMPT,
config.GENERATION_PARAMS,
)
self.generation_params = self.generation_params | {
"continue_final_message": True,
"add_generation_prompt": False,
}
self.default_prefix = '```json\n{"'
self.clarify_prefix = '```json\n{"required_functions":'
self.hallucination_state = None
# Predefine data types for verification. Only support Python for now.
# TODO: Extend the list of support data types
self.support_data_types = {
type_name: getattr(builtins, type_name)
for type_name in config.SUPPORT_DATA_TYPES
}
@override
def _convert_tools(self, tools: List[Dict[str, Any]]) -> str:
"""
Converts a list of tools into JSON format.
Args:
tools (List[Dict[str, Any]]): A list of tools represented as dictionaries.
Returns:
str: A string representation of converted tools.
"""
converted = [json.dumps(tool["function"], ensure_ascii=False) for tool in tools]
return "\n".join(converted)
def _fix_json_string(self, json_str: str) -> str:
"""
Fixes malformed JSON strings by ensuring proper bracket matching.
Args:
json_str (str): A JSON string that might be malformed.
Returns:
str: A corrected JSON string.
"""
# Remove any leading or trailing whitespace or newline characters
json_str = json_str.strip()
# Stack to keep track of brackets
stack = []
# Clean string to collect valid characters
fixed_str = ""
# Dictionary for matching brackets
matching_bracket = {")": "(", "}": "{", "]": "["}
# Dictionary for the opposite of matching_bracket
opening_bracket = {v: k for k, v in matching_bracket.items()}
for char in json_str:
if char in "{[(":
stack.append(char)
fixed_str += char
elif char in "}])":
if stack and stack[-1] == matching_bracket[char]:
stack.pop()
fixed_str += char
else:
# Ignore the unmatched closing brackets
continue
else:
fixed_str += char
# If there are unmatched opening brackets left in the stack, add corresponding closing brackets
while stack:
unmatched_opening = stack.pop()
fixed_str += opening_bracket[unmatched_opening]
try:
fixed_str = json.loads(fixed_str)
except Exception:
fixed_str = json.loads(fixed_str.replace("'", '"'))
return json.dumps(fixed_str)
def _parse_model_response(self, content: str) -> Dict[str, any]:
"""
Extracts tool call information from a given string.
Args:
content (str): The content string containing potential tool call information.
Returns:
Dict: A dictionary of extraction, including:
- "required_functions": A list of detected intents.
- "clarification": Text to collect missing parameters
- "tool_calls": A list of tool call dictionaries.
- "is_valid": A boolean indicating if the extraction was valid.
- "error_message": An error message or exception if parsing failed.
"""
response_dict = {
"raw_response": [],
"response": [],
"required_functions": [],
"clarification": "",
"tool_calls": [],
"is_valid": True,
"error_message": "",
}
try:
if content.startswith("```") and content.endswith("```"):
content = content.strip("```").strip()
if content.startswith("json"):
content = content[4:].strip()
content = self._fix_json_string(content)
response_dict["raw_response"] = f"```json\n{content}\n```"
model_response = json.loads(content)
response_dict["response"] = model_response.get("response", "")
response_dict["required_functions"] = model_response.get(
"required_functions", []
)
response_dict["clarification"] = model_response.get("clarification", "")
for tool_call in model_response.get("tool_calls", []):
response_dict["tool_calls"].append(
{
"id": f"call_{random.randint(1000, 10000)}",
"type": "function",
"function": {
"name": tool_call.get("name", ""),
"arguments": tool_call.get("arguments", {}),
},
}
)
except Exception as e:
response_dict["is_valid"] = False
response_dict["error_message"] = f"Fail to parse model responses: {e}"
return response_dict
def _convert_data_type(self, value: str, target_type: str):
# TODO: Add more conversion rules as needed
try:
if target_type is float and isinstance(value, int):
return float(value)
elif target_type is list and isinstance(value, str):
return ast.literal_eval(value)
elif target_type is str and not isinstance(value, str):
return str(value)
except (ValueError, TypeError, json.JSONDecodeError):
pass
return value
def _verify_tool_calls(
    self, tools: List[Dict[str, Any]], tool_calls: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """
    Verifies the validity of extracted tool calls against the provided tools.

    Args:
        tools (List[Dict[str, Any]]): A list of available tools.
        tool_calls (List[Dict[str, Any]]): A list of tool calls to verify.

    Returns:
        Dict: A dictionary of verification, including:
            - "is_valid": A boolean indicating if the tool calls are valid.
            - "invalid_tool_call": A dictionary of the invalid tool call if any.
            - "error_message": An error message.
    """
    verification_dict = {
        "is_valid": True,
        "invalid_tool_call": {},
        "error_message": "",
    }

    # Map each available function name to its JSON-schema parameters.
    functions = {}
    for tool in tools:
        functions[tool["function"]["name"]] = tool["function"]["parameters"]

    for tool_call in tool_calls:
        # A failure recorded for a previous tool call ends verification early.
        if not verification_dict["is_valid"]:
            break

        func_name = tool_call["function"]["name"]
        func_args = tool_call["function"]["arguments"]

        # Check whether the function is available or not
        if func_name not in functions:
            verification_dict["is_valid"] = False
            verification_dict["invalid_tool_call"] = tool_call
            verification_dict["error_message"] = f"{func_name} is not available!"
        else:
            # Check if all the required parameters can be found in the tool calls
            for required_param in functions[func_name].get("required", []):
                if required_param not in func_args:
                    verification_dict["is_valid"] = False
                    verification_dict["invalid_tool_call"] = tool_call
                    verification_dict[
                        "error_message"
                    ] = f"`{required_param}` is required by the function `{func_name}` but not found in the tool call!"
                    break

            # Verify the data type of each parameter in the tool calls
            # NOTE(review): this loop still runs after a missing-required-param
            # failure above, so error_message may be overwritten by a later
            # type-check failure — confirm whether that is intended.
            function_properties = functions[func_name]["properties"]

            logger.info("== func_args ==")
            logger.info(func_args)

            for param_name in func_args:
                if param_name not in function_properties:
                    verification_dict["is_valid"] = False
                    verification_dict["invalid_tool_call"] = tool_call
                    verification_dict[
                        "error_message"
                    ] = f"Parameter `{param_name}` is not defined in the function `{func_name}`."
                    break
                else:
                    param_value = func_args[param_name]
                    target_type = function_properties[param_name]["type"]
                    if target_type in self.support_data_types:
                        data_type = self.support_data_types[target_type]
                        if not isinstance(param_value, data_type):
                            # Attempt a best-effort coercion before failing.
                            param_value = self._convert_data_type(
                                param_value, data_type
                            )
                            if not isinstance(param_value, data_type):
                                verification_dict["is_valid"] = False
                                verification_dict["invalid_tool_call"] = tool_call
                                verification_dict[
                                    "error_message"
                                ] = f"Parameter `{param_name}` is expected to have the data type `{data_type}`, got `{type(param_value)}`."
                                break
                    else:
                        verification_dict["is_valid"] = False
                        verification_dict["invalid_tool_call"] = tool_call
                        verification_dict[
                            "error_message"
                        ] = f"Data type `{target_type}` is not supported."

    return verification_dict
def _prefill_message(self, messages: List[Dict[str, str]], prefill_message):
"""
Update messages and generation params for prompt prefilling
Args:
messages (List[Dict[str, str]]): A list of messages.
Returns:
prefill_messages (List[Dict[str, str]]): A list of messages.
"""
return messages + [{"role": "assistant", "content": prefill_message}]
@override
async def chat_completion(self, req: ChatMessage) -> ChatCompletionResponse:
    """
    Generates a chat completion response for a given request.

    Streams the model output, optionally running hallucination detection on
    the token stream, then parses the final text into a direct response, a
    clarification request, or validated tool calls.

    Args:
        req (ChatMessage): A chat message request object.

    Returns:
        ChatCompletionResponse: The model's response to the chat request.

    Note:
        Currently only support vllm inference
    """
    logger.info("[Arch-Function] - ChatCompletion")

    messages = self._process_messages(
        req.messages, req.tools, metadata=req.metadata
    )

    logger.info(
        f"[request to arch-fc]: model: {self.model_name}, extra_body: {self.generation_params}, body: {json.dumps(messages)}"
    )

    # always enable `stream=True` to collect model responses
    response = self.client.chat.completions.create(
        messages=self._prefill_message(messages, self.default_prefix),
        model=self.model_name,
        stream=True,
        extra_body=self.generation_params,
    )

    use_agent_orchestrator = req.metadata.get("use_agent_orchestrator", False)

    model_response = ""
    if use_agent_orchestrator:
        # Agent-orchestrator mode: accumulate the streamed deltas directly,
        # skipping hallucination detection entirely.
        for chunk in response:
            if len(chunk.choices) > 0 and chunk.choices[0].delta.content:
                model_response += chunk.choices[0].delta.content
        logger.info(f"[Agent Orchestrator]: response received: {model_response}")
    else:
        # initialize the hallucination handler, which is an iterator
        self.hallucination_state = HallucinationState(
            response_iterator=response, function=req.tools
        )

        has_tool_calls, has_hallucination = None, False
        for _ in self.hallucination_state:
            # check if model response starts with tool calls; we do it after 5
            # tokens because we only check the first part of the response.
            if len(self.hallucination_state.tokens) > 5 and has_tool_calls is None:
                content = "".join(self.hallucination_state.tokens)
                if "tool_calls" in content:
                    has_tool_calls = True
                else:
                    has_tool_calls = False

            # if the model is hallucinating, start parameter gathering
            if self.hallucination_state.hallucination is True:
                has_hallucination = True
                break

        if has_tool_calls and has_hallucination:
            # start prompt prefilling if hallucination is found in tool calls:
            # re-query with the clarification prefix so the model asks the
            # user for uncertain parameters instead of guessing them.
            logger.info(
                f"[Hallucination]: {self.hallucination_state.error_message}"
            )
            response = self.client.chat.completions.create(
                messages=self._prefill_message(messages, self.clarify_prefix),
                model=self.model_name,
                stream=False,
                extra_body=self.generation_params,
            )
            model_response = response.choices[0].message.content
        else:
            model_response = "".join(self.hallucination_state.tokens)

    # Extract tool calls from model response
    response_dict = self._parse_model_response(model_response)

    logger.info(f"[arch-fc]: raw model response: {response_dict['raw_response']}")

    # General model response
    if response_dict.get("response", ""):
        model_message = Message(content="", tool_calls=[])
    # Parameter gathering
    elif response_dict.get("required_functions", []):
        if not use_agent_orchestrator:
            clarification = response_dict.get("clarification", "")
            model_message = Message(content=clarification, tool_calls=[])
        else:
            model_message = Message(content="", tool_calls=[])
    # Function Calling
    elif response_dict.get("tool_calls", []):
        if response_dict["is_valid"]:
            if not use_agent_orchestrator:
                verification_dict = self._verify_tool_calls(
                    tools=req.tools, tool_calls=response_dict["tool_calls"]
                )
                if verification_dict["is_valid"]:
                    logger.info(
                        f"[Tool calls]: {json.dumps([tool_call['function'] for tool_call in response_dict['tool_calls']])}"
                    )
                    model_message = Message(
                        content="", tool_calls=response_dict["tool_calls"]
                    )
                else:
                    logger.error(
                        f"Invalid tool call - {verification_dict['error_message']}"
                    )
                    model_message = Message(content="", tool_calls=[])
            else:
                # skip tool call verification if using agent orchestrator
                logger.info(
                    f"[Tool calls]: {json.dumps([tool_call['function'] for tool_call in response_dict['tool_calls']])}"
                )
                model_message = Message(
                    content="", tool_calls=response_dict["tool_calls"]
                )
        else:
            # Response with tool calls but invalid
            model_message = Message(content="", tool_calls=[])
    # Response not in the desired format
    else:
        logger.error(f"Invalid model response - {model_response}")
        model_message = Message(content="", tool_calls=[])

    chat_completion_response = ChatCompletionResponse(
        choices=[Choice(message=model_message)],
        model=self.model_name,
        metadata={"x-arch-fc-model-response": response_dict["raw_response"]},
        role="assistant",
    )

    logger.info(
        f"[response arch-fc]: {json.dumps(chat_completion_response.model_dump(exclude_none=True))}"
    )

    return chat_completion_response
# ==============================================================================================================================================
class ArchAgentConfig(ArchFunctionConfig):
    """Generation settings for the Arch-Agent model (agent orchestration)."""

    # Near-greedy sampling (very low temperature); log-probabilities are
    # requested because downstream hallucination detection consumes them.
    GENERATION_PARAMS = {
        "temperature": 0.01,
        "top_p": 1.0,
        "top_k": 10,
        "max_tokens": 1024,
        # Stop at this token id — presumably the model's end-of-turn token;
        # TODO confirm against the served tokenizer.
        "stop_token_ids": [151645],
        "logprobs": True,
        "top_logprobs": 10,
    }
class ArchAgentHandler(ArchFunctionHandler):
    """Arch-Agent variant of the function-calling handler."""

    def __init__(self, client: OpenAI, model_name: str, config: ArchAgentConfig):
        super().__init__(client, model_name, config)

    @override
    def _convert_tools(self, tools: List[Dict[str, Any]]) -> str:
        """
        Converts a list of tools into JSON format.

        Serializes each tool's "function" definition to one JSON line.
        A tool whose parameter schema declares no properties is emitted
        without its "parameters" key; the input list is never mutated.

        Args:
            tools (List[Dict[str, Any]]): A list of tools represented as dictionaries.

        Returns:
            str: A string representation of converted tools.
        """
        serialized_lines = []
        for tool in tools:
            function_def = tool["function"]
            has_empty_properties = (
                "parameters" in function_def
                and "properties" in function_def["parameters"]
                and not function_def["parameters"]["properties"]
            )
            if has_empty_properties:
                # Deep-copy before deleting so the caller's tool is untouched.
                function_def = copy.deepcopy(function_def)
                del function_def["parameters"]
            serialized_lines.append(json.dumps(function_def, ensure_ascii=False))
        return "\n".join(serialized_lines)

View file

@ -1,160 +0,0 @@
import torch
import numpy as np
import src.commons.utils as utils
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from src.core.utils.model_utils import GuardRequest, GuardResponse
logger = utils.get_model_server_logger()
class ArchGuardHanlder:
    """
    Guardrail classifier wrapper (currently jailbreak detection only).

    NOTE(review): the class name keeps the original "Hanlder" spelling
    because external callers reference it by this name.
    """

    def __init__(self, model_dict):
        """
        Initializes the ArchGuardHanlder with the given model dictionary.

        Args:
            model_dict (dict): A dictionary containing the model, model name,
                tokenizer, and device information.
        """
        self.model = model_dict["model"]
        self.model_name = model_dict["model_name"]
        self.tokenizer = model_dict["tokenizer"]
        self.device = model_dict["device"]

        # Per-task config: index of the "positive" logit and decision threshold.
        self.support_tasks = {"jailbreak": {"positive_class": 2, "threshold": 0.5}}

    def _split_text_into_chunks(self, text, max_num_words=300):
        """
        Splits the input text into chunks of up to `max_num_words` words.

        Args:
            text (str): The input text to be split.
            max_num_words (int, optional): The maximum number of words in each chunk. Defaults to 300.

        Returns:
            List[str]: A list of text chunks.
        """
        words = text.split()
        chunks = [
            " ".join(words[i : i + max_num_words])
            for i in range(0, len(words), max_num_words)
        ]
        return chunks

    @staticmethod
    def softmax(x):
        """
        Computes the softmax of the input array along axis 0.

        Uses the max-subtraction trick for numerical stability: exponentiating
        large logits directly would overflow to inf and produce NaNs.

        Args:
            x (np.ndarray): The input array of logits.

        Returns:
            np.ndarray: The softmax of the input (sums to 1 along axis 0).
        """
        shifted = np.exp(x - np.max(x, axis=0))
        return shifted / shifted.sum(axis=0)

    def _predict_text(self, task, text, max_length=512) -> GuardResponse:
        """
        Predicts the result for the provided text for a specific task.

        Args:
            task (str): The task to perform (e.g., "jailbreak").
            text (str): The input text to classify.
            max_length (int, optional): The maximum length for tokenization. Defaults to 512.

        Returns:
            GuardResponse: A GuardResponse object containing the prediction.
        """
        inputs = self.tokenizer(
            text, truncation=True, max_length=max_length, return_tensors="pt"
        ).to(self.device)

        with torch.no_grad():
            logits = self.model(**inputs).logits.cpu().detach().numpy()[0]
            # Probability of the task's positive class after softmax.
            prob = ArchGuardHanlder.softmax(logits)[
                self.support_tasks[task]["positive_class"]
            ].item()
            verdict = prob > self.support_tasks[task]["threshold"]

        return GuardResponse(task=task, input=text, prob=prob, verdict=verdict)

    def predict(self, req: GuardRequest, max_num_words=300) -> GuardResponse:
        """
        Makes a prediction based on the GuardRequest input.

        Args:
            req (GuardRequest): The GuardRequest object containing the input text and task.
            max_num_words (int, optional): The maximum number of words in each chunk if splitting is needed. Defaults to 300.

        Returns:
            GuardResponse: A GuardResponse object containing the prediction.

        Raises:
            NotImplementedError: If the requested task is not supported.

        Note:
            currently only support jailbreak check
        """
        if req.task not in self.support_tasks:
            raise NotImplementedError(f"{req.task} is not supported!")

        logger.info("[Arch-Guard] - Prediction")
        logger.info(f"[request arch-guard]: {req.input}")

        if len(req.input.split()) < max_num_words:
            result = self._predict_text(req.task, req.input)
        else:
            prob, verdict = 0.0, False
            # split into chunks if text is long; stop at the first positive
            # verdict since one flagged chunk flags the whole input.
            text_chunks = self._split_text_into_chunks(req.input)
            for chunk in text_chunks:
                chunk_result = self._predict_text(req.task, chunk)
                if chunk_result.verdict:
                    prob = chunk_result.prob
                    verdict = True
                    break
            result = GuardResponse(
                task=req.task, input=req.input, prob=prob, verdict=verdict
            )

        logger.info(
            f"[response]: {req.task}: {'True' if result.verdict else 'False'} (prob: {result.prob:.2f})"
        )

        return result
def get_guardrail_handler(model_name: str = "katanemo/Arch-Guard", device: str = None):
    """
    Initializes and returns an instance of ArchGuardHanlder based on the specified device.

    Args:
        model_name (str, optional): Model id to load. Defaults to "katanemo/Arch-Guard".
        device (str, optional): The device to use for model inference (e.g., "cpu" or "cuda").
            Auto-detected via utils.get_device() when omitted.

    Returns:
        ArchGuardHanlder: An instance of ArchGuardHanlder configured for the specified device.
    """
    resolved_device = device if device is not None else utils.get_device()

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name, device_map=resolved_device, low_cpu_mem_usage=True
    )

    return ArchGuardHanlder(
        model_dict={
            "device": resolved_device,
            "model_name": model_name,
            "tokenizer": tokenizer,
            "model": model,
        }
    )

View file

@ -1,393 +0,0 @@
import json
import math
import torch
import itertools
from typing import Dict, List, Tuple
from enum import Enum
import string
from src.commons.utils import get_model_server_logger
logger = get_model_server_logger()
# constants
# Literal token patterns that delimit pieces of a streamed tool-call payload.
# Both double- and single-quoted variants are matched because the model may
# emit either quoting style.
FUNC_NAME_START_PATTERN = ('{"name":"', "{'name':'")
FUNC_NAME_END_TOKEN = ('",', "',")
# Marks the end of one complete tool-call object.
END_TOOL_CALL_TOKEN = "}}"
FIRST_PARAM_NAME_START_PATTERN = ('"arguments":{"', "'arguments':{'")
PARAMETER_NAME_END_TOKENS = ('":', ':"', "':", ":'", '":"', "':'")
PARAMETER_NAME_START_PATTERN = ('","', "','")
PARAMETER_VALUE_START_PATTERN = ('":', "':")
PARAMETER_VALUE_END_TOKEN = ('",', '"}')
# Matching close bracket for each opening bracket inside parameter values.
BRACKETS = {"(": ")", "{": "}", "[": "]"}


class MaskToken(Enum):
    """Label assigned to each streamed token by HallucinationState."""

    FUNCTION_NAME = "f"
    PARAMETER_VALUE = "v"
    PARAMETER_NAME = "p"
    NOT_USED = "e"
    TOOL_CALL = "t"


# Thresholds
# Uncertainty thresholds for hallucination detection; see check_threshold.
HALLUCINATION_THRESHOLD_DICT = {
    "entropy": 0.0001,
    "varentropy": 0.0001,
    "probability": 0.8,
}
def check_threshold(entropy: float, varentropy: float, thd: Dict) -> bool:
    """
    Check if the given entropy and variance of entropy exceed their thresholds.

    Args:
        entropy (float): The entropy value to check.
        varentropy (float): The variance of entropy value to check.
        thd (dict): Threshold values under the keys 'entropy' and 'varentropy'.

    Returns:
        bool: True only when both values exceed their respective thresholds.
    """
    # Short-circuit on entropy first, matching `and` evaluation order.
    if entropy <= thd["entropy"]:
        return False
    return varentropy > thd["varentropy"]
def calculate_uncertainty(log_probs: List[float]) -> Tuple[float, float, float]:
    """
    Calculate the entropy and variance of entropy (varentropy) from log probabilities.

    Args:
        log_probs (list of float): A list of log probabilities (top-k for one token,
            with the chosen token's log-probability first).

    Returns:
        tuple: A tuple containing:
            - entropy (float): The calculated entropy, in bits.
            - varentropy (float): The calculated variance of entropy.
            - probability (float): exp() of the first log-probability, i.e. the
              probability of the top-ranked token.
    """
    log_probs = torch.tensor(log_probs)
    token_probs = torch.exp(log_probs)
    # Division by ln(2) converts nats to bits.
    entropy = -torch.sum(log_probs * token_probs, dim=-1) / math.log(2, math.e)
    varentropy = torch.sum(
        token_probs * (log_probs / math.log(2, math.e) + entropy.unsqueeze(-1)) ** 2,
        dim=-1,
    )
    return entropy.item(), varentropy.item(), token_probs[0].item()
def is_parameter_required(
    function_description: Dict,
    parameter_name: str,
) -> bool:
    """
    Check if a parameter is in the function's required list.

    Args:
        function_description (dict): The API description in JSON format.
        parameter_name (str): The name of the parameter to check.

    Returns:
        bool: True if the parameter is listed as required, False otherwise.
    """
    # A missing "required" key falls back to an empty container, so the
    # membership test is simply False in that case.
    return parameter_name in function_description.get("required", {})
def is_parameter_property(
    function_description: Dict, parameter_name: str, property_name: str
) -> bool:
    """
    Check if a parameter in an API description has a specific property.

    Args:
        function_description (dict): The API description in JSON format.
        parameter_name (str): The name of the parameter to check.
        property_name (str): The property to look for (e.g., 'format', 'default').

    Returns:
        bool: True if the parameter has the specified property, False otherwise.
    """
    # Chained .get() lookups collapse missing "properties" or an unknown
    # parameter into an empty dict, yielding False below.
    parameter_info = function_description.get("properties", {}).get(parameter_name, {})
    return property_name in parameter_info
class HallucinationState:
    """
    A class to handle the state of hallucination detection in token processing.

    Consumes a streamed chat-completion response token by token, labels each
    token (function name / parameter name / parameter value / unused), and
    flags a hallucination when the uncertainty of a required, non-enum
    parameter value exceeds the configured thresholds.

    Attributes:
        tokens (list): List of tokens processed.
        logprobs (list): List of log probabilities for each token.
        state (str): Current state of the handler.
        mask (list): List of masks indicating the type of each token.
        parameter_name_done (bool): Flag indicating if parameter name extraction is done.
        hallucination (bool): Flag indicating if a hallucination is detected.
        error_message (str): Message describing the hallucination.
        parameter_name (list): List of extracted parameter names.
        token_probs_map (list): List mapping tokens to their entropy and variance of entropy.
    """

    def __init__(self, response_iterator=None, function=None):
        """
        Initializes the HallucinationState with default values.

        Args:
            response_iterator: Streamed chat-completion iterator to consume.
            function: List of tool definitions (OpenAI "function" schema) used
                to look up required/enum parameter metadata.
        """
        self.tokens: List[str] = []
        self.logprobs: List[float] = []
        self.state: str = None
        self.mask: List[str] = []
        self.parameter_name_done: bool = False
        self.hallucination: bool = False
        self.error_message: str = ""
        self.parameter_name: List[str] = []
        self.token_probs_map: List[Tuple[str, float, float]] = []
        self.response_iterator = response_iterator
        self._process_function(function)
        # Bracket tracking lets multi-token bracketed values (lists, dicts)
        # be treated as one parameter value.
        self.open_bracket = False
        self.bracket = None
        self.function_name = ""
        # Tracks which parameter names have already been uncertainty-checked.
        self.check_parameter_name = {}
        self.HALLUCINATION_THRESHOLD_DICT = HALLUCINATION_THRESHOLD_DICT

    def _process_function(self, function):
        # Index each tool's parameter schema by its function name.
        self.function = function
        if self.function is None:
            raise ValueError("API descriptions not set.")
        self.function_properties = {
            x["function"]["name"]: x["function"]["parameters"] for x in self.function
        }

    def _reset_parameters(self):
        """
        Resets all parameters in the HallucinationState to their default values.
        """
        self.state = None
        self.parameter_name_done = False
        self.hallucination = False
        self.error_message = ""
        self.open_bracket = False
        self.bracket = None
        self.check_parameter_name = {}

    def append_and_check_token_hallucination(self, token, logprob):
        """
        Check if the given token is hallucinated based on the log probability.

        Args:
            token (str): The token to check.
            logprob (float): The log probability of the token.

        Returns:
            bool: True if the token is hallucinated, False otherwise.
        """
        self.tokens.append(token)
        self.logprobs.append(logprob)
        self._process_token()
        return self.hallucination

    def __iter__(self):
        return self

    def __next__(self):
        # Pull the next streamed chunk, run hallucination processing on its
        # content, and yield the raw content string.
        if self.response_iterator is not None:
            try:
                r = next(self.response_iterator)
                if hasattr(r.choices[0].delta, "content"):
                    token_content = r.choices[0].delta.content
                    if token_content != "":
                        try:
                            logprobs = [
                                p.logprob
                                for p in r.choices[0].logprobs.content[0].top_logprobs
                            ]
                            self.append_and_check_token_hallucination(
                                token_content, logprobs
                            )
                        except Exception as e:
                            # Chunks without logprobs are still recorded, with
                            # a placeholder probability.
                            self.append_and_check_token_hallucination(
                                token_content, [None]
                            )
                    return token_content
            except StopIteration:
                raise StopIteration

    def _process_token(self):
        """
        Processes the current token and updates the state and mask accordingly.
        Detects hallucinations based on the token type and log probabilities.
        """
        # Work on the whitespace-free concatenation of everything seen so far;
        # all pattern matching below is done against this string's tail.
        content = "".join(self.tokens).replace(" ", "")

        # A completed tool call resets the per-call state machine.
        if content.endswith(END_TOOL_CALL_TOKEN):
            self._reset_parameters()

        # Function name extraction logic
        # If the state is function name and the token is not an end token, add to the mask
        if self.state == "function_name":
            if self.tokens[-1] not in FUNC_NAME_END_TOKEN:
                self.mask.append(MaskToken.FUNCTION_NAME)
            else:
                self.state = None
                self._get_function_name()

        # Check if the token is a function name start token, change the state
        if content.endswith(FUNC_NAME_START_PATTERN):
            self.state = "function_name"

        # Parameter name extraction logic
        # if the state is parameter name and the token is not an end token, add to the mask
        if self.state == "parameter_name" and not content.endswith(
            PARAMETER_NAME_END_TOKENS
        ):
            self.mask.append(MaskToken.PARAMETER_NAME)
        # if the state is parameter name and the token is an end token, change the state, check hallucination and set the flag parameter name done
        # The need for parameter name done is to allow the check of parameter value pattern
        elif self.state == "parameter_name" and content.endswith(
            PARAMETER_NAME_END_TOKENS
        ):
            self.state = None
            self.parameter_name_done = True
            self._get_parameter_name()
        # if the parameter name is done and the token is a parameter name start token, change the state
        elif (
            self.parameter_name_done
            and not self.open_bracket
            and content.endswith(PARAMETER_NAME_START_PATTERN)
        ):
            self.state = "parameter_name"

        # if token is a first parameter value start token, change the state
        if content.endswith(FIRST_PARAM_NAME_START_PATTERN):
            self.state = "parameter_name"

        # Parameter value extraction logic
        # if the state is parameter value and the token is not an end token, add to the mask
        if self.state == "parameter_value" and not content.endswith(
            PARAMETER_VALUE_END_TOKEN
        ):
            # checking if the token is a value token and is not empty
            open_brackets = [
                char for char in self.tokens[-1].strip() if char in BRACKETS
            ]
            if open_brackets:
                self.open_bracket = True
                self.bracket = open_brackets[0]
            if self.open_bracket and BRACKETS[self.bracket] in self.tokens[-1].strip():
                self.open_bracket = False
                self.bracket = None
            if (
                not all(
                    char in set(string.punctuation) for char in self.tokens[-1].strip()
                )
                and self.tokens[-1].strip() != ""
            ):
                self.mask.append(MaskToken.PARAMETER_VALUE)
                # checking if the parameter doesn't have enum and the token is the first parameter value token
                # check if function name is in function properties
                if self.function_name in self.function_properties:
                    # Only the FIRST value token of a required, non-enum
                    # parameter is uncertainty-checked, and each parameter
                    # name is checked at most once.
                    if (
                        len(self.mask) > 1
                        and self.mask[-2] != MaskToken.PARAMETER_VALUE
                        and is_parameter_required(
                            self.function_properties[self.function_name],
                            self.parameter_name[-1],
                        )
                        and not is_parameter_property(
                            self.function_properties[self.function_name],
                            self.parameter_name[-1],
                            "enum",
                        )
                    ):
                        if self.parameter_name[-1] not in self.check_parameter_name:
                            self._check_logprob()
                            self.check_parameter_name[self.parameter_name[-1]] = True
                else:
                    # Unknown function name: still run the uncertainty check
                    # and record a warning.
                    self._check_logprob()
                    self.error_message = f"Function name {self.function_name} not found in function properties"
                    logger.warning(
                        f"Function name {self.function_name} not found in function properties"
                    )
            else:
                # Pure punctuation / empty token inside a value.
                self.mask.append(MaskToken.NOT_USED)
        # if the state is parameter value and the token is an end token, change the state
        elif (
            self.state == "parameter_value"
            and not self.open_bracket
            and content.endswith(PARAMETER_VALUE_END_TOKEN)
        ):
            self.state = None
        # if the parameter name is done and the token is a parameter value start token, change the state
        elif self.parameter_name_done and content.endswith(
            PARAMETER_VALUE_START_PATTERN
        ):
            self.state = "parameter_value"

        # Maintain consistency between stack and mask
        # If the mask length is less than tokens, add an not used (e) token to the mask
        if len(self.mask) != len(self.tokens):
            self.mask.append(MaskToken.NOT_USED)

    def _check_logprob(self):
        """
        Checks the log probability of the current token and updates the token probability map.
        Detects hallucinations based on entropy and variance of entropy.
        """
        probs = self.logprobs[-1]
        entropy, varentropy, probability = calculate_uncertainty(probs)
        self.token_probs_map.append((self.tokens[-1], entropy, varentropy, probability))
        if check_threshold(
            entropy,
            varentropy,
            self.HALLUCINATION_THRESHOLD_DICT,
        ):
            self.hallucination = True
            self.error_message = f"token '{self.tokens[-1]}' is uncertain. Generated response:\n{''.join(self.tokens)}"

    def _count_consecutive_token(self, token=MaskToken.PARAMETER_VALUE) -> int:
        """
        Counts the number of consecutive occurrences of a given token in the mask.

        Args:
            token (str): The token to count in the mask.

        Returns:
            int: The number of consecutive occurrences of the token,
            counted from the end of the mask.
        """
        return (
            len(list(itertools.takewhile(lambda x: x == token, reversed(self.mask))))
            if self.mask and self.mask[-1] == token
            else 0
        )

    def _get_parameter_name(self):
        """
        Get the parameter name from the tokens and append it to
        self.parameter_name.
        """
        # NOTE(review): if p_len is 0, the slice [-0:] takes ALL tokens —
        # presumably the mask always has at least one PARAMETER_NAME entry
        # here; confirm.
        p_len = self._count_consecutive_token(MaskToken.PARAMETER_NAME)
        parameter_name = "".join(self.tokens[:-1][-p_len:])
        self.parameter_name.append(parameter_name)

    def _get_function_name(self):
        """
        Get the function name from the tokens and store it in
        self.function_name.
        """
        f_len = self._count_consecutive_token(MaskToken.FUNCTION_NAME)
        self.function_name = "".join(self.tokens[:-1][-f_len:])

View file

@ -1,217 +0,0 @@
import json
import src.commons.utils as utils
from openai import OpenAI
from pydantic import BaseModel
from typing import Any, Dict, List, Optional
from overrides import final
class Message(BaseModel):
    """One chat turn; mirrors the OpenAI chat message schema."""

    role: Optional[str] = ""
    content: Optional[str] = ""
    # Set when this message is a tool's reply to a specific tool call.
    tool_call_id: Optional[str] = ""
    # OpenAI-style tool-call dicts emitted by the assistant, if any.
    # (Mutable defaults are safe here: pydantic copies them per instance.)
    tool_calls: Optional[List[Dict[str, Any]]] = []
class ChatMessage(BaseModel):
    """Inbound chat-completion request: conversation, tools, and flags."""

    messages: List[Message] = []
    # Tool definitions in the OpenAI "function" schema.
    tools: List[Dict[str, Any]] = []
    # Free-form string flags, e.g. "use_agent_orchestrator".
    metadata: Optional[Dict[str, str]] = {}
class Choice(BaseModel):
    """A single completion candidate within a response."""

    id: Optional[int] = 0
    message: Message
    finish_reason: Optional[str] = "stop"
class ChatCompletionResponse(BaseModel):
    """Outbound chat-completion response, loosely OpenAI-shaped."""

    id: Optional[int] = 0
    object: Optional[str] = "chat_completion"
    created: Optional[str] = ""
    choices: List[Choice] = []
    model: str = ""
    # Extra key/value info (latencies, hallucination flag, errors).
    metadata: Optional[Dict[str, str]] = {}
class GuardRequest(BaseModel):
    """Guardrail check request: text to classify and the task name."""

    input: str
    # Task identifier, e.g. "jailbreak".
    task: str
class GuardResponse(BaseModel):
    """Guardrail verdict for one input."""

    task: str = ""
    input: str = ""
    # Positive-class probability in [0, 1].
    prob: float = 0.0
    # True when prob exceeded the task's threshold.
    verdict: bool = False
    metadata: Optional[Dict[str, str]] = {}
# ================================================================================================
class ArchBaseHandler:
    """
    Base class for model handlers: prompt construction, message
    normalization, and the abstract chat-completion entry point.
    """

    def __init__(
        self,
        client: OpenAI,
        model_name: str,
        task_prompt: str,
        format_prompt: str,
        generation_params: Dict,
    ):
        """
        Initializes the base handler.

        Args:
            client (OpenAI): An OpenAI client instance.
            model_name (str): Name of the model to use.
            task_prompt (str): The main task prompt for the system.
            format_prompt (str): A prompt specifying the desired output format.
            generation_params (Dict): Generation parameters for the model.
        """
        self.client = client
        self.model_name = model_name
        self.task_prompt = task_prompt
        self.format_prompt = format_prompt
        self.generation_params = generation_params

    def _convert_tools(self, tools: List[Dict[str, Any]]) -> str:
        """
        Converts a list of tools into the desired internal representation.

        Args:
            tools (List[Dict[str, Any]]): A list of tools represented as dictionaries.

        Raises:
            NotImplementedError: Method should be overridden in subclasses.
        """
        raise NotImplementedError()

    @final
    def _format_system_prompt(self, tools: List[Dict[str, Any]]) -> str:
        """
        Formats the system prompt using provided tools.

        Args:
            tools (List[Dict[str, Any]]): A list of tools represented as dictionaries.

        Returns:
            str: A formatted system prompt (task prompt with today's date and
            the serialized tools, followed by the format prompt).
        """
        today_date = utils.get_today_date()
        tools = self._convert_tools(tools)
        system_prompt = (
            self.task_prompt.format(today_date=today_date, tools=tools)
            + self.format_prompt
        )
        return system_prompt

    @final
    def _process_messages(
        self,
        messages: List[Message],
        tools: List[Dict[str, Any]] = None,
        extra_instruction: str = None,
        max_tokens=4096,
        # NOTE(review): mutable default dict is shared across calls — safe
        # only because it is never mutated here; confirm.
        metadata: Dict[str, str] = {},
    ):
        """
        Processes a list of messages and formats them appropriately.

        Args:
            messages (List[Message]): A list of message objects.
            tools (List[Dict[str, Any]], optional): A list of tools to include in the system prompt.
            extra_instruction (str, optional): Additional instructions to append to the last user message.
            max_tokens (int): Maximum allowed token count, assuming ~4 characters per token on average.
            metadata (Dict[str, str], optional): Flags; "optimize_context_window"
                blanks out tool responses to save context.

        Returns:
            List[Dict[str, Any]]: A list of processed message dictionaries.
        """
        processed_messages = []

        if tools:
            processed_messages.append(
                {"role": "system", "content": self._format_system_prompt(tools)}
            )

        for idx, message in enumerate(messages):
            role, content, tool_calls = (
                message.role,
                message.content,
                message.tool_calls,
            )

            if tool_calls:
                # TODO: Extend to support multiple function calls
                # Re-emit the assistant's tool call in <tool_call> markup.
                role = "assistant"
                content = f"<tool_call>\n{json.dumps(tool_calls[0]['function'])}\n</tool_call>"
            elif role == "tool":
                # Tool results are folded back in as user turns.
                role = "user"
                if metadata.get("optimize_context_window", "false").lower() == "true":
                    # Drop the tool payload entirely to save context window.
                    content = f"<tool_response>\n\n</tool_response>"
                else:
                    # sample response below
                    # "content": "<tool_response>\n{'name': 'get_stock_price', 'result': '$196.66'}\n</tool_response>"
                    # msg[idx-1] contains tool call = '{"tool_calls": [{"name": "currency_exchange", "arguments": {"currency_symbol": "NZD"}}]}'
                    tool_call_msg = messages[idx - 1].content
                    if tool_call_msg.startswith("```") and tool_call_msg.endswith(
                        "```"
                    ):
                        tool_call_msg = tool_call_msg.strip("```").strip()
                        if tool_call_msg.startswith("json"):
                            tool_call_msg = tool_call_msg[4:].strip()
                    func_name = json.loads(tool_call_msg)["tool_calls"][0].get(
                        "name", "no_name"
                    )
                    tool_response = {
                        "name": func_name,
                        "result": content,
                    }
                    content = f"<tool_response>\n{json.dumps(tool_response)}\n</tool_response>"

            processed_messages.append({"role": role, "content": content})

        # The prompt must end on a user turn.
        assert processed_messages[-1]["role"] == "user"

        if extra_instruction:
            processed_messages[-1]["content"] += "\n" + extra_instruction

        # keep the first system message and shift conversation if the total token length exceeds the limit
        def truncate_messages(messages: List[Dict[str, Any]]):
            # Walk backwards from the newest message, accumulating an
            # approximate token count (~4 chars/token); cut at a user turn
            # once the budget is exceeded so the window starts cleanly.
            num_tokens, conversation_idx = 0, 0
            if messages[0]["role"] == "system":
                num_tokens += len(messages[0]["content"]) // 4
                conversation_idx = 1
            for message_idx in range(len(messages) - 1, conversation_idx - 1, -1):
                num_tokens += len(messages[message_idx]["content"]) // 4
                if num_tokens >= max_tokens:
                    if messages[message_idx]["role"] == "user":
                        break
            return messages[:conversation_idx] + messages[message_idx:]

        processed_messages = truncate_messages(processed_messages)

        return processed_messages

    async def chat_completion(self, req: ChatMessage) -> ChatCompletionResponse:
        """
        Abstract method for generating chat completions.

        Args:
            req (ChatMessage): A chat message request object.

        Raises:
            NotImplementedError: Method should be overridden in subclasses.
        """
        raise NotImplementedError()

View file

@ -1,158 +0,0 @@
import json
import os
import time
import logging
import src.commons.utils as utils
from src.commons.globals import ARCH_ENDPOINT, handler_map
from src.core.function_calling import ArchFunctionHandler
from src.core.utils.model_utils import (
ChatMessage,
ChatCompletionResponse,
GuardRequest,
GuardResponse,
)
from fastapi import FastAPI, Response
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.sdk.resources import Resource
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
# Identify this process to the tracing backend.
resource = Resource.create(
    {
        "service.name": "model-server",
    }
)

# Initialize the tracer provider
trace.set_tracer_provider(TracerProvider(resource=resource))
tracer = trace.get_tracer(__name__)

# DEFAULT_OTLP_HOST = "http://localhost:4317"
# Tracing export is disabled by default ("none"); set OTLP_HOST to enable.
DEFAULT_OTLP_HOST = "none"

# Configure the OTLP exporter (Jaeger, Zipkin, etc.)
otlp_exporter = OTLPSpanExporter(
    endpoint=os.getenv("OTLP_HOST", DEFAULT_OTLP_HOST)  # noqa: F821
)
trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(otlp_exporter))

logger = utils.get_model_server_logger()

# Silence noisy third-party loggers (HTTP client and OTLP exporter retries).
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("opentelemetry.exporter.otlp.proto.grpc.exporter").setLevel(
    logging.ERROR
)

app = FastAPI()
# Auto-instrument all FastAPI routes for tracing.
FastAPIInstrumentor().instrument_app(app)

logger.info(f"using archfc endpoint: {ARCH_ENDPOINT}")
@app.get("/healthz")
async def healthz():
    """Liveness probe: always reports the service as healthy."""
    health_status = {"status": "ok"}
    return health_status
@app.get("/models")
async def models():
    """List the model handlers served by this process, OpenAI list-style."""
    model_entries = [
        {"id": model_name, "object": "model"} for model_name in handler_map
    ]
    return {"object": "list", "data": model_entries}
@app.post("/function_calling")
async def function_calling(req: ChatMessage, res: Response):
    """Run intent detection / function calling for a chat request.

    Routes to the "Arch-Agent" handler when request metadata sets
    ``use_agent_orchestrator``, otherwise to "Arch-Function". Latency and
    (when applicable) hallucination details are attached to the response
    metadata as strings.

    Args:
        req (ChatMessage): Chat history, tool schemas, and metadata.
        res (Response): FastAPI response object, used to set an error
            status code before re-raising.

    Returns:
        ChatCompletionResponse: The handler's completion with metadata.

    Raises:
        ValueError: Tool-call extraction failed (status 503).
        StopIteration: Hallucination check failed (status 500).
        Exception: Any other handler failure (status 500).
    """
    logger.info("[Endpoint: /function_calling]")
    logger.info(f"[request body]: {json.dumps(req.model_dump(exclude_none=True))}")

    final_response: ChatCompletionResponse = None
    error_messages = None

    use_agent_orchestrator = req.metadata.get("use_agent_orchestrator", False)
    logger.info(f"Use agent orchestrator: {use_agent_orchestrator}")

    try:
        handler_name = "Arch-Agent" if use_agent_orchestrator else "Arch-Function"
        model_handler: ArchFunctionHandler = handler_map[handler_name]

        start_time = time.perf_counter()
        final_response = await model_handler.chat_completion(req)
        latency = time.perf_counter() - start_time

        if not final_response.metadata:
            final_response.metadata = {}

        # Parameter gathering for detected intents
        if final_response.choices[0].message.content:
            final_response.metadata["function_latency"] = str(round(latency * 1000, 3))
        # Function Calling
        elif final_response.choices[0].message.tool_calls:
            final_response.metadata["function_latency"] = str(round(latency * 1000, 3))
            if not use_agent_orchestrator:
                final_response.metadata["hallucination"] = str(
                    model_handler.hallucination_state.hallucination
                )
        # No intent detected
        else:
            # Fix: the original assigned "intent_latency" twice in this branch.
            final_response.metadata["intent_latency"] = str(round(latency * 1000, 3))
            if not use_agent_orchestrator:
                final_response.metadata["hallucination"] = str(
                    model_handler.hallucination_state.hallucination
                )
    except ValueError as e:
        # Fix: was `res.statuscode`, which silently created an unused
        # attribute; `status_code` is the real Starlette/FastAPI field.
        res.status_code = 503
        error_messages = f"[{handler_name}] - Error in tool call extraction: {e}"
        raise
    except StopIteration as e:
        res.status_code = 500
        error_messages = f"[{handler_name}] - Error in hallucination check: {e}"
        raise
    except Exception as e:
        res.status_code = 500
        error_messages = f"[{handler_name}] - Error in ChatCompletion: {e}"
        raise

    # NOTE(review): every except branch re-raises, so this fallback is only
    # reachable if a handler above stops re-raising; kept for safety.
    if error_messages is not None:
        logger.error(error_messages)
        final_response = ChatCompletionResponse(metadata={"error": error_messages})

    return final_response
@app.post("/guardrails")
async def guardrails(req: GuardRequest, res: Response, max_num_words=300):
    """Run the Arch-Guard classifier over the request input.

    Args:
        req (GuardRequest): Text plus guard task to evaluate.
        res (Response): FastAPI response object, used to set an error
            status code on failure.
        max_num_words: Unused here; kept for API compatibility.

    Returns:
        GuardResponse: Prediction with guard latency metadata, or an
        error payload when prediction fails.
    """
    logger.info("[Endpoint: /guardrails] - Gateway")
    logger.info(f"[request body]: {json.dumps(req.model_dump(exclude_none=True))}")

    final_response: GuardResponse = None
    error_messages = None
    try:
        started = time.perf_counter()
        final_response = handler_map["Arch-Guard"].predict(req)
        elapsed = time.perf_counter() - started
        final_response.metadata = {
            "guard_latency": round(elapsed * 1000, 3),
        }
    except Exception as e:
        res.status_code = 500
        error_messages = f"[Arch-Guard]: {e}"

    if error_messages is not None:
        logger.error(error_messages)
        final_response = GuardResponse(metadata={"error": error_messages})
    return final_response

View file

@ -1,115 +0,0 @@
import pytest
import time
from src.commons.globals import handler_map
from src.core.utils.model_utils import ChatMessage, Message
# define function
# OpenAI-style tool schema used as the single tool across all test cases.
get_weather_api = {
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get current weather at a location.",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "str",
                    "description": "The location to get the weather for",
                    "format": "City, State",
                },
                "unit": {
                    "type": "str",
                    "description": "The unit to return the weather in.",
                    "enum": ["celsius", "fahrenheit"],
                    "default": "celsius",
                },
                "days": {
                    "type": "str",
                    "description": "the number of days for the request.",
                },
            },
            # "days" is required, so prompts that omit it exercise the
            # parameter-gathering / hallucination paths.
            "required": ["location", "days"],
        },
    },
}
# get_data class return request, intent, hallucination, parameter_gathering
def get_hallucination_data():
    """Build a request whose vague prompt ("in days?") should trip
    hallucination detection rather than yield a tool call.

    Returns:
        tuple: (request, expect_tool_call, expect_hallucination)
    """
    prompt = Message(role="user", content="How is the weather in Seattle in days?")
    request = ChatMessage(messages=[prompt], tools=[get_weather_api])
    # first token will not be tool call
    return request, False, True
def get_success_tool_call_data():
    """Build a fully-specified weather request that should yield a
    clean tool call with no hallucination.

    Returns:
        tuple: (request, expect_tool_call, expect_hallucination)
    """
    prompt = Message(role="user", content="How is the weather in Seattle in 7 days?")
    request = ChatMessage(messages=[prompt], tools=[get_weather_api])
    return request, True, False
def get_irrelevant_data():
    """Build a request unrelated to the weather tool; neither a tool
    call nor a hallucination is expected.

    Returns:
        tuple: (request, expect_tool_call, expect_hallucination)
    """
    prompt = Message(role="user", content="What is 1+1?")
    request = ChatMessage(messages=[prompt], tools=[get_weather_api])
    return request, False, False
def get_greeting_data():
    """Build a plain greeting request; neither a tool call nor a
    hallucination is expected.

    Returns:
        tuple: (request, expect_tool_call, expect_hallucination)
    """
    prompt = Message(role="user", content="Hello how are you?")
    request = ChatMessage(messages=[prompt], tools=[get_weather_api])
    return request, False, False
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "get_data_func",
    [
        get_hallucination_data,
        get_greeting_data,
        get_irrelevant_data,
        get_success_tool_call_data,
    ],
)
async def test_function_calling(get_data_func):
    """End-to-end check of the Arch-Function handler for one scenario.

    Asserts that a tool call is produced exactly when expected and that
    the hallucination detector's verdict matches the scenario.

    Fixes vs. original: removed unused timing variables
    (start_time/latency) and the unused use_agent_orchestrator flag, and
    dropped the local annotation referencing ArchFunctionHandler, which
    was never imported in this file.
    """
    req, expect_tool_call, expect_hallucination = get_data_func()

    model_handler = handler_map["Arch-Function"]
    final_response = await model_handler.chat_completion(req)

    # A non-empty tool_calls list means the model committed to a call.
    assert expect_tool_call == (len(final_response.choices[0].message.tool_calls) >= 1)
    assert expect_hallucination == model_handler.hallucination_state.hallucination

View file

@ -1,42 +0,0 @@
from unittest.mock import patch, MagicMock
from src.core.guardrails import get_guardrail_handler
# Test for `get_guardrail_handler()` function on `cuda`
@patch("src.core.guardrails.AutoTokenizer.from_pretrained")
@patch("src.core.guardrails.AutoModelForSequenceClassification.from_pretrained")
def test_guardrail_handler_on_cuda(mock_auto_model, mock_tokenizer):
    """Guardrail handler loads tokenizer and model onto the cuda device."""
    mock_tokenizer.return_value = MagicMock()
    mock_auto_model.return_value = MagicMock()

    guardrail = get_guardrail_handler(device="cuda")

    mock_tokenizer.assert_called_once_with(guardrail.model_name, trust_remote_code=True)
    mock_auto_model.assert_called_once_with(
        guardrail.model_name,
        device_map="cuda",
        low_cpu_mem_usage=True,
    )
# Test for `get_guardrail_handler()` function on `mps`
@patch("src.core.guardrails.AutoTokenizer.from_pretrained")
@patch("src.core.guardrails.AutoModelForSequenceClassification.from_pretrained")
def test_guardrail_handler_on_mps(mock_auto_model, mock_tokenizer):
    """Guardrail handler loads tokenizer and model onto the mps device."""
    mock_tokenizer.return_value = MagicMock()
    mock_auto_model.return_value = MagicMock()

    guardrail = get_guardrail_handler(device="mps")

    mock_tokenizer.assert_called_once_with(guardrail.model_name, trust_remote_code=True)
    mock_auto_model.assert_called_once_with(
        guardrail.model_name,
        device_map="mps",
        low_cpu_mem_usage=True,
    )

View file

@ -1,36 +0,0 @@
from src.commons.globals import handler_map
from src.core.function_calling import ArchFunctionHandler, Message
# Fixture: a multi-turn conversation including a tool-call round trip.
# The test below verifies that _process_messages folds the tool turn away.
test_input_history = [
    {"role": "user", "content": "how is the weather in chicago for next 5 days?"},
    # Assistant emits a tool call as a fenced JSON block.
    {
        "role": "assistant",
        "model": "Arch-Function",
        "content": '```json\n{"tool_calls": [{"name": "get_current_weather", "arguments": {"days": 5, "location": "Chicago, Illinois"}}]}\n```',
    },
    # Raw tool result for the weather call (role "tool" must be removed
    # by message processing).
    {
        "role": "tool",
        "model": "Arch-Function",
        "content": '{"location":"Chicago%2C%20Illinois","temperature":[{"date":"2025-04-14","temperature":{"min":53,"max":65},"units":"Farenheit","query_time":"2025-04-14 17:01:52.432817+00:00"},{"date":"2025-04-15","temperature":{"min":85,"max":97},"units":"Farenheit","query_time":"2025-04-14 17:01:52.432830+00:00"},{"date":"2025-04-16","temperature":{"min":62,"max":78},"units":"Farenheit","query_time":"2025-04-14 17:01:52.432835+00:00"},{"date":"2025-04-17","temperature":{"min":89,"max":101},"units":"Farenheit","query_time":"2025-04-14 17:01:52.432839+00:00"},{"date":"2025-04-18","temperature":{"min":86,"max":104},"units":"Farenheit","query_time":"2025-04-14 17:01:52.432843+00:00"}],"units":"Farenheit"}',
    },
    # Final natural-language summary produced by the downstream LLM.
    {
        "role": "assistant",
        "model": "gpt-4o-2024-08-06",
        "content": '{"response": "Based on the forecast data you provided, here is the weather for the next 5 days in Chicago:\\n\\n- **April 14, 2025**: The temperature will range between 53\\u00b0F and 65\\u00b0F. \\n- **April 15, 2025**: The temperature will range between 85\\u00b0F and 97\\u00b0F.\\n- **April 16, 2025**: The temperature will range between 62\\u00b0F and 78\\u00b0F.\\n- **April 17, 2025**: The temperature will range between 89\\u00b0F and 101\\u00b0F.\\n- **April 18, 2025**: The temperature will range between 86\\u00b0F and 104\\u00b0F.\\n\\nPlease note that the temperatures are given in Fahrenheit."}',
    },
    {"role": "user", "content": "what about seattle?"},
]
def test_update_fc_history():
    """_process_messages collapses tool-role turns out of the history."""
    history = [Message(**entry) for entry in test_input_history]

    handler: ArchFunctionHandler = handler_map["Arch-Function"]
    processed = handler._process_messages(history)

    assert len(processed) == 5
    # ensure that tool role does not exist anymore
    assert not any(entry["role"] == "tool" for entry in processed)

View file

@ -1,47 +0,0 @@
import pytest
import httpx
from fastapi.testclient import TestClient
from src.main import app
client = TestClient(app)
# Unit tests for the health check endpoint
@pytest.mark.asyncio
async def test_healthz():
    """/healthz responds 200 with an ok status payload."""
    resp = client.get("/healthz")
    assert resp.status_code == 200
    assert resp.json() == {"status": "ok"}
# Unit test for the models endpoint
@pytest.mark.asyncio
async def test_models():
    """/models responds 200 with a non-empty OpenAI-style model list."""
    resp = client.get("/models")
    assert resp.status_code == 200
    body = resp.json()
    assert body["object"] == "list"
    assert len(body["data"]) > 0
# Unit test for the guardrail endpoint
@pytest.mark.asyncio
async def test_guardrail_endpoint():
    """/guardrails accepts a jailbreak-task request and responds 200."""
    payload = {"input": "Test for jailbreak and toxicity", "task": "jailbreak"}
    resp = client.post("/guardrails", json=payload)
    assert resp.status_code == 200
# Unit test for the function calling endpoint
@pytest.mark.asyncio
async def test_function_calling_endpoint():
    """/function_calling responds 200 for a minimal chat request.

    Fixes vs. original: the ``AsyncClient(app=...)`` shortcut was
    deprecated and then removed in modern httpx; an explicit
    ``ASGITransport`` is the supported way to test an ASGI app. The
    local variable is also renamed so it no longer shadows the
    module-level ``client`` TestClient.
    """
    transport = httpx.ASGITransport(app=app)
    async with httpx.AsyncClient(
        transport=transport, base_url="http://test"
    ) as async_client:
        request_data = {
            "messages": [{"role": "user", "content": "Hello!"}],
            "model": "Arch-Function",
            "tools": [],
            "metadata": {"x-arch-state": "[]"},
        }
        response = await async_client.post("/function_calling", json=request_data)
        assert response.status_code == 200

View file

@ -12,4 +12,3 @@ services:
environment:
- OPENAI_API_KEY=${OPENAI_API_KEY:?error}
- MISTRAL_API_KEY=${MISTRAL_API_KEY:?error}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-51001}

View file

@ -18,4 +18,3 @@ services:
- MISTRAL_API_KEY=${MISTRAL_API_KEY:?error}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:?error}
- OTEL_TRACING_HTTP_ENDPOINT=http://host.docker.internal:4318/v1/traces
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-51000}

View file

@ -11,9 +11,6 @@ touch ~/archgw_logs/modelserver.log
print_debug() {
log "Received signal to stop"
log "Printing debug logs for model_server"
log "===================================="
tail -n 100 ~/archgw_logs/modelserver.log
log "Printing debug logs for docker"
log "===================================="
tail -n 100 ../build.log
@ -30,12 +27,6 @@ cd ../../demos/samples_python/weather_forecast/
docker compose up weather_forecast_service --build -d
cd -
log building and install model server
log =================================
cd ../../model_server
poetry install
cd -
log building and installing archgw cli
log ==================================
cd ../../arch/tools
@ -53,11 +44,8 @@ poetry install
log startup arch gateway with function calling demo
cd ../../
tail -F ~/archgw_logs/modelserver.log &
model_server_tail_pid=$!
archgw down
archgw up demos/samples_python/weather_forecast/arch_config.yaml
kill $model_server_tail_pid
cd -
log running e2e tests for prompt gateway

View file

@ -1,15 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Python Debugger: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal"
}
]
}

View file

@ -1,7 +0,0 @@
{
"python.testing.pytestArgs": [
"."
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
}

View file

@ -1,899 +0,0 @@
# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "attrs"
version = "24.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
files = [
{file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"},
{file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"},
]
[package.extras]
benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
{file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
]
[[package]]
name = "cffi"
version = "1.17.1"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
files = [
{file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
{file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
{file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
{file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
{file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
{file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
{file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
{file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
{file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
{file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
{file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
{file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
{file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
{file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
{file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
{file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
{file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
{file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
{file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
{file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
{file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
{file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
{file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
{file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
]
[package.dependencies]
pycparser = "*"
[[package]]
name = "charset-normalizer"
version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
{file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
{file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
{file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
{file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
{file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
{file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
]
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "coverage"
version = "7.6.10"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.9"
files = [
{file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"},
{file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"},
{file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"},
{file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"},
{file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"},
{file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"},
{file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"},
{file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"},
{file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"},
{file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"},
{file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"},
{file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"},
{file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"},
{file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"},
{file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"},
{file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"},
{file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"},
{file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"},
{file = "coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"},
{file = "coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"},
{file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"},
{file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"},
{file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"},
{file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"},
{file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"},
{file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"},
{file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"},
{file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"},
{file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"},
{file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = "sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"},
{file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"},
{file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"},
{file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"},
{file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"},
{file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"},
{file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"},
{file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"},
{file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"},
{file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"},
{file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"},
{file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"},
{file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"},
{file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"},
{file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"},
{file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"},
{file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"},
{file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"},
{file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"},
{file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"},
{file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"},
{file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"},
{file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"},
{file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"},
{file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"},
{file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"},
{file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"},
{file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"},
{file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"},
{file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"},
{file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"},
{file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"},
{file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"},
]
[package.dependencies]
tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
[package.extras]
toml = ["tomli"]
[[package]]
name = "deepdiff"
version = "8.1.1"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.8"
files = [
{file = "deepdiff-8.1.1-py3-none-any.whl", hash = "sha256:b0231fa3afb0f7184e82535f2b4a36636442ed21e94a0cf3aaa7982157e7ebca"},
{file = "deepdiff-8.1.1.tar.gz", hash = "sha256:dd7bc7d5c8b51b5b90f01b0e2fe23c801fd8b4c6a7ee7e31c5a3c3663fcc7ceb"},
]
[package.dependencies]
orderly-set = ">=5.2.3,<6"
[package.extras]
cli = ["click (==8.1.7)", "pyyaml (==6.0.2)"]
optimize = ["orjson"]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "markupsafe"
version = "3.0.2"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.9"
files = [
{file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"},
{file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"},
{file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"},
{file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"},
{file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"},
{file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"},
{file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"},
{file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"},
{file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"},
{file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"},
{file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"},
{file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"},
{file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"},
{file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"},
{file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"},
{file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"},
{file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"},
{file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"},
{file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"},
{file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"},
{file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"},
{file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"},
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"},
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"},
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"},
{file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"},
{file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"},
{file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"},
{file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"},
{file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"},
{file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"},
{file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"},
{file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"},
{file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"},
{file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"},
{file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"},
{file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"},
{file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"},
{file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"},
{file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"},
{file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"},
{file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"},
{file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"},
{file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"},
{file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"},
{file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"},
{file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"},
{file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"},
{file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"},
{file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"},
{file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"},
{file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"},
]
[[package]]
name = "orderly-set"
version = "5.2.3"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
files = [
{file = "orderly_set-5.2.3-py3-none-any.whl", hash = "sha256:d357cedcf67f4ebff0d4cbd5b0997e98eeb65dd24fdf5c990a501ae9e82c7d34"},
{file = "orderly_set-5.2.3.tar.gz", hash = "sha256:571ed97c5a5fca7ddeb6b2d26c19aca896b0ed91f334d9c109edd2f265fb3017"},
]
[[package]]
name = "outcome"
version = "1.3.0.post0"
description = "Capture the outcome of Python function calls."
optional = false
python-versions = ">=3.7"
files = [
{file = "outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"},
{file = "outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8"},
]
[package.dependencies]
attrs = ">=19.2.0"
[[package]]
name = "packaging"
version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pycparser"
version = "2.22"
description = "C parser in Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
{file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]
[[package]]
name = "pysocks"
version = "1.7.1"
description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information."
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"},
{file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"},
{file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"},
]
[[package]]
name = "pytest"
version = "8.3.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"},
{file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.5,<2"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}
[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-cov"
version = "4.1.0"
description = "Pytest plugin for measuring coverage."
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
{file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
]
[package.dependencies]
coverage = {version = ">=5.2.1", extras = ["toml"]}
pytest = ">=4.6"
[package.extras]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
[[package]]
name = "pytest-httpserver"
version = "1.1.0"
description = "pytest-httpserver is a httpserver for pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest_httpserver-1.1.0-py3-none-any.whl", hash = "sha256:7ef88be8ed3354b6784daa3daa75a422370327c634053cefb124903fa8d73a41"},
{file = "pytest_httpserver-1.1.0.tar.gz", hash = "sha256:6b1cb0199e2ed551b1b94d43f096863bbf6ae5bcd7c75c2c06845e5ce2dc8701"},
]
[package.dependencies]
Werkzeug = ">=2.0.0"
[[package]]
name = "pytest-retry"
version = "1.6.3"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
files = [
{file = "pytest_retry-1.6.3-py3-none-any.whl", hash = "sha256:e96f7df77ee70b0838d1085f9c3b8b5b7d74bf8947a0baf32e2b8c71b27683c8"},
{file = "pytest_retry-1.6.3.tar.gz", hash = "sha256:36ccfa11c8c8f9ddad5e20375182146d040c20c4a791745139c5a99ddf1b557d"},
]
[package.dependencies]
pytest = ">=7.0.0"
[package.extras]
dev = ["black", "flake8", "isort", "mypy"]
[[package]]
name = "pytest-sugar"
version = "1.0.0"
description = "pytest-sugar is a plugin for pytest that changes the default look and feel of pytest (e.g. progressbar, show tests that fail instantly)."
optional = false
python-versions = "*"
files = [
{file = "pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a"},
{file = "pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd"},
]
[package.dependencies]
packaging = ">=21.3"
pytest = ">=6.2.0"
termcolor = ">=2.1.0"
[package.extras]
dev = ["black", "flake8", "pre-commit"]
[[package]]
name = "pyyaml"
version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
{file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
{file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
{file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
{file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
{file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
{file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
{file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
{file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
{file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
{file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
{file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
{file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
{file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "selenium"
version = "4.27.1"
description = "Official Python bindings for Selenium WebDriver"
optional = false
python-versions = ">=3.8"
files = [
{file = "selenium-4.27.1-py3-none-any.whl", hash = "sha256:b89b1f62b5cfe8025868556fe82360d6b649d464f75d2655cb966c8f8447ea18"},
{file = "selenium-4.27.1.tar.gz", hash = "sha256:5296c425a75ff1b44d0d5199042b36a6d1ef76c04fb775b97b40be739a9caae2"},
]
[package.dependencies]
certifi = ">=2021.10.8"
trio = ">=0.17,<1.0"
trio-websocket = ">=0.9,<1.0"
typing_extensions = ">=4.9,<5.0"
urllib3 = {version = ">=1.26,<3", extras = ["socks"]}
websocket-client = ">=1.8,<2.0"
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "sortedcontainers"
version = "2.4.0"
description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set"
optional = false
python-versions = "*"
files = [
{file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"},
{file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
]
[[package]]
name = "termcolor"
version = "2.5.0"
description = "ANSI color formatting for output in terminal"
optional = false
python-versions = ">=3.9"
files = [
{file = "termcolor-2.5.0-py3-none-any.whl", hash = "sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8"},
{file = "termcolor-2.5.0.tar.gz", hash = "sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f"},
]
[package.extras]
tests = ["pytest", "pytest-cov"]
[[package]]
name = "tomli"
version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
{file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
{file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
{file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
{file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
{file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
{file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
{file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
{file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
{file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
{file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
{file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
{file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
{file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
{file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
{file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
{file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
{file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
{file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
]
[[package]]
name = "trio"
version = "0.28.0"
description = "A friendly Python library for async concurrency and I/O"
optional = false
python-versions = ">=3.9"
files = [
{file = "trio-0.28.0-py3-none-any.whl", hash = "sha256:56d58977acc1635735a96581ec70513cc781b8b6decd299c487d3be2a721cd94"},
{file = "trio-0.28.0.tar.gz", hash = "sha256:4e547896fe9e8a5658e54e4c7c5fa1db748cbbbaa7c965e7d40505b928c73c05"},
]
[package.dependencies]
attrs = ">=23.2.0"
cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""}
exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
idna = "*"
outcome = "*"
sniffio = ">=1.3.0"
sortedcontainers = "*"
[[package]]
name = "trio-websocket"
version = "0.11.1"
description = "WebSocket library for Trio"
optional = false
python-versions = ">=3.7"
files = [
{file = "trio-websocket-0.11.1.tar.gz", hash = "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f"},
{file = "trio_websocket-0.11.1-py3-none-any.whl", hash = "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638"},
]
[package.dependencies]
exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
trio = ">=0.11"
wsproto = ">=0.14"
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
version = "2.3.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
files = [
{file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"},
{file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"},
]
[package.dependencies]
pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""}
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "websocket-client"
version = "1.8.0"
description = "WebSocket client for Python with low level API options"
optional = false
python-versions = ">=3.8"
files = [
{file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"},
{file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"},
]
[package.extras]
docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"]
optional = ["python-socks", "wsaccel"]
test = ["websockets"]
[[package]]
name = "werkzeug"
version = "3.1.3"
description = "The comprehensive WSGI web application library."
optional = false
python-versions = ">=3.9"
files = [
{file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"},
{file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"},
]
[package.dependencies]
MarkupSafe = ">=2.1.1"
[package.extras]
watchdog = ["watchdog (>=2.3)"]
[[package]]
name = "wsproto"
version = "1.2.0"
description = "WebSockets state-machine based protocol implementation"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"},
{file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"},
]
[package.dependencies]
h11 = ">=0.9.0,<1"
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "d0ba3f1875425991d473c9b6fee9f9cb35de5df5835fd7f7e27e3863ff7d37fe"

View file

@ -1,29 +0,0 @@
[tool.poetry]
name = "modelserver_mock_tests"
version = "0.0.1"
description = "modelserver tests"
authors = ["Katanemo Labs, Inc <info@katanemo.com>"]
license = "Apache 2.0"
readme = "README.md"
package-mode = false
[tool.poetry.dependencies]
python = "^3.10"
pytest = "^8.3.3"
requests = "^2.29.0"
selenium = "^4.11.2"
pytest-sugar = "^1.0.0"
deepdiff = "^8.0.1"
pytest-retry = "^1.6.3"
pytest-httpserver = "^1.1.0"
pyyaml = "*"
[tool.poetry.dev-dependencies]
pytest-cov = "^4.1.0"
[tool.pytest.ini_options]
python_files = ["test*.py"]
addopts = ["-v", "-s"]
retries = 2
retry_delay = 0.5
cumulative_timing = false

View file

@ -1,58 +0,0 @@
import os
import pytest
import requests
import yaml
from deepdiff import DeepDiff
# Skip the entire module: the assertions below compare against live model
# output, which is not deterministic enough to gate CI on.
pytestmark = pytest.mark.skip(
    reason="Skipping entire test file as this these tests are heavily dependent on model output"
)
# Endpoint of the model server's function-calling route; override with the
# MODEL_SERVER_ENDPOINT environment variable when testing a remote instance.
MODEL_SERVER_ENDPOINT = os.getenv(
    "MODEL_SERVER_ENDPOINT", "http://localhost:12000/function_calling"
)
# Directory containing this test module — the YAML fixture lives alongside it.
script_dir = os.path.dirname(__file__)
# Absolute path to the fixture file holding the parametrized test cases.
yaml_file_path = os.path.join(script_dir, "test_success_data.yaml")
# Load the test cases once at import time (safe_load: no arbitrary YAML tags).
with open(yaml_file_path, "r") as file:
    test_data_yaml = yaml.safe_load(file)
@pytest.mark.parametrize(
    "test_data",
    [
        pytest.param(test_case, id=test_case["id"])
        for test_case in test_data_yaml["test_cases"]
    ],
)
def test_model_server(test_data):
    """Send one fixture's ``input`` payload to the model server and verify
    the returned tool calls match the fixture's ``expected`` list.

    Each fixture supplies an OpenAI-style chat request (``messages`` +
    ``tools``) and the exact tool calls the model is expected to emit.
    Tool-call ``id`` fields are server-generated, so they are checked for
    presence and then removed before the deep comparison.
    """
    payload = test_data["input"]  # renamed from `input` to avoid shadowing the builtin
    expected = test_data["expected"]

    # A bounded timeout keeps a hung model server from stalling the whole
    # suite — requests waits forever by default.
    response = requests.post(MODEL_SERVER_ENDPOINT, json=payload, timeout=60)
    assert response.status_code == 200
    # ensure that response is json
    assert response.headers["content-type"] == "application/json"
    response_json = response.json()
    assert response_json

    # Exactly one choice carrying an assistant message with tool calls.
    choices = response_json.get("choices", [])
    assert len(choices) == 1
    choice = choices[0]
    assert "message" in choice
    message = choice["message"]
    assert "tool_calls" in message
    tool_calls = message["tool_calls"]
    assert len(tool_calls) == len(expected)

    for tool_call, expected_tool_call in zip(tool_calls, expected):
        # Every call must carry a server-assigned id; drop it so the
        # structural diff below compares only name/arguments/type.
        assert "id" in tool_call
        del tool_call["id"]
        # ensure that the tool call matches the expected tool call
        diff = DeepDiff(expected_tool_call, tool_call, ignore_string_case=True)
        assert not diff

View file

@ -1,561 +0,0 @@
test_cases:
- id: "[WEATHER AGENT] - single turn, single tool, all parameters"
input:
messages:
- role: "user"
content: "what is the weather forecast for Seattle, WA in the next 10 days?"
tools:
- type: "function"
function:
name: "get_current_weather"
description: "Get current weather at a location."
parameters:
type: "object"
properties:
location:
type: "str"
description: "The location to get the weather for"
format: "City, State"
days:
type: "int"
description: "the number of days for the request."
required: ["location", "days"]
expected:
- type: "function"
function:
name: "get_current_weather"
arguments:
location: "Seattle, WA"
days: 10
- id: "[WEATHER AGENT] - single turn, single tool, param gathering"
input:
messages:
- role: "user"
content: "what is the weather in Seattle?"
- role: "assistant"
content: "May I know the location and number of days you want to get the weather for?"
model: "Arch-Function"
- role: "user"
content: "5 days"
tools:
- type: "function"
function:
name: "get_current_weather"
description: "Get current weather at a location."
parameters:
type: "object"
properties:
location:
type: "str"
description: "The location to get the weather for"
format: "City, State"
days:
type: "int"
description: "the number of days for the request."
required: ["location", "days"]
expected:
- type: "function"
function:
name: "get_current_weather"
arguments:
location: "Seattle, WA"
days: 5
- id: "[WEATHER AGENT] - multi turn, single tool, all params passed"
input:
messages:
- role: "user"
content: "how is the weather in chicago for next 5 days?"
- role: "assistant"
tool_calls:
- id: "call_3394"
type: "function"
function:
name: "get_current_weather"
arguments:
location: "Chicago, IL"
days: 5
- role: "tool"
content: "--"
tool_call_id: "call_3394"
- role: "assistant"
content: "--"
- role: "user"
content: "how is the weather in LA for next 5 days?"
tools:
- type: "function"
function:
name: "get_current_weather"
description: "Get current weather at a location."
parameters:
type: "object"
properties:
location:
type: "str"
description: "The location to get the weather for"
format: "City, State"
days:
type: "int"
description: "the number of days for the request."
required: ["location", "days"]
expected:
- type: "function"
function:
name: "get_current_weather"
arguments:
location: "Los Angeles, CA"
days: 5
# Skip!
# - id: "[WEATHER AGENT] - multi turn, single tool, infer param from context"
# input:
# messages:
# - role: "user"
# content: "how is the weather in chicago for next 5 days?"
# - role: "assistant"
# tool_calls:
# - id: "call_3394"
# type: "function"
# function:
# name: "get_current_weather"
# arguments:
# location: "Chicago, IL"
# days: 5
# - role: "tool"
# content: "--"
# tool_call_id: "call_3394"
# - role: "assistant"
# content: "--"
# - role: "user"
# content: "how is the weather in LA?"
# tools:
# - type: "function"
# function:
# name: "get_current_weather"
# description: "Get current weather at a location."
# parameters:
# type: "object"
# properties:
# location:
# type: "str"
# description: "The location to get the weather for"
# format: "City, State"
# days:
# type: "int"
# description: "the number of days for the request."
# required: ["location", "days"]
# expected:
# - type: "function"
# function:
# name: "get_current_weather"
# arguments:
# location: "Los Angeles, CA"
# days: 5
- id: "[WEATHER AGENT] - multi turn, single tool, infer param from context 2nd try"
input:
messages:
- role: "user"
content: "how is the weather in seattle for 5 days?"
tool_call_id: ""
- role: "assistant"
content: ""
tool_call_id: ""
tool_calls:
- id: "call_7134"
type: "function"
function:
name: "get_current_weather"
arguments:
location: "Seattle, WA"
days: 5
- role: "tool"
content: "{\"location\":\"Seattle, WA\",\"temperature\":[{\"date\":\"2024-12-19\",\"temperature\":{\"min\":74,\"max\":90},\"units\":\"Farenheit\",\"query_time\":\"2024-12-19 00:14:35.853372+00:00\"},{\"date\":\"2024-12-20\",\"temperature\":{\"min\":79,\"max\":88},\"units\":\"Farenheit\",\"query_time\":\"2024-12-19 00:14:35.853402+00:00\"}],\"units\":\"Farenheit\"}"
tool_call_id: ""
- role: "assistant"
content: "The weather in Seattle for the next two days is as follows:\n\n- **December 19, 2024**: The temperature will range from a minimum of 74°F to a maximum of 90°F.\n- **December 20, 2024**: The temperature will range from a minimum of 79°F to a maximum of 88°F.\n\nIt seems to be quite warm for Seattle during these dates!"
tool_call_id: ""
- role: "user"
content: "what about weather in chicago?"
tool_call_id: ""
tools:
- type: "function"
function:
name: "get_current_weather"
description: "Get current weather at a location."
parameters:
properties:
days:
type: "int"
description: "the number of days for the request"
location:
type: "str"
description: "The location to get the weather for"
format: "city, state"
required: ["days", "location"]
- type: "function"
function:
name: "default_target"
description: "This is the default target for all unmatched prompts."
parameters:
properties: {}
expected:
- type: "function"
function:
name: "get_current_weather"
arguments:
location: "Chicago, IL"
days: 5
- id: "[HR AGENT] - single turn, single tool, all parameters"
input:
messages:
- role: "user"
content: "Can you show the workforce data for agency staff in america?"
tools:
- type: "function"
function:
name: "get_hr_data"
description: "Get workforce data like headcount and satisfacton levels by region and staffing type."
parameters:
type: "object"
properties:
staffing_type:
type: "str"
description: "Staffing type of employees"
region:
type: "str"
description: "Geographical region for which you want workforce data."
enum: ["america", "emea", "apac"]
point_in_time:
type: "str"
description: "the point in time for which to retrieve data."
default: "1"
required: ["staffing_type", "region"]
expected:
- type: "function"
function:
name: "get_hr_data"
arguments:
region: "america"
staffing_type: "agency"
- id: "[HR AGENT] - multi turn, single tool, all parameters, enum"
input:
messages:
- role: "user"
content: "Can you show the workforce data for agency staff?"
- role: "assistant"
content: "Of course, I can help with that. However, I need the region and staffing type to provide the workforce data. Could you please provide that information?"
- role: "user"
content: "ameriza"
tools:
- type: "function"
function:
name: "get_hr_data"
        description: "Get workforce data like headcount and satisfaction levels by region and staffing type."
parameters:
type: "object"
properties:
staffing_type:
type: "str"
description: "Staffing type of employees"
region:
type: "str"
description: "Geographical region for which you want workforce data."
enum: ["america", "emea", "apac"]
point_in_time:
type: "str"
description: "the point in time for which to retrieve data."
default: "1"
required: ["staffing_type", "region"]
expected:
- type: "function"
function:
name: "get_hr_data"
arguments:
region: "america"
staffing_type: "agency"
- id: "[HR AGENT] - multi turn, multi tool, all parameters, enum"
input:
messages:
- role: "user"
content: "Can you show the workforce data for agency staff?"
- role: "assistant"
content: "Of course, I can help with that. However, I need the region and staffing type to provide the workforce data. Could you please provide that information?"
- role: "user"
content: "america. Also, please get the satisfaction levels for the full_time staff in emea"
tools:
- type: "function"
function:
name: "get_hr_data"
        description: "Get workforce data like headcount and satisfaction levels by region and staffing type."
parameters:
type: "object"
properties:
staffing_type:
type: "str"
description: "Staffing type of employees"
region:
type: "str"
description: "Geographical region for which you want workforce data."
enum: ["america", "emea", "apac"]
point_in_time:
type: "str"
description: "the point in time for which to retrieve data."
default: "1"
required: ["staffing_type", "region"]
expected:
- type: "function"
function:
name: "get_hr_data"
arguments:
region: "america"
staffing_type: "agency"
- type: "function"
function:
name: "get_hr_data"
arguments:
region: "emea"
staffing_type: "full_time"
- id: "[INSURANCE AGENT] - single turn, multi tool, all parameters"
input:
messages:
- role: "user"
content: " i want to start an insurance policy with 500 deductible for car and update deductible my boat insurance policy with id boawd123 to 1000"
tools:
- type: "function"
function:
name: "policy_qa"
description: "Handle general Q/A related to insurance."
parameters:
type: "object"
properties: {}
required: []
- type: "function"
function:
name: "get_policy_coverage"
description: "Retrieve the coverage details for an insurance policy."
parameters:
type: "object"
properties:
policy_type:
type: "str"
description: "The type of insurance policy."
required: ["policy_type"]
- type: "function"
function:
name: "initiate_policy"
description: "Start a policy coverage for an insurance policy."
parameters:
type: "object"
properties:
policy_type:
type: "str"
description: "The type of insurance policy."
deductible:
type: "float"
description: "The deductible amount set for the policy."
required: ["policy_type", "deductible"]
- type: "function"
function:
name: "update_claim"
description: "Update the notes on the claim."
parameters:
type: "object"
properties:
claim_id:
type: "str"
description: "The claim number."
notes:
type: "str"
description: "Notes about the claim number for your adjustor to see."
required: ["claim_id"]
- type: "function"
function:
name: "update_deductible"
description: "Update the deductible amount for a specific insurance policy coverage."
parameters:
type: "object"
properties:
policy_id:
type: "str"
description: "The ID of the insurance policy."
deductible:
type: "float"
description: "The deductible amount set for the policy."
required: ["policy_id", "deductible"]
expected:
- type: "function"
function:
name: "initiate_policy"
arguments:
policy_type: "car"
deductible: 500
- type: "function"
function:
name: "update_deductible"
arguments:
policy_id: "boawd123"
deductible: 1000
- id: "[INSURANCE AGENT] - multi turn, multi tool, all parameters"
input:
messages:
- role: "user"
content: "hi what can you do?"
- role: "assistant"
content: "Certainly! I'm here to assist you with various questions and tasks. Whether it's answering specific questions, providing information, or helping with something else, feel free to let me know how I can assist you."
- role: "user"
content: "i want to start a new insurance policy"
- role: "assistant"
content: "Certainly! To start a new insurance policy, I'll need the type of insurance policy you're interested in and the deductible amount you'd like to set for that policy. Could you please provide this information?"
- role: "user"
content: "car insurance, 500. Also, please get me the coverage details for a house insurance"
tools:
- type: "function"
function:
name: "policy_qa"
description: "Handle general Q/A related to insurance."
parameters:
type: "object"
properties: {}
required: []
- type: "function"
function:
name: "get_policy_coverage"
description: "Retrieve the coverage details for an insurance policy."
parameters:
type: "object"
properties:
policy_type:
type: "str"
description: "The type of insurance policy."
required: ["policy_type"]
- type: "function"
function:
name: "initiate_policy"
description: "Start a policy coverage for an insurance policy."
parameters:
type: "object"
properties:
policy_type:
type: "str"
description: "The type of insurance policy."
deductible:
type: "float"
description: "The deductible amount set for the policy."
required: ["policy_type", "deductible"]
- type: "function"
function:
name: "update_claim"
description: "Update the notes on the claim."
parameters:
type: "object"
properties:
claim_id:
type: "str"
description: "The claim number."
notes:
type: "str"
description: "Notes about the claim number for your adjustor to see."
required: ["claim_id"]
- type: "function"
function:
name: "update_deductible"
description: "Update the deductible amount for a specific insurance policy coverage."
parameters:
type: "object"
properties:
policy_id:
type: "str"
description: "The ID of the insurance policy."
deductible:
type: "float"
description: "The deductible amount set for the policy."
required: ["policy_id", "deductible"]
expected:
- type: "function"
function:
name: "initiate_policy"
arguments:
policy_type: "car insurance"
deductible: 500
- type: "function"
function:
name: "get_policy_coverage"
arguments:
policy_type: "house insurance"
- id: "[INSURANCE AGENT] - single turn, single tool, all parameters"
input:
messages:
- role: "user"
content: "i want to start a insurance policy for car with 500 deductible"
tools:
- type: "function"
function:
name: "policy_qa"
description: "Handle general Q/A related to insurance."
parameters:
type: "object"
properties: {}
required: []
- type: "function"
function:
name: "get_policy_coverage"
description: "Retrieve the coverage details for an insurance policy."
parameters:
type: "object"
properties:
policy_type:
type: "str"
description: "The type of insurance policy."
required: ["policy_type"]
- type: "function"
function:
name: "initiate_policy"
description: "Start a policy coverage for an insurance policy."
parameters:
type: "object"
properties:
policy_type:
type: "str"
description: "The type of insurance policy."
deductible:
type: "float"
description: "The deductible amount set for the policy."
required: ["policy_type", "deductible"]
- type: "function"
function:
name: "update_claim"
description: "Update the notes on the claim."
parameters:
type: "object"
properties:
claim_id:
type: "str"
description: "The claim number."
notes:
type: "str"
description: "Notes about the claim number for your adjustor to see."
required: ["claim_id"]
- type: "function"
function:
name: "update_deductible"
description: "Update the deductible amount for a specific insurance policy coverage."
parameters:
type: "object"
properties:
policy_id:
type: "str"
description: "The ID of the insurance policy."
deductible:
type: "float"
description: "The deductible amount set for the policy."
required: ["policy_id", "deductible"]
expected:
- type: "function"
function:
name: "initiate_policy"
arguments:
policy_type: "car"
deductible: 500