fix demos code (#76)

This commit is contained in:
Adil Hafeez 2024-09-24 14:34:22 -07:00 committed by GitHub
parent 13dff3089d
commit 685144bbd7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
29 changed files with 2020 additions and 21 deletions

View file

@ -0,0 +1,25 @@
FROM Bolt-Function-Calling-1B-Q3_K_L.gguf
# Set the size of the context window used to generate the next token
# PARAMETER num_ctx 16384
PARAMETER num_ctx 4096
# Set parameters for response generation
PARAMETER num_predict 1024
PARAMETER temperature 0.1
PARAMETER top_p 0.5
PARAMETER top_k 32022
PARAMETER repeat_penalty 1.0
PARAMETER stop "<|EOT|>"
# Set the random number seed to use for generation
PARAMETER seed 42
# Set the prompt template to be passed into the model
TEMPLATE """{{ if .System }}<begin▁of▁sentence>
{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}
{{ end }}### Response:
{{ .Response }}
<|EOT|>"""

View file

@ -0,0 +1,24 @@
FROM Bolt-Function-Calling-1B-Q4_K_M.gguf
# Set the size of the context window used to generate the next token
PARAMETER num_ctx 4096
# Set parameters for response generation
PARAMETER num_predict 1024
PARAMETER temperature 0.1
PARAMETER top_p 0.5
PARAMETER top_k 32022
PARAMETER repeat_penalty 1.0
PARAMETER stop "<|EOT|>"
# Set the random number seed to use for generation
PARAMETER seed 42
# Set the prompt template to be passed into the model
TEMPLATE """{{ if .System }}<begin▁of▁sentence>
{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}
{{ end }}### Response:
{{ .Response }}
<|EOT|>"""

View file

@ -1,7 +1,7 @@
# Function calling # Function calling
This demo shows how you can use intelligent prompt gateway to act as a copilot for calling the correct proc by capturing the required and optional parameters from the prompt. This demo assumes you are using ollama running natively. If you want to run ollama running inside docker then please update ollama endpoint in docker-compose file. This demo shows how you can use intelligent prompt gateway to act as a copilot for calling the correct proc by capturing the required and optional parameters from the prompt. This demo assumes you are using ollama running natively. If you want to run ollama running inside docker then please update ollama endpoint in docker-compose file.
# Startig the demo # Starting the demo
1. Ensure that submodule is up to date 1. Ensure that submodule is up to date
```sh ```sh
git submodule sync --recursive git submodule sync --recursive

View file

@ -0,0 +1,16 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "function-calling api server",
"cwd": "${workspaceFolder}/app",
"type": "debugpy",
"request": "launch",
"module": "uvicorn",
"args": ["main:app","--reload", "--port", "8001"],
}
]
}

View file

@ -0,0 +1,19 @@
# Multi-stage build: install dependencies in a full python image, then copy
# only the installed packages into a slim runtime image.
FROM python:3 AS base
FROM base AS builder
WORKDIR /src
COPY requirements.txt /src/
# --prefix=/runtime collects the installed packages under one directory so
# they can be copied wholesale into the runtime stage below.
RUN pip install --prefix=/runtime --force-reinstall -r requirements.txt
COPY . /src
# Slim runtime stage: installed packages + application code only.
FROM python:3-slim AS output
COPY --from=builder /runtime /usr/local
# NOTE(review): copies /app from the build context (not from the builder
# stage, which copied sources to /src) — confirm this is intentional.
COPY /app /app
WORKDIR /app
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]

View file

@ -0,0 +1,289 @@
import random
from typing import List
from fastapi import FastAPI, HTTPException, Response
from datetime import datetime, date, timedelta, timezone
import logging
from pydantic import BaseModel
from utils import load_sql
import pandas as pd
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
app = FastAPI()
@app.get("/healthz")
async def healthz():
    """Liveness probe; used by the docker-compose healthcheck."""
    return {"status": "ok"}
# Build the in-memory demo database once at import time; every request
# handler below reads from this single shared connection.
conn = load_sql()
# Column used to identify employees in query results.
name_col = "name"
class TopEmployees(BaseModel):
    """Request body for POST /top_employees."""
    grouping: str          # column to group employees by (e.g. department, location)
    ranking_criteria: str  # column to rank by; the gateway may send aliases like "yoe"
    top_n: int             # how many top employees to return per group
@app.post("/top_employees")
async def top_employees(req: TopEmployees, res: Response):
    """Return the top-N employees of each group, ranked by one column.

    Column identifiers cannot be bound as SQL parameters, so the grouping
    and ranking columns are validated against allowlists before being
    interpolated into the query; ``top_n`` is bound as a real parameter.

    Raises:
        HTTPException: 400 when the grouping or ranking criteria is not an
            allowed column name.
    """
    name_col = "name"
    logger.info(
        f"{'* ' * 50}\n\nCaptured Ranking Criteria: {req.ranking_criteria}\n\n{'* ' * 50}"
    )
    # Map gateway-facing aliases onto real column names.
    aliases = {"yoe": "years_of_experience", "rating": "performance_score"}
    req.ranking_criteria = aliases.get(req.ranking_criteria, req.ranking_criteria)
    logger.info(
        f"{'* ' * 50}\n\nFinal Ranking Criteria: {req.ranking_criteria}\n\n{'* ' * 50}"
    )
    # Reject anything that is not a known column, so request fields can never
    # inject SQL through the f-string below.
    if req.grouping not in {"department", "location", "position"}:
        raise HTTPException(status_code=400, detail="Invalid grouping")
    if req.ranking_criteria not in {"salary", "years_of_experience", "performance_score"}:
        raise HTTPException(status_code=400, detail="Invalid ranking criteria")
    query = f"""
    SELECT {req.grouping}, {name_col}, {req.ranking_criteria}
    FROM (
        SELECT {req.grouping}, {name_col}, {req.ranking_criteria},
        DENSE_RANK() OVER (PARTITION BY {req.grouping} ORDER BY {req.ranking_criteria} DESC) as emp_rank
        FROM employees
    ) ranked_employees
    WHERE emp_rank <= :top_n;
    """
    result_df = pd.read_sql_query(query, conn, params={"top_n": req.top_n})
    return result_df.to_dict(orient="records")
class AggregateStats(BaseModel):
    """Request body for POST /aggregate_stats."""
    grouping: str            # column to group employees by (e.g. department, location)
    aggregate_criteria: str  # column to aggregate; the gateway may send the alias "yoe"
    aggregate_type: str      # aggregate to compute (SUM/AVG/MIN/MAX, or a word like "average")
@app.post("/aggregate_stats")
async def aggregate_stats(req: AggregateStats, res: Response):
    """Compute one aggregate (SUM/AVG/MIN/MAX/COUNT) of a column per group.

    Identifiers and the aggregate function name cannot be bound as SQL
    parameters, so each is checked against an allowlist before being
    interpolated into the query string.

    Raises:
        HTTPException: 400 on an unknown grouping, criteria, or aggregate type.
    """
    logger.info(
        f"{'* ' * 50}\n\nCaptured Aggregate Criteria: {req.aggregate_criteria}\n\n{'* ' * 50}"
    )
    # Map the gateway-facing alias onto the real column name.
    if req.aggregate_criteria == "yoe":
        req.aggregate_criteria = "years_of_experience"
    logger.info(
        f"{'* ' * 50}\n\nFinal Aggregate Criteria: {req.aggregate_criteria}\n\n{'* ' * 50}"
    )
    logger.info(
        f"{'* ' * 50}\n\nCaptured Aggregate Type: {req.aggregate_type}\n\n{'* ' * 50}"
    )
    # Normalize natural-language aggregate names to SQL functions; anything
    # unrecognized is rejected so arbitrary SQL can never reach the query.
    sql_functions = {
        "sum": "SUM", "total": "SUM",
        "avg": "AVG", "average": "AVG",
        "min": "MIN", "minimum": "MIN",
        "max": "MAX", "maximum": "MAX",
        "count": "COUNT",
    }
    aggregate_fn = sql_functions.get(req.aggregate_type.lower())
    if aggregate_fn is None:
        raise HTTPException(status_code=400, detail="Invalid aggregate type")
    req.aggregate_type = aggregate_fn
    logger.info(
        f"{'* ' * 50}\n\nFinal Aggregate Type: {req.aggregate_type}\n\n{'* ' * 50}"
    )
    # Validate identifiers against known column names (see top_employees).
    if req.grouping not in {"department", "location", "position"}:
        raise HTTPException(status_code=400, detail="Invalid grouping")
    if req.aggregate_criteria not in {"salary", "years_of_experience", "performance_score"}:
        raise HTTPException(status_code=400, detail="Invalid aggregate criteria")
    query = f"""
    SELECT {req.grouping}, {req.aggregate_type}({req.aggregate_criteria}) as {req.aggregate_type}_{req.aggregate_criteria}
    FROM employees
    GROUP BY {req.grouping};
    """
    result_df = pd.read_sql_query(query, conn)
    return result_df.to_dict(orient="records")
# 1. Top Employees by Performance, Projects, and Timeframe
class TopEmployeesProjects(BaseModel):
    """Request body for POST /top_employees_projects."""
    min_performance_score: float  # lower bound on employee performance score
    min_years_experience: int     # lower bound on years of experience
    department: str               # department to restrict results to
    min_project_count: int | None = None  # Optional
    months_range: int | None = None  # Optional (for filtering recent projects)
@app.post("/top_employees_projects")
async def employees_projects(req: TopEmployeesProjects, res: Response):
    """Top employees by performance, filtered by experience, department and
    (optionally) project participation within a recent timeframe.

    All request values are bound as SQL parameters. The project-count
    constraint is applied in HAVING — an aggregate like COUNT() is not
    valid in a WHERE clause, so the previous placement made the query fail
    whenever ``min_project_count`` was supplied.

    NOTE(review): the gateway config routes this target to
    ``/employees_projects`` — confirm the path matches this route.
    """
    params = {
        "min_performance_score": req.min_performance_score,
        "min_years_experience": req.min_years_experience,
        "department": req.department,
    }
    # Optional recent-projects filter; the DATE() modifier string is bound
    # as a parameter rather than interpolated.
    where_extra = ""
    if req.months_range is not None:
        params["months_offset"] = f"-{req.months_range} months"
        where_extra = "AND p.start_date >= DATE('now', :months_offset)"
    # Optional project-count filter; aggregates belong in HAVING.
    having_extra = ""
    if req.min_project_count is not None:
        params["min_project_count"] = req.min_project_count
        having_extra = "HAVING COUNT(p.project_id) >= :min_project_count"
    query = f"""
    SELECT e.name, e.department, e.years_of_experience, e.performance_score, COUNT(p.project_id) as project_count
    FROM employees e
    LEFT JOIN projects p ON e.eid = p.eid
    WHERE e.performance_score >= :min_performance_score
    AND e.years_of_experience >= :min_years_experience
    AND e.department = :department
    {where_extra}
    GROUP BY e.eid, e.name, e.department, e.years_of_experience, e.performance_score
    {having_extra}
    ORDER BY e.performance_score DESC;
    """
    result_df = pd.read_sql_query(query, conn, params=params)
    return result_df.to_dict(orient="records")
# 2. Employees with Salary Growth Since Last Promotion
class SalaryGrowthRequest(BaseModel):
    """Request body for POST /salary_growth."""
    min_salary_increase_percentage: float  # lower bound on salary increase since last promotion
    department: str | None = None  # Optional
@app.post("/salary_growth")
async def salary_growth(req: SalaryGrowthRequest, res: Response):
    """Employees whose last promotion raised their salary by at least the
    requested percentage, optionally restricted to one department.

    All request values are bound as SQL parameters; nothing from the
    request is interpolated into the SQL text.
    """
    params = {"min_increase": req.min_salary_increase_percentage}
    filters = []
    if req.department:
        filters.append("e.department = :department")
        params["department"] = req.department
    where_clause = " AND ".join(filters)
    if where_clause:
        where_clause = "AND " + where_clause
    query = f"""
    SELECT e.name, e.department, s.salary_increase_percentage
    FROM employees e
    JOIN salary_history s ON e.eid = s.eid
    WHERE s.salary_increase_percentage >= :min_increase
    AND s.promotion_date IS NOT NULL
    {where_clause}
    ORDER BY s.salary_increase_percentage DESC;
    """
    result_df = pd.read_sql_query(query, conn, params=params)
    return result_df.to_dict(orient="records")
# 4. Employees with Promotions and Salary Increases
class PromotionsIncreasesRequest(BaseModel):
    """Request body for POST /promotions_increases."""
    year: int  # calendar year of the promotion / salary increase
    min_salary_increase_percentage: float | None = None  # Optional
    department: str | None = None  # Optional
@app.post("/promotions_increases")
async def promotions_increases(req: PromotionsIncreasesRequest, res: Response):
    """Employees promoted with a salary increase in a given year, with
    optional minimum-increase and department filters.

    All request values are bound as SQL parameters.
    """
    # strftime('%Y', ...) yields a string, so bind the year as text.
    params = {"year": str(req.year)}
    filters = []
    # `is not None` (not truthiness) so an explicit 0.0 threshold is honored.
    if req.min_salary_increase_percentage is not None:
        filters.append("s.salary_increase_percentage >= :min_increase")
        params["min_increase"] = req.min_salary_increase_percentage
    if req.department:
        filters.append("e.department = :department")
        params["department"] = req.department
    where_clause = " AND ".join(filters)
    if where_clause:
        where_clause = "AND " + where_clause
    query = f"""
    SELECT e.name, e.department, s.salary_increase_percentage, s.promotion_date
    FROM employees e
    JOIN salary_history s ON e.eid = s.eid
    WHERE strftime('%Y', s.promotion_date) = :year
    {where_clause}
    ORDER BY s.salary_increase_percentage DESC;
    """
    result_df = pd.read_sql_query(query, conn, params=params)
    return result_df.to_dict(orient="records")
# 5. Employees with Highest Average Project Performance
class AvgProjPerformanceRequest(BaseModel):
    """Request body for POST /avg_project_performance."""
    min_project_count: int        # minimum number of projects per employee
    min_performance_score: float  # lower bound on per-project performance score
    department: str | None = None  # Optional
@app.post("/avg_project_performance")
async def avg_project_performance(req: AvgProjPerformanceRequest, res: Response):
    """Employees with the highest average project performance, filtered by
    minimum project count, per-project score, and optionally department.

    All request values are bound as SQL parameters.
    """
    params = {
        "min_performance_score": req.min_performance_score,
        "min_project_count": req.min_project_count,
    }
    filters = ["p.performance_score >= :min_performance_score"]
    if req.department:
        filters.append("e.department = :department")
        params["department"] = req.department
    where_clause = " AND ".join(filters)
    query = f"""
    SELECT e.name, e.department, AVG(p.performance_score) as avg_performance_score, COUNT(p.project_id) as project_count
    FROM employees e
    JOIN projects p ON e.eid = p.eid
    WHERE {where_clause}
    GROUP BY e.eid, e.name, e.department
    HAVING COUNT(p.project_id) >= :min_project_count
    ORDER BY avg_performance_score DESC;
    """
    result_df = pd.read_sql_query(query, conn, params=params)
    return result_df.to_dict(orient="records")
# 6. Employees by Certification and Years of Experience
class CertificationsExperienceRequest(BaseModel):
    """Request body for POST /employees_certifications_experience."""
    certifications: List[str]  # employee must hold every certification listed
    min_years_experience: int  # lower bound on years of experience
    department: str | None = None  # Optional
@app.post("/employees_certifications_experience")
async def certifications_experience(req: CertificationsExperienceRequest, res: Response):
    """Employees holding ALL requested certifications with at least the
    required years of experience, optionally restricted to one department.

    Each certification is bound as its own named SQL parameter instead of
    being quoted into the SQL text. COUNT(DISTINCT ...) is used so a
    duplicate certification row for one employee cannot satisfy the
    "has all N certifications" check.

    NOTE(review): the gateway config routes this target to
    ``/certifications_experience`` — confirm the path matches this route.
    """
    # One named placeholder per certification: :cert0, :cert1, ...
    cert_params = {f"cert{i}": cert for i, cert in enumerate(req.certifications)}
    placeholders = ", ".join(f":{name}" for name in cert_params)
    params = {"min_years_experience": req.min_years_experience, **cert_params}
    params["cert_count"] = len(req.certifications)
    filters = ["e.years_of_experience >= :min_years_experience"]
    if req.department:
        filters.append("e.department = :department")
        params["department"] = req.department
    where_clause = " AND ".join(filters)
    query = f"""
    SELECT e.name, e.department, e.years_of_experience, COUNT(DISTINCT c.certification_name) as cert_count
    FROM employees e
    JOIN certifications c ON e.eid = c.eid
    WHERE c.certification_name IN ({placeholders})
    AND {where_clause}
    GROUP BY e.eid, e.name, e.department, e.years_of_experience
    HAVING COUNT(DISTINCT c.certification_name) = :cert_count
    ORDER BY e.years_of_experience DESC;
    """
    result_df = pd.read_sql_query(query, conn, params=params)
    return result_df.to_dict(orient="records")

View file

@ -0,0 +1,157 @@
import pandas as pd
import random
import datetime
import sqlite3
def load_sql():
    """Create an in-memory SQLite database populated with demo HR data.

    Returns:
        sqlite3.Connection: connection holding the employees, projects,
        salary_history and certifications tables.
    """
    connection = sqlite3.connect(":memory:")
    # Populate each demo table in dependency order: employees must exist
    # first, since the other generators sample eids from it.
    for populate in (
        generate_employee_data,
        generate_project_data,
        generate_salary_history,
        generate_certifications,
    ):
        populate(connection)
    return connection
# Function to generate random employee data with `eid` as the primary key
def generate_employee_data(conn):
    """Create and populate the `employees` table with 100 random rows.

    Each row uses `eid` (1..100) as the primary identifier plus randomly
    chosen name, position, salary, department, location, hire date,
    performance score, and years of experience.

    Args:
        conn: open sqlite3 connection; any existing `employees` table is
            replaced.
    """
    # Pools of values to sample from. (The names/positions lists were
    # previously defined twice and an unused certifications list was
    # declared here; the dead duplicates have been removed.)
    names = ["Alice", "Bob", "Charlie", "David", "Eve", "Frank", "Grace", "Hank", "Ivy", "Jack"]
    positions = ["Manager", "Engineer", "Salesperson", "HR Specialist", "Marketing Analyst"]
    departments = ["Engineering", "Marketing", "HR", "Sales", "Finance"]
    locations = ["New York", "San Francisco", "Austin", "Boston", "Chicago"]

    def random_hire_date():
        # Uniformly random date in [2000-01-01, 2023-12-31).
        start_date = datetime.date(2000, 1, 1)
        end_date = datetime.date(2023, 12, 31)
        days_between_dates = (end_date - start_date).days
        return start_date + datetime.timedelta(days=random.randrange(days_between_dates))

    # Generate random employee records with an employee ID (eid).
    employees = []
    for eid in range(1, 101):  # 100 employees with `eid` starting from 1
        employees.append({
            "eid": eid,  # Employee ID
            "name": random.choice(names),
            "position": random.choice(positions),
            "salary": round(random.uniform(50000, 150000), 2),  # 50,000 - 150,000
            "department": random.choice(departments),
            "location": random.choice(locations),
            "hire_date": random_hire_date(),
            "performance_score": round(random.uniform(1, 5), 2),  # 1.0 - 5.0
            "years_of_experience": random.randint(1, 30),  # 1 - 30
        })
    # Convert the list of dictionaries to a DataFrame and save to DB.
    df_employees = pd.DataFrame(employees)
    df_employees.to_sql('employees', conn, index=False, if_exists='replace')
# Function to generate random project data with `eid`
def generate_project_data(conn):
    """Create and populate the `projects` table with 500 random rows.

    Each row links a random existing employee (by eid) to a project name,
    a start date within the last 3 years, and a performance score.

    Args:
        conn: open sqlite3 connection with an `employees` table already
            loaded; any existing `projects` table is replaced.
    """
    eids = pd.read_sql_query("SELECT eid FROM employees", conn)
    base_date = datetime.date(2020, 1, 1)
    records = [
        {
            "eid": random.choice(eids["eid"]),  # Foreign key from employees table
            "project_name": f"Project_{random.randint(1, 100)}",
            # Start date within the last 3 years.
            "start_date": base_date + datetime.timedelta(days=random.randint(0, 365 * 3)),
            # Per-project performance score between 1.0 and 5.0.
            "performance_score": round(random.uniform(1, 5), 2),
        }
        for _ in range(500)
    ]
    pd.DataFrame(records).to_sql("projects", conn, index=False, if_exists="replace")
# Function to generate random salary history data with `eid`
def generate_salary_history(conn):
    """Create and populate the `salary_history` table with 300 random rows.

    Each row links a random existing employee (by eid) to a promotion date
    within the last 5 years and a salary increase between 5% and 30%.

    Args:
        conn: open sqlite3 connection with an `employees` table already
            loaded; any existing `salary_history` table is replaced.
    """
    eids = pd.read_sql_query("SELECT eid FROM employees", conn)
    earliest_promotion = datetime.date(2018, 1, 1)
    records = [
        {
            "eid": random.choice(eids["eid"]),  # Foreign key from employees table
            # Salary increase between 5% and 30%.
            "salary_increase_percentage": round(random.uniform(5, 30), 2),
            # Promotion date within the last 5 years.
            "promotion_date": earliest_promotion + datetime.timedelta(days=random.randint(0, 365 * 5)),
        }
        for _ in range(300)
    ]
    pd.DataFrame(records).to_sql("salary_history", conn, index=False, if_exists="replace")
# Function to generate random certifications data with `eid`
def generate_certifications(conn):
    """Create and populate the `certifications` table with 300 random rows.

    Each row links a random existing employee (by eid) to one certification
    name; the same employee/certification pair may occur more than once.

    Args:
        conn: open sqlite3 connection with an `employees` table already
            loaded; any existing `certifications` table is replaced.
    """
    eids = pd.read_sql_query("SELECT eid FROM employees", conn)
    cert_names = ["AWS Certified", "Google Cloud Certified", "PMP", "Scrum Master", "Cisco Certified"]
    records = [
        {
            "eid": random.choice(eids["eid"]),  # Foreign key from employees table
            "certification_name": random.choice(cert_names),
        }
        for _ in range(300)
    ]
    pd.DataFrame(records).to_sql("certifications", conn, index=False, if_exists="replace")

View file

@ -0,0 +1,4 @@
fastapi
uvicorn
pandas
dateparser

View file

@ -4,7 +4,7 @@ timeout_ms: 5000
overrides: overrides:
# confidence threshold for prompt target intent matching # confidence threshold for prompt target intent matching
prompt_target_intent_matching_threshold: 0.8 prompt_target_intent_matching_threshold: 0.7
llm_providers: llm_providers:
@ -30,15 +30,15 @@ prompt_targets:
required: true required: true
type: string type: string
description: | description: |
Choose how you'd like to rank the employees. You can rank them by their salary, their yoe, or their rating. The tool will sort the employees based on this ranking and return the best ones from each group. Choose how you'd like to rank the employees. You can rank them by their salary, their years of experience, or their rating. The tool will sort the employees based on this ranking and return the best ones from each group.
enum: [salary, yoe, rating] enum: [salary, years_of_experience, performance_score]
- name: top_n - name: top_n
required: true required: true
type: integer type: integer
description: | description: |
Enter how many of the top employees you want to see in each group. For example, if you enter 3, the tool will show you the top 3 employees for each group you selected. Enter how many of the top employees you want to see in each group. For example, if you enter 3, the tool will show you the top 3 employees for each group you selected.
endpoint: endpoint:
cluster: databasehost cluster: api_server
path: /top_employees path: /top_employees
system_prompt: | system_prompt: |
You are responsible for retrieving the top N employees per group ranked by a constraint. You are responsible for retrieving the top N employees per group ranked by a constraint.
@ -46,7 +46,7 @@ prompt_targets:
- type: function_resolver - type: function_resolver
name: aggregate_stats name: aggregate_stats
description: | description: |
Calculate summary statistics for groups of employees. You can group employees by categories like department or location and then compute totals, averages, or other statistics for specific attributes such as salary or yoe. Calculate summary statistics for groups of employees. You can group employees by categories like department or location and then compute totals, averages, or other statistics for specific attributes such as salary or years of experience.
parameters: parameters:
- name: grouping - name: grouping
description: | description: |
@ -55,20 +55,143 @@ prompt_targets:
enum: [department, location, position] enum: [department, location, position]
- name: aggregate_criteria - name: aggregate_criteria
description: | description: |
Select the specific attribute you'd like to analyze. This could be something like salary, yoe, or rating. The tool will calculate the statistic you request for this attribute. Select the specific attribute you'd like to analyze. This could be something like salary, years of experience, or rating. The tool will calculate the statistic you request for this attribute.
required: true required: true
enum: [salary, yoe, rating] enum: [salary, years_of_experience, performance_score]
- name: aggregate_type - name: aggregate_type
description: | description: |
Choose the type of statistic you'd like to calculate for the selected attribute. For example, you can calculate the sum, average, minimum, or maximum value for each group. Choose the type of statistic you'd like to calculate for the selected attribute. For example, you can calculate the sum, average, minimum, or maximum value for each group.
required: true required: true
enum: [SUM, AVG, MIN, MAX] enum: [SUM, AVG, MIN, MAX]
endpoint: endpoint:
cluster: databasehost cluster: api_server
path: /aggregate_stats path: /aggregate_stats
system_prompt: | system_prompt: |
You help calculate summary statistics for groups of employees. First, organize the employees by the specified grouping (e.g., department, location, or position). Then, compute the requested statistic (e.g., total, average, minimum, or maximum) for a specific attribute like salary, experience, or rating. You help calculate summary statistics for groups of employees. First, organize the employees by the specified grouping (e.g., department, location, or position). Then, compute the requested statistic (e.g., total, average, minimum, or maximum) for a specific attribute like salary, experience, or rating.
clusters: # 1. Top Employees by Performance, Projects, and Timeframe
databasehost: - type: function_resolver
address: model_server name: employees_projects
description: |
Fetch employees with the highest performance scores, considering their project participation and years of experience. You can filter by minimum performance score, years of experience, and department. Optionally, you can also filter by recent project participation within the last Y months.
parameters:
- name: min_performance_score
description: Minimum performance score to filter employees.
required: true
type: float
- name: min_years_experience
description: Minimum years of experience to filter employees.
required: true
type: integer
- name: department
description: Department to filter employees by.
required: true
type: string
- name: min_project_count
description: Minimum number of projects employees participated in (optional).
required: false
type: integer
- name: months_range
description: Timeframe (in months) for filtering recent projects (optional).
required: false
type: integer
endpoint:
cluster: api_server
path: /employees_projects
system_prompt: |
You are responsible for retrieving the top N employees ranked by performance and project participation. Use filters for experience and optional project criteria.
# 2. Employees with Salary Growth Since Last Promotion
- type: function_resolver
name: salary_growth
description: |
Fetch employees with the highest salary growth since their last promotion, grouped by department. You can filter by a minimum salary increase percentage and department.
parameters:
- name: min_salary_increase_percentage
description: Minimum percentage increase in salary since the last promotion.
required: true
type: float
- name: department
description: Department to filter employees by (optional).
required: false
type: string
endpoint:
cluster: api_server
path: /salary_growth
system_prompt: |
You are responsible for retrieving employees with the highest salary growth since their last promotion. Filter by minimum salary increase percentage and department.
# 4. Employees with Promotions and Salary Increases by Year
- type: function_resolver
name: promotions_increases
description: |
Fetch employees who were promoted and received a salary increase in a specific year, grouped by department. You can optionally filter by minimum percentage salary increase and department.
parameters:
- name: year
description: The year in which the promotion and salary increase occurred.
required: true
type: integer
- name: min_salary_increase_percentage
description: Minimum percentage salary increase to filter employees.
required: false
type: float
- name: department
description: Department to filter by (optional).
required: false
type: string
endpoint:
cluster: api_server
path: /promotions_increases
system_prompt: |
You are responsible for fetching employees who were promoted and received a salary increase in a specific year. Apply filters for salary increase percentage and department.
# 5. Employees with Highest Average Project Performance
- type: function_resolver
name: avg_project_performance
description: |
Fetch employees with the highest average performance across all projects they have worked on over time. You can filter by minimum project count, department, and minimum performance score.
parameters:
- name: min_project_count
description: Minimum number of projects an employee must have participated in.
required: true
type: integer
- name: min_performance_score
description: Minimum performance score to filter employees.
required: true
type: float
- name: department
description: Department to filter by (optional).
required: false
type: string
endpoint:
cluster: api_server
path: /avg_project_performance
system_prompt: |
You are responsible for fetching employees with the highest average performance across all projects they've worked on. Apply filters for minimum project count, performance score, and department.
# 6. Employees by Certification and Years of Experience
- type: function_resolver
name: certifications_experience
description: |
Fetch employees who have all the required certifications and meet the minimum years of experience. You can filter by department and provide a list of certifications to match.
parameters:
- name: certifications
description: List of required certifications.
required: true
type: list
- name: min_years_experience
description: Minimum years of experience.
required: true
type: integer
- name: department
description: Department to filter employees by (optional).
required: false
type: string
endpoint:
cluster: api_server
path: /certifications_experience
system_prompt: |
You are responsible for fetching employees who have the required certifications and meet the minimum years of experience. Optionally, filter by department.

View file

@ -40,6 +40,18 @@ services:
retries: 20 retries: 20
volumes: volumes:
- ~/.cache/huggingface:/root/.cache/huggingface - ~/.cache/huggingface:/root/.cache/huggingface
- ./bolt_config.yaml:/root/bolt_config.yaml
api_server:
build:
context: api_server
dockerfile: Dockerfile
ports:
- "18083:80"
healthcheck:
test: ["CMD", "curl" ,"http://localhost:80/healthz"]
interval: 5s
retries: 20
function_resolver: function_resolver:
build: build:
@ -58,6 +70,7 @@ services:
- OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT:-host.docker.internal} - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT:-host.docker.internal}
# uncomment following line to use ollama endpoint that is hosted by docker # uncomment following line to use ollama endpoint that is hosted by docker
# - OLLAMA_ENDPOINT=ollama # - OLLAMA_ENDPOINT=ollama
- OLLAMA_MODEL=Bolt-Function-Calling-1B:Q4_K_M
ollama: ollama:
image: ollama/ollama image: ollama/ollama
@ -85,6 +98,8 @@ services:
extra_hosts: extra_hosts:
- host.docker.internal:host-gateway - host.docker.internal:host-gateway
restart: unless-stopped restart: unless-stopped
profiles:
- monitoring
chatbot_ui: chatbot_ui:
build: build:
@ -94,7 +109,7 @@ services:
- "18080:8080" - "18080:8080"
environment: environment:
- OPENAI_API_KEY=${OPENAI_API_KEY:?error} - OPENAI_API_KEY=${OPENAI_API_KEY:?error}
- CHAT_COMPLETION_ENDPOINT=http://bolt:10000/v1/chat/completions - CHAT_COMPLETION_ENDPOINT=http://bolt:10000/v1
prometheus: prometheus:
image: prom/prometheus image: prom/prometheus

View file

@ -0,0 +1,12 @@
apiVersion: 1
providers:
- name: "Dashboard provider"
orgId: 1
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: false
options:
path: /var/lib/grafana/dashboards
foldersFromFilesStructure: true

View file

@ -0,0 +1,355 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_internal_upstream_rq_time_sum[1m]) / rate(envoy_cluster_internal_upstream_rq_time_count[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "request latency - internal (ms)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 1,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_external_upstream_rq_time_sum[1m]) / rate(envoy_cluster_external_upstream_rq_time_count[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "request latency - external (ms)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 3,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_internal_upstream_rq_completed[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_external_upstream_rq_completed[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "B",
"useBackend": false
}
],
"title": "Upstream request count",
"type": "timeseries"
}
],
"schemaVersion": 39,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-15m",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Intelligent Gateway Overview",
"uid": "adt6uhx5lk8aob",
"version": 3,
"weekStart": ""
}

View file

@ -0,0 +1,9 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus:9090
isDefault: true
access: proxy
editable: true

View file

@ -0,0 +1,23 @@
global:
scrape_interval: 15s
scrape_timeout: 10s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets: []
scheme: http
timeout: 10s
api_version: v1
scrape_configs:
- job_name: envoy
honor_timestamps: true
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /stats
scheme: http
static_configs:
- targets:
- bolt:9901
params:
format: ['prometheus']

View file

@ -1,7 +1,7 @@
# Function calling # Function calling
This demo shows how you can use intelligent prompt gateway to do function calling. This demo assumes you are using ollama running natively. If you want to run ollama running inside docker then please update ollama endpoint in docker-compose file. This demo shows how you can use intelligent prompt gateway to do function calling. This demo assumes you are using ollama running natively. If you want to run ollama running inside docker then please update ollama endpoint in docker-compose file.
# Starting the demo # Starting the demo
1. Ensure that submodule is up to date 1. Ensure that submodule is up to date
```sh ```sh
git submodule sync --recursive git submodule sync --recursive

View file

@ -0,0 +1,25 @@
FROM Bolt-Function-Calling-1B-Q3_K_L.gguf
# Set the size of the context window used to generate the next token
# PARAMETER num_ctx 16384
PARAMETER num_ctx 4096
# Set parameters for response generation
PARAMETER num_predict 1024
PARAMETER temperature 0.1
PARAMETER top_p 0.5
PARAMETER top_k 32022
PARAMETER repeat_penalty 1.0
PARAMETER stop "<|EOT|>"
# Set the random number seed to use for generation
PARAMETER seed 42
# Set the prompt template to be passed into the model
TEMPLATE """{{ if .System }}<begin▁of▁sentence>
{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}
{{ end }}### Response:
{{ .Response }}
<|EOT|>"""

View file

@ -0,0 +1,24 @@
FROM Bolt-Function-Calling-1B-Q4_K_M.gguf
# Set the size of the context window used to generate the next token
PARAMETER num_ctx 4096
# Set parameters for response generation
PARAMETER num_predict 1024
PARAMETER temperature 0.1
PARAMETER top_p 0.5
PARAMETER top_k 32022
PARAMETER repeat_penalty 1.0
PARAMETER stop "<|EOT|>"
# Set the random number seed to use for generation
PARAMETER seed 42
# Set the prompt template to be passed into the model
TEMPLATE """{{ if .System }}<begin▁of▁sentence>
{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}
{{ end }}### Response:
{{ .Response }}
<|EOT|>"""

View file

@ -1,7 +1,7 @@
# Function calling # Function calling
This demo shows how you can use intelligent prompt gateway as a network copilot that could give information about correlation between packet loss and device reboots, downs, or maintenance. This demo assumes you are using ollama running natively. If you want to run ollama running inside docker then please update ollama endpoint in docker-compose file. This demo shows how you can use intelligent prompt gateway as a network copilot that could give information about correlation between packet loss and device reboots, downs, or maintenance. This demo assumes you are using ollama running natively. If you want to run ollama running inside docker then please update ollama endpoint in docker-compose file.
# Starting the demo # Starting the demo
1. Ensure that submodule is up to date 1. Ensure that submodule is up to date
```sh ```sh
git submodule sync --recursive git submodule sync --recursive

View file

@ -0,0 +1,16 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "function-calling api server",
"cwd": "${workspaceFolder}/app",
"type": "debugpy",
"request": "launch",
"module": "uvicorn",
"args": ["main:app","--reload", "--port", "8001"],
}
]
}

View file

@ -0,0 +1,19 @@
# Multi-stage build: install dependencies into an isolated prefix using the
# full Python image, then copy only the runtime artifacts into a slim image.
FROM python:3 AS base
FROM base AS builder
WORKDIR /src
COPY requirements.txt /src/
# --prefix=/runtime installs the packages into a self-contained tree that can
# be copied verbatim into /usr/local of the slim output stage below.
RUN pip install --prefix=/runtime --force-reinstall -r requirements.txt
COPY . /src
FROM python:3-slim AS output
COPY --from=builder /runtime /usr/local
COPY /app /app
WORKDIR /app
# Serve the FastAPI app (app/main.py) on port 80 inside the container.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80"]

View file

@ -0,0 +1,184 @@
from fastapi import FastAPI, Response
from datetime import datetime, timezone
import logging
from pydantic import BaseModel
from utils import load_sql, load_params
import pandas as pd
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
app = FastAPI()
@app.get("/healthz")
async def healthz():
return {
"status": "ok"
}
# Shared in-memory SQLite connection, built once at import time and populated
# with synthetic device/interface/flow data (see utils.load_sql).
conn = load_sql()
# NOTE(review): `name_col` is not referenced anywhere in this module —
# presumably left over or reserved for future use; confirm before removing.
name_col = "name"
class PacketDropCorrelationRequest(BaseModel):
    """Optional filter set for correlating packet drops with interface events.

    Every field is optional; unset fields are simply omitted from the SQL
    WHERE clause built by `load_params`.
    """

    from_time: str = None  # Optional natural language timeframe
    ifname: str = None  # Optional interface name filter
    region: str = None  # Optional region filter
    min_in_errors: int = None  # Lower bound on interface input errors
    max_in_errors: int = None  # Upper bound on interface input errors
    min_out_errors: int = None  # Lower bound on interface output errors
    max_out_errors: int = None  # Upper bound on interface output errors
    min_in_discards: int = None  # Lower bound on interface input discards
    max_in_discards: int = None  # Upper bound on interface input discards
    min_out_discards: int = None  # Lower bound on interface output discards
    max_out_discards: int = None  # Upper bound on interface output discards
@app.post("/interface_down_pkt_drop")
async def interface_down_packet_drop(req: PacketDropCorrelationRequest, res: Response):
params, filters = load_params(req)
# Join the filters using AND
where_clause = " AND ".join(filters)
if where_clause:
where_clause = "AND " + where_clause
# Step 3: Query packet errors and flows from interfacestats and ts_flow
query = f"""
SELECT
d.switchip AS device_ip_address,
i.in_errors,
i.in_discards,
i.out_errors,
i.out_discards,
i.ifname,
t.src_addr,
t.dst_addr,
t.time AS flow_time,
i.time AS interface_time
FROM
device d
INNER JOIN
interfacestats i
ON d.device_mac_address = i.device_mac_address
INNER JOIN
ts_flow t
ON d.switchip = t.sampler_address
WHERE
i.time >= :from_time -- Using the converted timestamp
{where_clause}
ORDER BY
i.time;
"""
correlated_data = pd.read_sql_query(query, conn, params=params)
if correlated_data.empty:
default_response = {
"device_ip_address": "0.0.0.0", # Placeholder IP
"in_errors": 0,
"in_discards": 0,
"out_errors": 0,
"out_discards": 0,
"ifname": req.ifname
or "unknown", # Placeholder or interface provided in the request
"src_addr": "0.0.0.0", # Placeholder source IP
"dst_addr": "0.0.0.0", # Placeholder destination IP
"flow_time": str(
datetime.now(timezone.utc)
), # Current timestamp or placeholder
"interface_time": str(
datetime.now(timezone.utc)
), # Current timestamp or placeholder
}
return [default_response]
logger.info(f"Correlated Packet Drop Data: {correlated_data}")
return correlated_data.to_dict(orient='records')
class FlowPacketErrorCorrelationRequest(BaseModel):
    """Optional filter set for correlating packet errors with flow records.

    Mirrors PacketDropCorrelationRequest; unset fields are omitted from the
    SQL WHERE clause built by `load_params`.
    """

    from_time: str = None  # Optional natural language timeframe
    ifname: str = None  # Optional interface name filter
    region: str = None  # Optional region filter
    min_in_errors: int = None  # Lower bound on interface input errors
    max_in_errors: int = None  # Upper bound on interface input errors
    min_out_errors: int = None  # Lower bound on interface output errors
    max_out_errors: int = None  # Upper bound on interface output errors
    min_in_discards: int = None  # Lower bound on interface input discards
    max_in_discards: int = None  # Upper bound on interface input discards
    min_out_discards: int = None  # Lower bound on interface output discards
    max_out_discards: int = None  # Upper bound on interface output discards
@app.post("/packet_errors_impact_flow")
async def packet_errors_impact_flow(
req: FlowPacketErrorCorrelationRequest, res: Response
):
params, filters = load_params(req)
# Join the filters using AND
where_clause = " AND ".join(filters)
if where_clause:
where_clause = "AND " + where_clause
# Step 3: Query the packet errors and flows, correlating by timestamps
query = f"""
SELECT
d.switchip AS device_ip_address,
i.in_errors,
i.in_discards,
i.out_errors,
i.out_discards,
i.ifname,
t.src_addr,
t.dst_addr,
t.src_port,
t.dst_port,
t.packets,
t.time AS flow_time,
i.time AS error_time
FROM
device d
INNER JOIN
interfacestats i
ON d.device_mac_address = i.device_mac_address
INNER JOIN
ts_flow t
ON d.switchip = t.sampler_address
WHERE
i.time >= :from_time
AND ABS(strftime('%s', t.time) - strftime('%s', i.time)) <= 300 -- Correlate within 5 minutes
{where_clause}
ORDER BY
i.time;
"""
correlated_data = pd.read_sql_query(query, conn, params=params)
if correlated_data.empty:
default_response = {
"device_ip_address": "0.0.0.0", # Placeholder IP
"in_errors": 0,
"in_discards": 0,
"out_errors": 0,
"out_discards": 0,
"ifname": req.ifname
or "unknown", # Placeholder or interface provided in the request
"src_addr": "0.0.0.0", # Placeholder source IP
"dst_addr": "0.0.0.0", # Placeholder destination IP
"src_port": 0,
"dst_port": 0,
"packets": 0,
"flow_time": str(
datetime.now(timezone.utc)
), # Current timestamp or placeholder
"error_time": str(
datetime.now(timezone.utc)
), # Current timestamp or placeholder
}
return [default_response]
# Return the correlated data if found
return correlated_data.to_dict(orient='records')

View file

@ -0,0 +1,247 @@
import pandas as pd
import random
from datetime import datetime, timedelta, timezone
import re
import logging
from dateparser import parse
import sqlite3
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
def load_sql():
    """Build an in-memory SQLite database populated with synthetic telemetry.

    Creates and fills the `device`, `interfacestats` and `ts_flow` tables,
    then returns the open connection for the API layer to query.
    """
    connection = sqlite3.connect(":memory:")
    # Devices come first: the other two tables key off their MACs / IPs.
    devices = generate_device_data(connection)
    generate_interface_stats_data(connection, devices)
    generate_flow_data(connection, devices)
    return connection
# Function to convert natural language time expressions to "X {time} ago" format
def convert_to_ago_format(expression):
# Define patterns for different time units
time_units = {
r"seconds": "seconds",
r"minutes": "minutes",
r"mins": "mins",
r"hrs": "hrs",
r"hours": "hours",
r"hour": "hour",
r"hr": "hour",
r"days": "days",
r"day": "day",
r"weeks": "weeks",
r"week": "week",
r"months": "months",
r"month": "month",
r"years": "years",
r"yrs": "years",
r"year": "year",
r"yr": "year",
}
# Iterate over each time unit and create regex for each phrase format
for pattern, unit in time_units.items():
# Handle "for the past X {unit}"
match = re.search(rf"(\d+) {pattern}", expression)
if match:
quantity = match.group(1)
return f"{quantity} {unit} ago"
# If the format is not recognized, return None or raise an error
return None
# Function to generate random MAC addresses
def random_mac():
return "AA:BB:CC:DD:EE:" + ":".join(
[f"{random.randint(0, 255):02X}" for _ in range(2)]
)
# Function to generate random IP addresses
def random_ip():
return f"{random.randint(1, 255)}.{random.randint(1, 255)}.{random.randint(1, 255)}.{random.randint(1, 255)}"
# Generate synthetic data for the device table
def generate_device_data(
conn,
n=1000,
):
device_data = {
"switchip": [random_ip() for _ in range(n)],
"hwsku": [f"HW{i+1}" for i in range(n)],
"hostname": [f"switch{i+1}" for i in range(n)],
"osversion": [f"v{i+1}" for i in range(n)],
"layer": ["L2" if i % 2 == 0 else "L3" for i in range(n)],
"region": [random.choice(["US", "EU", "ASIA"]) for _ in range(n)],
"uptime": [
f"{random.randint(0, 10)} days {random.randint(0, 23)}:{random.randint(0, 59)}:{random.randint(0, 59)}"
for _ in range(n)
],
"device_mac_address": [random_mac() for _ in range(n)],
}
df = pd.DataFrame(device_data)
df.to_sql("device", conn, index=False)
return df
# Generate synthetic data for the interfacestats table
def generate_interface_stats_data(conn, device_df, n=1000):
interface_stats_data = []
for _ in range(n):
device_mac = random.choice(device_df["device_mac_address"])
ifname = random.choice(["eth0", "eth1", "eth2", "eth3"])
time = datetime.now(timezone.utc) - timedelta(
minutes=random.randint(0, 1440 * 5)
) # random timestamps in the past 5 day
in_discards = random.randint(0, 1000)
in_errors = random.randint(0, 500)
out_discards = random.randint(0, 800)
out_errors = random.randint(0, 400)
in_octets = random.randint(1000, 100000)
out_octets = random.randint(1000, 100000)
interface_stats_data.append(
{
"device_mac_address": device_mac,
"ifname": ifname,
"time": time,
"in_discards": in_discards,
"in_errors": in_errors,
"out_discards": out_discards,
"out_errors": out_errors,
"in_octets": in_octets,
"out_octets": out_octets,
}
)
df = pd.DataFrame(interface_stats_data)
df.to_sql("interfacestats", conn, index=False)
return
# Generate synthetic data for the ts_flow table
def generate_flow_data(conn, device_df, n=1000):
flow_data = []
for _ in range(n):
sampler_address = random.choice(device_df["switchip"])
proto = random.choice(["TCP", "UDP"])
src_addr = random_ip()
dst_addr = random_ip()
src_port = random.randint(1024, 65535)
dst_port = random.randint(1024, 65535)
in_if = random.randint(1, 10)
out_if = random.randint(1, 10)
flow_start = int(
(datetime.now() - timedelta(days=random.randint(1, 30))).timestamp()
)
flow_end = int(
(datetime.now() - timedelta(days=random.randint(1, 30))).timestamp()
)
bytes_transferred = random.randint(1000, 100000)
packets = random.randint(1, 1000)
flow_time = datetime.now(timezone.utc) - timedelta(
minutes=random.randint(0, 1440 * 5)
) # random flow time
flow_data.append(
{
"sampler_address": sampler_address,
"proto": proto,
"src_addr": src_addr,
"dst_addr": dst_addr,
"src_port": src_port,
"dst_port": dst_port,
"in_if": in_if,
"out_if": out_if,
"flow_start": flow_start,
"flow_end": flow_end,
"bytes": bytes_transferred,
"packets": packets,
"time": flow_time,
}
)
df = pd.DataFrame(flow_data)
df.to_sql("ts_flow", conn, index=False)
return
def load_params(req):
    """Build SQL bind parameters and filter clauses from a request object.

    Returns a tuple ``(params, filters)``: ``params`` maps bind names to
    values (always containing ``from_time``), and ``filters`` is a list of
    SQL predicates for the caller to AND into its WHERE clause.

    Raises:
        ValueError: if ``req.from_time`` cannot be parsed. (Previously this
            branch returned an error dict, which crashed every caller at the
            ``params, filters = load_params(req)`` unpack with an opaque
            ValueError; raising here makes the failure explicit and keeps the
            helpful message.)
    """
    # Step 1: Convert the from_time natural language string to a timestamp.
    if req.from_time:
        logger.info(f"{'* ' * 50}\n\nCaptured from time: {req.from_time}\n\n")
        parsed_time = parse(req.from_time, settings={"RELATIVE_BASE": datetime.now()})
        if not parsed_time:
            # dateparser failed on the raw phrase; rewrite wordings like
            # "past 7 days" into the "7 days ago" form it understands.
            conv_time = convert_to_ago_format(req.from_time)
            if conv_time:
                parsed_time = parse(
                    conv_time, settings={"RELATIVE_BASE": datetime.now()}
                )
        if not parsed_time:
            raise ValueError(
                "Invalid from_time format. Please provide a valid time "
                "description such as 'past 7 days' or 'since last month'."
            )
        logger.info(f"\n\nConverted from time: {parsed_time}\n\n{'* ' * 50}\n\n")
        from_time = parsed_time
        logger.info(f"Using parsed from_time: {from_time}")
    else:
        # If no from_time is provided, default to the past 7 days.
        from_time = datetime.now() - timedelta(days=7)
        logger.info(f"Using default from_time: {from_time}")

    # Step 2: Build the dynamic SQL filters based on the optional fields.
    filters = []
    params = {"from_time": from_time}
    if req.ifname:
        filters.append("i.ifname = :ifname")
        params["ifname"] = req.ifname
    if req.region:
        filters.append("d.region = :region")
        params["region"] = req.region

    # The eight optional numeric bounds all follow one pattern: map a request
    # attribute onto a comparison against the interfacestats column.
    bounds = [
        ("min_in_errors", "i.in_errors >="),
        ("max_in_errors", "i.in_errors <="),
        ("min_out_errors", "i.out_errors >="),
        ("max_out_errors", "i.out_errors <="),
        ("min_in_discards", "i.in_discards >="),
        ("max_in_discards", "i.in_discards <="),
        ("min_out_discards", "i.out_discards >="),
        ("max_out_discards", "i.out_discards <="),
    ]
    for attr, comparison in bounds:
        value = getattr(req, attr)
        if value is not None:  # 0 is a legitimate bound, so test against None
            filters.append(f"{comparison} :{attr}")
            params[attr] = value
    return params, filters

View file

@ -0,0 +1,4 @@
fastapi
uvicorn
pandas
dateparser

View file

@ -65,7 +65,7 @@ prompt_targets:
required: false required: false
type: integer type: integer
endpoint: endpoint:
cluster: databasehost cluster: api_server
path: /interface_down_packet_drop path: /interface_down_packet_drop
system_prompt: | system_prompt: |
You are responsible for correlating packet drops with interface down events by analyzing packet errors from the given data. You are responsible for correlating packet drops with interface down events by analyzing packet errors from the given data.
@ -120,11 +120,7 @@ prompt_targets:
required: false required: false
type: integer type: integer
endpoint: endpoint:
cluster: databasehost cluster: api_server
path: /packet_errors_impact_flow path: /packet_errors_impact_flow
system_prompt: | system_prompt: |
You are responsible for finding and correlating packet errors with the packet flows based on timestamps given in the data. This correlation helps identify if packet flows are impacted by packet errors. You are responsible for finding and correlating packet errors with the packet flows based on timestamps given in the data. This correlation helps identify if packet flows are impacted by packet errors.
clusters:
databasehost:
address: model_server

View file

@ -40,6 +40,18 @@ services:
retries: 20 retries: 20
volumes: volumes:
- ~/.cache/huggingface:/root/.cache/huggingface - ~/.cache/huggingface:/root/.cache/huggingface
- ./bolt_config.yaml:/root/bolt_config.yaml
api_server:
build:
context: api_server
dockerfile: Dockerfile
ports:
- "18083:80"
healthcheck:
test: ["CMD", "curl" ,"http://localhost:80/healthz"]
interval: 5s
retries: 20
function_resolver: function_resolver:
build: build:
@ -85,6 +97,8 @@ services:
extra_hosts: extra_hosts:
- host.docker.internal:host-gateway - host.docker.internal:host-gateway
restart: unless-stopped restart: unless-stopped
profiles:
- monitoring
chatbot_ui: chatbot_ui:
build: build:

View file

@ -0,0 +1,12 @@
apiVersion: 1
providers:
- name: "Dashboard provider"
orgId: 1
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: false
options:
path: /var/lib/grafana/dashboards
foldersFromFilesStructure: true

View file

@ -0,0 +1,355 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_internal_upstream_rq_time_sum[1m]) / rate(envoy_cluster_internal_upstream_rq_time_count[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "request latency - internal (ms)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 1,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_external_upstream_rq_time_sum[1m]) / rate(envoy_cluster_external_upstream_rq_time_count[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "request latency - external (ms)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 3,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_internal_upstream_rq_completed[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "avg(rate(envoy_cluster_external_upstream_rq_completed[1m])) by (envoy_cluster_name)",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "B",
"useBackend": false
}
],
"title": "Upstream request count",
"type": "timeseries"
}
],
"schemaVersion": 39,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-15m",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Intelligent Gateway Overview",
"uid": "adt6uhx5lk8aob",
"version": 3,
"weekStart": ""
}

View file

@ -0,0 +1,9 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus:9090
isDefault: true
access: proxy
editable: true

View file

@ -0,0 +1,23 @@
global:
scrape_interval: 15s
scrape_timeout: 10s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets: []
scheme: http
timeout: 10s
api_version: v1
scrape_configs:
- job_name: envoy
honor_timestamps: true
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /stats
scheme: http
static_configs:
- targets:
- bolt:9901
params:
format: ['prometheus']