diff --git a/_downloads/ca9d3b7116524473d8adbde7cf15d167/arch_config_full_reference.yaml b/_downloads/ca9d3b7116524473d8adbde7cf15d167/arch_config_full_reference.yaml index fad8962c..8ee8658e 100755 --- a/_downloads/ca9d3b7116524473d8adbde7cf15d167/arch_config_full_reference.yaml +++ b/_downloads/ca9d3b7116524473d8adbde7cf15d167/arch_config_full_reference.yaml @@ -1,4 +1,4 @@ -version: "0.1-beta" +version: v0.1 listener: address: 0.0.0.0 # or 127.0.0.1 @@ -8,9 +8,9 @@ listener: common_tls_context: # If you configure port 443, you'll need to update the listener with your TLS certificates tls_certificates: - certificate_chain: - filename: "/etc/certs/cert.pem" + filename: /etc/certs/cert.pem private_key: - filename: "/etc/certs/key.pem" + filename: /etc/certs/key.pem # Arch creates a round-robin load balancing between different endpoints, managed via the cluster subsystem. endpoints: @@ -18,42 +18,42 @@ endpoints: # value could be ip address or a hostname with port # this could also be a list of endpoints for load balancing # for example endpoint: [ ip1:port, ip2:port ] - endpoint: "127.0.0.1:80" + endpoint: 127.0.0.1:80 # max time to wait for a connection to be established connect_timeout: 0.005s mistral_local: - endpoint: "127.0.0.1:8001" + endpoint: 127.0.0.1:8001 error_target: - endpoint: "error_target_1" + endpoint: error_target_1 # Centralized way to manage LLMs, manage keys, retry logic, failover and limits in a central way llm_providers: - - name: "OpenAI" - provider: "openai" - access_key: $OPENAI_API_KEY + - name: OpenAI + provider: openai + access_key: OPENAI_API_KEY model: gpt-4o default: true stream: true rate_limits: selector: #optional headers, to add rate limiting based on http headers like JWT tokens or API keys http_header: - name: "Authorization" + name: Authorization value: "" # Empty value means each separate value has a separate limit limit: tokens: 100000 # Tokens per unit - unit: "minute" + unit: minute - - name: "Mistral8x7b" - provider: 
"mistral" - access_key: $MISTRAL_API_KEY - model: "mistral-8x7b" + - name: Mistral8x7b + provider: mistral + access_key: MISTRAL_API_KEY + model: mistral-8x7b - - name: "MistralLocal7b" - provider: "local" - model: "mistral-7b-instruct" - endpoint: "mistral_local" + - name: MistralLocal7b + provider: local + model: mistral-7b-instruct + endpoint: mistral_local # provides a way to override default settings for the arch system overrides: @@ -62,44 +62,41 @@ overrides: prompt_target_intent_matching_threshold: 0.60 # default system prompt used by all prompt targets -system_prompt: | - You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions. +system_prompt: You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions. prompt_guards: input_guards: jailbreak: on_exception: - message: "Looks like you're curious about my abilities, but I can only provide assistance within my programmed parameters." + message: Looks like you're curious about my abilities, but I can only provide assistance within my programmed parameters. prompt_targets: - - name: "reboot_network_device" - description: "Helps network operators perform device operations like rebooting a device." - endpoint: - name: app_server - path: "/agent/action" - parameters: - - name: "device_id" - # additional type options include: int | float | bool | string | list | dict - type: "string" - description: "Identifier of the network device to reboot." - required: true - - name: "confirmation" - type: "string" - description: "Confirmation flag to proceed with reboot." - default: "no" - enum: [yes, no] - - - name: "information_extraction" + - name: information_extraction default: true - description: "This prompt handles all scenarios that are question and answer in nature. Like summarization, information extraction, etc." + description: handel all scenarios that are question and answer in nature. 
Like summarization, information extraction, etc. endpoint: name: app_server - path: "/agent/summary" + path: /agent/summary # Arch uses the default LLM and treats the response from the endpoint as the prompt to send to the LLM auto_llm_dispatch_on_response: true # override system prompt for this prompt target - system_prompt: | - You are a helpful information extraction assistant. Use the information that is provided to you. + system_prompt: You are a helpful information extraction assistant. Use the information that is provided to you. + + - name: reboot_network_device + description: Reboot a specific network device + endpoint: + name: app_server + path: /agent/action + parameters: + - name: device_id + type: str + description: Identifier of the network device to reboot. + required: true + - name: confirmation + type: bool + description: Confirmation flag to proceed with reboot. + default: false + enum: [true, false] error_target: endpoint: diff --git a/build_with_arch/agent.html b/build_with_arch/agent.html index 4081a12d..9b05cdcf 100755 --- a/build_with_arch/agent.html +++ b/build_with_arch/agent.html @@ -101,9 +101,10 @@
Resources
@@ -160,7 +160,7 @@ you have the flexibility to support “agentic” apps tailored to specific use claims to creating ad campaigns - via prompts.Arch analyzes prompts, extracts critical information from prompts, engages in lightweight conversation with the user to gather any missing parameters and makes API calls so that you can focus on writing business logic. -Arch does this via its purpose-built Arch-FC LLM - the fastest (200ms p90 - 10x faser than GPT-4o) +Arch does this via its purpose-built Arch-Function - the fastest (200ms p90 - 10x faster than GPT-4o)
and cheapest (100x than GPT-40) function-calling LLM that matches performance with frontier models.
@@ -169,175 +169,171 @@ and cheapest (100x than GPT-40) function-calling LLM that matches performance wi
In the most common scenario, users will request a single action via prompts, and Arch efficiently processes the request by extracting relevant parameters, validating the input, and calling the designated function or API. Here is how you would go about enabling this scenario with Arch:
- 1version: "0.1-beta"
- 2listen:
- 3 address: 127.0.0.1 | 0.0.0.0
- 4 port_value: 8080 #If you configure port 443, you'll need to update the listener with tls_certificates
- 5
- 6system_prompt: |
- 7 You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions.
+
+Step 1: Define Prompt Targets
+
+
+ 1version: v0.1
+ 2
+ 3listen:
+ 4 address: 0.0.0.0 # or 127.0.0.1
+ 5 port: 10000
+ 6 # Defines how Arch should parse the content from application/json or text/plain Content-type in the http request
+ 7 message_format: huggingface
8
- 9llm_providers:
-10 - name: "OpenAI"
-11 provider: "openai"
-12 access_key: OPENAI_API_KEY
-13 model: gpt-4o
-14 stream: true
-15
-16prompt_targets:
-17 - name: reboot_devices
-18 description: >
-19 This prompt target handles user requests to reboot devices.
-20 It ensures that when users request to reboot specific devices or device groups, the system processes the reboot commands accurately.
-21
-22 **Examples of user prompts:**
-23
-24 - "Please reboot device 12345."
-25 - "Restart all devices in tenant group tenant-XYZ
-26 - "I need to reboot devices A, B, and C."
-27
-28 path: /agent/device_reboot
-29 parameters:
-30 - name: "device_ids"
-31 type: list # Options: integer | float | list | dictionary | set
-32 description: "A list of device identifiers (IDs) to reboot."
-33 required: false
-34 - name: "device_group"
-35 type: string # Options: string | integer | float | list | dictionary | set
-36 description: "The name of the device group to reboot."
-37 required: false
-38
-39# Arch creates a round-robin load balancing between different endpoints, managed via the cluster subsystem.
-40endpoints:
-41 app_server:
-42 # value could be ip address or a hostname with port
-43 # this could also be a list of endpoints for load balancing
-44 # for example endpoint: [ ip1:port, ip2:port ]
-45 endpoint: "127.0.0.1:80"
-46 # max time to wait for a connection to be established
-47 connect_timeout: 0.005s
+ 9# Centralized way to manage LLMs, manage keys, retry logic, failover and limits in a central way
+10llm_providers:
+11 - name: OpenAI
+12 provider: openai
+13 access_key: OPENAI_API_KEY
+14 model: gpt-4o
+15 default: true
+16 stream: true
+17
+18# default system prompt used by all prompt targets
+19system_prompt: You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions.
+20
+21prompt_targets:
+22 - name: reboot_devices
+23 description: Reboot specific devices or device groups
+24
+25 path: /agent/device_reboot
+26 parameters:
+27 - name: device_ids
+28 type: list
+29 description: A list of device identifiers (IDs) to reboot.
+30 required: false
+31 - name: device_group
+32 type: str
+33 description: The name of the device group to reboot
+34 required: false
+35
+36# Arch creates a round-robin load balancing between different endpoints, managed via the cluster subsystem.
+37endpoints:
+38 app_server:
+39 # value could be ip address or a hostname with port
+40 # this could also be a list of endpoints for load balancing
+41 # for example endpoint: [ ip1:port, ip2:port ]
+42 endpoint: 127.0.0.1:80
+43 # max time to wait for a connection to be established
+44 connect_timeout: 0.005s
-
-Step 2: Process request parameters in Flask
+
+Step 2: Process Request Parameters
Once the prompt targets are configured as above, handling those parameters is
-
-
+
+
1from flask import Flask, request, jsonify
2
3app = Flask(__name__)
4
- 5@app.route('/agent/device_summary', methods=['POST'])
- 6def get_device_summary():
- 7 """
- 8 Endpoint to retrieve device statistics based on device IDs and an optional time range.
- 9 """
-10 data = request.get_json()
-11
-12 # Validate 'device_ids' parameter
-13 device_ids = data.get('device_ids')
-14 if not device_ids or not isinstance(device_ids, list):
-15 return jsonify({'error': "'device_ids' parameter is required and must be a list"}), 400
-16
-17 # Validate 'time_range' parameter (optional, defaults to 7)
-18 time_range = data.get('time_range', 7)
-19 if not isinstance(time_range, int):
-20 return jsonify({'error': "'time_range' must be an integer"}), 400
-21
-22 # Simulate retrieving statistics for the given device IDs and time range
-23 # In a real application, you would query your database or external service here
-24 statistics = []
-25 for device_id in device_ids:
-26 # Placeholder for actual data retrieval
-27 stats = {
-28 'device_id': device_id,
-29 'time_range': f'Last {time_range} days',
-30 'data': f'Statistics data for device {device_id} over the last {time_range} days.'
-31 }
-32 statistics.append(stats)
-33
-34 response = {
-35 'statistics': statistics
-36 }
-37
-38 return jsonify(response), 200
-39
-40if __name__ == '__main__':
-41 app.run(debug=True)
+ 5
+ 6@app.route("/agent/device_summary", methods=["POST"])
+ 7def get_device_summary():
+ 8 """
+ 9 Endpoint to retrieve device statistics based on device IDs and an optional time range.
+10 """
+11 data = request.get_json()
+12
+13 # Validate 'device_ids' parameter
+14 device_ids = data.get("device_ids")
+15 if not device_ids or not isinstance(device_ids, list):
+16 return jsonify(
+17 {"error": "'device_ids' parameter is required and must be a list"}
+18 ), 400
+19
+20 # Validate 'time_range' parameter (optional, defaults to 7)
+21 time_range = data.get("time_range", 7)
+22 if not isinstance(time_range, int):
+23 return jsonify({"error": "'time_range' must be an integer"}), 400
+24
+25 # Simulate retrieving statistics for the given device IDs and time range
+26 # In a real application, you would query your database or external service here
+27 statistics = []
+28 for device_id in device_ids:
+29 # Placeholder for actual data retrieval
+30 stats = {
+31 "device_id": device_id,
+32 "time_range": f"Last {time_range} days",
+33 "data": f"Statistics data for device {device_id} over the last {time_range} days.",
+34 }
+35 statistics.append(stats)
+36
+37 response = {"statistics": statistics}
+38
+39 return jsonify(response), 200
+40
+41
+42if __name__ == "__main__":
+43 app.run(debug=True)
-Parallel/ Multiple Function Calling
+Parallel & Multiple Function Calling
In more complex use cases, users may request multiple actions or need multiple APIs/functions to be called
simultaneously or sequentially. With Arch, you can handle these scenarios efficiently using parallel or multiple
function calling. This allows your application to engage in a broader range of interactions, such as updating
different datasets, triggering events across systems, or collecting results from multiple services in one prompt.
Arch-FC1B is built to manage these parallel tasks efficiently, ensuring low latency and high throughput, even
when multiple functions are invoked. It provides two mechanisms to handle these cases:
-
-Step 1: Define Multiple Function Targets
+
+Step 1: Define Prompt Targets
When enabling multiple function calling, define the prompt targets in a way that supports multiple functions or
API calls based on the user’s prompt. These targets can be triggered in parallel or sequentially, depending on
the user’s intent.
Example of Multiple Prompt Targets in YAML:
-
-
- 1version: "0.1-beta"
- 2listen:
- 3 address: 127.0.0.1 | 0.0.0.0
- 4 port_value: 8080 #If you configure port 443, you'll need to update the listener with tls_certificates
- 5
- 6system_prompt: |
- 7 You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions.
+
+
+ 1version: v0.1
+ 2
+ 3listen:
+ 4 address: 0.0.0.0 # or 127.0.0.1
+ 5 port: 10000
+ 6 # Defines how Arch should parse the content from application/json or text/plain Content-type in the http request
+ 7 message_format: huggingface
8
- 9llm_providers:
-10 - name: "OpenAI"
-11 provider: "openai"
-12 access_key: OPENAI_API_KEY
-13 model: gpt-4o
-14 stream: true
-15
-16prompt_targets:
-17 - name: reboot_devices
-18 description: >
-19 This prompt target handles user requests to reboot devices.
-20 It ensures that when users request to reboot specific devices or device groups, the system processes the reboot commands accurately.
-21
-22 **Examples of user prompts:**
-23
-24 - "Please reboot device 12345."
-25 - "Restart all devices in tenant group tenant-XYZ
-26 - "I need to reboot devices A, B, and C."
-27
-28 path: /agent/device_reboot
-29 parameters:
-30 - name: "device_ids"
-31 type: list # Options: integer | float | list | dictionary | set
-32 description: "A list of device identifiers (IDs) to reboot."
-33 required: false
-34 - name: "device_group"
-35 type: string # Options: string | integer | float | list | dictionary | set
-36 description: "The name of the device group to reboot."
-37 required: false
-38
-39# Arch creates a round-robin load balancing between different endpoints, managed via the cluster subsystem.
-40endpoints:
-41 app_server:
-42 # value could be ip address or a hostname with port
-43 # this could also be a list of endpoints for load balancing
-44 # for example endpoint: [ ip1:port, ip2:port ]
-45 endpoint: "127.0.0.1:80"
-46 # max time to wait for a connection to be established
-47 connect_timeout: 0.005s
+ 9# Centralized way to manage LLMs, manage keys, retry logic, failover and limits in a central way
+10llm_providers:
+11 - name: OpenAI
+12 provider: openai
+13 access_key: OPENAI_API_KEY
+14 model: gpt-4o
+15 default: true
+16 stream: true
+17
+18# default system prompt used by all prompt targets
+19system_prompt: You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions.
+20
+21prompt_targets:
+22 - name: reboot_devices
+23 description: Reboot specific devices or device groups
+24
+25 path: /agent/device_reboot
+26 parameters:
+27 - name: device_ids
+28 type: list
+29 description: A list of device identifiers (IDs) to reboot.
+30 required: false
+31 - name: device_group
+32 type: str
+33 description: The name of the device group to reboot
+34 required: false
+35
+36# Arch creates a round-robin load balancing between different endpoints, managed via the cluster subsystem.
+37endpoints:
+38 app_server:
+39 # value could be ip address or a hostname with port
+40 # this could also be a list of endpoints for load balancing
+41 # for example endpoint: [ ip1:port, ip2:port ]
+42 endpoint: 127.0.0.1:80
+43 # max time to wait for a connection to be established
+44 connect_timeout: 0.005s
@@ -365,12 +361,12 @@ the user’s intent.
On this page
- Single Function Call
-- Parallel/ Multiple Function Calling
diff --git a/build_with_arch/rag.html b/build_with_arch/rag.html
index dbc986c2..4f609078 100755
--- a/build_with_arch/rag.html
+++ b/build_with_arch/rag.html
@@ -101,9 +101,10 @@
- Terminology
- Threading Model
- Listener
+- Prompts
- Model Serving
-- Prompt
- Request Lifecycle
+- Error Target
LLM Provider
@@ -128,7 +129,6 @@
Resources
@@ -157,49 +157,72 @@
Retrieval-Augmented Generation (RAG) applications.
Intent-drift Detection
-Developers struggle to handle follow-up
-or clarifying
-questions. Specifically, when users ask for changes or additions to previous responses their AI applications often
-generate entirely new responses instead of adjusting previous ones. Arch offers intent-drift tracking as a feature so
-that developers can know when the user has shifted away from a previous intent so that they can dramatically improve
-retrieval accuracy, lower overall token cost and improve the speed of their responses back to users.
+Developers struggle to handle follow-up or clarification questions.
+Specifically, when users ask for changes or additions to previous responses their AI applications often generate entirely new responses instead of adjusting previous ones.
+Arch offers intent-drift tracking as a feature so that developers can know when the user has shifted away from a previous intent so that they can dramatically improve retrieval accuracy, lower overall token cost and improve the speed of their responses back to users.
Arch uses its built-in lightweight NLI and embedding models to know if the user has steered away from an active intent.
-Arch’s intent-drift detection mechanism is based on its’ prompt_targets primtive. Arch tries to match an incoming
-prompt to one of the prompt_targets configured in the gateway. Once it detects that the user has moved away from an active
+Arch’s intent-drift detection mechanism is based on its prompt_targets primitive. Arch tries to match an incoming
+prompt to one of the prompt_targets configured in the gateway. Once it detects that the user has moved away from an
active intent, Arch adds the x-arch-intent-drift headers to the request before sending it your application servers.
- 1@app.route('/process_rag', methods=['POST'])
+ 1@app.route("/process_rag", methods=["POST"])
2def process_rag():
3 # Extract JSON data from the request
4 data = request.get_json()
5
- 6 user_id = data.get('user_id')
+ 6 user_id = data.get("user_id")
7 if not user_id:
- 8 return jsonify({'error': 'User ID is required'}), 400
+ 8 return jsonify({"error": "User ID is required"}), 400
9
-10 client_messages = data.get('messages')
+10 client_messages = data.get("messages")
11 if not client_messages or not isinstance(client_messages, list):
-12 return jsonify({'error': 'Messages array is required'}), 400
+12 return jsonify({"error": "Messages array is required"}), 400
13
14 # Extract the intent change marker from Arch's headers if present for the current prompt
-15 intent_changed_header = request.headers.get('x-arch-intent-marker', '').lower()
-16 if intent_changed_header in ['', 'false']:
+15 intent_changed_header = request.headers.get("x-arch-intent-marker", "").lower()
+16 if intent_changed_header in ["", "false"]:
17 intent_changed = False
-18 elif intent_changed_header == 'true':
+18 elif intent_changed_header == "true":
19 intent_changed = True
20 else:
21 # Invalid value provided
-22 return jsonify({'error': 'Invalid value for x-arch-prompt-intent-change header'}), 400
-23
-24 # Update user conversation based on intent change
-25 memory = update_user_conversation(user_id, client_messages, intent_changed)
-26
-27 # Retrieve messages since last intent change for LLM
-28 messages_for_llm = get_messages_since_last_intent(memory.chat_memory.messages)
-29
-30 # Forward messages to upstream LLM
-31 llm_response = forward_to_llm(messages_for_llm)
+22 return jsonify(
+23 {"error": "Invalid value for x-arch-prompt-intent-change header"}
+24 ), 400
+25
+26 # Update user conversation based on intent change
+27 memory = update_user_conversation(user_id, client_messages, intent_changed)
+28
+29 # Retrieve messages since last intent change for LLM
+30 messages_for_llm = get_messages_since_last_intent(memory.chat_memory.messages)
+31
+32 # Forward messages to upstream LLM
+33 llm_response = forward_to_llm(messages_for_llm)
+34
+35 # Prepare the messages to return
+36 messages_to_return = []
+37 for message in memory.chat_memory.messages:
+38 role = "user" if isinstance(message, HumanMessage) else "assistant"
+39 content = message.content
+40 metadata = message.additional_kwargs.get("metadata", {})
+41 message_entry = {
+42 "uuid": metadata.get("uuid"),
+43 "timestamp": metadata.get("timestamp"),
+44 "role": role,
+45 "content": content,
+46 "intent_changed": metadata.get("intent_changed", False),
+47 }
+48 messages_to_return.append(message_entry)
+49
+50 # Prepare the response
+51 response = {
+52 "user_id": user_id,
+53 "messages": messages_to_return,
+54 "llm_response": llm_response,
+55 }
+56
+57 return jsonify(response), 200
@@ -224,20 +247,20 @@ so that you can use the most relevant prompts for your retrieval and for prompti
10# Global dictionary to keep track of user memories
11user_memories = {}
12
-13def get_user_conversation(user_id):
-14 """
-15 Retrieve the user's conversation memory using LangChain.
-16 If the user does not exist, initialize their conversation memory.
-17 """
-18 if user_id not in user_memories:
-19 user_memories[user_id] = ConversationBufferMemory(return_messages=True)
-20 return user_memories[user_id]
-21
+13
+14def get_user_conversation(user_id):
+15 """
+16 Retrieve the user's conversation memory using LangChain.
+17 If the user does not exist, initialize their conversation memory.
+18 """
+19 if user_id not in user_memories:
+20 user_memories[user_id] = ConversationBufferMemory(return_messages=True)
+21 return user_memories[user_id]
-
-Step 2: Update ConversationBufferMemory w/ intent
+
+Step 2: Update ConversationBufferMemory with Intents
1def update_user_conversation(user_id, client_messages, intent_changed):
2 """
3 Update the user's conversation memory with new messages using LangChain.
@@ -253,26 +276,26 @@ so that you can use the most relevant prompts for your retrieval and for prompti
13
14 # Process each new message
15 for index, message in enumerate(new_messages):
-16 role = message.get('role')
-17 content = message.get('content')
+16 role = message.get("role")
+17 content = message.get("content")
18 metadata = {
-19 'uuid': str(uuid.uuid4()),
-20 'timestamp': datetime.utcnow().isoformat(),
-21 'intent_changed': False # Default value
+19 "uuid": str(uuid.uuid4()),
+20 "timestamp": datetime.utcnow().isoformat(),
+21 "intent_changed": False, # Default value
22 }
23
24 # Mark the intent change on the last message if detected
25 if intent_changed and index == len(new_messages) - 1:
-26 metadata['intent_changed'] = True
+26 metadata["intent_changed"] = True
27
28 # Create a new message with metadata
-29 if role == 'user':
+29 if role == "user":
30 memory.chat_memory.add_message(
-31 HumanMessage(content=content, additional_kwargs={'metadata': metadata})
+31 HumanMessage(content=content, additional_kwargs={"metadata": metadata})
32 )
-33 elif role == 'assistant':
+33 elif role == "assistant":
34 memory.chat_memory.add_message(
-35 AIMessage(content=content, additional_kwargs={'metadata': metadata})
+35 AIMessage(content=content, additional_kwargs={"metadata": metadata})
36 )
37 else:
38 # Handle other roles if necessary
@@ -292,11 +315,12 @@ so that you can use the most relevant prompts for your retrieval and for prompti
6 for message in reversed(messages):
7 # Insert message at the beginning to maintain correct order
8 messages_since_intent.insert(0, message)
- 9 metadata = message.additional_kwargs.get('metadata', {})
+ 9 metadata = message.additional_kwargs.get("metadata", {})
10 # Break if intent_changed is True
-11 if metadata.get('intent_changed', False) == True:
+11 if metadata.get("intent_changed", False) == True:
12 break
-13 return messages_since_intent
+13
+14 return messages_since_intent
You can used the last set of messages that match to an intent to prompt an LLM, use it with an vector-DB for
@@ -311,37 +335,31 @@ enabling Arch to retrieve critical information in a structured way for processin
retrieval quality and speed of your application. By extracting parameters from the conversation, you can pull
the appropriate chunks from a vector database or SQL-like data store to enhance accuracy. With Arch, you can
streamline data retrieval and processing to build more efficient and precise RAG applications.
-
-Step 1: Define prompt targets with parameter definitions
+
+Step 1: Define Prompt Targets
1prompt_targets:
2 - name: get_device_statistics
- 3 description: >
- 4 This prompt target ensures that when users request device-related statistics, the system accurately retrieves and presents the relevant data
- 5 based on the specified devices and time range. Examples of user prompts, include:
- 6
- 7 - "Show me the performance stats for device 12345 over the past week."
- 8 - "What are the error rates for my devices in the last 24 hours?"
- 9 - "I need statistics on device 789 over the last 10 days."
-10
-11 path: /agent/device_summary
-12 parameters:
-13 - name: "device_ids"
-14 type: list # Options: integer | float | list | dictionary | set
-15 description: "A list of device identifiers (IDs) for which the statistics are requested."
-16 required: true
-17 - name: "time_range"
-18 type: integer # Options: integer | float | list | dictionary | set
-19 description: "The number of days in the past over which to retrieve device statistics. Defaults to 7 days if not specified."
-20 required: false
-21 default: 7
+ 3 description: Retrieve and present the relevant data based on the specified devices and time range
+ 4
+ 5 path: /agent/device_summary
+ 6 parameters:
+ 7 - name: device_ids
+ 8 type: list
+ 9 description: A list of device identifiers (IDs) for which the statistics are requested.
+10 required: true
+11 - name: time_range
+12 type: int
+13 description: The number of days in the past over which to retrieve device statistics
+14 required: false
+15 default: 7
-Step 2: Process request parameters in Flask
+Step 2: Process Request Parameters in Flask
Once the prompt targets are configured as above, handling those parameters is
@@ -349,43 +367,45 @@ streamline data retrieval and processing to build more efficient and precise RAG
2
3app = Flask(__name__)
4
- 5@app.route('/agent/device_summary', methods=['POST'])
- 6def get_device_summary():
- 7 """
- 8 Endpoint to retrieve device statistics based on device IDs and an optional time range.
- 9 """
-10 data = request.get_json()
-11
-12 # Validate 'device_ids' parameter
-13 device_ids = data.get('device_ids')
-14 if not device_ids or not isinstance(device_ids, list):
-15 return jsonify({'error': "'device_ids' parameter is required and must be a list"}), 400
-16
-17 # Validate 'time_range' parameter (optional, defaults to 7)
-18 time_range = data.get('time_range', 7)
-19 if not isinstance(time_range, int):
-20 return jsonify({'error': "'time_range' must be an integer"}), 400
-21
-22 # Simulate retrieving statistics for the given device IDs and time range
-23 # In a real application, you would query your database or external service here
-24 statistics = []
-25 for device_id in device_ids:
-26 # Placeholder for actual data retrieval
-27 stats = {
-28 'device_id': device_id,
-29 'time_range': f'Last {time_range} days',
-30 'data': f'Statistics data for device {device_id} over the last {time_range} days.'
-31 }
-32 statistics.append(stats)
-33
-34 response = {
-35 'statistics': statistics
-36 }
-37
-38 return jsonify(response), 200
-39
-40if __name__ == '__main__':
-41 app.run(debug=True)
+ 5
+ 6@app.route("/agent/device_summary", methods=["POST"])
+ 7def get_device_summary():
+ 8 """
+ 9 Endpoint to retrieve device statistics based on device IDs and an optional time range.
+10 """
+11 data = request.get_json()
+12
+13 # Validate 'device_ids' parameter
+14 device_ids = data.get("device_ids")
+15 if not device_ids or not isinstance(device_ids, list):
+16 return jsonify(
+17 {"error": "'device_ids' parameter is required and must be a list"}
+18 ), 400
+19
+20 # Validate 'time_range' parameter (optional, defaults to 7)
+21 time_range = data.get("time_range", 7)
+22 if not isinstance(time_range, int):
+23 return jsonify({"error": "'time_range' must be an integer"}), 400
+24
+25 # Simulate retrieving statistics for the given device IDs and time range
+26 # In a real application, you would query your database or external service here
+27 statistics = []
+28 for device_id in device_ids:
+29 # Placeholder for actual data retrieval
+30 stats = {
+31 "device_id": device_id,
+32 "time_range": f"Last {time_range} days",
+33 "data": f"Statistics data for device {device_id} over the last {time_range} days.",
+34 }
+35 statistics.append(stats)
+36
+37 response = {"statistics": statistics}
+38
+39 return jsonify(response), 200
+40
+41
+42if __name__ == "__main__":
+43 app.run(debug=True)
@@ -414,13 +434,13 @@ streamline data retrieval and processing to build more efficient and precise RAG
diff --git a/concepts/llm_provider.html b/concepts/llm_provider.html
index 89e70033..6198c131 100755
--- a/concepts/llm_provider.html
+++ b/concepts/llm_provider.html
@@ -19,7 +19,7 @@
-
+
-
-
-
-
+
+
+
+
+