diff --git a/arch/Dockerfile b/arch/Dockerfile index 009c4744..0af5c62d 100644 --- a/arch/Dockerfile +++ b/arch/Dockerfile @@ -10,8 +10,17 @@ COPY public_types /public_types RUN cargo build --release --target wasm32-wasi # copy built filter into envoy image -FROM envoyproxy/envoy:v1.30-latest +FROM envoyproxy/envoy:v1.30-latest as envoy + +# Build the config generator so that we have a single build image for both Rust and Python +FROM python:3-slim as arch COPY --from=builder /arch/target/wasm32-wasi/release/intelligent_prompt_gateway.wasm /etc/envoy/proxy-wasm-plugins/intelligent_prompt_gateway.wasm -# CMD ["envoy", "-c", "/etc/envoy/envoy.yaml"] -# CMD ["envoy", "-c", "/etc/envoy/envoy.yaml", "--log-level", "debug"] -CMD ["envoy", "-c", "/etc/envoy/envoy.yaml", "--component-log-level", "wasm:debug"] +COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy +WORKDIR /config +COPY arch/requirements.txt . +RUN pip install -r requirements.txt +COPY arch/config_generator.py . +COPY arch/envoy.template.yaml . +COPY arch/arch_config_schema.yaml . 
+ +CMD ["sh", "-c", "python config_generator.py && envoy -c /etc/envoy/envoy.yaml --component-log-level wasm:debug"] diff --git a/config_generator/arch_config_schema.yaml b/arch/arch_config_schema.yaml similarity index 100% rename from config_generator/arch_config_schema.yaml rename to arch/arch_config_schema.yaml diff --git a/config_generator/config_generator.py b/arch/config_generator.py similarity index 95% rename from config_generator/config_generator.py rename to arch/config_generator.py index 8f98b92c..2fb0ded7 100644 --- a/config_generator/config_generator.py +++ b/arch/config_generator.py @@ -4,9 +4,9 @@ import yaml from jsonschema import validate ENVOY_CONFIG_TEMPLATE_FILE = os.getenv('ENVOY_CONFIG_TEMPLATE_FILE', 'envoy.template.yaml') -ARCH_CONFIG_FILE = os.getenv('ARCH_CONFIG_FILE', 'arch_config.yaml') +ARCH_CONFIG_FILE = os.getenv('ARCH_CONFIG_FILE', '/config/arch_config.yaml') +ENVOY_CONFIG_FILE_RENDERED = os.getenv('ENVOY_CONFIG_FILE_RENDERED', '/etc/envoy/envoy.yaml') ARCH_CONFIG_SCHEMA_FILE = os.getenv('ARCH_CONFIG_SCHEMA_FILE', 'arch_config_schema.yaml') -ENVOY_CONFIG_FILE_RENDERED = os.getenv('ENVOY_CONFIG_FILE_RENDERED', '/usr/src/app/out/envoy.yaml') env = Environment(loader=FileSystemLoader('./')) template = env.get_template('envoy.template.yaml') diff --git a/arch/docker-compose.yaml b/arch/docker-compose.yaml index 37a70ecc..4c2f9fe4 100644 --- a/arch/docker-compose.yaml +++ b/arch/docker-compose.yaml @@ -1,43 +1,28 @@ services: - envoy: - image: envoyproxy/envoy:v1.30-latest - hostname: envoy + archgw: + build: + context: ../ + dockerfile: arch/Dockerfile ports: - "10000:10000" - - "19901:9901" + - "18080:9901" volumes: - - ./envoy.yaml:/etc/envoy/envoy.yaml - - ./target/wasm32-wasi/release:/etc/envoy/proxy-wasm-plugins + - ${ARCH_CONFIG_FILE}:/config/arch_config.yaml - /etc/ssl/cert.pem:/etc/ssl/cert.pem + - ./arch_log:/var/log/ depends_on: - qdrant: - condition: service_started - embeddingserver: + archgw_model_server: condition: 
service_healthy - embeddingserver: + archgw_model_server: build: - context: ../embedding-server + context: ../model_server dockerfile: Dockerfile ports: - - "18080:80" + - "18081:80" healthcheck: - test: ["CMD", "curl" ,"http://localhost:80/healthz"] + test: ["CMD", "curl" ,"http://localhost/healthz"] interval: 5s retries: 20 - - qdrant: - image: qdrant/qdrant - hostname: vector-db - ports: - - 16333:6333 - - 16334:6334 - - chatbot-ui: - build: - context: ../chatbot-ui - dockerfile: Dockerfile - ports: - - "18080:8080" - environment: - - CHAT_COMPLETION_ENDPOINT=http://envoy:10000/v1 + volumes: + - ~/.cache/huggingface:/root/.cache/huggingface diff --git a/arch/envoy.template.yaml b/arch/envoy.template.yaml index 76ce41be..8c5acbac 100644 --- a/arch/envoy.template.yaml +++ b/arch/envoy.template.yaml @@ -132,20 +132,20 @@ static_resources: typed_config: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: api.mistral.ai - - name: model_server + - name: archgw_model_server connect_timeout: 5s type: STRICT_DNS lb_policy: ROUND_ROBIN load_assignment: - cluster_name: model_server + cluster_name: archgw_model_server endpoints: - lb_endpoints: - endpoint: address: socket_address: - address: model_server + address: archgw_model_server port_value: 80 - hostname: "model_server" + hostname: "archgw_model_server" - name: mistral_7b_instruct connect_timeout: 5s type: STRICT_DNS @@ -171,7 +171,7 @@ static_resources: - endpoint: address: socket_address: - address: model_server + address: archgw_model_server port_value: 80 hostname: "arch_fc" {% for _, cluster in arch_clusters.items() %} diff --git a/arch/init_vector_store.sh b/arch/init_vector_store.sh deleted file mode 100644 index e22f46b0..00000000 --- a/arch/init_vector_store.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -echo 'Deleting prompt_vector_store collection' -curl -X DELETE http://localhost:16333/collections/prompt_vector_store -echo -echo 'Creating prompt_vector_store 
collection' -curl -X PUT 'http://localhost:16333/collections/prompt_vector_store' \ - -H 'Content-Type: application/json' \ - --data-raw '{ - "vectors": { - "size": 1024, - "distance": "Cosine" - } - }' -echo -echo 'Created prompt_vector_store collection' diff --git a/config_generator/requirements.txt b/arch/requirements.txt similarity index 100% rename from config_generator/requirements.txt rename to arch/requirements.txt diff --git a/arch/src/consts.rs b/arch/src/consts.rs index ccee4640..0fe7467c 100644 --- a/arch/src/consts.rs +++ b/arch/src/consts.rs @@ -7,6 +7,6 @@ pub const USER_ROLE: &str = "user"; pub const GPT_35_TURBO: &str = "gpt-3.5-turbo"; pub const ARC_FC_CLUSTER: &str = "arch_fc"; pub const ARCH_FC_REQUEST_TIMEOUT_MS: u64 = 120000; // 2 minutes -pub const MODEL_SERVER_NAME: &str = "model_server"; +pub const MODEL_SERVER_NAME: &str = "archgw_model_server"; pub const ARCH_ROUTING_HEADER: &str = "x-arch-llm-provider"; pub const ARCH_MESSAGES_KEY: &str = "arch_messages"; diff --git a/arch/src/filter_context.rs b/arch/src/filter_context.rs index d9310fd9..61d146ed 100644 --- a/arch/src/filter_context.rs +++ b/arch/src/filter_context.rs @@ -141,7 +141,10 @@ impl FilterContext { ) { Ok(token_id) => token_id, Err(e) => { - panic!("Error dispatching HTTP call: {:?}", e); + panic!( + "Error dispatching HTTP call: {}, error: {:?}", + MODEL_SERVER_NAME, e + ); } }; token_id diff --git a/arch/tests/integration.rs b/arch/tests/integration.rs index 336454e0..befff2c8 100644 --- a/arch/tests/integration.rs +++ b/arch/tests/integration.rs @@ -104,7 +104,7 @@ fn normal_flow(module: &mut Tester, filter_context: i32, http_context: i32) { .expect_get_buffer_bytes(Some(BufferType::HttpRequestBody)) .returning(Some(chat_completions_request_body)) // The actual call is not important in this test, we just need to grab the token_id - .expect_http_call(Some("model_server"), None, None, None, None) + .expect_http_call(Some("archgw_model_server"), None, None, None, None) 
.returning(Some(1)) .expect_log(Some(LogLevel::Debug), None) .expect_metric_increment("active_http_calls", 1) @@ -136,7 +136,7 @@ fn normal_flow(module: &mut Tester, filter_context: i32, http_context: i32) { .returning(Some(&embeddings_response_buffer)) .expect_log(Some(LogLevel::Debug), None) .expect_log(Some(LogLevel::Debug), None) - .expect_http_call(Some("model_server"), None, None, None, None) + .expect_http_call(Some("archgw_model_server"), None, None, None, None) .returning(Some(2)) .expect_metric_increment("active_http_calls", 1) .expect_log(Some(LogLevel::Debug), None) @@ -313,7 +313,7 @@ fn successful_request_to_open_ai_chat_completions() { .returning(Some(chat_completions_request_body)) .expect_log(Some(LogLevel::Debug), None) .expect_log(Some(LogLevel::Info), None) - .expect_http_call(Some("model_server"), None, None, None, None) + .expect_http_call(Some("archgw_model_server"), None, None, None, None) .returning(Some(4)) .expect_metric_increment("active_http_calls", 1) .execute_and_expect(ReturnType::Action(Action::Pause)) diff --git a/config_generator/Dockerfile b/config_generator/Dockerfile deleted file mode 100644 index a639504c..00000000 --- a/config_generator/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM python:3-slim as config-generator -WORKDIR /usr/src/app -COPY config_generator/requirements.txt . -RUN pip install -r requirements.txt -COPY config_generator/config_generator.py . -COPY arch/envoy.template.yaml . -COPY config_generator/arch_config_schema.yaml . 
-CMD ["python", "config_generator.py"] diff --git a/demos/employee_details_copilot/docker-compose.yaml b/demos/employee_details_copilot/docker-compose.yaml index ad05eb71..f90edb7e 100644 --- a/demos/employee_details_copilot/docker-compose.yaml +++ b/demos/employee_details_copilot/docker-compose.yaml @@ -28,7 +28,7 @@ services: environment: - LOG_LEVEL=debug - model_server: + archgw_model_server: build: context: ../../model_server dockerfile: Dockerfile diff --git a/demos/employee_details_copilot_arch/docker-compose.yaml b/demos/employee_details_copilot_arch/docker-compose.yaml index 0b58c614..80dd99a4 100644 --- a/demos/employee_details_copilot_arch/docker-compose.yaml +++ b/demos/employee_details_copilot_arch/docker-compose.yaml @@ -28,7 +28,7 @@ services: environment: - LOG_LEVEL=debug - model_server: + archgw_model_server: build: context: ../../model_server dockerfile: Dockerfile diff --git a/demos/function_calling/docker-compose.yaml b/demos/function_calling/docker-compose.yaml index a4365f97..b71efc27 100644 --- a/demos/function_calling/docker-compose.yaml +++ b/demos/function_calling/docker-compose.yaml @@ -28,7 +28,7 @@ services: environment: - LOG_LEVEL=debug - model_server: + archgw_model_server: build: context: ../../model_server dockerfile: Dockerfile diff --git a/demos/network_copilot/docker-compose.yaml b/demos/network_copilot/docker-compose.yaml index 7e8436da..29912936 100644 --- a/demos/network_copilot/docker-compose.yaml +++ b/demos/network_copilot/docker-compose.yaml @@ -28,7 +28,7 @@ services: environment: - LOG_LEVEL=debug - model_server: + archgw_model_server: build: context: ../../model_server dockerfile: Dockerfile diff --git a/demos/prompt_guards/docker-compose.yaml b/demos/prompt_guards/docker-compose.yaml index 8426bbe7..43efb56b 100644 --- a/demos/prompt_guards/docker-compose.yaml +++ b/demos/prompt_guards/docker-compose.yaml @@ -29,7 +29,7 @@ services: environment: - LOG_LEVEL=debug - model_server: + archgw_model_server: build: context: 
../../model_server dockerfile: Dockerfile diff --git a/model_server/Dockerfile b/model_server/Dockerfile index fef7471d..e90f332f 100644 --- a/model_server/Dockerfile +++ b/model_server/Dockerfile @@ -27,7 +27,6 @@ FROM python:3.10-slim AS output # following models have been tested to work with this image # "sentence-transformers/all-MiniLM-L6-v2,sentence-transformers/all-mpnet-base-v2,thenlper/gte-base,thenlper/gte-large,thenlper/gte-small" ENV MODELS="BAAI/bge-large-en-v1.5" -ENV NER_MODELS="urchade/gliner_large-v2.1" COPY --from=builder /runtime /usr/local diff --git a/model_server/app/load_models.py b/model_server/app/load_models.py index c7dd939e..673614a7 100644 --- a/model_server/app/load_models.py +++ b/model_server/app/load_models.py @@ -1,6 +1,5 @@ import os import sentence_transformers -from gliner import GLiNER from transformers import AutoTokenizer, pipeline import sqlite3 from app.employee_data_generator import generate_employee_data @@ -20,15 +19,6 @@ def load_transformers(models=os.getenv("MODELS", "BAAI/bge-large-en-v1.5")): return transformers -def load_ner_models(models=os.getenv("NER_MODELS", "urchade/gliner_large-v2.1")): - ner_models = {} - - for model in models.split(","): - ner_models[model] = GLiNER.from_pretrained(model) - - return ner_models - - def load_guard_model( model_name, hardware_config="cpu", diff --git a/model_server/app/main.py b/model_server/app/main.py index accc379d..e073b66d 100644 --- a/model_server/app/main.py +++ b/model_server/app/main.py @@ -2,7 +2,6 @@ import os from fastapi import FastAPI, Response, HTTPException from pydantic import BaseModel from app.load_models import ( - load_ner_models, load_transformers, load_guard_model, load_zero_shot_models, @@ -22,46 +21,18 @@ logging.basicConfig( logger = logging.getLogger(__name__) transformers = load_transformers() -ner_models = load_ner_models() zero_shot_models = load_zero_shot_models() -config = {} - -if os.path.exists("/root/arch_config.yaml"): - with 
open("/root/arch_config.yaml", "r") as file: - config = yaml.safe_load(file) with open("guard_model_config.yaml") as f: guard_model_config = yaml.safe_load(f) -if "prompt_guards" in config.keys(): - if len(config["prompt_guards"]["input_guards"]) == 2: - task = "both" - jailbreak_hardware = "gpu" if torch.cuda.is_available() else "cpu" - toxic_hardware = "gpu" if torch.cuda.is_available() else "cpu" - toxic_model = load_guard_model( - guard_model_config["toxic"][jailbreak_hardware], toxic_hardware - ) - jailbreak_model = load_guard_model( - guard_model_config["jailbreak"][toxic_hardware], jailbreak_hardware - ) +task = "both" +hardware = "gpu" if torch.cuda.is_available() else "cpu" +jailbreak_model = load_guard_model( + guard_model_config["jailbreak"][hardware], hardware +) - else: - task = list(config["prompt_guards"]["input_guards"].keys())[0] - - hardware = "gpu" if torch.cuda.is_available() else "cpu" - if task == "toxic": - toxic_model = load_guard_model( - guard_model_config["toxic"][hardware], hardware - ) - jailbreak_model = None - elif task == "jailbreak": - jailbreak_model = load_guard_model( - guard_model_config["jailbreak"][hardware], hardware - ) - toxic_model = None - - - guard_handler = GuardHandler(toxic_model, jailbreak_model) +guard_handler = GuardHandler(toxic_model=None, jailbreak_model=jailbreak_model) app = FastAPI() @@ -108,27 +79,6 @@ async def embedding(req: EmbeddingRequest, res: Response): return {"data": data, "model": req.model, "object": "list", "usage": usage} -class NERRequest(BaseModel): - input: str - labels: list[str] - model: str - - -@app.post("/ner") -async def ner(req: NERRequest, res: Response): - if req.model not in ner_models: - raise HTTPException(status_code=400, detail="unknown model: " + req.model) - - model = ner_models[req.model] - entities = model.predict_entities(req.input, req.labels) - - return { - "data": entities, - "model": req.model, - "object": "list", - } - - class GuardRequest(BaseModel): input: str task: 
str @@ -236,270 +186,7 @@ async def zeroshot(req: ZeroShotRequest, res: Response): "model": req.model, } - @app.post("/v1/chat/completions") async def chat_completion(req: ChatMessage, res: Response): result = await arch_fc_chat_completion(req, res) return result - - -''' -***** -Adding new functions to test the usecases - Sampreeth -***** -""" - -conn = load_sql() -name_col = "name" - - -class TopEmployees(BaseModel): - grouping: str - ranking_criteria: str - top_n: int - - -@app.post("/top_employees") -async def top_employees(req: TopEmployees, res: Response): - name_col = "name" - # Check if `req.ranking_criteria` is a Text object and extract its value accordingly - logger.info( - f"{'* ' * 50}\n\nCaptured Ranking Criteria: {req.ranking_criteria}\n\n{'* ' * 50}" - ) - - if req.ranking_criteria == "yoe": - req.ranking_criteria = "years_of_experience" - elif req.ranking_criteria == "rating": - req.ranking_criteria = "performance_score" - - logger.info( - f"{'* ' * 50}\n\nFinal Ranking Criteria: {req.ranking_criteria}\n\n{'* ' * 50}" - ) - - query = f""" - SELECT {req.grouping}, {name_col}, {req.ranking_criteria} - FROM ( - SELECT {req.grouping}, {name_col}, {req.ranking_criteria}, - DENSE_RANK() OVER (PARTITION BY {req.grouping} ORDER BY {req.ranking_criteria} DESC) as emp_rank - FROM employees - ) ranked_employees - WHERE emp_rank <= {req.top_n}; - """ - result_df = pd.read_sql_query(query, conn) - result = result_df.to_dict(orient="records") - return result - - -class AggregateStats(BaseModel): - grouping: str - aggregate_criteria: str - aggregate_type: str - - -@app.post("/aggregate_stats") -async def aggregate_stats(req: AggregateStats, res: Response): - logger.info( - f"{'* ' * 50}\n\nCaptured Aggregate Criteria: {req.aggregate_criteria}\n\n{'* ' * 50}" - ) - - if req.aggregate_criteria == "yoe": - req.aggregate_criteria = "years_of_experience" - - logger.info( - f"{'* ' * 50}\n\nFinal Aggregate Criteria: {req.aggregate_criteria}\n\n{'* ' * 50}" - ) - - 
logger.info( - f"{'* ' * 50}\n\nCaptured Aggregate Type: {req.aggregate_type}\n\n{'* ' * 50}" - ) - if req.aggregate_type.lower() not in ["sum", "avg", "min", "max"]: - if req.aggregate_type.lower() == "count": - req.aggregate_type = "COUNT" - elif req.aggregate_type.lower() == "total": - req.aggregate_type = "SUM" - elif req.aggregate_type.lower() == "average": - req.aggregate_type = "AVG" - elif req.aggregate_type.lower() == "minimum": - req.aggregate_type = "MIN" - elif req.aggregate_type.lower() == "maximum": - req.aggregate_type = "MAX" - else: - raise HTTPException(status_code=400, detail="Invalid aggregate type") - - logger.info( - f"{'* ' * 50}\n\nFinal Aggregate Type: {req.aggregate_type}\n\n{'* ' * 50}" - ) - - query = f""" - SELECT {req.grouping}, {req.aggregate_type}({req.aggregate_criteria}) as {req.aggregate_type}_{req.aggregate_criteria} - FROM employees - GROUP BY {req.grouping}; - """ - result_df = pd.read_sql_query(query, conn) - result = result_df.to_dict(orient="records") - return result - - -class PacketDropCorrelationRequest(BaseModel): - from_time: str = None # Optional natural language timeframe - ifname: str = None # Optional interface name filter - region: str = None # Optional region filter - min_in_errors: int = None - max_in_errors: int = None - min_out_errors: int = None - max_out_errors: int = None - min_in_discards: int = None - max_in_discards: int = None - min_out_discards: int = None - max_out_discards: int = None - - -@app.post("/interface_down_pkt_drop") -async def interface_down_packet_drop(req: PacketDropCorrelationRequest, res: Response): - params, filters = load_params(req) - - # Join the filters using AND - where_clause = " AND ".join(filters) - if where_clause: - where_clause = "AND " + where_clause - - # Step 3: Query packet errors and flows from interfacestats and ts_flow - query = f""" - SELECT - d.switchip AS device_ip_address, - i.in_errors, - i.in_discards, - i.out_errors, - i.out_discards, - i.ifname, - t.src_addr, 
- t.dst_addr, - t.time AS flow_time, - i.time AS interface_time - FROM - device d - INNER JOIN - interfacestats i - ON d.device_mac_address = i.device_mac_address - INNER JOIN - ts_flow t - ON d.switchip = t.sampler_address - WHERE - i.time >= :from_time -- Using the converted timestamp - {where_clause} - ORDER BY - i.time; - """ - - correlated_data = pd.read_sql_query(query, conn, params=params) - - if correlated_data.empty: - default_response = { - "device_ip_address": "0.0.0.0", # Placeholder IP - "in_errors": 0, - "in_discards": 0, - "out_errors": 0, - "out_discards": 0, - "ifname": req.ifname - or "unknown", # Placeholder or interface provided in the request - "src_addr": "0.0.0.0", # Placeholder source IP - "dst_addr": "0.0.0.0", # Placeholder destination IP - "flow_time": str( - datetime.now(timezone.utc) - ), # Current timestamp or placeholder - "interface_time": str( - datetime.now(timezone.utc) - ), # Current timestamp or placeholder - } - return [default_response] - - logger.info(f"Correlated Packet Drop Data: {correlated_data}") - - return correlated_data.to_dict(orient='records') - - -class FlowPacketErrorCorrelationRequest(BaseModel): - from_time: str = None # Optional natural language timeframe - ifname: str = None # Optional interface name filter - region: str = None # Optional region filter - min_in_errors: int = None - max_in_errors: int = None - min_out_errors: int = None - max_out_errors: int = None - min_in_discards: int = None - max_in_discards: int = None - min_out_discards: int = None - max_out_discards: int = None - - -@app.post("/packet_errors_impact_flow") -async def packet_errors_impact_flow( - req: FlowPacketErrorCorrelationRequest, res: Response -): - params, filters = load_params(req) - - # Join the filters using AND - where_clause = " AND ".join(filters) - if where_clause: - where_clause = "AND " + where_clause - - # Step 3: Query the packet errors and flows, correlating by timestamps - query = f""" - SELECT - d.switchip AS 
device_ip_address, - i.in_errors, - i.in_discards, - i.out_errors, - i.out_discards, - i.ifname, - t.src_addr, - t.dst_addr, - t.src_port, - t.dst_port, - t.packets, - t.time AS flow_time, - i.time AS error_time - FROM - device d - INNER JOIN - interfacestats i - ON d.device_mac_address = i.device_mac_address - INNER JOIN - ts_flow t - ON d.switchip = t.sampler_address - WHERE - i.time >= :from_time - AND ABS(strftime('%s', t.time) - strftime('%s', i.time)) <= 300 -- Correlate within 5 minutes - {where_clause} - ORDER BY - i.time; - """ - - correlated_data = pd.read_sql_query(query, conn, params=params) - - if correlated_data.empty: - default_response = { - "device_ip_address": "0.0.0.0", # Placeholder IP - "in_errors": 0, - "in_discards": 0, - "out_errors": 0, - "out_discards": 0, - "ifname": req.ifname - or "unknown", # Placeholder or interface provided in the request - "src_addr": "0.0.0.0", # Placeholder source IP - "dst_addr": "0.0.0.0", # Placeholder destination IP - "src_port": 0, - "dst_port": 0, - "packets": 0, - "flow_time": str( - datetime.now(timezone.utc) - ), # Current timestamp or placeholder - "error_time": str( - datetime.now(timezone.utc) - ), # Current timestamp or placeholder - } - return [default_response] - - # Return the correlated data if found - return correlated_data.to_dict(orient="records") -''' diff --git a/model_server/guard_model_config.yaml b/model_server/guard_model_config.yaml index 842a54c6..5c0d7802 100644 --- a/model_server/guard_model_config.yaml +++ b/model_server/guard_model_config.yaml @@ -1,6 +1,3 @@ -toxic: - cpu: "katanemolabs/toxic_ovn_4bit" - gpu: "katanemolabs/Bolt-Toxic-v1-eetq" jailbreak: cpu: "katanemolabs/jailbreak_ovn_4bit" gpu: "katanemolabs/Bolt-Guard-EEtq"