Revert "Feature/configure flows (#345)"

This reverts commit a9197d11ee.
Cyber MacGeddon 2025-04-25 19:02:08 +01:00
parent 3adb3cf59c
commit 1822ca395f
125 changed files with 2628 additions and 3751 deletions

View file

@@ -27,7 +27,7 @@ from .... clients.llm_client import LlmClient
 from . prompts import to_definitions, to_relationships, to_topics
 from . prompts import to_kg_query, to_document_query, to_rows
-module = "prompt"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = prompt_request_queue
 default_output_queue = prompt_response_queue
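
For context: the line restored here computes the service's module name from its dotted package path rather than hard-coding it. A minimal sketch of what that expression evaluates to, using an illustrative path (the real file path isn't visible in this extract):

```python
# Illustrative only: suppose this module were
# trustgraph.model.prompt.generic.service.
name = "trustgraph.model.prompt.generic.service"

# Drop the top-level package and the trailing module name, keeping the
# middle of the dotted path as the service identifier.
module = ".".join(name.split(".")[1:-1])
print(module)  # -> model.prompt.generic
```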

View file

@@ -4,6 +4,8 @@ import json
from jsonschema import validate
import re
from trustgraph.clients.llm_client import LlmClient
class PromptConfiguration:
def __init__(self, system_template, global_terms={}, prompts={}):
self.system_template = system_template
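
The `PromptConfiguration` holder restored above carries the system template plus dictionaries of global terms and per-id prompts. A small construction sketch; the class body beyond the first assignment is inferred from how the config is consumed later in this file, and the values are invented:

```python
class PromptConfiguration:
    # Signature as shown in the hunk above; the last two assignments are
    # inferred from how the config is used below (config.global_terms etc.).
    def __init__(self, system_template, global_terms={}, prompts={}):
        self.system_template = system_template
        self.global_terms = global_terms
        self.prompts = prompts

# Invented values, for illustration only.
config = PromptConfiguration(
    system_template="You are a helpful assistant.",
    global_terms={"style": "terse"},  # merged into every rendered prompt
    prompts={},                       # per-id Prompt definitions
)
print(config.global_terms)  # -> {'style': 'terse'}
```
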
@@ -19,7 +21,8 @@ class Prompt:
class PromptManager:
def __init__(self, config):
def __init__(self, llm, config):
self.llm = llm
self.config = config
self.terms = config.global_terms
@@ -51,9 +54,7 @@ class PromptManager:
return json.loads(json_str)
async def invoke(self, id, input, llm):
print("Invoke...", flush=True)
def invoke(self, id, input):
if id not in self.prompts:
raise RuntimeError("ID invalid")
@@ -67,7 +68,9 @@ class PromptManager:
"prompt": self.templates[id].render(terms)
}
resp = await llm(**prompt)
resp = self.llm.request(**prompt)
print(resp, flush=True)
if resp_type == "text":
return resp
@@ -78,13 +81,13 @@ class PromptManager:
try:
obj = self.parse_json(resp)
except:
print("Parse fail:", resp, flush=True)
raise RuntimeError("JSON parse fail")
print(obj, flush=True)
if self.prompts[id].schema:
try:
print(self.prompts[id].schema)
validate(instance=obj, schema=self.prompts[id].schema)
print("Validated", flush=True)
except Exception as e:
raise RuntimeError(f"Schema validation fail: {e}")

View file

@@ -3,7 +3,6 @@
Language service abstracts prompt engineering from LLM.
"""
import asyncio
import json
import re
@@ -11,59 +10,74 @@ from .... schema import Definition, Relationship, Triple
from .... schema import Topic
from .... schema import PromptRequest, PromptResponse, Error
from .... schema import TextCompletionRequest, TextCompletionResponse
from .... base import FlowProcessor
from .... base import ProducerSpec, ConsumerSpec, TextCompletionClientSpec
from .... schema import text_completion_request_queue
from .... schema import text_completion_response_queue
from .... schema import prompt_request_queue, prompt_response_queue
from .... base import ConsumerProducer
from .... clients.llm_client import LlmClient
from . prompt_manager import PromptConfiguration, Prompt, PromptManager
default_ident = "prompt"
module = ".".join(__name__.split(".")[1:-1])
class Processor(FlowProcessor):
default_input_queue = prompt_request_queue
default_output_queue = prompt_response_queue
default_subscriber = module
class Processor(ConsumerProducer):
def __init__(self, **params):
id = params.get("id")
input_queue = params.get("input_queue", default_input_queue)
output_queue = params.get("output_queue", default_output_queue)
subscriber = params.get("subscriber", default_subscriber)
tc_request_queue = params.get(
"text_completion_request_queue", text_completion_request_queue
)
tc_response_queue = params.get(
"text_completion_response_queue", text_completion_response_queue
)
# Config key for prompts
self.config_key = params.get("config_type", "prompt")
super(Processor, self).__init__(
**params | {
"id": id,
"input_queue": input_queue,
"output_queue": output_queue,
"subscriber": subscriber,
"input_schema": PromptRequest,
"output_schema": PromptResponse,
"text_completion_request_queue": tc_request_queue,
"text_completion_response_queue": tc_response_queue,
}
)
self.register_specification(
ConsumerSpec(
name = "request",
schema = PromptRequest,
handler = self.on_request
)
self.llm = LlmClient(
subscriber=subscriber,
input_queue=tc_request_queue,
output_queue=tc_response_queue,
pulsar_host = self.pulsar_host,
pulsar_api_key=self.pulsar_api_key,
)
self.register_specification(
TextCompletionClientSpec(
request_name = "text-completion-request",
response_name = "text-completion-response",
)
)
# System prompt hack
class Llm:
def __init__(self, llm):
self.llm = llm
def request(self, system, prompt):
print(system)
print(prompt, flush=True)
return self.llm.request(system, prompt)
self.register_specification(
ProducerSpec(
name = "response",
schema = PromptResponse
)
)
self.register_config_handler(self.on_prompt_config)
self.llm = Llm(self.llm)
# Null configuration, should reload quickly
self.manager = PromptManager(
llm = self.llm,
config = PromptConfiguration("", {}, {})
)
async def on_prompt_config(self, config, version):
async def on_config(self, version, config):
print("Loading configuration version", version)
@@ -97,6 +111,7 @@ class Processor(FlowProcessor):
)
self.manager = PromptManager(
self.llm,
PromptConfiguration(
system,
{},
@@ -111,7 +126,7 @@ class Processor(FlowProcessor):
print("Exception:", e, flush=True)
print("Configuration reload failed", flush=True)
async def on_request(self, msg, consumer, flow):
async def handle(self, msg):
v = msg.value()
@@ -123,7 +138,7 @@ class Processor(FlowProcessor):
try:
print(v.terms, flush=True)
print(v.terms)
input = {
k: json.loads(v)
@@ -131,33 +146,14 @@ class Processor(FlowProcessor):
}
print(f"Handling kind {kind}...", flush=True)
print(input, flush=True)
async def llm(system, prompt):
print(system, flush=True)
print(prompt, flush=True)
resp = await flow("text-completion-request").text_completion(
system = system, prompt = prompt,
)
try:
return resp
except Exception as e:
print("LLM Exception:", e, flush=True)
return None
try:
resp = await self.manager.invoke(kind, input, llm)
except Exception as e:
print("Invocation exception:", e, flush=True)
raise e
print(resp, flush=True)
resp = self.manager.invoke(kind, input)
if isinstance(resp, str):
print("Send text response...", flush=True)
print(resp, flush=True)
r = PromptResponse(
text=resp,
@@ -165,7 +161,7 @@ class Processor(FlowProcessor):
error=None,
)
await flow("response").send(r, properties={"id": id})
await self.send(r, properties={"id": id})
return
@@ -180,13 +176,13 @@ class Processor(FlowProcessor):
error=None,
)
await flow("response").send(r, properties={"id": id})
await self.send(r, properties={"id": id})
return
except Exception as e:
print(f"Exception: {e}", flush=True)
print(f"Exception: {e}")
print("Send error response...", flush=True)
@@ -198,11 +194,11 @@ class Processor(FlowProcessor):
response=None,
)
await flow("response").send(r, properties={"id": id})
await self.send(r, properties={"id": id})
except Exception as e:
print(f"Exception: {e}", flush=True)
print(f"Exception: {e}")
print("Send error response...", flush=True)
@@ -219,7 +215,22 @@ class Processor(FlowProcessor):
@staticmethod
def add_args(parser):
FlowProcessor.add_args(parser)
ConsumerProducer.add_args(
parser, default_input_queue, default_subscriber,
default_output_queue,
)
parser.add_argument(
'--text-completion-request-queue',
default=text_completion_request_queue,
help=f'Text completion request queue (default: {text_completion_request_queue})',
)
parser.add_argument(
'--text-completion-response-queue',
default=text_completion_response_queue,
help=f'Text completion response queue (default: {text_completion_response_queue})',
)
parser.add_argument(
'--config-type',
@@ -229,5 +240,5 @@ class Processor(FlowProcessor):
def run():
Processor.launch(default_ident, __doc__)
Processor.launch(module, __doc__)
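
The restored `add_args` delegates the common options to `ConsumerProducer` and then layers the text-completion queue overrides on top. A runnable sketch of how those two flags land on an `argparse` parser (default values are placeholders; the base-class options are assumed from the call above):

```python
import argparse

# Placeholder defaults; the real constants come from trustgraph's schema module.
text_completion_request_queue = "text-completion-request"
text_completion_response_queue = "text-completion-response"

parser = argparse.ArgumentParser()
# ConsumerProducer.add_args(parser, ...) would register the common
# input-queue/output-queue/subscriber options first; then the subclass adds:
parser.add_argument(
    '--text-completion-request-queue',
    default=text_completion_request_queue,
    help=f'Text completion request queue (default: {text_completion_request_queue})',
)
parser.add_argument(
    '--text-completion-response-queue',
    default=text_completion_response_queue,
    help=f'Text completion response queue (default: {text_completion_response_queue})',
)

args = parser.parse_args(['--text-completion-request-queue', 'my-queue'])
print(args.text_completion_request_queue)   # -> my-queue
print(args.text_completion_response_queue)  # -> text-completion-response
```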

View file

@@ -16,7 +16,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue
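
The same one-line change repeats in each of the text-completion backends below; every module keeps the shared `ConsumerProducer` shape and only derives its `module` name from the package path. A generic sketch of that shape, with the base class stubbed and backend specifics elided:

```python
class ConsumerProducer:
    # Stub for trustgraph's base class; the real one wires up Pulsar
    # consumers and producers.
    pass

text_completion_request_queue = "text-completion-request"    # placeholder
text_completion_response_queue = "text-completion-response"  # placeholder

# Pattern shared by every text-completion module in these hunks:
module = ".".join(__name__.split(".")[1:-1])

default_input_queue = text_completion_request_queue
default_output_queue = text_completion_response_queue

class Processor(ConsumerProducer):
    # Backend-specific completion handling (one module per LLM backend)
    # goes here; those bodies are unchanged by this revert.
    ...
```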

View file

@@ -16,7 +16,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -15,7 +15,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -15,7 +15,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -17,7 +17,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -14,7 +14,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -15,7 +15,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -15,7 +15,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -15,7 +15,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue

View file

@@ -15,7 +15,7 @@ from .... log_level import LogLevel
 from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
-module = "text-completion"
+module = ".".join(__name__.split(".")[1:-1])
 default_input_queue = text_completion_request_queue
 default_output_queue = text_completion_response_queue