Extending test coverage (#434)

* Contract tests

* Testing embeddings

* Agent unit tests

* Knowledge pipeline tests

* Turn on contract tests
This commit is contained in:
cybermaggedon 2025-07-14 17:54:04 +01:00 committed by GitHub
parent 2f7fddd206
commit 4daa54abaf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
23 changed files with 6303 additions and 44 deletions

View file

@ -3,81 +3,46 @@
Embeddings service, applies an embeddings model hosted on a local Ollama.
Input is text, output is embeddings vector.
"""
from ... base import EmbeddingsService
from ... schema import EmbeddingsRequest, EmbeddingsResponse
from ... schema import embeddings_request_queue, embeddings_response_queue
from ... log_level import LogLevel
from ... base import ConsumerProducer
from ollama import Client
import os
module = "embeddings"
default_ident = "embeddings"
default_input_queue = embeddings_request_queue
default_output_queue = embeddings_response_queue
default_subscriber = module
default_model="mxbai-embed-large"
default_ollama = os.getenv("OLLAMA_HOST", 'http://localhost:11434')
class Processor(ConsumerProducer):
class Processor(EmbeddingsService):
def __init__(self, **params):
input_queue = params.get("input_queue", default_input_queue)
output_queue = params.get("output_queue", default_output_queue)
subscriber = params.get("subscriber", default_subscriber)
ollama = params.get("ollama", default_ollama)
model = params.get("model", default_model)
ollama = params.get("ollama", default_ollama)
super(Processor, self).__init__(
**params | {
"input_queue": input_queue,
"output_queue": output_queue,
"subscriber": subscriber,
"input_schema": EmbeddingsRequest,
"output_schema": EmbeddingsResponse,
"ollama": ollama,
"model": model,
"model": model
}
)
self.client = Client(host=ollama)
self.model = model
async def handle(self, msg):
async def on_embeddings(self, text):
v = msg.value()
# Sender-produced ID
id = msg.properties()["id"]
print(f"Handling input {id}...", flush=True)
text = v.text
embeds = self.client.embed(
model = self.model,
input = text
)
print("Send response...", flush=True)
r = EmbeddingsResponse(
vectors=embeds.embeddings,
error=None,
)
await self.send(r, properties={"id": id})
print("Done.", flush=True)
return embeds.embeddings
@staticmethod
def add_args(parser):
ConsumerProducer.add_args(
parser, default_input_queue, default_subscriber,
default_output_queue,
)
EmbeddingsService.add_args(parser)
parser.add_argument(
'-m', '--model',
@ -93,5 +58,6 @@ class Processor(ConsumerProducer):
def run():
Processor.launch(module, __doc__)
Processor.launch(default_ident, __doc__)