Mirror of https://github.com/trustgraph-ai/trustgraph.git, synced 2026-04-27 17:36:23 +02:00
Extending test coverage (#434)
* Contract tests
* Testing embeddings
* Agent unit tests
* Knowledge pipeline tests
* Turn on contract tests
parent 2f7fddd206
commit 4daa54abaf
23 changed files with 6303 additions and 44 deletions
@@ -3,81 +3,46 @@
 Embeddings service, applies an embeddings model hosted on a local Ollama.
 Input is text, output is embeddings vector.
 """

-from ... schema import EmbeddingsRequest, EmbeddingsResponse
-from ... schema import embeddings_request_queue, embeddings_response_queue
-from ... log_level import LogLevel
-from ... base import ConsumerProducer
+from ... base import EmbeddingsService

 from ollama import Client
 import os

-module = "embeddings"
+default_ident = "embeddings"

-default_input_queue = embeddings_request_queue
-default_output_queue = embeddings_response_queue
-default_subscriber = module
 default_model="mxbai-embed-large"
 default_ollama = os.getenv("OLLAMA_HOST", 'http://localhost:11434')

-class Processor(ConsumerProducer):
+class Processor(EmbeddingsService):

     def __init__(self, **params):

-        input_queue = params.get("input_queue", default_input_queue)
-        output_queue = params.get("output_queue", default_output_queue)
-        subscriber = params.get("subscriber", default_subscriber)
-        ollama = params.get("ollama", default_ollama)
         model = params.get("model", default_model)
+        ollama = params.get("ollama", default_ollama)

         super(Processor, self).__init__(
             **params | {
-                "input_queue": input_queue,
-                "output_queue": output_queue,
-                "subscriber": subscriber,
-                "input_schema": EmbeddingsRequest,
-                "output_schema": EmbeddingsResponse,
-                "model": model,
+                "ollama": ollama,
+                "model": model
             }
         )

         self.client = Client(host=ollama)
         self.model = model

-    async def handle(self, msg):
+    async def on_embeddings(self, text):

-        v = msg.value()
-
-        # Sender-produced ID
-        id = msg.properties()["id"]
-
-        print(f"Handling input {id}...", flush=True)
-
-        text = v.text
         embeds = self.client.embed(
             model = self.model,
             input = text
         )

-        print("Send response...", flush=True)
-        r = EmbeddingsResponse(
-            vectors=embeds.embeddings,
-            error=None,
-        )
-
-        await self.send(r, properties={"id": id})
-
-        print("Done.", flush=True)
+        return embeds.embeddings

     @staticmethod
     def add_args(parser):

-        ConsumerProducer.add_args(
-            parser, default_input_queue, default_subscriber,
-            default_output_queue,
-        )
+        EmbeddingsService.add_args(parser)

         parser.add_argument(
             '-m', '--model',
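The refactor above is what makes this commit's new embeddings unit tests practical: on_embeddings(text) now returns the vectors directly instead of decoding a Pulsar message and sending a response, so the handler logic can be exercised with a mocked Ollama client and no broker. The following pytest-style sketch illustrates that pattern; the OllamaEmbedder stand-in duplicates the handler's logic because the diff does not show the module's import path or the repo's actual test code, so all names here are illustrative.

import asyncio
from types import SimpleNamespace
from unittest.mock import MagicMock

class OllamaEmbedder:
    # Stand-in with the same shape as the Processor above: hold a
    # client and a model name, embed text, return the raw vectors.
    def __init__(self, client, model):
        self.client = client
        self.model = model

    async def on_embeddings(self, text):
        embeds = self.client.embed(model=self.model, input=text)
        return embeds.embeddings

def test_on_embeddings_returns_vectors():
    # Mock the Ollama client so no server is needed.
    client = MagicMock()
    client.embed.return_value = SimpleNamespace(
        embeddings=[[0.1, 0.2, 0.3]]
    )
    svc = OllamaEmbedder(client=client, model="mxbai-embed-large")

    vectors = asyncio.run(svc.on_embeddings("hello world"))

    client.embed.assert_called_once_with(
        model="mxbai-embed-large", input="hello world"
    )
    assert vectors == [[0.1, 0.2, 0.3]]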
@@ -93,5 +58,6 @@ class Processor(ConsumerProducer):

 def run():

-    Processor.launch(module, __doc__)
+    Processor.launch(default_ident, __doc__)
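For reference, the client.embed(...) call kept by the diff is the embed API of the ollama Python package. A minimal standalone sketch, assuming a local Ollama server with the model already pulled (ollama pull mxbai-embed-large):

from ollama import Client

# Mirrors default_ollama above; set OLLAMA_HOST to point elsewhere.
client = Client(host="http://localhost:11434")

response = client.embed(
    model="mxbai-embed-large",
    input="Input is text, output is embeddings vector.",
)

# response.embeddings holds one vector per input string;
# mxbai-embed-large returns 1024-dimensional vectors.
print(len(response.embeddings), len(response.embeddings[0]))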