* Fix LMStudio, cache documents with tg-load-sample-documents

* Fix Mistral
This commit is contained in:
cybermaggedon 2025-05-06 16:17:16 +01:00 committed by GitHub
parent 54e475fa3a
commit d0da122bed
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 25 additions and 7 deletions

View file

@@ -67,6 +67,8 @@ some-containers:
-t ${CONTAINER_BASE}/trustgraph-flow:${VERSION} .
${DOCKER} build -f containers/Containerfile.vertexai \
-t ${CONTAINER_BASE}/trustgraph-vertexai:${VERSION} .
${DOCKER} build -f containers/Containerfile.bedrock \
-t ${CONTAINER_BASE}/trustgraph-bedrock:${VERSION} .
basic-containers: update-package-versions
${DOCKER} build -f containers/Containerfile.base \

View file

@@ -14,7 +14,7 @@ default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
def query(url, flow_id, system, prompt):
api = Api(url).flow().id(flow_id).
api = Api(url).flow().id(flow_id)
resp = api.text_completion(system=system, prompt=prompt)

View file

@@ -11,7 +11,7 @@ import datetime
import requests
from trustgraph.api import Api
from trustgraph.api.types import Uri, Literal, Triple
from trustgraph.api.types import hash, Uri, Literal, Triple
default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
default_user = 'trustgraph'
@@ -29,6 +29,11 @@ session = requests.session()
session.mount('file://', FileAdapter())
try:
os.mkdir("doc-cache")
except:
pass
documents = [
{
@@ -669,11 +674,19 @@ class Loader:
try:
print(doc["title"], ":")
print(" downloading...")
print(" done.")
resp = session.get(doc["url"])
content = resp.content
hid = hash(doc["url"])
cache_file = f"doc-cache/{hid}"
if os.path.isfile(cache_file):
print(" (use cache file)")
content = open(cache_file, "rb").read()
else:
print(" downloading...")
resp = session.get(doc["url"])
content = resp.content
open(cache_file, "wb").write(content)
print(" done.")
print(" adding...")

View file

@@ -70,6 +70,8 @@ class Processor(LlmService):
model = self.model
)
return resp
# FIXME: Wrong exception, don't know what this LLM throws
# for a rate limit
except cohere.TooManyRequestsError:

View file

@@ -12,7 +12,6 @@ from .... base import LlmService, LlmResult
default_ident = "text-completion"
default_subscriber = module
default_model = 'gemma3:9b'
default_url = os.getenv("LMSTUDIO_URL", "http://localhost:1234/")
default_temperature = 0.0

View file

@@ -86,6 +86,8 @@ class Processor(LlmService):
model = self.model
)
return resp
# FIXME: Wrong exception. The MistralAI library has retry logic
# so retry-able errors are retried transparently. It means we
# don't get rate limit events.