diff --git a/Makefile b/Makefile
index 1899e602..2553abe0 100644
--- a/Makefile
+++ b/Makefile
@@ -67,6 +67,8 @@ some-containers:
 		-t ${CONTAINER_BASE}/trustgraph-flow:${VERSION} .
 	${DOCKER} build -f containers/Containerfile.vertexai \
 		-t ${CONTAINER_BASE}/trustgraph-vertexai:${VERSION} .
+	${DOCKER} build -f containers/Containerfile.bedrock \
+		-t ${CONTAINER_BASE}/trustgraph-bedrock:${VERSION} .
 
 basic-containers: update-package-versions
 	${DOCKER} build -f containers/Containerfile.base \
diff --git a/trustgraph-cli/scripts/tg-invoke-llm b/trustgraph-cli/scripts/tg-invoke-llm
index b0ea84d9..86c8d60f 100755
--- a/trustgraph-cli/scripts/tg-invoke-llm
+++ b/trustgraph-cli/scripts/tg-invoke-llm
@@ -14,7 +14,7 @@
 default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
 
 def query(url, flow_id, system, prompt):
 
-    api = Api(url).flow().id(flow_id).
+    api = Api(url).flow().id(flow_id)
 
     resp = api.text_completion(system=system, prompt=prompt)
diff --git a/trustgraph-cli/scripts/tg-load-sample-documents b/trustgraph-cli/scripts/tg-load-sample-documents
index 7d99487b..2bbad89f 100755
--- a/trustgraph-cli/scripts/tg-load-sample-documents
+++ b/trustgraph-cli/scripts/tg-load-sample-documents
@@ -11,7 +11,7 @@
 import datetime
 import requests
 from trustgraph.api import Api
-from trustgraph.api.types import Uri, Literal, Triple
+from trustgraph.api.types import hash, Uri, Literal, Triple
 
 default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
 default_user = 'trustgraph'
@@ -29,6 +29,11 @@
 session = requests.session()
 session.mount('file://', FileAdapter())
 
+try:
+    os.mkdir("doc-cache")
+except:
+    pass
+
 documents = [
 
     {
@@ -669,11 +674,19 @@ class Loader:
 
         try:
 
            print(doc["title"], ":")
 
-           print(" downloading...")
-           print(" done.")
-           resp = session.get(doc["url"])
-           content = resp.content
+           hid = hash(doc["url"])
+           cache_file = f"doc-cache/{hid}"
+
+           if os.path.isfile(cache_file):
+               print(" (use cache file)")
+               content = open(cache_file, "rb").read()
+           else:
+               print(" downloading...")
+               resp = session.get(doc["url"])
+               content = resp.content
+               open(cache_file, "wb").write(content)
+               print(" done.")
 
            print(" adding...")
diff --git a/trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py b/trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py
index d6b2b971..8e583040 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py
@@ -70,6 +70,8 @@ class Processor(LlmService):
                model = self.model
            )
 
+           return resp
+
        # FIXME: Wrong exception, don't know what this LLM throws
        # for a rate limit
        except cohere.TooManyRequestsError:
diff --git a/trustgraph-flow/trustgraph/model/text_completion/lmstudio/llm.py b/trustgraph-flow/trustgraph/model/text_completion/lmstudio/llm.py
index c64bd4fa..db1ec00e 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/lmstudio/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/lmstudio/llm.py
@@ -12,7 +12,6 @@
 from .... base import LlmService, LlmResult
 
 default_ident = "text-completion"
-default_subscriber = module
 default_model = 'gemma3:9b'
 default_url = os.getenv("LMSTUDIO_URL", "http://localhost:1234/")
 default_temperature = 0.0
diff --git a/trustgraph-flow/trustgraph/model/text_completion/mistral/llm.py b/trustgraph-flow/trustgraph/model/text_completion/mistral/llm.py
index 93eccb35..0c5c1430 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/mistral/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/mistral/llm.py
@@ -86,6 +86,8 @@ class Processor(LlmService):
                model = self.model
            )
 
+           return resp
+
        # FIXME: Wrong exception. The MistralAI library has retry logic
        # so retry-able errors are retried transparently. It means we
        # don't get rate limit events.