mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 08:26:21 +02:00
Better proc group logging and concurrency (#810)
- Silence pika, cassandra etc. logging at INFO (too much chatter) - Add per processor log tags so that logs can be understood in processor group. - Deal with RabbitMQ lag weirdness - Added more processor group examples
This commit is contained in:
parent
ce3c8b421b
commit
2bf4af294e
20 changed files with 1021 additions and 647 deletions
24
dev-tools/proc-group/groups/llm.yaml
Normal file
24
dev-tools/proc-group/groups/llm.yaml
Normal file
@@ -0,0 +1,24 @@
# LLM. Outbound text-completion calls. Isolated because the upstream
# LLM API is often the bottleneck and the most likely thing to need
# restart (provider changes, model changes, API flakiness).

_defaults: &defaults
  pubsub_backend: rabbitmq
  rabbitmq_host: localhost
  log_level: INFO

processors:

  - class: trustgraph.model.text_completion.openai.Processor
    params:
      <<: *defaults
      id: text-completion
      max_output: 8192
      temperature: 0.0

  - class: trustgraph.model.text_completion.openai.Processor
    params:
      <<: *defaults
      id: text-completion-rag
      max_output: 8192
      temperature: 0.0
Loading…
Add table
Add a link
Reference in a new issue