mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-26 00:46:22 +02:00
- Increased max-iterations default to 15, and made it a configurable (#163)
parameter with --max-iterations. Was previously hard-coded at 10. - Fixed arg passing that caused pulling the wrong tool name
This commit is contained in:
parent
0253281727
commit
f2c78b701e
1 changed files with 13 additions and 3 deletions
|
|
@ -32,6 +32,7 @@ module = ".".join(__name__.split(".")[1:-1])
|
||||||
default_input_queue = agent_request_queue
|
default_input_queue = agent_request_queue
|
||||||
default_output_queue = agent_response_queue
|
default_output_queue = agent_response_queue
|
||||||
default_subscriber = module
|
default_subscriber = module
|
||||||
|
default_max_iterations = 15
|
||||||
|
|
||||||
class Processor(ConsumerProducer):
|
class Processor(ConsumerProducer):
|
||||||
|
|
||||||
|
|
@ -39,6 +40,8 @@ class Processor(ConsumerProducer):
|
||||||
|
|
||||||
additional = params.get("context", None)
|
additional = params.get("context", None)
|
||||||
|
|
||||||
|
self.max_iterations = int(params.get("max_iterations", default_max_iterations))
|
||||||
|
|
||||||
tools = {}
|
tools = {}
|
||||||
|
|
||||||
# Parsing the prompt information to the prompt configuration
|
# Parsing the prompt information to the prompt configuration
|
||||||
|
|
@ -67,8 +70,9 @@ class Processor(ConsumerProducer):
|
||||||
)
|
)
|
||||||
|
|
||||||
if len(ttoks) == 1:
|
if len(ttoks) == 1:
|
||||||
|
|
||||||
tools[toks[0]] = Tool(
|
tools[toks[0]] = Tool(
|
||||||
name = ttoks[0],
|
name = toks[0],
|
||||||
description = "",
|
description = "",
|
||||||
implementation = impl,
|
implementation = impl,
|
||||||
config = { "input": "query" },
|
config = { "input": "query" },
|
||||||
|
|
@ -76,7 +80,7 @@ class Processor(ConsumerProducer):
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
tools[toks[0]] = Tool(
|
tools[toks[0]] = Tool(
|
||||||
name = ttoks[0],
|
name = toks[0],
|
||||||
description = "",
|
description = "",
|
||||||
implementation = impl,
|
implementation = impl,
|
||||||
config = { "input": ttoks[1] },
|
config = { "input": ttoks[1] },
|
||||||
|
|
@ -226,7 +230,7 @@ class Processor(ConsumerProducer):
|
||||||
|
|
||||||
print(f"Question: {v.question}", flush=True)
|
print(f"Question: {v.question}", flush=True)
|
||||||
|
|
||||||
if len(history) > 10:
|
if len(history) >= self.max_iterations:
|
||||||
raise RuntimeError("Too many agent iterations")
|
raise RuntimeError("Too many agent iterations")
|
||||||
|
|
||||||
print(f"History: {history}", flush=True)
|
print(f"History: {history}", flush=True)
|
||||||
|
|
@ -394,6 +398,12 @@ description.'''
|
||||||
help=f'Optional, specifies additional context text for the LLM.'
|
help=f'Optional, specifies additional context text for the LLM.'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
'--max-iterations',
|
||||||
|
default=default_max_iterations,
|
||||||
|
help=f'Maximum number of react iterations (default: {default_max_iterations})',
|
||||||
|
)
|
||||||
|
|
||||||
def run():
|
def run():
|
||||||
|
|
||||||
Processor.start(module, __doc__)
|
Processor.start(module, __doc__)
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue