Refactor rate limit handling (#280)

* - Refactored rate-limit retry handling into the base class
- ConsumerProducer is now derived from Consumer to simplify the code
- Added a rate_limit_count metric for rate-limit events

* Add rate limit events to VertexAI and Google AI Studio

* Added Grafana rate limit dashboard

* Add rate limit handling to all LLMs
This commit is contained in:
cybermaggedon 2025-01-27 17:04:49 +00:00 committed by GitHub
parent 26a586034c
commit 0e03bc05a4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
14 changed files with 174 additions and 298 deletions

View file

@ -87,8 +87,6 @@ class Processor(ConsumerProducer):
try:
# FIXME: Rate limits?
with __class__.text_completion_metric.time():
response = message = self.claude.messages.create(
@ -117,34 +115,26 @@ class Processor(ConsumerProducer):
print(f"Output Tokens: {outputtokens}", flush=True)
print("Send response...", flush=True)
r = TextCompletionResponse(response=resp, error=None, in_token=inputtokens, out_token=outputtokens, model=self.model)
r = TextCompletionResponse(
response=resp,
error=None,
in_token=inputtokens,
out_token=outputtokens,
model=self.model
)
self.send(r, properties={"id": id})
print("Done.", flush=True)
# FIXME: Wrong exception, don't know what this LLM throws
# for a rate limit
except TooManyRequests:
except anthropic.RateLimitError:
print("Send rate limit response...", flush=True)
r = TextCompletionResponse(
error=Error(
type = "rate-limit",
message = str(e),
),
response=None,
in_token=None,
out_token=None,
model=None,
)
self.producer.send(r, properties={"id": id})
self.consumer.acknowledge(msg)
# Leave rate limit retries to the base handler
raise TooManyRequests()
except Exception as e:
# Apart from rate limits, treat all exceptions as unrecoverable
print(f"Exception: {e}")
print("Send error response...", flush=True)