Rate limit handling (#11)

* Added a rate limit exception
* Reduce request/response timeouts because it looks like there are major issues
* Add rate limit exception catch to all consumers
* Version to 0.6.3
This commit is contained in:
cybermaggedon 2024-08-19 22:15:32 +01:00 committed by GitHub
parent 25f557d8a5
commit a38f530c5f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
25 changed files with 188 additions and 152 deletions

View file

@@ -1,8 +1,10 @@
from pulsar.schema import JsonSchema
from prometheus_client import start_http_server, Histogram, Info, Counter
import time
from . base_processor import BaseProcessor
from .. exceptions import TooManyRequests
class Consumer(BaseProcessor):
@@ -59,6 +61,13 @@ class Consumer(BaseProcessor):
__class__.processing_metric.labels(status="success").inc()
except TooManyRequests:
self.consumer.negative_acknowledge(msg)
print("TooManyRequests: will retry")
__class__.processing_metric.labels(status="rate-limit").inc()
time.sleep(5)
continue
except Exception as e:
print("Exception:", e, flush=True)

View file

@@ -1,8 +1,10 @@
from pulsar.schema import JsonSchema
from prometheus_client import Histogram, Info, Counter
import time
from . base_processor import BaseProcessor
from .. exceptions import TooManyRequests
# FIXME: Derive from consumer? And producer?
@@ -78,6 +80,13 @@ class ConsumerProducer(BaseProcessor):
__class__.processing_metric.labels(status="success").inc()
except TooManyRequests:
self.consumer.negative_acknowledge(msg)
print("TooManyRequests: will retry")
__class__.processing_metric.labels(status="rate-limit").inc()
time.sleep(5)
continue
except Exception as e:
print("Exception:", e, flush=True)