# Subscriber is similar to Consumer: it provides a service that takes
# messages off a queue and makes them available through an internal
# broker, which makes it suitable for cases where multiple recipients
# read from the same queue.
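
# A minimal usage sketch (assumes an already-constructed backend object
# exposing create_consumer(), and messages whose properties carry an
# "id" used to route them to waiters; the names below are illustrative):
#
#     sub = Subscriber(backend, topic="responses",
#                      subscription="svc", consumer_name="svc-1")
#     q = await sub.subscribe("request-123")  # waiter keyed by id
#     await sub.start()
#     value = await q.get()                   # blocks until delivery
#     await sub.unsubscribe("request-123")
#     await sub.stop()                        # drain, then shut down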

import asyncio
import time
import logging
import uuid
from concurrent.futures import ThreadPoolExecutor

# Module logger
logger = logging.getLogger(__name__)


# Timeout exception - can come from different backends
class TimeoutError(Exception):
    pass


class Subscriber:

    def __init__(self, backend, topic, subscription, consumer_name,
                 schema=None, max_size=100, metrics=None,
                 backpressure_strategy="block", drain_timeout=5.0):
        """backpressure_strategy selects how a full waiter queue is
        handled: "block" (wait for space), "drop_oldest" or "drop_new".
        drain_timeout bounds how long stop() waits for the subscriber
        queues to empty."""
        self.backend = backend
        self.topic = topic
        self.subscription = subscription
        self.consumer_name = consumer_name
        self.schema = schema
        self.q = {}     # Per-id queues created by subscribe()
        self.full = {}  # Queues receiving all messages (subscribe_all)
        self.max_size = max_size
        self.lock = asyncio.Lock()
        self.running = True
        self.draining = False  # True while draining queues during shutdown
        self.metrics = metrics
        self.task = None
        self.backpressure_strategy = backpressure_strategy
        self.drain_timeout = drain_timeout
        self.pending_acks = {}  # Track messages awaiting delivery
        self.consumer = None
        self.executor = None

    def __del__(self):
        self.running = False

    async def start(self):
        self.task = asyncio.create_task(self.run())

    async def stop(self):
        """Initiate graceful shutdown with draining"""
        self.running = False
        self.draining = True

        if self.task:
            # Wait for run() to complete draining
            await self.task

    async def join(self):
        await self.stop()

        if self.task:
            await self.task

    async def run(self):
        """Enhanced run method with integrated draining logic"""

        while self.running or self.draining:

            if self.metrics:
                self.metrics.state("stopped")

            try:

                # Create consumer and dedicated thread if needed
                # (first run or after failure). All consumer operations
                # run on this single thread because the underlying
                # blocking connection is not thread-safe.
                if self.consumer is None:

                    self.executor = ThreadPoolExecutor(max_workers=1)
                    loop = asyncio.get_running_loop()

                    self.consumer = await loop.run_in_executor(
                        self.executor,
                        lambda: self.backend.create_consumer(
                            topic=self.topic,
                            subscription=self.subscription,
                            schema=self.schema,
                            consumer_type='exclusive',
                        ),
                    )

                if self.metrics:
                    self.metrics.state("running")

                logger.info("Subscriber running...")

                drain_end_time = None

                while self.running or self.draining:

                    # Start drain timeout when entering drain mode
                    if self.draining and drain_end_time is None:
                        drain_end_time = time.time() + self.drain_timeout
                        logger.info(
                            "Subscriber entering drain mode, "
                            f"timeout={self.drain_timeout}s"
                        )

                        # Stop accepting new messages during drain.
                        # Note: not all backends support pausing message
                        # listeners.
                        if self.consumer and hasattr(
                            self.consumer, 'pause_message_listener'
                        ):
                            try:
                                self.consumer.pause_message_listener()
                            except Exception:
                                # Not all consumers support message
                                # listeners
                                pass

                    # Check drain timeout
                    if self.draining and drain_end_time and \
                            time.time() > drain_end_time:
                        async with self.lock:
                            total_pending = sum(
                                q.qsize() for q in
                                list(self.q.values()) + list(self.full.values())
                            )
                        if total_pending > 0:
                            logger.warning(
                                "Drain timeout reached with "
                                f"{total_pending} messages in queues"
                            )
                        self.draining = False
                        break

                    # Check if we can exit drain mode
                    if self.draining:
                        async with self.lock:
                            all_empty = all(
                                q.empty() for q in
                                list(self.q.values()) + list(self.full.values())
                            )
                        if all_empty and len(self.pending_acks) == 0:
                            logger.info("Subscriber queues drained successfully")
                            self.draining = False
                            break

                    # Process messages only if not draining
                    if not self.draining:
                        try:
                            loop = asyncio.get_running_loop()
                            msg = await loop.run_in_executor(
                                self.executor,
                                lambda: self.consumer.receive(
                                    timeout_millis=250
                                ),
                            )
                        except Exception as e:
                            # Handle timeout from any backend
                            if ('timeout' in str(type(e)).lower()
                                    or 'timeout' in str(e).lower()):
                                continue
                            logger.error(
                                f"Exception in subscriber receive: {e}",
                                exc_info=True
                            )
                            raise

                        if self.metrics:
                            self.metrics.received()

                        # Process the message with deferred acknowledgment
                        await self._process_message(msg)

                    else:
                        # During draining, just wait for queues to empty
                        await asyncio.sleep(0.1)

            except Exception as e:
                logger.error(f"Subscriber exception: {e}", exc_info=True)

            finally:

                # Negative acknowledge any pending messages
                for msg in self.pending_acks.values():
                    try:
                        self.consumer.negative_acknowledge(msg)
                    except Exception:
                        pass  # Consumer already closed or error
                self.pending_acks.clear()

                if self.consumer:
                    if hasattr(self.consumer, 'unsubscribe'):
                        try:
                            self.consumer.unsubscribe()
                        except Exception:
                            pass  # Already closed or error
                    try:
                        self.consumer.close()
                    except Exception:
                        pass  # Already closed or error
                    self.consumer = None

                if self.executor:
                    self.executor.shutdown(wait=False)
                    self.executor = None

            if self.metrics:
                self.metrics.state("stopped")

            if not self.running and not self.draining:
                return

            # Sleep before retry
            await asyncio.sleep(1)

    async def subscribe(self, id):
        async with self.lock:
            q = asyncio.Queue(maxsize=self.max_size)
            self.q[id] = q
        return q

    async def unsubscribe(self, id):
        async with self.lock:
            if id in self.q:
                # self.q[id].shutdown(immediate=True)
                del self.q[id]

    async def subscribe_all(self, id):
        async with self.lock:
            q = asyncio.Queue(maxsize=self.max_size)
            self.full[id] = q
        return q

    async def unsubscribe_all(self, id):
        async with self.lock:
            if id in self.full:
                # self.full[id].shutdown(immediate=True)
                del self.full[id]

    async def _process_message(self, msg):
        """Process a single message with deferred acknowledgment"""

        # Store message for later acknowledgment
        msg_id = str(uuid.uuid4())
        self.pending_acks[msg_id] = msg

        try:
            id = msg.properties()["id"]
        except Exception:
            id = None

        value = msg.value()

        delivery_success = False
        has_matching_waiter = False

        async with self.lock:

            # Deliver to specific subscribers
            if id in self.q:
                has_matching_waiter = True
                delivery_success = await self._deliver_to_queue(
                    self.q[id], value
                )

            # Deliver to all subscribers
            for q in self.full.values():
                has_matching_waiter = True
                if await self._deliver_to_queue(q, value):
                    delivery_success = True

        # Always acknowledge the message to prevent redelivery storms
        # on shared topics. Negative acknowledging orphaned messages
        # (no matching waiter) causes immediate redelivery to all
        # subscribers, none of whom can handle it either.
        self.consumer.acknowledge(msg)
        del self.pending_acks[msg_id]

        if not delivery_success:
            if not has_matching_waiter:
                # Message arrived for a waiter that no longer exists
                # (likely due to client disconnect or timeout)
                logger.debug(
                    f"Discarding orphaned message with id={id} - "
                    "no matching waiter"
                )
            else:
                # Delivery failed (e.g., queue full with drop_new strategy)
                logger.debug(
                    f"Message with id={id} dropped due to backpressure"
                )

    async def _deliver_to_queue(self, queue, value):
        """Deliver message to queue with backpressure handling"""
        try:
            if self.backpressure_strategy == "block":
                # Block until space available (no timeout)
                await queue.put(value)
                return True

            elif self.backpressure_strategy == "drop_oldest":
                # Drop oldest message if queue full
                if queue.full():
                    try:
                        queue.get_nowait()
                        if self.metrics:
                            self.metrics.dropped()
                    except asyncio.QueueEmpty:
                        pass
                await queue.put(value)
                return True

            elif self.backpressure_strategy == "drop_new":
                # Drop new message if queue full
                if queue.full():
                    if self.metrics:
                        self.metrics.dropped()
                    return False
                await queue.put(value)
                return True

            # Unknown strategy: count the message as undeliverable rather
            # than silently returning None
            return False

        except Exception as e:
            logger.error(f"Failed to deliver message: {e}")
            return False
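

# ---------------------------------------------------------------------------
# Self-contained smoke test: a sketch for local experimentation, not part
# of the module API. FakeBackend, FakeConsumer and FakeMessage are
# hypothetical stand-ins that mimic the small surface this module relies
# on (create_consumer / receive / acknowledge / negative_acknowledge /
# close on the consumer, properties / value on the message), so the
# Subscriber can be exercised without a real broker.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    import queue as thread_queue

    class FakeMessage:
        def __init__(self, props, value):
            self._props, self._value = props, value

        def properties(self):
            return self._props

        def value(self):
            return self._value

    class FakeConsumer:
        def __init__(self):
            self._q = thread_queue.Queue()
            self._q.put(FakeMessage({"id": "req-1"}, b"hello"))

        def receive(self, timeout_millis=250):
            try:
                return self._q.get(timeout=timeout_millis / 1000.0)
            except thread_queue.Empty:
                # Broker clients raise on a receive timeout; run()
                # matches on the type name and continues the loop
                raise TimeoutError("receive timed out")

        def acknowledge(self, msg):
            pass

        def negative_acknowledge(self, msg):
            pass

        def close(self):
            pass

    class FakeBackend:
        def create_consumer(self, **kwargs):
            return FakeConsumer()

    async def demo():
        sub = Subscriber(FakeBackend(), "topic", "sub", "consumer")
        # Register the waiter before starting so the first message
        # cannot race past an empty waiter table and be discarded
        q = await sub.subscribe("req-1")
        await sub.start()
        print("received:", await q.get())
        await sub.unsubscribe("req-1")
        await sub.stop()

    asyncio.run(demo())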