Initial commit
This commit is contained in:
22
backend/venv/Lib/site-packages/pika/__init__.py
Normal file
22
backend/venv/Lib/site-packages/pika/__init__.py
Normal file
@@ -0,0 +1,22 @@
|
||||
__version__ = '1.3.2'
|
||||
|
||||
import logging
|
||||
|
||||
# Add NullHandler before importing Pika modules to prevent logging warnings
|
||||
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
||||
|
||||
# pylint: disable=C0413
|
||||
|
||||
from pika.connection import ConnectionParameters
|
||||
from pika.connection import URLParameters
|
||||
from pika.connection import SSLOptions
|
||||
from pika.credentials import PlainCredentials
|
||||
from pika.spec import BasicProperties
|
||||
from pika.delivery_mode import DeliveryMode
|
||||
|
||||
from pika import adapters
|
||||
from pika.adapters import BaseConnection
|
||||
from pika.adapters import BlockingConnection
|
||||
from pika.adapters import SelectConnection
|
||||
|
||||
from pika.adapters.utils.connection_workflow import AMQPConnectionWorkflow
|
||||
23
backend/venv/Lib/site-packages/pika/adapters/__init__.py
Normal file
23
backend/venv/Lib/site-packages/pika/adapters/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""
|
||||
Connection Adapters
|
||||
===================
|
||||
|
||||
Pika provides multiple adapters to connect to RabbitMQ:
|
||||
|
||||
- adapters.asyncio_connection.AsyncioConnection: Native Python3 AsyncIO use
|
||||
- adapters.blocking_connection.BlockingConnection: Enables blocking,
|
||||
synchronous operation on top of library for simple uses.
|
||||
- adapters.gevent_connection.GeventConnection: Connection adapter for use with
|
||||
Gevent.
|
||||
- adapters.select_connection.SelectConnection: A native event based connection
|
||||
adapter that implements select, kqueue, poll and epoll.
|
||||
- adapters.tornado_connection.TornadoConnection: Connection adapter for use
|
||||
with the Tornado web framework.
|
||||
- adapters.twisted_connection.TwistedProtocolConnection: Connection adapter for
|
||||
use with the Twisted framework
|
||||
|
||||
"""
|
||||
from pika.adapters.base_connection import BaseConnection
|
||||
from pika.adapters.blocking_connection import BlockingConnection
|
||||
from pika.adapters.select_connection import SelectConnection
|
||||
from pika.adapters.select_connection import IOLoop
|
||||
@@ -0,0 +1,283 @@
|
||||
"""Use pika with the Asyncio EventLoop"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from pika.adapters import base_connection
|
||||
from pika.adapters.utils import nbio_interface, io_services_utils
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
if sys.platform == 'win32':
|
||||
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
|
||||
|
||||
|
||||
class AsyncioConnection(base_connection.BaseConnection):
|
||||
""" The AsyncioConnection runs on the Asyncio EventLoop.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
parameters=None,
|
||||
on_open_callback=None,
|
||||
on_open_error_callback=None,
|
||||
on_close_callback=None,
|
||||
custom_ioloop=None,
|
||||
internal_connection_workflow=True):
|
||||
""" Create a new instance of the AsyncioConnection class, connecting
|
||||
to RabbitMQ automatically
|
||||
|
||||
:param pika.connection.Parameters parameters: Connection parameters
|
||||
:param callable on_open_callback: The method to call when the connection
|
||||
is open
|
||||
:param None | method on_open_error_callback: Called if the connection
|
||||
can't be established or connection establishment is interrupted by
|
||||
`Connection.close()`: on_open_error_callback(Connection, exception).
|
||||
:param None | method on_close_callback: Called when a previously fully
|
||||
open connection is closed:
|
||||
`on_close_callback(Connection, exception)`, where `exception` is
|
||||
either an instance of `exceptions.ConnectionClosed` if closed by
|
||||
user or broker or exception of another type that describes the cause
|
||||
of connection failure.
|
||||
:param None | asyncio.AbstractEventLoop |
|
||||
nbio_interface.AbstractIOServices custom_ioloop:
|
||||
Defaults to asyncio.get_event_loop().
|
||||
:param bool internal_connection_workflow: True for autonomous connection
|
||||
establishment which is default; False for externally-managed
|
||||
connection workflow via the `create_connection()` factory.
|
||||
|
||||
"""
|
||||
if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
|
||||
nbio = custom_ioloop
|
||||
else:
|
||||
nbio = _AsyncioIOServicesAdapter(custom_ioloop)
|
||||
|
||||
super().__init__(
|
||||
parameters,
|
||||
on_open_callback,
|
||||
on_open_error_callback,
|
||||
on_close_callback,
|
||||
nbio,
|
||||
internal_connection_workflow=internal_connection_workflow)
|
||||
|
||||
@classmethod
|
||||
def create_connection(cls,
|
||||
connection_configs,
|
||||
on_done,
|
||||
custom_ioloop=None,
|
||||
workflow=None):
|
||||
"""Implement
|
||||
:py:classmethod::`pika.adapters.BaseConnection.create_connection()`.
|
||||
|
||||
"""
|
||||
nbio = _AsyncioIOServicesAdapter(custom_ioloop)
|
||||
|
||||
def connection_factory(params):
|
||||
"""Connection factory."""
|
||||
if params is None:
|
||||
raise ValueError('Expected pika.connection.Parameters '
|
||||
'instance, but got None in params arg.')
|
||||
return cls(
|
||||
parameters=params,
|
||||
custom_ioloop=nbio,
|
||||
internal_connection_workflow=False)
|
||||
|
||||
return cls._start_connection_workflow(
|
||||
connection_configs=connection_configs,
|
||||
connection_factory=connection_factory,
|
||||
nbio=nbio,
|
||||
workflow=workflow,
|
||||
on_done=on_done)
|
||||
|
||||
|
||||
class _AsyncioIOServicesAdapter(io_services_utils.SocketConnectionMixin,
|
||||
io_services_utils.StreamingConnectionMixin,
|
||||
nbio_interface.AbstractIOServices,
|
||||
nbio_interface.AbstractFileDescriptorServices):
|
||||
"""Implements
|
||||
:py:class:`.utils.nbio_interface.AbstractIOServices` interface
|
||||
on top of `asyncio`.
|
||||
|
||||
NOTE:
|
||||
:py:class:`.utils.nbio_interface.AbstractFileDescriptorServices`
|
||||
interface is only required by the mixins.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, loop=None):
|
||||
"""
|
||||
:param asyncio.AbstractEventLoop | None loop: If None, gets default
|
||||
event loop from asyncio.
|
||||
|
||||
"""
|
||||
self._loop = loop or asyncio.get_event_loop()
|
||||
|
||||
def get_native_ioloop(self):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractIOServices.get_native_ioloop()`.
|
||||
|
||||
"""
|
||||
return self._loop
|
||||
|
||||
def close(self):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractIOServices.close()`.
|
||||
|
||||
"""
|
||||
self._loop.close()
|
||||
|
||||
def run(self):
|
||||
"""Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.run()`.
|
||||
|
||||
"""
|
||||
self._loop.run_forever()
|
||||
|
||||
def stop(self):
|
||||
"""Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.stop()`.
|
||||
|
||||
"""
|
||||
self._loop.stop()
|
||||
|
||||
def add_callback_threadsafe(self, callback):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.
|
||||
|
||||
"""
|
||||
self._loop.call_soon_threadsafe(callback)
|
||||
|
||||
def call_later(self, delay, callback):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractIOServices.call_later()`.
|
||||
|
||||
"""
|
||||
return _TimerHandle(self._loop.call_later(delay, callback))
|
||||
|
||||
def getaddrinfo(self,
|
||||
host,
|
||||
port,
|
||||
on_done,
|
||||
family=0,
|
||||
socktype=0,
|
||||
proto=0,
|
||||
flags=0):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractIOServices.getaddrinfo()`.
|
||||
|
||||
"""
|
||||
return self._schedule_and_wrap_in_io_ref(
|
||||
self._loop.getaddrinfo(
|
||||
host,
|
||||
port,
|
||||
family=family,
|
||||
type=socktype,
|
||||
proto=proto,
|
||||
flags=flags), on_done)
|
||||
|
||||
def set_reader(self, fd, on_readable):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_reader()`.
|
||||
|
||||
"""
|
||||
self._loop.add_reader(fd, on_readable)
|
||||
LOGGER.debug('set_reader(%s, _)', fd)
|
||||
|
||||
def remove_reader(self, fd):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.
|
||||
|
||||
"""
|
||||
LOGGER.debug('remove_reader(%s)', fd)
|
||||
return self._loop.remove_reader(fd)
|
||||
|
||||
def set_writer(self, fd, on_writable):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_writer()`.
|
||||
|
||||
"""
|
||||
self._loop.add_writer(fd, on_writable)
|
||||
LOGGER.debug('set_writer(%s, _)', fd)
|
||||
|
||||
def remove_writer(self, fd):
|
||||
"""Implement
|
||||
:py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.
|
||||
|
||||
"""
|
||||
LOGGER.debug('remove_writer(%s)', fd)
|
||||
return self._loop.remove_writer(fd)
|
||||
|
||||
def _schedule_and_wrap_in_io_ref(self, coro, on_done):
|
||||
"""Schedule the coroutine to run and return _AsyncioIOReference
|
||||
|
||||
:param coroutine-obj coro:
|
||||
:param callable on_done: user callback that takes the completion result
|
||||
or exception as its only arg. It will not be called if the operation
|
||||
was cancelled.
|
||||
:rtype: _AsyncioIOReference which is derived from
|
||||
nbio_interface.AbstractIOReference
|
||||
|
||||
"""
|
||||
if not callable(on_done):
|
||||
raise TypeError(
|
||||
'on_done arg must be callable, but got {!r}'.format(on_done))
|
||||
|
||||
return _AsyncioIOReference(
|
||||
asyncio.ensure_future(coro, loop=self._loop), on_done)
|
||||
|
||||
|
||||
class _TimerHandle(nbio_interface.AbstractTimerReference):
|
||||
"""This module's adaptation of `nbio_interface.AbstractTimerReference`.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, handle):
|
||||
"""
|
||||
|
||||
:param asyncio.Handle handle:
|
||||
"""
|
||||
self._handle = handle
|
||||
|
||||
def cancel(self):
|
||||
if self._handle is not None:
|
||||
self._handle.cancel()
|
||||
self._handle = None
|
||||
|
||||
|
||||
class _AsyncioIOReference(nbio_interface.AbstractIOReference):
|
||||
"""This module's adaptation of `nbio_interface.AbstractIOReference`.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, future, on_done):
|
||||
"""
|
||||
:param asyncio.Future future:
|
||||
:param callable on_done: user callback that takes the completion result
|
||||
or exception as its only arg. It will not be called if the operation
|
||||
was cancelled.
|
||||
|
||||
"""
|
||||
if not callable(on_done):
|
||||
raise TypeError(
|
||||
'on_done arg must be callable, but got {!r}'.format(on_done))
|
||||
|
||||
self._future = future
|
||||
|
||||
def on_done_adapter(future):
|
||||
"""Handle completion callback from the future instance"""
|
||||
|
||||
# NOTE: Asyncio schedules callback for cancelled futures, but pika
|
||||
# doesn't want that
|
||||
if not future.cancelled():
|
||||
on_done(future.exception() or future.result())
|
||||
|
||||
future.add_done_callback(on_done_adapter)
|
||||
|
||||
def cancel(self):
|
||||
"""Cancel pending operation
|
||||
|
||||
:returns: False if was already done or cancelled; True otherwise
|
||||
:rtype: bool
|
||||
|
||||
"""
|
||||
return self._future.cancel()
|
||||
501
backend/venv/Lib/site-packages/pika/adapters/base_connection.py
Normal file
501
backend/venv/Lib/site-packages/pika/adapters/base_connection.py
Normal file
@@ -0,0 +1,501 @@
|
||||
"""Base class extended by connection adapters. This extends the
|
||||
connection.Connection class to encapsulate connection behavior but still
|
||||
isolate socket and low level communication.
|
||||
|
||||
"""
|
||||
import abc
|
||||
import functools
|
||||
import logging
|
||||
|
||||
import pika.compat
|
||||
import pika.exceptions
|
||||
import pika.tcp_socket_opts
|
||||
|
||||
from pika.adapters.utils import connection_workflow, nbio_interface
|
||||
from pika import connection
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseConnection(connection.Connection):
|
||||
"""BaseConnection class that should be extended by connection adapters.
|
||||
|
||||
This class abstracts I/O loop and transport services from pika core.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, parameters, on_open_callback, on_open_error_callback,
|
||||
on_close_callback, nbio, internal_connection_workflow):
|
||||
"""Create a new instance of the Connection object.
|
||||
|
||||
:param None|pika.connection.Parameters parameters: Connection parameters
|
||||
:param None|method on_open_callback: Method to call on connection open
|
||||
:param None | method on_open_error_callback: Called if the connection
|
||||
can't be established or connection establishment is interrupted by
|
||||
`Connection.close()`: on_open_error_callback(Connection, exception).
|
||||
:param None | method on_close_callback: Called when a previously fully
|
||||
open connection is closed:
|
||||
`on_close_callback(Connection, exception)`, where `exception` is
|
||||
either an instance of `exceptions.ConnectionClosed` if closed by
|
||||
user or broker or exception of another type that describes the cause
|
||||
of connection failure.
|
||||
:param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
|
||||
asynchronous services
|
||||
:param bool internal_connection_workflow: True for autonomous connection
|
||||
establishment which is default; False for externally-managed
|
||||
connection workflow via the `create_connection()` factory.
|
||||
:raises: RuntimeError
|
||||
:raises: ValueError
|
||||
|
||||
"""
|
||||
if parameters and not isinstance(parameters, connection.Parameters):
|
||||
raise ValueError(
|
||||
'Expected instance of Parameters, not %r' % (parameters,))
|
||||
|
||||
self._nbio = nbio
|
||||
|
||||
self._connection_workflow = None # type: connection_workflow.AMQPConnectionWorkflow
|
||||
self._transport = None # type: pika.adapters.utils.nbio_interface.AbstractStreamTransport
|
||||
|
||||
self._got_eof = False # transport indicated EOF (connection reset)
|
||||
|
||||
super(BaseConnection, self).__init__(
|
||||
parameters,
|
||||
on_open_callback,
|
||||
on_open_error_callback,
|
||||
on_close_callback,
|
||||
internal_connection_workflow=internal_connection_workflow)
|
||||
|
||||
def _init_connection_state(self):
|
||||
"""Initialize or reset all of our internal state variables for a given
|
||||
connection. If we disconnect and reconnect, all of our state needs to
|
||||
be wiped.
|
||||
|
||||
"""
|
||||
super(BaseConnection, self)._init_connection_state()
|
||||
|
||||
self._connection_workflow = None
|
||||
self._transport = None
|
||||
self._got_eof = False
|
||||
|
||||
def __repr__(self):
|
||||
|
||||
# def get_socket_repr(sock):
|
||||
# """Return socket info suitable for use in repr"""
|
||||
# if sock is None:
|
||||
# return None
|
||||
#
|
||||
# sockname = None
|
||||
# peername = None
|
||||
# try:
|
||||
# sockname = sock.getsockname()
|
||||
# except pika.compat.SOCKET_ERROR:
|
||||
# # closed?
|
||||
# pass
|
||||
# else:
|
||||
# try:
|
||||
# peername = sock.getpeername()
|
||||
# except pika.compat.SOCKET_ERROR:
|
||||
# # not connected?
|
||||
# pass
|
||||
#
|
||||
# return '%s->%s' % (sockname, peername)
|
||||
# TODO need helpful __repr__ in transports
|
||||
return ('<%s %s transport=%s params=%s>' % (
|
||||
self.__class__.__name__, self._STATE_NAMES[self.connection_state],
|
||||
self._transport, self.params))
|
||||
|
||||
@classmethod
|
||||
@abc.abstractmethod
|
||||
def create_connection(cls,
|
||||
connection_configs,
|
||||
on_done,
|
||||
custom_ioloop=None,
|
||||
workflow=None):
|
||||
"""Asynchronously create a connection to an AMQP broker using the given
|
||||
configurations. Will attempt to connect using each config in the given
|
||||
order, including all compatible resolved IP addresses of the hostname
|
||||
supplied in each config, until one is established or all attempts fail.
|
||||
|
||||
See also `_start_connection_workflow()`.
|
||||
|
||||
:param sequence connection_configs: A sequence of one or more
|
||||
`pika.connection.Parameters`-based objects.
|
||||
:param callable on_done: as defined in
|
||||
`connection_workflow.AbstractAMQPConnectionWorkflow.start()`.
|
||||
:param object | None custom_ioloop: Provide a custom I/O loop that is
|
||||
native to the specific adapter implementation; if None, the adapter
|
||||
will use a default loop instance, which is typically a singleton.
|
||||
:param connection_workflow.AbstractAMQPConnectionWorkflow | None workflow:
|
||||
Pass an instance of an implementation of the
|
||||
`connection_workflow.AbstractAMQPConnectionWorkflow` interface;
|
||||
defaults to a `connection_workflow.AMQPConnectionWorkflow` instance
|
||||
with default values for optional args.
|
||||
:returns: Connection workflow instance in use. The user should limit
|
||||
their interaction with this object only to it's `abort()` method.
|
||||
:rtype: connection_workflow.AbstractAMQPConnectionWorkflow
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def _start_connection_workflow(cls, connection_configs, connection_factory,
|
||||
nbio, workflow, on_done):
|
||||
"""Helper function for custom implementations of `create_connection()`.
|
||||
|
||||
:param sequence connection_configs: A sequence of one or more
|
||||
`pika.connection.Parameters`-based objects.
|
||||
:param callable connection_factory: A function that takes
|
||||
`pika.connection.Parameters` as its only arg and returns a brand new
|
||||
`pika.connection.Connection`-based adapter instance each time it is
|
||||
called. The factory must instantiate the connection with
|
||||
`internal_connection_workflow=False`.
|
||||
:param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
|
||||
:param connection_workflow.AbstractAMQPConnectionWorkflow | None workflow:
|
||||
Pass an instance of an implementation of the
|
||||
`connection_workflow.AbstractAMQPConnectionWorkflow` interface;
|
||||
defaults to a `connection_workflow.AMQPConnectionWorkflow` instance
|
||||
with default values for optional args.
|
||||
:param callable on_done: as defined in
|
||||
:py:meth:`connection_workflow.AbstractAMQPConnectionWorkflow.start()`.
|
||||
:returns: Connection workflow instance in use. The user should limit
|
||||
their interaction with this object only to it's `abort()` method.
|
||||
:rtype: connection_workflow.AbstractAMQPConnectionWorkflow
|
||||
|
||||
"""
|
||||
if workflow is None:
|
||||
workflow = connection_workflow.AMQPConnectionWorkflow()
|
||||
LOGGER.debug('Created default connection workflow %r', workflow)
|
||||
|
||||
if isinstance(workflow, connection_workflow.AMQPConnectionWorkflow):
|
||||
workflow.set_io_services(nbio)
|
||||
|
||||
def create_connector():
|
||||
"""`AMQPConnector` factory."""
|
||||
return connection_workflow.AMQPConnector(
|
||||
lambda params: _StreamingProtocolShim(
|
||||
connection_factory(params)),
|
||||
nbio)
|
||||
|
||||
workflow.start(
|
||||
connection_configs=connection_configs,
|
||||
connector_factory=create_connector,
|
||||
native_loop=nbio.get_native_ioloop(),
|
||||
on_done=functools.partial(cls._unshim_connection_workflow_callback,
|
||||
on_done))
|
||||
|
||||
return workflow
|
||||
|
||||
@property
|
||||
def ioloop(self):
|
||||
"""
|
||||
:returns: the native I/O loop instance underlying async services selected
|
||||
by user or the default selected by the specialized connection
|
||||
adapter (e.g., Twisted reactor, `asyncio.SelectorEventLoop`,
|
||||
`select_connection.IOLoop`, etc.)
|
||||
:rtype: object
|
||||
"""
|
||||
return self._nbio.get_native_ioloop()
|
||||
|
||||
def _adapter_call_later(self, delay, callback):
|
||||
"""Implement
|
||||
:py:meth:`pika.connection.Connection._adapter_call_later()`.
|
||||
|
||||
"""
|
||||
return self._nbio.call_later(delay, callback)
|
||||
|
||||
def _adapter_remove_timeout(self, timeout_id):
|
||||
"""Implement
|
||||
:py:meth:`pika.connection.Connection._adapter_remove_timeout()`.
|
||||
|
||||
"""
|
||||
timeout_id.cancel()
|
||||
|
||||
def _adapter_add_callback_threadsafe(self, callback):
|
||||
"""Implement
|
||||
:py:meth:`pika.connection.Connection._adapter_add_callback_threadsafe()`.
|
||||
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError(
|
||||
'callback must be a callable, but got %r' % (callback,))
|
||||
|
||||
self._nbio.add_callback_threadsafe(callback)
|
||||
|
||||
def _adapter_connect_stream(self):
|
||||
"""Initiate full-stack connection establishment asynchronously for
|
||||
internally-initiated connection bring-up.
|
||||
|
||||
Upon failed completion, we will invoke
|
||||
`Connection._on_stream_terminated()`. NOTE: On success,
|
||||
the stack will be up already, so there is no corresponding callback.
|
||||
|
||||
"""
|
||||
self._connection_workflow = connection_workflow.AMQPConnectionWorkflow(
|
||||
_until_first_amqp_attempt=True)
|
||||
|
||||
self._connection_workflow.set_io_services(self._nbio)
|
||||
|
||||
def create_connector():
|
||||
"""`AMQPConnector` factory"""
|
||||
return connection_workflow.AMQPConnector(
|
||||
lambda _params: _StreamingProtocolShim(self), self._nbio)
|
||||
|
||||
self._connection_workflow.start(
|
||||
[self.params],
|
||||
connector_factory=create_connector,
|
||||
native_loop=self._nbio.get_native_ioloop(),
|
||||
on_done=functools.partial(self._unshim_connection_workflow_callback,
|
||||
self._on_connection_workflow_done))
|
||||
|
||||
@staticmethod
|
||||
def _unshim_connection_workflow_callback(user_on_done, shim_or_exc):
|
||||
"""
|
||||
|
||||
:param callable user_on_done: user's `on_done` callback as defined in
|
||||
:py:meth:`connection_workflow.AbstractAMQPConnectionWorkflow.start()`.
|
||||
:param _StreamingProtocolShim | Exception shim_or_exc:
|
||||
"""
|
||||
result = shim_or_exc
|
||||
if isinstance(result, _StreamingProtocolShim):
|
||||
result = result.conn
|
||||
|
||||
user_on_done(result)
|
||||
|
||||
def _abort_connection_workflow(self):
|
||||
"""Asynchronously abort connection workflow. Upon
|
||||
completion, `Connection._on_stream_terminated()` will be called with None
|
||||
as the error argument.
|
||||
|
||||
Assumption: may be called only while connection is opening.
|
||||
|
||||
"""
|
||||
assert not self._opened, (
|
||||
'_abort_connection_workflow() may be called only when '
|
||||
'connection is opening.')
|
||||
|
||||
if self._transport is None:
|
||||
# NOTE: this is possible only when user calls Connection.close() to
|
||||
# interrupt internally-initiated connection establishment.
|
||||
# self._connection_workflow.abort() would not call
|
||||
# Connection.close() before pairing of connection with transport.
|
||||
assert self._internal_connection_workflow, (
|
||||
'Unexpected _abort_connection_workflow() call with '
|
||||
'no transport in external connection workflow mode.')
|
||||
|
||||
# This will result in call to _on_connection_workflow_done() upon
|
||||
# completion
|
||||
self._connection_workflow.abort()
|
||||
else:
|
||||
# NOTE: we can't use self._connection_workflow.abort() in this case,
|
||||
# because it would result in infinite recursion as we're called
|
||||
# from Connection.close() and _connection_workflow.abort() calls
|
||||
# Connection.close() to abort a connection that's already been
|
||||
# paired with a transport. During internally-initiated connection
|
||||
# establishment, AMQPConnectionWorkflow will discover that user
|
||||
# aborted the connection when it receives
|
||||
# pika.exceptions.ConnectionOpenAborted.
|
||||
|
||||
# This completes asynchronously, culminating in call to our method
|
||||
# `connection_lost()`
|
||||
self._transport.abort()
|
||||
|
||||
def _on_connection_workflow_done(self, conn_or_exc):
|
||||
"""`AMQPConnectionWorkflow` completion callback.
|
||||
|
||||
:param BaseConnection | Exception conn_or_exc: Our own connection
|
||||
instance on success; exception on failure. See
|
||||
`AbstractAMQPConnectionWorkflow.start()` for details.
|
||||
|
||||
"""
|
||||
LOGGER.debug('Full-stack connection workflow completed: %r',
|
||||
conn_or_exc)
|
||||
|
||||
self._connection_workflow = None
|
||||
|
||||
# Notify protocol of failure
|
||||
if isinstance(conn_or_exc, Exception):
|
||||
self._transport = None
|
||||
if isinstance(conn_or_exc,
|
||||
connection_workflow.AMQPConnectionWorkflowAborted):
|
||||
LOGGER.info('Full-stack connection workflow aborted: %r',
|
||||
conn_or_exc)
|
||||
# So that _handle_connection_workflow_failure() will know it's
|
||||
# not a failure
|
||||
conn_or_exc = None
|
||||
else:
|
||||
LOGGER.error('Full-stack connection workflow failed: %r',
|
||||
conn_or_exc)
|
||||
if (isinstance(conn_or_exc,
|
||||
connection_workflow.AMQPConnectionWorkflowFailed)
|
||||
and isinstance(
|
||||
conn_or_exc.exceptions[-1], connection_workflow.
|
||||
AMQPConnectorSocketConnectError)):
|
||||
conn_or_exc = pika.exceptions.AMQPConnectionError(
|
||||
conn_or_exc)
|
||||
|
||||
self._handle_connection_workflow_failure(conn_or_exc)
|
||||
else:
|
||||
# NOTE: On success, the stack will be up already, so there is no
|
||||
# corresponding callback.
|
||||
assert conn_or_exc is self, \
|
||||
'Expected self conn={!r} from workflow, but got {!r}.'.format(
|
||||
self, conn_or_exc)
|
||||
|
||||
def _handle_connection_workflow_failure(self, error):
|
||||
"""Handle failure of self-initiated stack bring-up and call
|
||||
`Connection._on_stream_terminated()` if connection is not in closed state
|
||||
yet. Called by adapter layer when the full-stack connection workflow
|
||||
fails.
|
||||
|
||||
:param Exception | None error: exception instance describing the reason
|
||||
for failure or None if the connection workflow was aborted.
|
||||
"""
|
||||
if error is None:
|
||||
LOGGER.info('Self-initiated stack bring-up aborted.')
|
||||
else:
|
||||
LOGGER.error('Self-initiated stack bring-up failed: %r', error)
|
||||
|
||||
if not self.is_closed:
|
||||
self._on_stream_terminated(error)
|
||||
else:
|
||||
# This may happen when AMQP layer bring up was started but did not
|
||||
# complete
|
||||
LOGGER.debug('_handle_connection_workflow_failure(): '
|
||||
'suppressing - connection already closed.')
|
||||
|
||||
def _adapter_disconnect_stream(self):
|
||||
"""Asynchronously bring down the streaming transport layer and invoke
|
||||
`Connection._on_stream_terminated()` asynchronously when complete.
|
||||
|
||||
"""
|
||||
if not self._opened:
|
||||
self._abort_connection_workflow()
|
||||
else:
|
||||
# This completes asynchronously, culminating in call to our method
|
||||
# `connection_lost()`
|
||||
self._transport.abort()
|
||||
|
||||
def _adapter_emit_data(self, data):
|
||||
"""Take ownership of data and send it to AMQP server as soon as
|
||||
possible.
|
||||
|
||||
:param bytes data:
|
||||
|
||||
"""
|
||||
self._transport.write(data)
|
||||
|
||||
def _proto_connection_made(self, transport):
|
||||
"""Introduces transport to protocol after transport is connected.
|
||||
|
||||
:py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.
|
||||
|
||||
:param nbio_interface.AbstractStreamTransport transport:
|
||||
:raises Exception: Exception-based exception on error
|
||||
|
||||
"""
|
||||
self._transport = transport
|
||||
|
||||
# Let connection know that stream is available
|
||||
self._on_stream_connected()
|
||||
|
||||
def _proto_connection_lost(self, error):
|
||||
"""Called upon loss or closing of TCP connection.
|
||||
|
||||
:py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.
|
||||
|
||||
NOTE: `connection_made()` and `connection_lost()` are each called just
|
||||
once and in that order. All other callbacks are called between them.
|
||||
|
||||
:param BaseException | None error: An exception (check for
|
||||
`BaseException`) indicates connection failure. None indicates that
|
||||
connection was closed on this side, such as when it's aborted or
|
||||
when `AbstractStreamProtocol.eof_received()` returns a falsy result.
|
||||
:raises Exception: Exception-based exception on error
|
||||
|
||||
"""
|
||||
self._transport = None
|
||||
|
||||
if error is None:
|
||||
# Either result of `eof_received()` or abort
|
||||
if self._got_eof:
|
||||
error = pika.exceptions.StreamLostError(
|
||||
'Transport indicated EOF')
|
||||
else:
|
||||
error = pika.exceptions.StreamLostError(
|
||||
'Stream connection lost: {!r}'.format(error))
|
||||
|
||||
LOGGER.log(logging.DEBUG if error is None else logging.ERROR,
|
||||
'connection_lost: %r', error)
|
||||
|
||||
self._on_stream_terminated(error)
|
||||
|
||||
def _proto_eof_received(self): # pylint: disable=R0201
|
||||
"""Called after the remote peer shuts its write end of the connection.
|
||||
:py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.
|
||||
|
||||
:returns: A falsy value (including None) will cause the transport to
|
||||
close itself, resulting in an eventual `connection_lost()` call
|
||||
from the transport. If a truthy value is returned, it will be the
|
||||
protocol's responsibility to close/abort the transport.
|
||||
:rtype: falsy|truthy
|
||||
:raises Exception: Exception-based exception on error
|
||||
|
||||
"""
|
||||
LOGGER.error('Transport indicated EOF.')
|
||||
|
||||
self._got_eof = True
|
||||
|
||||
# This is how a reset connection will typically present itself
|
||||
# when we have nothing to send to the server over plaintext stream.
|
||||
#
|
||||
# Have transport tear down the connection and invoke our
|
||||
# `connection_lost` method
|
||||
return False
|
||||
|
||||
def _proto_data_received(self, data):
|
||||
"""Called to deliver incoming data from the server to the protocol.
|
||||
|
||||
:py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.
|
||||
|
||||
:param data: Non-empty data bytes.
|
||||
:raises Exception: Exception-based exception on error
|
||||
|
||||
"""
|
||||
self._on_data_available(data)
|
||||
|
||||
|
||||
class _StreamingProtocolShim(nbio_interface.AbstractStreamProtocol):
|
||||
"""Shim for callbacks from transport so that we BaseConnection can
|
||||
delegate to private methods, thus avoiding contamination of API with
|
||||
methods that look public, but aren't.
|
||||
|
||||
"""
|
||||
|
||||
# Override AbstractStreamProtocol abstract methods to enable instantiation
|
||||
connection_made = None
|
||||
connection_lost = None
|
||||
eof_received = None
|
||||
data_received = None
|
||||
|
||||
def __init__(self, conn):
|
||||
"""
|
||||
:param BaseConnection conn:
|
||||
"""
|
||||
self.conn = conn
|
||||
# pylint: disable=W0212
|
||||
self.connection_made = conn._proto_connection_made
|
||||
self.connection_lost = conn._proto_connection_lost
|
||||
self.eof_received = conn._proto_eof_received
|
||||
self.data_received = conn._proto_data_received
|
||||
|
||||
def __getattr__(self, attr):
|
||||
"""Proxy inexistent attribute requests to our connection instance
|
||||
so that AMQPConnectionWorkflow/AMQPConnector may treat the shim as an
|
||||
actual connection.
|
||||
|
||||
"""
|
||||
return getattr(self.conn, attr)
|
||||
|
||||
def __repr__(self):
|
||||
return '{}: {!r}'.format(self.__class__.__name__, self.conn)
|
||||
2664
backend/venv/Lib/site-packages/pika/adapters/blocking_connection.py
Normal file
2664
backend/venv/Lib/site-packages/pika/adapters/blocking_connection.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,469 @@
|
||||
"""Use pika with the Gevent IOLoop."""
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import weakref
|
||||
|
||||
try:
|
||||
import queue
|
||||
except ImportError: # Python <= v2.7
|
||||
import Queue as queue
|
||||
|
||||
import gevent
|
||||
import gevent.hub
|
||||
import gevent.socket
|
||||
|
||||
import pika.compat
|
||||
from pika.adapters.base_connection import BaseConnection
|
||||
from pika.adapters.utils.io_services_utils import check_callback_arg
|
||||
from pika.adapters.utils.nbio_interface import (
|
||||
AbstractIOReference,
|
||||
AbstractIOServices,
|
||||
)
|
||||
from pika.adapters.utils.selector_ioloop_adapter import (
|
||||
AbstractSelectorIOLoop,
|
||||
SelectorIOServicesAdapter,
|
||||
)
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GeventConnection(BaseConnection):
    """Gevent-based implementation of pika's ``BaseConnection``.

    A selector-style asynchronous connection adapter that runs on Gevent's
    event loop.
    """

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new GeventConnection instance and connect to RabbitMQ on
        Gevent's event-loop.

        :param pika.connection.Parameters|None parameters: The connection
            parameters
        :param callable|None on_open_callback: The method to call when the
            connection is open
        :param callable|None on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`:
            on_open_error_callback(Connection, exception)
        :param callable|None on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the
            cause of connection failure
        :param gevent._interfaces.ILoop|nbio_interface.AbstractIOServices|None
            custom_ioloop: Use a custom Gevent ILoop.
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory
        """
        # The adapter relies on os.pipe-based cross-thread signaling, which
        # the gevent integration here does not support on Windows.
        if pika.compat.ON_WINDOWS:
            raise RuntimeError('GeventConnection is not supported on Windows.')

        if not custom_ioloop:
            custom_ioloop = _GeventSelectorIOLoop(gevent.get_hub())

        if isinstance(custom_ioloop, AbstractIOServices):
            nbio = custom_ioloop
        else:
            nbio = _GeventSelectorIOServicesAdapter(custom_ioloop)

        super(GeventConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        io_services = _GeventSelectorIOServicesAdapter(
            custom_ioloop or _GeventSelectorIOLoop(gevent.get_hub()))

        def connection_factory(params):
            """Create a GeventConnection wired for the external workflow."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(parameters=params,
                       custom_ioloop=io_services,
                       internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=io_services,
            workflow=workflow,
            on_done=on_done)
|
||||
|
||||
|
||||
class _TSafeCallbackQueue(object):
|
||||
"""Dispatch callbacks from any thread to be executed in the main thread
|
||||
efficiently with IO events.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
:param _GeventSelectorIOLoop loop: IO loop to add callbacks to.
|
||||
"""
|
||||
# Thread-safe, blocking queue.
|
||||
self._queue = queue.Queue()
|
||||
# PIPE to trigger an event when the queue is ready.
|
||||
self._read_fd, self._write_fd = os.pipe()
|
||||
# Lock around writes to the PIPE in case some platform/implementation
|
||||
# requires this.
|
||||
self._write_lock = threading.RLock()
|
||||
|
||||
@property
|
||||
def fd(self):
|
||||
"""The file-descriptor to register for READ events in the IO loop."""
|
||||
return self._read_fd
|
||||
|
||||
def add_callback_threadsafe(self, callback):
|
||||
"""Add an item to the queue from any thread. The configured handler
|
||||
will be invoked with the item in the main thread.
|
||||
|
||||
:param item: Object to add to the queue.
|
||||
"""
|
||||
self._queue.put(callback)
|
||||
with self._write_lock:
|
||||
# The value written is not important.
|
||||
os.write(self._write_fd, b'\xFF')
|
||||
|
||||
def run_next_callback(self):
|
||||
"""Invoke the next callback from the queue.
|
||||
|
||||
MUST run in the main thread. If no callback was added to the queue,
|
||||
this will block the IO loop.
|
||||
|
||||
Performs a blocking READ on the pipe so must only be called when the
|
||||
pipe is ready for reading.
|
||||
"""
|
||||
try:
|
||||
callback = self._queue.get_nowait()
|
||||
except queue.Empty:
|
||||
# Should never happen.
|
||||
LOGGER.warning("Callback queue was empty.")
|
||||
else:
|
||||
# Read the byte from the pipe so the event doesn't re-fire.
|
||||
os.read(self._read_fd, 1)
|
||||
callback()
|
||||
|
||||
|
||||
class _GeventSelectorIOLoop(AbstractSelectorIOLoop):
    """Implementation of `AbstractSelectorIOLoop` using the Gevent event loop.

    Required by implementations of `SelectorIOServicesAdapter`.
    """
    # Gevent's READ and WRITE masks are defined as 1 and 2 respectively. No
    # ERROR mask is defined.
    # See http://www.gevent.org/api/gevent.hub.html#gevent._interfaces.ILoop.io
    READ = 1
    WRITE = 2
    ERROR = 0

    def __init__(self, gevent_hub=None):
        """
        :param gevent.hub.Hub|None gevent_hub: The hub whose loop to run on;
            defaults to the current thread's hub.
        """
        self._hub = gevent_hub or gevent.get_hub()
        # Maps a watched file descriptor to its active `loop.io` watcher.
        self._io_watchers_by_fd = {}
        # Used to start/stop the loop: `start()` blocks on this waiter and
        # `stop()` switches it.
        self._waiter = gevent.hub.Waiter()

        # For adding callbacks from other threads. See `add_callback(..)`.
        self._callback_queue = _TSafeCallbackQueue()

        def run_callback_in_main_thread(fd, events):
            """Swallow the fd and events arguments."""
            del fd
            del events
            self._callback_queue.run_next_callback()

        # Watch the queue's wake-up pipe so cross-thread callbacks get
        # dispatched on this loop's thread.
        self.add_handler(self._callback_queue.fd, run_callback_in_main_thread,
                         self.READ)

    def close(self):
        """Release the loop's resources.

        NOTE(review): this destroys the hub's underlying loop via
        `loop.destroy()`; the hub is presumably unusable afterwards — confirm
        the caller owns the hub.
        """
        self._hub.loop.destroy()
        self._hub = None

    def start(self):
        """Run the I/O loop. It will loop until requested to exit. See `stop()`.
        """
        LOGGER.debug("Passing control to Gevent's IOLoop")
        self._waiter.get()  # Block until 'stop()' is called.

        LOGGER.debug("Control was passed back from Gevent's IOLoop")
        # Reset the waiter so the loop can be started again later.
        self._waiter.clear()

    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.

        To invoke `stop()` safely from a thread other than this IOLoop's thread,
        call it via `add_callback_threadsafe`; e.g.,

            `ioloop.add_callback(ioloop.stop)`
        """
        # Wake the greenlet blocked in `start()`.
        self._waiter.switch(None)

    def add_callback(self, callback):
        """Requests a call to the given function as soon as possible in the
        context of this IOLoop's thread.

        NOTE: This is the only thread-safe method in IOLoop. All other
        manipulations of IOLoop must be performed from the IOLoop's thread.

        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback_threadsafe(ioloop.stop)`

        :param callable callback: The callback method
        """
        # NOTE(review): same-thread detection compares hubs with `==` rather
        # than `is` — confirm hub equality semantics are identity here.
        if gevent.get_hub() == self._hub:
            # We're in the main thread; just add the callback.
            LOGGER.debug("Adding callback from main thread")
            self._hub.loop.run_callback(callback)
        else:
            # This isn't the main thread and Gevent's hub/loop don't provide
            # any thread-safety so enqueue the callback for it to be registered
            # in the main thread.
            LOGGER.debug("Adding callback from another thread")
            callback = functools.partial(self._hub.loop.run_callback, callback)
            self._callback_queue.add_callback_threadsafe(callback)

    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.

        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: handle to the created timeout that may be passed to
            `remove_timeout()`
        :rtype: object
        """
        timer = self._hub.loop.timer(delay)
        timer.start(callback)
        return timer

    def remove_timeout(self, timeout_handle):
        """Remove a timeout

        :param timeout_handle: Handle of timeout to remove, as returned by
            `call_later()`
        """
        timeout_handle.close()

    def add_handler(self, fd, handler, events):
        """Start watching the given file descriptor for events

        :param int fd: The file descriptor
        :param callable handler: When requested event(s) occur,
            `handler(fd, events)` will be called.
        :param int events: The event mask (READ|WRITE)
        """
        io_watcher = self._hub.loop.io(fd, events)
        self._io_watchers_by_fd[fd] = io_watcher
        io_watcher.start(handler, fd, events)

    def update_handler(self, fd, events):
        """Change the events being watched for.

        :param int fd: The file descriptor
        :param int events: The new event mask (READ|WRITE)
        """
        io_watcher = self._io_watchers_by_fd[fd]
        # Save the callback from the original watcher, then close the old
        # watcher and create a new one using the saved callback and the new
        # events.
        callback = io_watcher.callback
        io_watcher.close()
        del self._io_watchers_by_fd[fd]
        self.add_handler(fd, callback, events)

    def remove_handler(self, fd):
        """Stop watching the given file descriptor for events

        :param int fd: The file descriptor
        """
        io_watcher = self._io_watchers_by_fd[fd]
        io_watcher.close()
        del self._io_watchers_by_fd[fd]
|
||||
|
||||
|
||||
class _GeventSelectorIOServicesAdapter(SelectorIOServicesAdapter):
    """`SelectorIOServicesAdapter` that resolves addresses asynchronously
    through Gevent's configured DNS resolver.
    """

    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.getaddrinfo()`.
        """
        request = dict(native_loop=self._loop,
                       host=host,
                       port=port,
                       family=family,
                       socktype=socktype,
                       proto=proto,
                       flags=flags,
                       on_done=on_done)
        resolver = _GeventAddressResolver(**request)
        resolver.start()
        # Wrap the resolver in an `AbstractIOReference` implementation so the
        # caller can cancel the pending resolution.
        return _GeventIOLoopIOHandle(resolver)
|
||||
|
||||
|
||||
class _GeventIOLoopIOHandle(AbstractIOReference):
    """`AbstractIOReference` wrapper; only used to wrap the
    `_GeventAddressResolver`.
    """

    def __init__(self, subject):
        """
        :param subject: object exposing a `cancel()` method that aborts the
            pending operation.
        """
        self._subject = subject

    def cancel(self):
        """Cancel the pending operation.

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        return self._subject.cancel()
|
||||
|
||||
|
||||
class _GeventAddressResolver(object):
    """Performs getaddrinfo asynchronously using Gevent's configured resolver
    in a separate greenlet, invoking the provided callback with the result.

    See: http://www.gevent.org/dns.html
    """
    __slots__ = (
        '_loop',
        '_on_done',
        '_greenlet',
        # getaddrinfo(..) args:
        '_ga_host',
        '_ga_port',
        '_ga_family',
        '_ga_socktype',
        '_ga_proto',
        '_ga_flags')

    def __init__(self, native_loop, host, port, family, socktype, proto, flags,
                 on_done):
        """Initialize the `_GeventAddressResolver`.

        :param AbstractSelectorIOLoop native_loop: loop on which the `on_done`
            callback will be dispatched.
        :param host: `see socket.getaddrinfo()`
        :param port: `see socket.getaddrinfo()`
        :param family: `see socket.getaddrinfo()`
        :param socktype: `see socket.getaddrinfo()`
        :param proto: `see socket.getaddrinfo()`
        :param flags: `see socket.getaddrinfo()`
        :param on_done: on_done(records|BaseException) callback for reporting
            result from the given I/O loop. The single arg will be either an
            exception object (check for `BaseException`) in case of failure or
            the result returned by `socket.getaddrinfo()`.
        """
        check_callback_arg(on_done, 'on_done')

        self._loop = native_loop
        self._on_done = on_done
        # Reference to the greenlet performing `getaddrinfo`.
        self._greenlet = None
        # getaddrinfo(..) args.
        self._ga_host = host
        self._ga_port = port
        self._ga_family = family
        self._ga_socktype = socktype
        self._ga_proto = proto
        self._ga_flags = flags

    def start(self):
        """Start an asynchronous getaddrinfo invocation.

        Starting more than once is a no-op (logged as a warning).
        """
        if self._greenlet is None:
            self._greenlet = gevent.spawn_raw(self._resolve)
        else:
            LOGGER.warning("_GeventAddressResolver already started")

    def cancel(self):
        """Cancel the pending resolver.

        :returns: True if a greenlet was running and has been stopped; False
            if there was nothing to cancel.
        """
        changed = False

        if self._greenlet is not None:
            changed = True
            self._stop_greenlet()

        # _cleanup() also calls _stop_greenlet(); the second call is a no-op
        # because _stop_greenlet() clears self._greenlet.
        self._cleanup()
        return changed

    def _cleanup(self):
        """Stop the resolver and release any resources."""
        self._stop_greenlet()
        self._loop = None
        self._on_done = None

    def _stop_greenlet(self):
        """Stop the greenlet performing getaddrinfo if running.

        Otherwise, this is a no-op.
        """
        if self._greenlet is not None:
            gevent.kill(self._greenlet)
            self._greenlet = None

    def _resolve(self):
        """Call `getaddrinfo()` and return result via user's callback
        function on the configured IO loop.

        Runs inside the spawned greenlet; failures are captured and passed to
        the callback instead of being raised.
        """
        try:
            # NOTE(JG): Can't use kwargs with getaddrinfo on Python <= v2.7.
            result = gevent.socket.getaddrinfo(self._ga_host, self._ga_port,
                                               self._ga_family,
                                               self._ga_socktype,
                                               self._ga_proto, self._ga_flags)
        except Exception as exc:  # pylint: disable=broad-except
            LOGGER.error('Address resolution failed: %r', exc)
            # Deliver the exception object itself as the result.
            result = exc

        callback = functools.partial(self._dispatch_callback, result)
        self._loop.add_callback(callback)

    def _dispatch_callback(self, result):
        """Invoke the configured completion callback and any subsequent cleanup.

        :param result: result from getaddrinfo, or the exception if raised.
        """
        try:
            LOGGER.debug(
                'Invoking async getaddrinfo() completion callback; host=%r',
                self._ga_host)
            self._on_done(result)
        finally:
            # Always release resources, even if the user's callback raises.
            self._cleanup()
|
||||
1267
backend/venv/Lib/site-packages/pika/adapters/select_connection.py
Normal file
1267
backend/venv/Lib/site-packages/pika/adapters/select_connection.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,90 @@
|
||||
"""Use pika with the Tornado IOLoop
|
||||
|
||||
"""
|
||||
import logging
|
||||
|
||||
from tornado import ioloop
|
||||
|
||||
from pika.adapters.utils import nbio_interface, selector_ioloop_adapter
|
||||
from pika.adapters import base_connection
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TornadoConnection(base_connection.BaseConnection):
    """Connection adapter that runs pika on the Tornado IOLoop."""

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new instance of the TornadoConnection class, connecting
        to RabbitMQ automatically.

        :param pika.connection.Parameters|None parameters: The connection
            parameters
        :param callable|None on_open_callback: The method to call when the
            connection is open
        :param callable|None on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`:
            on_open_error_callback(Connection, exception)
        :param callable|None on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the
            cause of connection failure
        :param ioloop.IOLoop|nbio_interface.AbstractIOServices|None custom_ioloop:
            Override using the global IOLoop in Tornado
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory
        """
        if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
            nbio = custom_ioloop
        else:
            # Fall back to Tornado's global IOLoop when none was supplied.
            tornado_loop = custom_ioloop or ioloop.IOLoop.instance()
            nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
                tornado_loop)
        super(TornadoConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
            custom_ioloop or ioloop.IOLoop.instance())

        def connection_factory(params):
            """Create a TornadoConnection wired for the external workflow."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(
                parameters=params,
                custom_ioloop=nbio,
                internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)
|
||||
1293
backend/venv/Lib/site-packages/pika/adapters/twisted_connection.py
Normal file
1293
backend/venv/Lib/site-packages/pika/adapters/twisted_connection.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,866 @@
|
||||
"""Implements `AMQPConnectionWorkflow` - the default workflow of performing
|
||||
multiple TCP/[SSL]/AMQP connection attempts with timeouts and retries until one
|
||||
succeeds or all attempts fail.
|
||||
|
||||
Defines the interface `AbstractAMQPConnectionWorkflow` that facilitates
|
||||
implementing custom connection workflows.
|
||||
|
||||
"""
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import socket
|
||||
|
||||
import pika.compat
|
||||
import pika.exceptions
|
||||
import pika.tcp_socket_opts
|
||||
from pika import __version__
|
||||
|
||||
_LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AMQPConnectorException(Exception):
    """Base class for all exceptions raised by this module."""
|
||||
|
||||
|
||||
class AMQPConnectorStackTimeout(AMQPConnectorException):
    """The overall TCP/[SSL]/AMQP stack connection attempt timed out."""
|
||||
|
||||
|
||||
class AMQPConnectorAborted(AMQPConnectorException):
    """The asynchronous connection request was aborted by the client."""
|
||||
|
||||
|
||||
class AMQPConnectorWrongState(AMQPConnectorException):
    """An AMQPConnector operation was requested in the wrong state, such as
    aborting after completion was reported.
    """
|
||||
|
||||
|
||||
class AMQPConnectorPhaseErrorBase(AMQPConnectorException):
    """Wrapper for an exception that occurred during a particular bring-up
    phase; subclasses identify the phase.
    """

    def __init__(self, exception, *args):
        """
        :param BaseException exception: error that occurred while waiting for
            a subclass-specific protocol bring-up phase to complete.
        :param args: args for parent class
        """
        super(AMQPConnectorPhaseErrorBase, self).__init__(*args)
        # Underlying error from the failed bring-up phase.
        self.exception = exception

    def __repr__(self):
        return '{}: {!r}'.format(type(self).__name__, self.exception)
|
||||
|
||||
|
||||
class AMQPConnectorSocketConnectError(AMQPConnectorPhaseErrorBase):
    """Error while connecting the TCP socket to the remote peer."""
|
||||
|
||||
|
||||
class AMQPConnectorTransportSetupError(AMQPConnectorPhaseErrorBase):
    """Error setting up the transport after TCP connected but before the AMQP
    handshake.
    """
|
||||
|
||||
|
||||
class AMQPConnectorAMQPHandshakeError(AMQPConnectorPhaseErrorBase):
    """Error that occurred during the AMQP handshake phase."""
|
||||
|
||||
|
||||
class AMQPConnectionWorkflowAborted(AMQPConnectorException):
    """The AMQP Connection workflow was aborted by the client."""
|
||||
|
||||
|
||||
class AMQPConnectionWorkflowWrongState(AMQPConnectorException):
    """An AMQP Connection Workflow operation was requested in the wrong state,
    such as aborting after completion was reported.
    """
|
||||
|
||||
|
||||
class AMQPConnectionWorkflowFailed(AMQPConnectorException):
    """Raised when the AMQP connection workflow failed."""

    def __init__(self, exceptions, *args):
        """
        :param sequence exceptions: Exceptions that occurred during the
            workflow.
        :param args: args to pass to base class
        """
        super(AMQPConnectionWorkflowFailed, self).__init__(*args)
        # Immutable record of the failures, in the order given.
        self.exceptions = tuple(exceptions)

    def __repr__(self):
        count = len(self.exceptions)
        # With a single failure, `first` is reported as None rather than
        # repeating the last exception.
        first = self.exceptions[0] if count > 1 else None
        return ('{}: {} exceptions in all; last exception - {!r}; first '
                'exception - {!r}').format(type(self).__name__, count,
                                           self.exceptions[-1], first)
|
||||
|
||||
|
||||
class AMQPConnector(object):
    """Performs a single TCP/[SSL]/AMQP connection workflow.

    """

    # Internal state-machine values tracking the connector's progress through
    # the bring-up phases.
    _STATE_INIT = 0  # start() hasn't been called yet
    _STATE_TCP = 1  # TCP/IP connection establishment
    _STATE_TRANSPORT = 2  # [SSL] and transport linkup
    _STATE_AMQP = 3  # AMQP connection handshake
    _STATE_TIMEOUT = 4  # overall TCP/[SSL]/AMQP timeout
    _STATE_ABORTING = 5  # abort() called - aborting workflow
    _STATE_DONE = 6  # result reported to client
|
||||
|
||||
    def __init__(self, conn_factory, nbio):
        """
        :param callable conn_factory: A function that takes
            `pika.connection.Parameters` as its only arg and returns a brand new
            `pika.connection.Connection`-based adapter instance each time it is
            called. The factory must instantiate the connection with
            `internal_connection_workflow=False`.
        :param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
            asynchronous I/O services used for socket connection, timers, and
            callback scheduling.
        """
        self._conn_factory = conn_factory
        self._nbio = nbio
        # All of the following are populated by start(); they remain None
        # until then.
        self._addr_record = None  # type: tuple
        self._conn_params = None  # type: pika.connection.Parameters
        self._on_done = None  # will be provided via start()
        # TCP connection timeout
        # pylint: disable=C0301
        self._tcp_timeout_ref = None  # type: pika.adapters.utils.nbio_interface.AbstractTimerReference
        # Overall TCP/[SSL]/AMQP timeout
        self._stack_timeout_ref = None  # type: pika.adapters.utils.nbio_interface.AbstractTimerReference
        # Current task
        self._task_ref = None  # type: pika.adapters.utils.nbio_interface.AbstractIOReference
        self._sock = None  # type: socket.socket
        self._amqp_conn = None  # type: pika.connection.Connection

        self._state = self._STATE_INIT
|
||||
|
||||
    def start(self, addr_record, conn_params, on_done):
        """Asynchronously perform a single TCP/[SSL]/AMQP connection attempt.

        :param tuple addr_record: a single resolved address record compatible
            with `socket.getaddrinfo()` format.
        :param pika.connection.Parameters conn_params:
        :param callable on_done: Function to call upon completion of the
            workflow: `on_done(pika.connection.Connection | BaseException)`. If
            exception, it's going to be one of the following:
                `AMQPConnectorSocketConnectError`
                `AMQPConnectorTransportSetupError`
                `AMQPConnectorAMQPHandshakeError`
                `AMQPConnectorAborted`

        :raises AMQPConnectorWrongState: if the workflow is already in
            progress or has finished.
        """
        if self._state != self._STATE_INIT:
            raise AMQPConnectorWrongState(
                'Already in progress or finished; state={}'.format(self._state))

        self._addr_record = addr_record
        self._conn_params = conn_params
        self._on_done = on_done

        # Create socket and initiate TCP/IP connection
        self._state = self._STATE_TCP
        # addr_record[:3] are (family, socktype, proto) per getaddrinfo().
        self._sock = socket.socket(*self._addr_record[:3])
        self._sock.setsockopt(pika.compat.SOL_TCP, socket.TCP_NODELAY, 1)
        pika.tcp_socket_opts.set_sock_opts(self._conn_params.tcp_options,
                                           self._sock)
        # Non-blocking mode is required for the async connect below.
        self._sock.setblocking(False)

        # addr_record[4] is the (host, port[, ...]) sockaddr tuple.
        addr = self._addr_record[4]
        _LOG.info('Pika version %s connecting to %r', __version__, addr)
        self._task_ref = self._nbio.connect_socket(
            self._sock, addr, on_done=self._on_tcp_connection_done)

        # Start socket connection timeout timer
        self._tcp_timeout_ref = None
        if self._conn_params.socket_timeout is not None:
            self._tcp_timeout_ref = self._nbio.call_later(
                self._conn_params.socket_timeout,
                self._on_tcp_connection_timeout)

        # Start overall TCP/[SSL]/AMQP stack connection timeout timer
        self._stack_timeout_ref = None
        if self._conn_params.stack_timeout is not None:
            self._stack_timeout_ref = self._nbio.call_later(
                self._conn_params.stack_timeout, self._on_overall_timeout)
|
||||
|
||||
    def abort(self):
        """Abort the workflow asynchronously. The completion callback will be
        called with an instance of AMQPConnectorAborted.

        NOTE: we can't cancel/close synchronously because aborting pika
        Connection and its transport requires an asynchronous operation.

        :raises AMQPConnectorWrongState: If called after completion has been
            reported or the workflow not started yet.
        """
        if self._state == self._STATE_INIT:
            raise AMQPConnectorWrongState('Cannot abort before starting.')

        if self._state == self._STATE_DONE:
            raise AMQPConnectorWrongState(
                'Cannot abort after completion was reported')

        self._state = self._STATE_ABORTING
        # NOTE(review): _deactivate() asserts self._amqp_conn is None, yet the
        # branch below handles a non-None self._amqp_conn — confirm intended
        # interaction (asserts may be stripped under -O).
        self._deactivate()

        _LOG.info(
            'AMQPConnector: beginning client-initiated asynchronous '
            'abort; %r/%s', self._conn_params.host, self._addr_record)

        if self._amqp_conn is None:
            _LOG.debug('AMQPConnector.abort(): no connection, so just '
                       'scheduling completion report via I/O loop.')
            self._nbio.add_callback_threadsafe(
                functools.partial(self._report_completion_and_cleanup,
                                  AMQPConnectorAborted()))
        else:
            if not self._amqp_conn.is_closing:
                # Initiate close of AMQP connection and wait for asynchronous
                # callback from the Connection instance before reporting
                # completion to client
                _LOG.debug('AMQPConnector.abort(): closing Connection.')
                self._amqp_conn.close(
                    320, 'Client-initiated abort of AMQP Connection Workflow.')
            else:
                # It's already closing, must be due to our timeout processing,
                # so we'll just piggy back on the callback it registered
                _LOG.debug('AMQPConnector.abort(): closing of Connection was '
                           'already initiated.')
                # NOTE(review): self._state was set to _STATE_ABORTING above,
                # so this assertion appears to always fail when reached —
                # confirm whether it should test the pre-abort state.
                assert self._state == self._STATE_TIMEOUT, \
                    ('Connection is closing, but not in TIMEOUT state; state={}'
                     .format(self._state))
|
||||
|
||||
def _close(self):
|
||||
"""Cancel asynchronous tasks and clean up to assist garbage collection.
|
||||
|
||||
Transition to STATE_DONE.
|
||||
|
||||
"""
|
||||
self._deactivate()
|
||||
|
||||
if self._sock is not None:
|
||||
self._sock.close()
|
||||
self._sock = None
|
||||
|
||||
self._conn_factory = None
|
||||
self._nbio = None
|
||||
self._addr_record = None
|
||||
self._on_done = None
|
||||
|
||||
self._state = self._STATE_DONE
|
||||
|
||||
def _deactivate(self):
|
||||
"""Cancel asynchronous tasks.
|
||||
|
||||
"""
|
||||
# NOTE: self._amqp_conn requires special handling as it doesn't support
|
||||
# synchronous closing. We special-case it elsewhere in the code where
|
||||
# needed.
|
||||
assert self._amqp_conn is None, \
|
||||
'_deactivate called with self._amqp_conn not None; state={}'.format(
|
||||
self._state)
|
||||
|
||||
if self._tcp_timeout_ref is not None:
|
||||
self._tcp_timeout_ref.cancel()
|
||||
self._tcp_timeout_ref = None
|
||||
|
||||
if self._stack_timeout_ref is not None:
|
||||
self._stack_timeout_ref.cancel()
|
||||
self._stack_timeout_ref = None
|
||||
|
||||
if self._task_ref is not None:
|
||||
self._task_ref.cancel()
|
||||
self._task_ref = None
|
||||
|
||||
def _report_completion_and_cleanup(self, result):
|
||||
"""Clean up and invoke client's `on_done` callback.
|
||||
|
||||
:param pika.connection.Connection | BaseException result: value to pass
|
||||
to user's `on_done` callback.
|
||||
"""
|
||||
if isinstance(result, BaseException):
|
||||
_LOG.error('AMQPConnector - reporting failure: %r', result)
|
||||
else:
|
||||
_LOG.info('AMQPConnector - reporting success: %r', result)
|
||||
|
||||
on_done = self._on_done
|
||||
self._close()
|
||||
|
||||
on_done(result)
|
||||
|
||||
def _on_tcp_connection_timeout(self):
|
||||
"""Handle TCP connection timeout
|
||||
|
||||
Reports AMQPConnectorSocketConnectError with socket.timeout inside.
|
||||
|
||||
"""
|
||||
self._tcp_timeout_ref = None
|
||||
|
||||
error = AMQPConnectorSocketConnectError(
|
||||
socket.timeout('TCP connection attempt timed out: {!r}/{}'.format(
|
||||
self._conn_params.host, self._addr_record)))
|
||||
self._report_completion_and_cleanup(error)
|
||||
|
||||
def _on_overall_timeout(self):
    """Handle overall TCP/[SSL]/AMQP connection attempt timeout by reporting
    `Timeout` error to the client.

    Reports AMQPConnectorSocketConnectError if timeout occurred during
        socket TCP connection attempt.
    Reports AMQPConnectorTransportSetupError if timeout occurred during
        transport [SSL] setup attempt.
    Reports AMQPConnectorAMQPHandshakeError if timeout occurred during
        AMQP handshake.

    """
    self._stack_timeout_ref = None

    # Remember which stage timed out before switching to the timeout state.
    prev_state = self._state
    self._state = self._STATE_TIMEOUT

    if prev_state == self._STATE_AMQP:
        msg = ('Timeout while setting up AMQP to {!r}/{}; ssl={}'.format(
            self._conn_params.host, self._addr_record,
            bool(self._conn_params.ssl_options)))
        _LOG.error(msg)
        # Initiate close of AMQP connection and wait for asynchronous
        # callback from the Connection instance before reporting completion
        # to client
        assert not self._amqp_conn.is_open, \
            'Unexpected open state of {!r}'.format(self._amqp_conn)
        if not self._amqp_conn.is_closing:
            self._amqp_conn.close(320, msg)
        return

    if prev_state == self._STATE_TCP:
        error = AMQPConnectorSocketConnectError(
            AMQPConnectorStackTimeout(
                'Timeout while connecting socket to {!r}/{}'.format(
                    self._conn_params.host, self._addr_record)))
    else:
        # Only remaining non-AMQP, non-TCP stage is transport ([SSL]) setup.
        assert prev_state == self._STATE_TRANSPORT
        error = AMQPConnectorTransportSetupError(
            AMQPConnectorStackTimeout(
                'Timeout while setting up transport to {!r}/{}; ssl={}'.
                format(self._conn_params.host, self._addr_record,
                       bool(self._conn_params.ssl_options))))

    self._report_completion_and_cleanup(error)
def _on_tcp_connection_done(self, exc):
    """Handle completion of asynchronous socket connection attempt.

    On success, proceeds to set up the streaming transport (plain or SSL);
    on failure, reports AMQPConnectorSocketConnectError.

    :param None|BaseException exc: None on success; exception object on
        failure

    """
    self._task_ref = None
    # The TCP phase is over either way; stop its dedicated timer. The
    # overall stack timer (self._stack_timeout_ref) keeps running.
    if self._tcp_timeout_ref is not None:
        self._tcp_timeout_ref.cancel()
        self._tcp_timeout_ref = None

    if exc is not None:
        _LOG.error('TCP Connection attempt failed: %r; dest=%r', exc,
                   self._addr_record)
        self._report_completion_and_cleanup(
            AMQPConnectorSocketConnectError(exc))
        return

    # We succeeded in making a TCP/IP connection to the server
    _LOG.debug('TCP connection to broker established: %r.', self._sock)

    # Now set up the transport
    self._state = self._STATE_TRANSPORT

    ssl_context = server_hostname = None
    if self._conn_params.ssl_options is not None:
        ssl_context = self._conn_params.ssl_options.context
        # Fall back to the connection host for SSL server-name matching
        # when the caller didn't specify one explicitly.
        server_hostname = self._conn_params.ssl_options.server_hostname
        if server_hostname is None:
            server_hostname = self._conn_params.host

    self._task_ref = self._nbio.create_streaming_connection(
        protocol_factory=functools.partial(self._conn_factory,
                                           self._conn_params),
        sock=self._sock,
        ssl_context=ssl_context,
        server_hostname=server_hostname,
        on_done=self._on_transport_establishment_done)

    self._sock = None  # create_streaming_connection() takes ownership
def _on_transport_establishment_done(self, result):
    """Handle asynchronous completion of
    `AbstractIOServices.create_streaming_connection()`

    On success, records the AMQP connection (protocol) and registers for
    notification of the AMQP handshake outcome.
    Reports AMQPConnectorTransportSetupError if transport ([SSL]) setup
    failed.

    :param sequence|BaseException result: On success, a two-tuple
        (transport, protocol); on failure, exception instance.

    """
    self._task_ref = None

    if isinstance(result, BaseException):
        _LOG.error(
            'Attempt to create the streaming transport failed: %r; '
            '%r/%s; ssl=%s', result, self._conn_params.host,
            self._addr_record, bool(self._conn_params.ssl_options))
        self._report_completion_and_cleanup(
            AMQPConnectorTransportSetupError(result))
        return

    # We succeeded in setting up the streaming transport!
    # result is a two-tuple (transport, protocol)
    _LOG.info('Streaming transport linked up: %r.', result)
    _transport, self._amqp_conn = result

    # AMQP handshake is in progress - initiated during transport link-up
    self._state = self._STATE_AMQP
    # We explicitly remove default handler because it raises an exception.
    # Both the success and the open-error paths funnel into the same
    # handler, _on_amqp_handshake_done.
    self._amqp_conn.add_on_open_error_callback(
        self._on_amqp_handshake_done, remove_default=True)
    self._amqp_conn.add_on_open_callback(self._on_amqp_handshake_done)
def _on_amqp_handshake_done(self, connection, error=None):
    """Handle completion of AMQP connection handshake attempt.

    NOTE: we handle two types of callbacks - success with just connection
    arg as well as the open-error callback with connection and error

    Reports AMQPConnectorAborted if the client aborted while handshake was
    in progress; AMQPConnectorAMQPHandshakeError if the AMQP handshake
    failed or timed out; otherwise reports the open connection.

    :param pika.connection.Connection connection:
    :param BaseException | None error: None on success, otherwise
        failure

    """
    _LOG.debug(
        'AMQPConnector: AMQP handshake attempt completed; state=%s; '
        'error=%r; %r/%s', self._state, error, self._conn_params.host,
        self._addr_record)

    # Don't need it any more; and _deactivate() checks that it's None
    self._amqp_conn = None

    if self._state == self._STATE_ABORTING:
        # Client-initiated abort takes precedence over timeout
        result = AMQPConnectorAborted()
    elif self._state == self._STATE_TIMEOUT:
        # BUGFIX: added the missing space between "handshake" and the host
        # so the message no longer renders as "handshake'host'/...".
        result = AMQPConnectorAMQPHandshakeError(
            AMQPConnectorStackTimeout(
                'Timeout during AMQP handshake {!r}/{}; ssl={}'.format(
                    self._conn_params.host, self._addr_record,
                    bool(self._conn_params.ssl_options))))
    elif self._state == self._STATE_AMQP:
        if error is None:
            _LOG.debug(
                'AMQPConnector: AMQP connection established for %r/%s: %r',
                self._conn_params.host, self._addr_record, connection)
            result = connection
        else:
            _LOG.debug(
                'AMQPConnector: AMQP connection handshake failed for '
                '%r/%s: %r', self._conn_params.host, self._addr_record,
                error)
            result = AMQPConnectorAMQPHandshakeError(error)
    else:
        # We timed out or aborted and initiated closing of the connection,
        # but this callback snuck in
        _LOG.debug(
            'AMQPConnector: Ignoring AMQP handshake completion '
            'notification due to wrong state=%s; error=%r; conn=%r',
            self._state, error, connection)
        return

    self._report_completion_and_cleanup(result)
class AbstractAMQPConnectionWorkflow(pika.compat.AbstractBase):
    """Interface for implementing a custom TCP/[SSL]/AMQP connection workflow.

    """

    def start(self, connection_configs, connector_factory, native_loop,
              on_done):
        """Asynchronously perform the workflow until success or all retries
        are exhausted. Called by the adapter.

        :param sequence connection_configs: A sequence of one or more
            `pika.connection.Parameters`-based objects. Will attempt to connect
            using each config in the given order.
        :param callable connector_factory: call it without args to obtain a new
            instance of `AMQPConnector` for each connection attempt.
            See `AMQPConnector` for details.
        :param native_loop: Native I/O loop passed by app to the adapter or
            obtained by the adapter by default.
        :param callable on_done: Function to call upon completion of the
            workflow:
            `on_done(pika.connection.Connection |
                     AMQPConnectionWorkflowFailed |
                     AMQPConnectionWorkflowAborted)`.
            `Connection`-based adapter on success,
            `AMQPConnectionWorkflowFailed` on failure,
            `AMQPConnectionWorkflowAborted` if workflow was aborted.

        :raises AMQPConnectionWorkflowWrongState: If called in wrong state, such
            as after starting the workflow.
        """
        raise NotImplementedError

    def abort(self):
        """Abort the workflow asynchronously. The completion callback will be
        called with an instance of AMQPConnectionWorkflowAborted.

        NOTE: we can't cancel/close synchronously because aborting pika
        Connection and its transport requires an asynchronous operation.

        :raises AMQPConnectionWorkflowWrongState: If called in wrong state, such
            as before starting or after completion has been reported.
        """
        raise NotImplementedError
class AMQPConnectionWorkflow(AbstractAMQPConnectionWorkflow):
    """Implements Pika's default workflow for performing multiple TCP/[SSL]/AMQP
    connection attempts with timeouts and retries until one succeeds or all
    attempts fail.

    The workflow:
        while not success and retries remain:
            1. For each given config (pika.connection.Parameters object):
                A. Perform DNS resolution of the config's host.
                B. Attempt to establish TCP/[SSL]/AMQP for each resolved address
                   until one succeeds, in which case we're done.
            2. If all configs failed but retries remain, resume from beginning
               after the given retry pause. NOTE: failure of DNS resolution
               is equivalent to one cycle and will be retried after the pause
               if retries remain.

    """

    # Socket parameters used when resolving host addresses via getaddrinfo()
    _SOCK_TYPE = socket.SOCK_STREAM
    _IPPROTO = socket.IPPROTO_TCP

    # Workflow lifecycle states
    _STATE_INIT = 0  # constructed; start() not called yet
    _STATE_ACTIVE = 1  # start() called; workflow in progress
    _STATE_ABORTING = 2  # abort() called; awaiting asynchronous completion
    _STATE_DONE = 3  # completion reported to client

    def __init__(self, _until_first_amqp_attempt=False):
        """
        NOTE: the overall attempt count and the pause between retry cycles are
        not constructor args; `start()` takes them from the last config's
        `connection_attempts` and `retry_delay` attributes.

        :param bool _until_first_amqp_attempt: INTERNAL USE ONLY; ends workflow
            after first AMQP handshake attempt, regardless of outcome (success
            or failure). The automatic connection logic in
            `pika.connection.Connection` enables this because it's not
            designed/tested to reset all state properly to handle more than one
            AMQP handshake attempt.

        TODO: Do we need getaddrinfo timeout?
        TODO: Would it be useful to implement exponential back-off?

        """
        self._attempts_remaining = None  # supplied by start()
        self._retry_pause = None  # supplied by start()
        self._until_first_amqp_attempt = _until_first_amqp_attempt

        # Provided by set_io_services()
        # pylint: disable=C0301
        self._nbio = None  # type: pika.adapters.utils.nbio_interface.AbstractIOServices

        # Current index within `_connection_configs`; initialized when
        # starting a new connection sequence.
        self._current_config_index = None

        self._connection_configs = None  # supplied by start()
        self._connector_factory = None  # supplied by start()
        self._on_done = None  # supplied by start()

        self._connector = None  # type: AMQPConnector

        self._task_ref = None  # current cancelable asynchronous task or timer
        self._addrinfo_iter = None

        # Exceptions from all failed connection attempts in this workflow
        self._connection_errors = []

        self._state = self._STATE_INIT

    def set_io_services(self, nbio):
        """Called by the connection adapter only on pika's
        `AMQPConnectionWorkflow` instance to provide it the adapter-specific
        `AbstractIOServices` object before calling the `start()` method.

        NOTE: Custom workflow implementations should use the native I/O loop
        directly because `AbstractIOServices` is private to Pika
        implementation and its interface may change without notice.

        :param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:

        """
        self._nbio = nbio

    def start(
            self,
            connection_configs,
            connector_factory,
            native_loop,  # pylint: disable=W0613
            on_done):
        """Override `AbstractAMQPConnectionWorkflow.start()`.

        NOTE: This implementation uses `connection_attempts` and `retry_delay`
        values from the last element of the given `connection_configs` sequence
        as the overall number of connection attempts of the entire
        `connection_configs` sequence and pause between each sequence.

        """
        if self._state != self._STATE_INIT:
            raise AMQPConnectorWrongState(
                'Already in progress or finished; state={}'.format(self._state))

        try:
            iter(connection_configs)
        except Exception as error:
            raise TypeError(
                'connection_configs does not support iteration: {!r}'.format(
                    error))
        if not connection_configs:
            raise ValueError(
                'connection_configs is empty: {!r}.'.format(connection_configs))

        self._connection_configs = connection_configs
        self._connector_factory = connector_factory
        self._on_done = on_done

        self._attempts_remaining = connection_configs[-1].connection_attempts
        self._retry_pause = connection_configs[-1].retry_delay

        self._state = self._STATE_ACTIVE

        _LOG.debug('Starting AMQP Connection workflow asynchronously.')

        # Begin from our own I/O loop context to avoid calling back into client
        # from client's call here
        self._task_ref = self._nbio.call_later(
            0, functools.partial(self._start_new_cycle_async, first=True))

    def abort(self):
        """Override `AbstractAMQPConnectionWorkflow.abort()`.

        """
        if self._state == self._STATE_INIT:
            raise AMQPConnectorWrongState('Cannot abort before starting.')
        elif self._state == self._STATE_DONE:
            raise AMQPConnectorWrongState(
                'Cannot abort after completion was reported')

        self._state = self._STATE_ABORTING
        self._deactivate()

        _LOG.info('AMQPConnectionWorkflow: beginning client-initiated '
                  'asynchronous abort.')

        if self._connector is None:
            # No connection attempt in flight; report the abort ourselves,
            # but via the I/O loop to keep the callback asynchronous.
            _LOG.debug('AMQPConnectionWorkflow.abort(): no connector, so just '
                       'scheduling completion report via I/O loop.')
            self._nbio.add_callback_threadsafe(
                functools.partial(self._report_completion_and_cleanup,
                                  AMQPConnectionWorkflowAborted()))
        else:
            # Delegate: the connector's completion callback will surface the
            # abort through _on_connector_done().
            _LOG.debug('AMQPConnectionWorkflow.abort(): requesting '
                       'connector.abort().')
            self._connector.abort()

    def _close(self):
        """Cancel asynchronous tasks and clean up to assist garbage collection.

        Transition to _STATE_DONE.

        """
        self._deactivate()

        self._connection_configs = None
        self._nbio = None
        self._connector_factory = None
        self._on_done = None
        self._connector = None
        self._addrinfo_iter = None
        self._connection_errors = None

        self._state = self._STATE_DONE

    def _deactivate(self):
        """Cancel asynchronous tasks.

        """
        if self._task_ref is not None:
            self._task_ref.cancel()
            self._task_ref = None

    def _report_completion_and_cleanup(self, result):
        """Clean up and invoke client's `on_done` callback.

        :param pika.connection.Connection | AMQPConnectionWorkflowFailed result:
            value to pass to user's `on_done` callback.
        """
        if isinstance(result, BaseException):
            _LOG.error('AMQPConnectionWorkflow - reporting failure: %r', result)
        else:
            _LOG.info('AMQPConnectionWorkflow - reporting success: %r', result)

        # Capture the callback before _close() wipes instance state.
        on_done = self._on_done
        self._close()

        on_done(result)

    def _start_new_cycle_async(self, first):
        """Start a new workflow cycle (if any more attempts are left) beginning
        with the first Parameters object in self._connection_configs. If out of
        attempts, report `AMQPConnectionWorkflowFailed`.

        :param bool first: if True, don't delay; otherwise delay next attempt by
            `self._retry_pause` seconds.
        """
        self._task_ref = None

        assert self._attempts_remaining >= 0, self._attempts_remaining

        if self._attempts_remaining <= 0:
            error = AMQPConnectionWorkflowFailed(self._connection_errors)
            _LOG.error('AMQP connection workflow failed: %r.', error)
            self._report_completion_and_cleanup(error)
            return

        self._attempts_remaining -= 1
        _LOG.debug(
            'Beginning a new AMQP connection workflow cycle; attempts '
            'remaining after this: %s', self._attempts_remaining)

        # None signals _try_next_config_async() to restart at config index 0.
        self._current_config_index = None

        self._task_ref = self._nbio.call_later(
            0 if first else self._retry_pause, self._try_next_config_async)

    def _try_next_config_async(self):
        """Attempt to connect using the next Parameters config. If there are no
        more configs, start a new cycle.

        """
        self._task_ref = None

        if self._current_config_index is None:
            self._current_config_index = 0
        else:
            self._current_config_index += 1

        if self._current_config_index >= len(self._connection_configs):
            _LOG.debug('_try_next_config_async: starting a new cycle.')
            self._start_new_cycle_async(first=False)
            return

        params = self._connection_configs[self._current_config_index]

        _LOG.debug('_try_next_config_async: %r:%s', params.host, params.port)

        # Begin with host address resolution
        assert self._task_ref is None
        self._task_ref = self._nbio.getaddrinfo(
            host=params.host,
            port=params.port,
            socktype=self._SOCK_TYPE,
            proto=self._IPPROTO,
            on_done=self._on_getaddrinfo_async_done)

    def _on_getaddrinfo_async_done(self, addrinfos_or_exc):
        """Handles completion callback from asynchronous `getaddrinfo()`.

        :param list | BaseException addrinfos_or_exc: resolved address records
            returned by `getaddrinfo()` or an exception object from failure.
        """
        self._task_ref = None

        if isinstance(addrinfos_or_exc, BaseException):
            # DNS failure counts as a full failed cycle; retry after pause
            # if attempts remain.
            _LOG.error('getaddrinfo failed: %r.', addrinfos_or_exc)
            self._connection_errors.append(addrinfos_or_exc)
            self._start_new_cycle_async(first=False)
            return

        _LOG.debug('getaddrinfo returned %s records', len(addrinfos_or_exc))
        self._addrinfo_iter = iter(addrinfos_or_exc)

        self._try_next_resolved_address()

    def _try_next_resolved_address(self):
        """Try connecting using next resolved address. If there aren't any left,
        continue with next Parameters config.

        """
        try:
            addr_record = next(self._addrinfo_iter)
        except StopIteration:
            _LOG.debug(
                '_try_next_resolved_address: continuing with next config.')
            self._try_next_config_async()
            return

        _LOG.debug('Attempting to connect using address record %r', addr_record)

        self._connector = self._connector_factory()  # type: AMQPConnector

        self._connector.start(
            addr_record=addr_record,
            conn_params=self._connection_configs[self._current_config_index],
            on_done=self._on_connector_done)

    def _on_connector_done(self, conn_or_exc):
        """Handle completion of connection attempt by `AMQPConnector`.

        :param pika.connection.Connection | BaseException conn_or_exc: See
            `AMQPConnector.start()` for exception details.

        """
        self._connector = None
        _LOG.debug('Connection attempt completed with %r', conn_or_exc)

        if isinstance(conn_or_exc, BaseException):
            self._connection_errors.append(conn_or_exc)

            if isinstance(conn_or_exc, AMQPConnectorAborted):
                assert self._state == self._STATE_ABORTING, \
                    'Expected _STATE_ABORTING, but got {!r}'.format(self._state)

                self._report_completion_and_cleanup(
                    AMQPConnectionWorkflowAborted())
            elif (self._until_first_amqp_attempt and
                  isinstance(conn_or_exc, AMQPConnectorAMQPHandshakeError)):
                _LOG.debug('Ending AMQP connection workflow after first failed '
                           'AMQP handshake due to _until_first_amqp_attempt.')
                if isinstance(conn_or_exc.exception,
                              pika.exceptions.ConnectionOpenAborted):
                    # NOTE(review): this assigns the exception CLASS, not an
                    # instance - likely a missing "()"; downstream consumers
                    # documented to receive an instance. Verify against
                    # callers before changing.
                    error = AMQPConnectionWorkflowAborted
                else:
                    error = AMQPConnectionWorkflowFailed(
                        self._connection_errors)

                self._report_completion_and_cleanup(error)
            else:
                self._try_next_resolved_address()
        else:
            # Success!
            self._report_completion_and_cleanup(conn_or_exc)
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,441 @@
|
||||
"""Non-blocking I/O interface for pika connection adapters.
|
||||
|
||||
I/O interface expected by `pika.adapters.base_connection.BaseConnection`
|
||||
|
||||
NOTE: This API is modeled after asyncio in python3 for a couple of reasons
|
||||
1. It's a sensible API
|
||||
2. To make it easy to implement at least on top of the built-in asyncio
|
||||
|
||||
Furthermore, the API caters to the needs of pika core and lack of generalization
|
||||
is intentional for the sake of reducing complexity of the implementation and
|
||||
testing and lessening the maintenance burden.
|
||||
|
||||
"""
|
||||
import abc
|
||||
|
||||
import pika.compat
|
||||
|
||||
|
||||
class AbstractIOServices(pika.compat.AbstractBase):
    """Interface to I/O services required by `pika.adapters.BaseConnection` and
    related utilities.

    NOTE: This is not a public API. Pika users should rely on the native I/O
    loop APIs (e.g., asyncio event loop, tornado ioloop, twisted reactor, etc.)
    that corresponds to the chosen Connection adapter.

    """

    @abc.abstractmethod
    def get_native_ioloop(self):
        """Returns the native I/O loop instance, such as Twisted reactor,
        asyncio's or tornado's event loop

        """
        raise NotImplementedError

    @abc.abstractmethod
    def close(self):
        """Release IOLoop's resources.

        the `close()` method is intended to be called by Pika's own test
        code only after `start()` returns. After calling `close()`, no other
        interaction with the closed instance of `IOLoop` should be performed.

        NOTE: This method is provided for Pika's own test scripts that need to
        be able to run I/O loops generically to test multiple Connection Adapter
        implementations. Pika users should use the native I/O loop's API
        instead.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def run(self):
        """Run the I/O loop. It will loop until requested to exit. See `stop()`.

        NOTE: the outcome of restarting an instance that had been stopped is
        UNDEFINED!

        NOTE: This method is provided for Pika's own test scripts that need to
        be able to run I/O loops generically to test multiple Connection Adapter
        implementations (not all of the supported I/O Loop frameworks have
        methods named start/stop). Pika users should use the native I/O loop's
        API instead.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.

        NOTE: The outcome of calling `stop()` on a non-running instance is
        UNDEFINED!

        NOTE: This method is provided for Pika's own test scripts that need to
        be able to run I/O loops generically to test multiple Connection Adapter
        implementations (not all of the supported I/O Loop frameworks have
        methods named start/stop). Pika users should use the native I/O loop's
        API instead.

        To invoke `stop()` safely from a thread other than this IOLoop's thread,
        call it via `add_callback_threadsafe`; e.g.,

            `ioloop.add_callback_threadsafe(ioloop.stop)`

        """
        raise NotImplementedError

    @abc.abstractmethod
    def add_callback_threadsafe(self, callback):
        """Requests a call to the given function as soon as possible. It will be
        called from this IOLoop's thread.

        NOTE: This is the only thread-safe method offered by the IOLoop adapter.
              All other manipulations of the IOLoop adapter and objects governed
              by it must be performed from the IOLoop's thread.

        NOTE: if you know that the requester is running on the same thread as
              the connection it is more efficient to use the
              `ioloop.call_later()` method with a delay of 0.

        :param callable callback: The callback method; must be callable.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.

        If two are scheduled for the same time, it's undefined which one will
        be called first.

        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: A handle that can be used to cancel the request.
        :rtype: AbstractTimerReference

        """
        raise NotImplementedError

    @abc.abstractmethod
    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Perform the equivalent of `socket.getaddrinfo()` asynchronously.

        See `socket.getaddrinfo()` for the standard args.

        :param callable on_done: user callback that takes the return value of
            `socket.getaddrinfo()` upon successful completion or exception upon
            failure (check for `BaseException`) as its only arg. It will not be
            called if the operation was cancelled.
        :rtype: AbstractIOReference
        """
        raise NotImplementedError

    @abc.abstractmethod
    def connect_socket(self, sock, resolved_addr, on_done):
        """Perform the equivalent of `socket.connect()` on a previously-resolved
        address asynchronously.

        IMPLEMENTATION NOTE: Pika's connection logic resolves the addresses
            prior to making socket connections, so we don't need to burden the
            implementations of this method with the extra logic of asynchronous
            DNS resolution. Implementations can use `socket.inet_pton()` to
            verify the address.

        :param socket.socket sock: non-blocking socket that needs to be
            connected via `socket.socket.connect()`
        :param tuple resolved_addr: resolved destination address/port two-tuple
            as per `socket.socket.connect()`, except that the first element must
            be an actual IP address that's consistent with the given socket's
            address family.
        :param callable on_done: user callback that takes None upon successful
            completion or exception (check for `BaseException`) upon error as
            its only arg. It will not be called if the operation was cancelled.

        :rtype: AbstractIOReference
        :raises ValueError: if host portion of `resolved_addr` is not an IP
            address or is inconsistent with the socket's address family as
            validated via `socket.inet_pton()`
        """
        raise NotImplementedError

    @abc.abstractmethod
    def create_streaming_connection(self,
                                    protocol_factory,
                                    sock,
                                    on_done,
                                    ssl_context=None,
                                    server_hostname=None):
        """Perform SSL session establishment, if requested, on the already-
        connected socket and link the streaming transport/protocol pair.

        NOTE: This method takes ownership of the socket.

        :param callable protocol_factory: called without args, returns an
            instance with the `AbstractStreamProtocol` interface. The protocol's
            `connection_made(transport)` method will be called to link it to
            the transport after remaining connection activity (e.g., SSL session
            establishment), if any, is completed successfully.
        :param socket.socket sock: Already-connected, non-blocking
            `socket.SOCK_STREAM` socket to be used by the transport. We take
            ownership of this socket.
        :param callable on_done: User callback
            `on_done(BaseException | (transport, protocol))` to be notified when
            the asynchronous operation completes. An exception arg indicates
            failure (check for `BaseException`); otherwise the two-tuple will
            contain the linked transport/protocol pair having
            AbstractStreamTransport and AbstractStreamProtocol interfaces
            respectively.
        :param None | ssl.SSLContext ssl_context: if None, this will proceed as
            a plaintext connection; otherwise, if not None, SSL session
            establishment will be performed prior to linking the transport and
            protocol.
        :param str | None server_hostname: For use during SSL session
            establishment to match against the target server's certificate. The
            value `None` disables this check (which is a huge security risk)
        :rtype: AbstractIOReference
        """
        raise NotImplementedError
class AbstractFileDescriptorServices(pika.compat.AbstractBase):
    """Interface definition of common non-blocking file descriptor services
    required by some utility implementations.

    NOTE: This is not a public API. Pika users should rely on the native I/O
    loop APIs (e.g., asyncio event loop, tornado ioloop, twisted reactor, etc.)
    that corresponds to the chosen Connection adapter.

    """

    @abc.abstractmethod
    def set_reader(self, fd, on_readable):
        """Call the given callback when the file descriptor is readable.
        Replace prior reader, if any, for the given file descriptor.

        :param fd: file descriptor
        :param callable on_readable: a callback taking no args to be notified
            when fd becomes readable.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def remove_reader(self, fd):
        """Stop watching the given file descriptor for readability

        :param fd: file descriptor
        :returns: True if reader was removed; False if none was registered.
        :rtype: bool

        """
        raise NotImplementedError

    @abc.abstractmethod
    def set_writer(self, fd, on_writable):
        """Call the given callback whenever the file descriptor is writable.
        Replace prior writer callback, if any, for the given file descriptor.

        IMPLEMENTATION NOTE: For portability, implementations of
            `set_writer()` should also watch for indication of error on the
            socket and treat it as equivalent to the writable indication (e.g.,
            also adding the socket to the `exceptfds` arg of `socket.select()`
            and calling the `on_writable` callback if `select.select()`
            indicates that the socket is in error state). Specifically, Windows
            (unlike POSIX) only indicates error on the socket (but not writable)
            when connection establishment fails.

        :param fd: file descriptor
        :param callable on_writable: a callback taking no args to be notified
            when fd becomes writable.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def remove_writer(self, fd):
        """Stop watching the given file descriptor for writability

        :param fd: file descriptor
        :returns: True if writer was removed; False if none was registered.
        :rtype: bool

        """
        raise NotImplementedError
class AbstractTimerReference(pika.compat.AbstractBase):
    """Reference to an asynchronous timer operation (e.g., one created via
    `AbstractIOServices.call_later()`), allowing it to be cancelled."""

    @abc.abstractmethod
    def cancel(self):
        """Cancel the callback. If already cancelled, has no effect.
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class AbstractIOReference(pika.compat.AbstractBase):
    """Reference to an in-flight asynchronous I/O operation, allowing the
    caller to attempt cancellation before the operation completes."""

    @abc.abstractmethod
    def cancel(self):
        """Cancel pending operation

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class AbstractStreamProtocol(pika.compat.AbstractBase):
    """Stream protocol interface. It's compatible with a subset of
    `asyncio.protocols.Protocol` for compatibility with asyncio-based
    `AbstractIOServices` implementation.

    """

    @abc.abstractmethod
    def connection_made(self, transport):
        """Introduces transport to protocol after transport is connected.

        :param AbstractStreamTransport transport:
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    @abc.abstractmethod
    def connection_lost(self, error):
        """Called upon loss or closing of connection.

        NOTE: `connection_made()` and `connection_lost()` are each called just
        once and in that order. All other callbacks are called between them.

        :param BaseException | None error: An exception (check for
            `BaseException`) indicates connection failure. None indicates that
            connection was closed on this side, such as when it's aborted or
            when `AbstractStreamProtocol.eof_received()` returns a result that
            doesn't evaluate to True.
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    @abc.abstractmethod
    def eof_received(self):
        """Called after the remote peer shuts its write end of the connection.

        :returns: A falsy value (including None) will cause the transport to
            close itself, resulting in an eventual `connection_lost()` call
            from the transport. If a truthy value is returned, it will be the
            protocol's responsibility to close/abort the transport.
        :rtype: falsy|truthy
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    @abc.abstractmethod
    def data_received(self, data):
        """Called to deliver incoming data to the protocol.

        :param bytes data: Non-empty data bytes.
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    # pylint: disable=W0511
    # TODO Undecided whether we need write flow-control yet, although it seems
    # like a good idea.
    # @abc.abstractmethod
    # def pause_writing(self):
    #     """Called when the transport's write buffer size becomes greater than or
    #     equal to the transport's high-water mark. It won't be called again until
    #     the transport's write buffer gets back to its low-water mark and then
    #     returns to/past the high-water mark again.
    #     """
    #     raise NotImplementedError
    #
    # @abc.abstractmethod
    # def resume_writing(self):
    #     """Called when the transport's write buffer size becomes less than or
    #     equal to the transport's low-water mark.
    #     """
    #     raise NotImplementedError
|
||||
|
||||
|
||||
class AbstractStreamTransport(pika.compat.AbstractBase):
    """Stream transport interface. It's compatible with a subset of
    `asyncio.transports.Transport` for compatibility with asyncio-based
    `AbstractIOServices` implementation.

    """

    @abc.abstractmethod
    def abort(self):
        """Close connection abruptly without waiting for pending I/O to
        complete. Will invoke the corresponding protocol's `connection_lost()`
        method asynchronously (not in context of the abort() call).

        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_protocol(self):
        """Return the protocol linked to this transport.

        :rtype: AbstractStreamProtocol
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    @abc.abstractmethod
    def write(self, data):
        """Buffer the given data until it can be sent asynchronously.

        :param bytes data:
        :raises ValueError: if called with empty data
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_write_buffer_size(self):
        """
        :returns: Current size of output data buffered by the transport
        :rtype: int
        """
        raise NotImplementedError

    # pylint: disable=W0511
    # TODO Undecided whether we need write flow-control yet, although it seems
    # like a good idea.
    # @abc.abstractmethod
    # def set_write_buffer_limits(self, high, low):
    #     """Set thresholds for calling the protocol's `pause_writing()`
    #     and `resume_writing()` methods. `low` must be less than or equal to
    #     `high`.
    #
    #     NOTE The unintuitive order of the args is preserved to match the
    #     corresponding method in `asyncio.WriteTransport`. I would expect `low`
    #     to be the first arg, especially since
    #     `asyncio.WriteTransport.get_write_buffer_limits()` returns them in the
    #     opposite order. This seems error-prone.
    #
    #     See `asyncio.WriteTransport.get_write_buffer_limits()` for more details
    #     about the args.
    #
    #     :param int high: non-negative high-water mark.
    #     :param int low: non-negative low-water mark.
    #     """
    #     raise NotImplementedError
|
||||
@@ -0,0 +1,600 @@
|
||||
"""
|
||||
Implementation of `nbio_interface.AbstractIOServices` on top of a
|
||||
selector-based I/O loop, such as tornado's and our home-grown
|
||||
select_connection's I/O loops.
|
||||
|
||||
"""
|
||||
import abc
|
||||
import logging
|
||||
import socket
|
||||
import threading
|
||||
|
||||
from pika.adapters.utils import nbio_interface, io_services_utils
|
||||
from pika.adapters.utils.io_services_utils import (check_callback_arg,
|
||||
check_fd_arg)
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AbstractSelectorIOLoop(object):
    """Selector-based I/O loop interface expected by
    `selector_ioloop_adapter.SelectorIOServicesAdapter`

    NOTE: this interface follows the corresponding methods and attributes
    of `tornado.ioloop.IOLoop` in order to avoid additional adapter layering
    when wrapping tornado's IOLoop.
    """

    @property
    @abc.abstractmethod
    def READ(self):  # pylint: disable=C0103
        """The value of the I/O loop's READ flag; READ/WRITE/ERROR may be used
        with bitwise operators as expected.

        Implementation note: the implementations can simply replace these
        READ/WRITE/ERROR properties with class-level attributes

        """

    @property
    @abc.abstractmethod
    def WRITE(self):  # pylint: disable=C0103
        """The value of the I/O loop's WRITE flag; READ/WRITE/ERROR may be used
        with bitwise operators as expected

        """

    @property
    @abc.abstractmethod
    def ERROR(self):  # pylint: disable=C0103
        """The value of the I/O loop's ERROR flag; READ/WRITE/ERROR may be used
        with bitwise operators as expected

        """

    @abc.abstractmethod
    def close(self):
        """Release IOLoop's resources.

        the `close()` method is intended to be called by the application or test
        code only after `start()` returns. After calling `close()`, no other
        interaction with the closed instance of `IOLoop` should be performed.

        """

    @abc.abstractmethod
    def start(self):
        """Run the I/O loop. It will loop until requested to exit. See `stop()`.

        """

    @abc.abstractmethod
    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.

        To invoke `stop()` safely from a thread other than this IOLoop's
        thread, call it via `add_callback`; e.g.,

            `ioloop.add_callback(ioloop.stop)`

        """

    @abc.abstractmethod
    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.

        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: handle to the created timeout that may be passed to
            `remove_timeout()`
        :rtype: object

        """

    @abc.abstractmethod
    def remove_timeout(self, timeout_handle):
        """Remove a timeout

        :param timeout_handle: Handle of timeout to remove

        """

    @abc.abstractmethod
    def add_callback(self, callback):
        """Requests a call to the given function as soon as possible in the
        context of this IOLoop's thread.

        NOTE: This is the only thread-safe method in IOLoop. All other
        manipulations of IOLoop must be performed from the IOLoop's thread.

        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback(ioloop.stop)`

        :param callable callback: The callback method

        """

    @abc.abstractmethod
    def add_handler(self, fd, handler, events):
        """Start watching the given file descriptor for events

        :param int fd: The file descriptor
        :param callable handler: When requested event(s) occur,
            `handler(fd, events)` will be called.
        :param int events: The event mask using READ, WRITE, ERROR.

        """

    @abc.abstractmethod
    def update_handler(self, fd, events):
        """Changes the events we watch for

        :param int fd: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR

        """

    @abc.abstractmethod
    def remove_handler(self, fd):
        """Stop watching the given file descriptor for events

        :param int fd: The file descriptor

        """
|
||||
|
||||
|
||||
class SelectorIOServicesAdapter(io_services_utils.SocketConnectionMixin,
                                io_services_utils.StreamingConnectionMixin,
                                nbio_interface.AbstractIOServices,
                                nbio_interface.AbstractFileDescriptorServices):
    """Implements the
    :py:class:`.nbio_interface.AbstractIOServices` interface
    on top of selector-style native loop having the
    :py:class:`AbstractSelectorIOLoop` interface, such as
    :py:class:`pika.select_connection.IOLoop` and :py:class:`tornado.IOLoop`.

    NOTE:
    :py:class:`.nbio_interface.AbstractFileDescriptorServices`
    interface is only required by the mixins.

    """

    def __init__(self, native_loop):
        """
        :param AbstractSelectorIOLoop native_loop: An instance compatible with
            the `AbstractSelectorIOLoop` interface, but not necessarily derived
            from it.
        """
        self._loop = native_loop

        # Active watchers: maps file descriptors to `_FileDescriptorCallbacks`
        self._watchers = dict()

        # Native loop-specific event masks of interest
        self._readable_mask = self._loop.READ
        # NOTE: tying ERROR to WRITE is particularly handy for Windows, whose
        # `select.select()` differs from Posix by reporting
        # connection-establishment failure only through exceptfds (ERROR event),
        # while the typical application workflow is to wait for the socket to
        # become writable when waiting for socket connection to be established.
        self._writable_mask = self._loop.WRITE | self._loop.ERROR

    def get_native_ioloop(self):
        """Implement
        :py:meth:`.nbio_interface.AbstractIOServices.get_native_ioloop()`.

        """
        return self._loop

    def close(self):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.close()`.

        """
        self._loop.close()

    def run(self):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.run()`.

        """
        self._loop.start()

    def stop(self):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.stop()`.

        """
        self._loop.stop()

    def add_callback_threadsafe(self, callback):
        """Implement
        :py:meth:`.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.

        """
        self._loop.add_callback(callback)

    def call_later(self, delay, callback):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.call_later()`.

        """
        return _TimerHandle(self._loop.call_later(delay, callback), self._loop)

    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.getaddrinfo()`.

        Resolution runs on a worker thread; `on_done` is invoked from this
        I/O loop's thread.

        """
        return _SelectorIOLoopIOHandle(
            _AddressResolver(
                native_loop=self._loop,
                host=host,
                port=port,
                family=family,
                socktype=socktype,
                proto=proto,
                flags=flags,
                on_done=on_done).start())

    def set_reader(self, fd, on_readable):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.set_reader()`.

        """
        LOGGER.debug('SelectorIOServicesAdapter.set_reader(%s, %r)', fd,
                     on_readable)

        check_fd_arg(fd)
        check_callback_arg(on_readable, 'on_readable')

        try:
            callbacks = self._watchers[fd]
        except KeyError:
            # First watcher for this fd: register with the native loop
            self._loop.add_handler(fd, self._on_reader_writer_fd_events,
                                   self._readable_mask)
            self._watchers[fd] = _FileDescriptorCallbacks(reader=on_readable)
            LOGGER.debug('set_reader(%s, _) added handler Rd', fd)
        else:
            if callbacks.reader is None:
                assert callbacks.writer is not None
                # NOTE: Set the reader func before updating the event mask!
                # Otherwise a race condition can occur where the ioloop
                # dispatches a READ event while callbacks.reader is still
                # None (mirrors the same precaution taken in `set_writer()`).
                callbacks.reader = on_readable
                self._loop.update_handler(
                    fd, self._readable_mask | self._writable_mask)
                LOGGER.debug('set_reader(%s, _) updated handler RdWr', fd)
            else:
                LOGGER.debug('set_reader(%s, _) replacing reader', fd)
                callbacks.reader = on_readable

    def remove_reader(self, fd):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.

        """
        LOGGER.debug('SelectorIOServicesAdapter.remove_reader(%s)', fd)

        check_fd_arg(fd)

        try:
            callbacks = self._watchers[fd]
        except KeyError:
            LOGGER.debug('remove_reader(%s) neither was set', fd)
            return False

        if callbacks.reader is None:
            assert callbacks.writer is not None
            LOGGER.debug('remove_reader(%s) reader wasn\'t set Wr', fd)
            return False

        callbacks.reader = None

        if callbacks.writer is None:
            # No callbacks remain for this fd: unregister from the native loop
            del self._watchers[fd]
            self._loop.remove_handler(fd)
            LOGGER.debug('remove_reader(%s) removed handler', fd)
        else:
            self._loop.update_handler(fd, self._writable_mask)
            LOGGER.debug('remove_reader(%s) updated handler Wr', fd)

        return True

    def set_writer(self, fd, on_writable):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.set_writer()`.

        """
        LOGGER.debug('SelectorIOServicesAdapter.set_writer(%s, %r)', fd,
                     on_writable)

        check_fd_arg(fd)
        check_callback_arg(on_writable, 'on_writable')

        try:
            callbacks = self._watchers[fd]
        except KeyError:
            # First watcher for this fd: register with the native loop
            self._loop.add_handler(fd, self._on_reader_writer_fd_events,
                                   self._writable_mask)
            self._watchers[fd] = _FileDescriptorCallbacks(writer=on_writable)
            LOGGER.debug('set_writer(%s, _) added handler Wr', fd)
        else:
            if callbacks.writer is None:
                assert callbacks.reader is not None
                # NOTE: Set the writer func before setting the mask!
                # Otherwise a race condition can occur where ioloop tries to
                # call writer when it is still None.
                callbacks.writer = on_writable
                self._loop.update_handler(
                    fd, self._readable_mask | self._writable_mask)
                LOGGER.debug('set_writer(%s, _) updated handler RdWr', fd)
            else:
                LOGGER.debug('set_writer(%s, _) replacing writer', fd)
                callbacks.writer = on_writable

    def remove_writer(self, fd):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.

        """
        LOGGER.debug('SelectorIOServicesAdapter.remove_writer(%s)', fd)

        check_fd_arg(fd)

        try:
            callbacks = self._watchers[fd]
        except KeyError:
            LOGGER.debug('remove_writer(%s) neither was set.', fd)
            return False

        if callbacks.writer is None:
            assert callbacks.reader is not None
            LOGGER.debug('remove_writer(%s) writer wasn\'t set Rd', fd)
            return False

        callbacks.writer = None

        if callbacks.reader is None:
            # No callbacks remain for this fd: unregister from the native loop
            del self._watchers[fd]
            self._loop.remove_handler(fd)
            LOGGER.debug('remove_writer(%s) removed handler', fd)
        else:
            self._loop.update_handler(fd, self._readable_mask)
            LOGGER.debug('remove_writer(%s) updated handler Rd', fd)

        return True

    def _on_reader_writer_fd_events(self, fd, events):
        """Handle indicated file descriptor events requested via `set_reader()`
        and `set_writer()`.

        :param fd: file descriptor
        :param events: event mask using native loop's READ/WRITE/ERROR. NOTE:
            depending on the underlying poller mechanism, ERROR may be indicated
            upon certain file description state even though we don't request it.
            We ignore ERROR here since `set_reader()`/`set_writer()` don't
            request for it.
        """
        callbacks = self._watchers[fd]

        if events & self._readable_mask and callbacks.reader is None:
            # NOTE: we check for consistency here ahead of the writer callback
            # because the writer callback, if any, can change the events being
            # watched
            LOGGER.warning(
                'READ indicated on fd=%s, but reader callback is None; '
                'events=%s', fd, bin(events))

        if events & self._writable_mask:
            if callbacks.writer is not None:
                callbacks.writer()
            else:
                LOGGER.warning(
                    'WRITE indicated on fd=%s, but writer callback is None; '
                    'events=%s', fd, bin(events))

        if events & self._readable_mask:
            if callbacks.reader is not None:
                callbacks.reader()
            else:
                # Reader callback might have been removed in the scope of writer
                # callback.
                pass
|
||||
|
||||
|
||||
class _FileDescriptorCallbacks(object):
|
||||
"""Holds reader and writer callbacks for a file descriptor"""
|
||||
|
||||
__slots__ = ('reader', 'writer')
|
||||
|
||||
def __init__(self, reader=None, writer=None):
|
||||
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
|
||||
|
||||
class _TimerHandle(nbio_interface.AbstractTimerReference):
    """Adapts a native selector-loop timeout handle to the
    `nbio_interface.AbstractTimerReference` contract.

    """

    def __init__(self, handle, loop):
        """
        :param opaque handle: timer handle from the underlying loop
            implementation that may be passed to its `remove_timeout()` method
        :param AbstractSelectorIOLoop loop: the I/O loop instance that created
            the timeout.
        """
        self._handle = handle
        self._loop = loop

    def cancel(self):
        """Cancel the timeout; subsequent calls are no-ops."""
        if self._loop is None:
            # Already cancelled
            return
        self._loop.remove_timeout(self._handle)
        self._handle = None
        self._loop = None
|
||||
|
||||
|
||||
class _SelectorIOLoopIOHandle(nbio_interface.AbstractIOReference):
    """This module's adaptation of `nbio_interface.AbstractIOReference`

    """

    def __init__(self, subject):
        """
        :param subject: subject of the reference containing a `cancel()` method

        """
        self._subject = subject

    def cancel(self):
        """Cancel pending operation by delegating to the subject.

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool

        """
        return self._subject.cancel()
|
||||
|
||||
|
||||
class _AddressResolver(object):
    """Performs getaddrinfo asynchronously using a thread, then reports result
    via callback from the given I/O loop.

    State machine: NOT_STARTED -> ACTIVE -> (CANCELED | COMPLETED). State
    transitions are guarded by `self._mutex` because `cancel()` (I/O loop
    thread) races with `_resolve()` (worker thread).

    NOTE: at this stage, we're using a thread per request, which may prove
    inefficient and even prohibitive if the app performs many of these
    operations concurrently.
    """
    # Lifecycle states (see class docstring)
    NOT_STARTED = 0
    ACTIVE = 1
    CANCELED = 2
    COMPLETED = 3

    def __init__(self, native_loop, host, port, family, socktype, proto, flags,
                 on_done):
        """
        :param AbstractSelectorIOLoop native_loop:
        :param host: `see socket.getaddrinfo()`
        :param port: `see socket.getaddrinfo()`
        :param family: `see socket.getaddrinfo()`
        :param socktype: `see socket.getaddrinfo()`
        :param proto: `see socket.getaddrinfo()`
        :param flags: `see socket.getaddrinfo()`
        :param on_done: on_done(records|BaseException) callback for reporting
            result from the given I/O loop. The single arg will be either an
            exception object (check for `BaseException`) in case of failure or
            the result returned by `socket.getaddrinfo()`.
        """
        check_callback_arg(on_done, 'on_done')

        self._state = self.NOT_STARTED
        # Either the getaddrinfo() records or the exception it raised
        self._result = None
        self._loop = native_loop
        self._host = host
        self._port = port
        self._family = family
        self._socktype = socktype
        self._proto = proto
        self._flags = flags
        self._on_done = on_done

        # Serializes state transitions between cancel() and _resolve()
        self._mutex = threading.Lock()
        # Zero-delay threading.Timer used as a disposable worker thread
        self._threading_timer = None

    def _cleanup(self):
        """Release resources

        """
        self._loop = None
        self._threading_timer = None
        self._on_done = None

    def start(self):
        """Start asynchronous DNS lookup.

        Must be called at most once, immediately after construction.

        :rtype: nbio_interface.AbstractIOReference

        """
        assert self._state == self.NOT_STARTED, self._state

        self._state = self.ACTIVE
        # A zero-delay Timer gives us a cancelable worker thread without
        # managing a Thread object directly
        self._threading_timer = threading.Timer(0, self._resolve)
        self._threading_timer.start()

        return _SelectorIOLoopIOHandle(self)

    def cancel(self):
        """Cancel the pending resolver

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool

        """
        # Try to cancel, but no guarantees
        with self._mutex:
            if self._state == self.ACTIVE:
                LOGGER.debug('Canceling resolver for %s:%s', self._host,
                             self._port)
                self._state = self.CANCELED

                # Attempt to cancel, but not guaranteed
                self._threading_timer.cancel()

                self._cleanup()

                return True
            else:
                LOGGER.debug(
                    'Ignoring _AddressResolver cancel request when not ACTIVE; '
                    '(%s:%s); state=%s', self._host, self._port, self._state)
                return False

    def _resolve(self):
        """Call `socket.getaddrinfo()` and return result via user's callback
        function on the given I/O loop

        Runs on the worker thread.

        """
        try:
            # NOTE: on python 2.x, can't pass keyword args to getaddrinfo()
            result = socket.getaddrinfo(self._host, self._port, self._family,
                                        self._socktype, self._proto,
                                        self._flags)
        except Exception as exc:  # pylint: disable=W0703
            # Failure is reported to the caller as the result value
            LOGGER.error('Address resolution failed: %r', exc)
            result = exc

        self._result = result

        # Schedule result to be returned to user via user's event loop
        with self._mutex:
            if self._state == self.ACTIVE:
                self._loop.add_callback(self._dispatch_result)
            else:
                LOGGER.debug(
                    'Asynchronous getaddrinfo cancellation detected; '
                    'in thread; host=%r', self._host)

    def _dispatch_result(self):
        """This is called from the user's I/O loop to pass the result to the
        user via the user's on_done callback

        """
        # Re-check state: cancel() may have run between scheduling and dispatch
        if self._state == self.ACTIVE:
            self._state = self.COMPLETED
            try:
                LOGGER.debug(
                    'Invoking asynchronous getaddrinfo() completion callback; '
                    'host=%r', self._host)
                self._on_done(self._result)
            finally:
                self._cleanup()
        else:
            LOGGER.debug(
                'Asynchronous getaddrinfo cancellation detected; '
                'in I/O loop context; host=%r', self._host)
|
||||
71
backend/venv/Lib/site-packages/pika/amqp_object.py
Normal file
71
backend/venv/Lib/site-packages/pika/amqp_object.py
Normal file
@@ -0,0 +1,71 @@
|
||||
"""Base classes that are extended by low level AMQP frames and higher level
|
||||
AMQP classes and methods.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class AMQPObject(object):
    """Base object that is extended by AMQP low level frames and AMQP classes
    and methods.

    """
    # Human-readable name used by __repr__; overridden by subclasses
    NAME = 'AMQPObject'
    # AMQP class/method index; overridden by generated spec classes
    INDEX = None

    def __repr__(self):
        """Return `<NAME>` or `<NAME([key=value, ...])>` listing only the
        instance attributes that differ from their class-level defaults.

        :rtype: str

        """
        items = list()
        for key, value in self.__dict__.items():
            if getattr(self.__class__, key, None) != value:
                items.append('%s=%s' % (key, value))
        if not items:
            return "<%s>" % self.NAME
        return "<%s(%s)>" % (self.NAME, sorted(items))

    def __eq__(self, other):
        """Compare by instance attributes (`__dict__`).

        :param other: object to compare against
        :returns: True if both objects have equal attribute dicts; False when
            `other` is None; NotImplemented when `other` has no `__dict__`
            (previously this raised AttributeError for e.g. `obj == 5`)
        :rtype: bool

        """
        if other is None:
            return False
        if not hasattr(other, '__dict__'):
            # Let Python fall back to the reflected comparison / identity
            # instead of raising AttributeError on `other.__dict__`
            return NotImplemented
        return self.__dict__ == other.__dict__
|
||||
|
||||
|
||||
class Class(AMQPObject):
    """Is extended by AMQP classes"""
    # Subclasses override NAME with the concrete AMQP class name
    NAME = 'Unextended Class'
|
||||
|
||||
|
||||
class Method(AMQPObject):
    """Is extended by AMQP methods"""
    # Subclasses override NAME with the concrete AMQP method name
    NAME = 'Unextended Method'
    # Whether the method expects a synchronous (RPC-style) response
    synchronous = False

    def _set_content(self, properties, body):
        """If the method is a content frame, set the properties and body to
        be carried as attributes of the class.

        :param pika.frame.Properties properties: AMQP Basic Properties
        :param bytes body: The message body

        """
        self._properties = properties  # pylint: disable=W0201
        self._body = body  # pylint: disable=W0201

    def get_properties(self):
        """Return the properties if they are set.

        NOTE: raises AttributeError if `_set_content()` was never called on
        this instance, since `_properties` is only assigned there.

        :rtype: pika.frame.Properties

        """
        return self._properties

    def get_body(self):
        """Return the message body if it is set.

        NOTE: raises AttributeError if `_set_content()` was never called on
        this instance, since `_body` is only assigned there.

        :rtype: str|unicode

        """
        return self._body
|
||||
|
||||
|
||||
class Properties(AMQPObject):
    """Class to encompass message properties (AMQP Basic.Properties)"""
    # Subclasses override NAME with the concrete properties class name
    NAME = 'Unextended Properties'
|
||||
407
backend/venv/Lib/site-packages/pika/callback.py
Normal file
407
backend/venv/Lib/site-packages/pika/callback.py
Normal file
@@ -0,0 +1,407 @@
|
||||
"""Callback management class, common area for keeping track of all callbacks in
|
||||
the Pika stack.
|
||||
|
||||
"""
|
||||
import functools
|
||||
import logging
|
||||
|
||||
from pika import frame
|
||||
from pika import amqp_object
|
||||
from pika.compat import xrange, canonical_str
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def name_or_value(value):
    """Will take Frame objects, classes, etc and attempt to return a valid
    string identifier for them.

    :param pika.amqp_object.AMQPObject|pika.frame.Frame|int|str value: The
        value to sanitize
    :rtype: str

    """
    # An AMQPObject subclass (not instance)? issubclass raises TypeError when
    # `value` is not a class at all.
    try:
        amqp_subclass = issubclass(value, amqp_object.AMQPObject)
    except TypeError:
        amqp_subclass = False
    if amqp_subclass:
        return value.NAME

    # A Pika Method frame instance?
    if isinstance(value, frame.Method):
        return value.method.NAME

    # Any other AMQPObject instance (checked after Method, which extends it)
    if isinstance(value, amqp_object.AMQPObject):
        return value.NAME

    # Fall back to a canonical string form (UTF-8 encoded on Python 2)
    return canonical_str(value)
|
||||
|
||||
|
||||
def sanitize_prefix(function):
    """Automatically call name_or_value on the prefix passed in.

    Decorator for CallbackManager methods whose signature is
    `(self, prefix, key, ...)`: sanitizes the `prefix` and `key` arguments
    (whether passed positionally or by keyword) via `name_or_value()` before
    invoking the wrapped method.

    :param callable function: the method being decorated
    :rtype: callable

    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        args = list(args)
        # offset 1 skips `self` in the positional args
        offset = 1
        if 'prefix' in kwargs:
            kwargs['prefix'] = name_or_value(kwargs['prefix'])
        elif len(args) - 1 >= offset:
            args[offset] = name_or_value(args[offset])
            # NOTE(review): offset only advances when prefix was positional;
            # a positional `key` following a keyword `prefix` would be
            # sanitized at the wrong index — callers appear to pass both the
            # same way, so this holds in practice.
            offset += 1
        if 'key' in kwargs:
            kwargs['key'] = name_or_value(kwargs['key'])
        elif len(args) - 1 >= offset:
            args[offset] = name_or_value(args[offset])

        return function(*tuple(args), **kwargs)

    return wrapper
|
||||
|
||||
|
||||
def check_for_prefix_and_key(function):
    """Automatically return false if the key or prefix is not in the callbacks
    for the instance.

    Decorator for CallbackManager methods with signature
    `(self, prefix, key, ...)`: short-circuits to False when the sanitized
    prefix/key pair is absent from the instance's `_stack`, so the wrapped
    method only runs for registered callbacks.

    :param callable function: the method being decorated
    :rtype: callable

    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        # offset 1 skips `self` in the positional args
        offset = 1
        # Sanitize the prefix
        if 'prefix' in kwargs:
            prefix = name_or_value(kwargs['prefix'])
        else:
            prefix = name_or_value(args[offset])
            offset += 1

        # Make sure to sanitize the key as well
        if 'key' in kwargs:
            key = name_or_value(kwargs['key'])
        else:
            key = name_or_value(args[offset])

        # Make sure prefix and key are in the stack; args[0] is `self`
        if prefix not in args[0]._stack or key not in args[0]._stack[prefix]:  # pylint: disable=W0212
            return False

        # Execute the method
        return function(*args, **kwargs)

    return wrapper
|
||||
|
||||
|
||||
class CallbackManager(object):
    """CallbackManager is a global callback system designed to be a single place
    where Pika can manage callbacks and process them. It should be referenced
    by the CallbackManager.instance() method instead of constructing new
    instances of it.

    """
    # Keys used inside each per-callback configuration dict
    CALLS = 'calls'
    ARGUMENTS = 'arguments'
    DUPLICATE_WARNING = 'Duplicate callback found for "%s:%s"'
    CALLBACK = 'callback'
    ONE_SHOT = 'one_shot'
    ONLY_CALLER = 'only'

    def __init__(self):
        """Create an instance of the CallbackManager"""
        # Nested mapping: prefix -> key -> list of callback dicts
        self._stack = dict()

    @sanitize_prefix
    def add(self,
            prefix,
            key,
            callback,
            one_shot=True,
            only_caller=None,
            arguments=None):
        """Add a callback to the stack for the specified key. If the call is
        specified as one_shot, it will be removed after being fired

        The prefix is usually the channel number but the class is generic
        and prefix and key may be any value. If you pass in only_caller
        CallbackManager will restrict processing of the callback to only
        the calling function/object that you specify.

        :param str|int prefix: Categorize the callback
        :param str|dict key: The key for the callback
        :param callable callback: The callback to call
        :param bool one_shot: Remove this callback after it is called
        :param object only_caller: Only allow one_caller value to call the
                                   event that fires the callback.
        :param dict arguments: Arguments to validate when processing
        :rtype: tuple(prefix, key)

        """
        # Prep the stack
        if prefix not in self._stack:
            self._stack[prefix] = dict()

        if key not in self._stack[prefix]:
            self._stack[prefix][key] = list()

        # Check for a duplicate registration; a duplicate one-shot callback
        # just increments its reference counter instead of being re-added.
        for callback_dict in self._stack[prefix][key]:
            if (callback_dict[self.CALLBACK] == callback and
                    callback_dict[self.ARGUMENTS] == arguments and
                    callback_dict[self.ONLY_CALLER] == only_caller):
                if callback_dict[self.ONE_SHOT] is True:
                    callback_dict[self.CALLS] += 1
                    LOGGER.debug('Incremented callback reference counter: %r',
                                 callback_dict)
                else:
                    LOGGER.warning(self.DUPLICATE_WARNING, prefix, key)
                return prefix, key

        # Create the callback dictionary
        callback_dict = self._callback_dict(callback, one_shot, only_caller,
                                            arguments)
        self._stack[prefix][key].append(callback_dict)
        LOGGER.debug('Added: %r', callback_dict)
        return prefix, key

    def clear(self):
        """Clear all the callbacks if there are any defined."""
        self._stack = dict()
        LOGGER.debug('Callbacks cleared')

    @sanitize_prefix
    def cleanup(self, prefix):
        """Remove all callbacks from the stack by a prefix. Returns True
        if keys were there to be removed

        :param str or int prefix: The prefix for keeping track of callbacks with
        :rtype: bool

        """
        LOGGER.debug('Clearing out %r from the stack', prefix)
        if prefix not in self._stack or not self._stack[prefix]:
            return False
        del self._stack[prefix]
        return True

    @sanitize_prefix
    def pending(self, prefix, key):
        """Return count of callbacks for a given prefix or key or None

        :param str|int prefix: Categorize the callback
        :param object|str|dict key: The key for the callback
        :rtype: None or int

        """
        # idiom fix: "x not in y" rather than "not x in y"
        if prefix not in self._stack or key not in self._stack[prefix]:
            return None
        return len(self._stack[prefix][key])

    @sanitize_prefix
    @check_for_prefix_and_key
    def process(self, prefix, key, caller, *args, **keywords):
        """Run through and process all the callbacks for the specified keys.
        Caller should be specified at all times so that callbacks which
        require a specific function to call CallbackManager.process will
        not be processed.

        :param str|int prefix: Categorize the callback
        :param object|str|int key: The key for the callback
        :param object caller: Who is firing the event
        :param list args: Any optional arguments
        :param dict keywords: Optional keyword arguments
        :rtype: bool

        """
        LOGGER.debug('Processing %s:%s', prefix, key)
        if prefix not in self._stack or key not in self._stack[prefix]:
            return False

        callbacks = list()
        # Check each callback, append it to the list if it should be called
        # (iterate over a copy because one-shot consumption mutates the list)
        for callback_dict in list(self._stack[prefix][key]):
            if self._should_process_callback(callback_dict, caller, list(args)):
                callbacks.append(callback_dict[self.CALLBACK])
                if callback_dict[self.ONE_SHOT]:
                    self._use_one_shot_callback(prefix, key, callback_dict)

        # Call each callback
        for callback in callbacks:
            LOGGER.debug('Calling %s for "%s:%s"', callback, prefix, key)
            try:
                callback(*args, **keywords)
            except:  # deliberately broad: log every failure, then re-raise
                LOGGER.exception('Calling %s for "%s:%s" failed', callback,
                                 prefix, key)
                raise
        return True

    @sanitize_prefix
    @check_for_prefix_and_key
    def remove(self, prefix, key, callback_value=None, arguments=None):
        """Remove a callback from the stack by prefix, key and optionally
        the callback itself. If you only pass in prefix and key, all
        callbacks for that prefix and key will be removed.

        :param str or int prefix: The prefix for keeping track of callbacks with
        :param str key: The callback key
        :param callable callback_value: The method defined to call on callback
        :param dict arguments: Optional arguments to check
        :rtype: bool

        """
        if callback_value:
            offsets_to_remove = list()
            # Iterate in reverse so collected offsets are in descending
            # order and deletions below do not shift pending indices.
            for offset in xrange(len(self._stack[prefix][key]), 0, -1):
                callback_dict = self._stack[prefix][key][offset - 1]

                if (callback_dict[self.CALLBACK] == callback_value and
                        self._arguments_match(callback_dict, [arguments])):
                    offsets_to_remove.append(offset - 1)

            for offset in offsets_to_remove:
                try:
                    LOGGER.debug('Removing callback #%i: %r', offset,
                                 self._stack[prefix][key][offset])
                    del self._stack[prefix][key][offset]
                except (KeyError, IndexError):
                    # BUGFIX: deleting a list element by index raises
                    # IndexError, never KeyError, so the original
                    # "except KeyError" could not catch a stale offset;
                    # catch both so a stale offset or a key removed
                    # mid-loop is tolerated as intended.
                    pass

        self._cleanup_callback_dict(prefix, key)
        return True

    @sanitize_prefix
    @check_for_prefix_and_key
    def remove_all(self, prefix, key):
        """Remove all callbacks for the specified prefix and key.

        :param str prefix: The prefix for keeping track of callbacks with
        :param str key: The callback key

        """
        del self._stack[prefix][key]
        self._cleanup_callback_dict(prefix, key)

    def _arguments_match(self, callback_dict, args):
        """Validate if the arguments passed in match the expected arguments in
        the callback_dict. We expect this to be a frame passed in to *args for
        process or passed in as a list from remove.

        :param dict callback_dict: The callback dictionary to evaluate against
        :param list args: The arguments passed in as a list

        """
        if callback_dict[self.ARGUMENTS] is None:
            return True
        if not args:
            return False
        if isinstance(args[0], dict):
            return self._dict_arguments_match(args[0],
                                              callback_dict[self.ARGUMENTS])
        # Pika frames carry the payload on .method; unwrap when present
        return self._obj_arguments_match(
            args[0].method if hasattr(args[0], 'method') else args[0],
            callback_dict[self.ARGUMENTS])

    def _callback_dict(self, callback, one_shot, only_caller, arguments):
        """Return the callback dictionary.

        :param callable callback: The callback to call
        :param bool one_shot: Remove this callback after it is called
        :param object only_caller: Only allow one_caller value to call the
                                   event that fires the callback.
        :rtype: dict

        """
        value = {
            self.CALLBACK: callback,
            self.ONE_SHOT: one_shot,
            self.ONLY_CALLER: only_caller,
            self.ARGUMENTS: arguments
        }
        if one_shot:
            # Reference counter incremented on duplicate add() calls
            value[self.CALLS] = 1
        return value

    def _cleanup_callback_dict(self, prefix, key=None):
        """Remove empty dict nodes in the callback stack.

        :param str or int prefix: The prefix for keeping track of callbacks with
        :param str key: The callback key

        """
        if key and key in self._stack[prefix] and not self._stack[prefix][key]:
            del self._stack[prefix][key]
        if prefix in self._stack and not self._stack[prefix]:
            del self._stack[prefix]

    @staticmethod
    def _dict_arguments_match(value, expectation):
        """Checks a dict to see if it has attributes that meet the expectation.

        :param dict value: The dict to evaluate
        :param dict expectation: The values to check against
        :rtype: bool

        """
        LOGGER.debug('Comparing %r to %r', value, expectation)
        for key in expectation:
            if value.get(key) != expectation[key]:
                LOGGER.debug('Values in dict do not match for %s', key)
                return False
        return True

    @staticmethod
    def _obj_arguments_match(value, expectation):
        """Checks an object to see if it has attributes that meet the
        expectation.

        :param object value: The object to evaluate
        :param dict expectation: The values to check against
        :rtype: bool

        """
        for key in expectation:
            if not hasattr(value, key):
                LOGGER.debug('%r does not have required attribute: %s',
                             type(value), key)
                return False
            if getattr(value, key) != expectation[key]:
                LOGGER.debug('Values in %s do not match for %s', type(value),
                             key)
                return False
        return True

    def _should_process_callback(self, callback_dict, caller, args):
        """Returns True if the callback should be processed.

        :param dict callback_dict: The callback configuration
        :param object caller: Who is firing the event
        :param list args: Any optional arguments
        :rtype: bool

        """
        if not self._arguments_match(callback_dict, args):
            LOGGER.debug('Arguments do not match for %r, %r', callback_dict,
                         args)
            return False
        return (callback_dict[self.ONLY_CALLER] is None or
                (callback_dict[self.ONLY_CALLER] and
                 callback_dict[self.ONLY_CALLER] == caller))

    def _use_one_shot_callback(self, prefix, key, callback_dict):
        """Process the one-shot callback, decrementing the use counter and
        removing it from the stack if it's now been fully used.

        :param str or int prefix: The prefix for keeping track of callbacks with
        :param str key: The callback key
        :param dict callback_dict: The callback dict to process

        """
        LOGGER.debug('Processing use of oneshot callback')
        callback_dict[self.CALLS] -= 1
        LOGGER.debug('%i registered uses left', callback_dict[self.CALLS])

        if callback_dict[self.CALLS] <= 0:
            self.remove(prefix, key, callback_dict[self.CALLBACK],
                        callback_dict[self.ARGUMENTS])
|
||||
1515
backend/venv/Lib/site-packages/pika/channel.py
Normal file
1515
backend/venv/Lib/site-packages/pika/channel.py
Normal file
File diff suppressed because it is too large
Load Diff
261
backend/venv/Lib/site-packages/pika/compat.py
Normal file
261
backend/venv/Lib/site-packages/pika/compat.py
Normal file
@@ -0,0 +1,261 @@
|
||||
"""The compat module provides various Python 2 / Python 3
|
||||
compatibility functions
|
||||
|
||||
"""
|
||||
# pylint: disable=C0103
|
||||
|
||||
import abc
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import socket
|
||||
import sys as _sys
|
||||
import time
|
||||
|
||||
# True when running under a Python 2 interpreter; PY3 is its complement.
PY2 = _sys.version_info.major == 2
PY3 = not PY2
# Captures the leading run of digits in a version component (e.g. '15' in
# '15-generic'); used by to_digit() below.
RE_NUM = re.compile(r'(\d+).+')

# Platform flags derived once at import time
ON_LINUX = platform.system() == 'Linux'
ON_OSX = platform.system() == 'Darwin'
ON_WINDOWS = platform.system() == 'Windows'

# Portable Abstract Base Class
AbstractBase = abc.ABCMeta('AbstractBase', (object,), {})

if _sys.version_info[:2] < (3, 3):
    SOCKET_ERROR = socket.error
else:
    # socket.error was deprecated and replaced by OSError in python 3.3
    SOCKET_ERROR = OSError

try:
    SOL_TCP = socket.SOL_TCP
except AttributeError:
    # Some platforms expose the TCP option level only as IPPROTO_TCP
    SOL_TCP = socket.IPPROTO_TCP
|
||||
|
||||
# Version-specific shims: both branches define the same public names
# (url helpers, StringIO, basestring, str_or_bytes, xrange, unicode_type,
# dict helpers, byte, long, time_now, canonical_str, is_integer).
if PY3:
    # these were moved around for Python 3
    # pylint: disable=W0611
    from urllib.parse import (quote as url_quote, unquote as url_unquote,
                              urlencode, parse_qs as url_parse_qs, urlparse)
    from io import StringIO

    # Python 3 does not have basestring anymore; we include
    # *only* the str here as this is used for textual data.
    basestring = (str,)

    # for assertions that the data is either encoded or non-encoded text
    str_or_bytes = (str, bytes)

    # xrange is gone, replace it with range
    xrange = range

    # the unicode type is str
    unicode_type = str

    def time_now():
        """
        Python 3 supports monotonic time
        """
        return time.monotonic()

    def dictkeys(dct):
        """
        Returns a list of keys of dictionary

        dict.keys returns a view that works like .keys in Python 2
        *except* any modifications in the dictionary will be visible
        (and will cause errors if the view is being iterated over while
        it is modified).
        """

        return list(dct.keys())

    def dictvalues(dct):
        """
        Returns a list of values of a dictionary

        dict.values returns a view that works like .values in Python 2
        *except* any modifications in the dictionary will be visible
        (and will cause errors if the view is being iterated over while
        it is modified).
        """
        return list(dct.values())

    def dict_iteritems(dct):
        """
        Returns an iterator of items (key/value pairs) of a dictionary

        dict.items returns a view that works like .items in Python 2
        *except* any modifications in the dictionary will be visible
        (and will cause errors if the view is being iterated over while
        it is modified).
        """
        return dct.items()

    def dict_itervalues(dct):
        """
        :param dict dct:
        :returns: an iterator of the values of a dictionary
        :rtype: iterator
        """
        return dct.values()

    def byte(*args):
        """
        This is the same as Python 2 `chr(n)` for bytes in Python 3

        Returns a single byte `bytes` for the given int argument (we
        optimize it a bit here by passing the positional argument tuple
        directly to the bytes constructor.
        """
        return bytes(args)

    class long(int):
        """
        A marker class that signifies that the integer value should be
        serialized as `l` instead of `I`
        """

        def __str__(self):
            return str(int(self))

        def __repr__(self):
            return str(self) + 'L'

    def canonical_str(value):
        """
        Return the canonical str value for the string.
        In both Python 3 and Python 2 this is str.
        """

        return str(value)

    def is_integer(value):
        """
        Is value an integer?
        """
        return isinstance(value, int)
else:
    from urllib import (quote as url_quote, unquote as url_unquote, urlencode)  # pylint: disable=C0412,E0611
    from urlparse import (parse_qs as url_parse_qs, urlparse)  # pylint: disable=E0401
    from StringIO import StringIO  # pylint: disable=E0401

    basestring = basestring
    str_or_bytes = basestring
    xrange = xrange
    unicode_type = unicode  # pylint: disable=E0602
    dictkeys = dict.keys
    dictvalues = dict.values
    dict_iteritems = dict.iteritems  # pylint: disable=E1101
    dict_itervalues = dict.itervalues  # pylint: disable=E1101
    byte = chr
    long = long

    def time_now():
        """
        Python 2 does not support monotonic time
        """
        return time.time()

    def canonical_str(value):
        """
        Returns the canonical string value of the given string.
        In Python 2 this is the value unchanged if it is an str, otherwise
        it is the unicode value encoded as UTF-8.
        """

        try:
            return str(value)
        except UnicodeEncodeError:
            return str(value.encode('utf-8'))

    def is_integer(value):
        """
        Is value an integer?
        """
        return isinstance(value, (int, long))
|
||||
|
||||
|
||||
def as_bytes(value):
    """Return *value* as ``bytes``.

    ``bytes`` input is passed through untouched; anything else is assumed
    to be text and is UTF-8 encoded.
    """
    if isinstance(value, bytes):
        return value
    return value.encode('UTF-8')
|
||||
|
||||
|
||||
def to_digit(value):
    """Best-effort conversion of *value* (a str) to an int.

    Pure digit strings convert directly; otherwise the leading run of
    digits (per RE_NUM) is used, and 0 is returned when none is found.
    """
    if value.isdigit():
        return int(value)
    found = RE_NUM.match(value)
    return int(found.group(1)) if found else 0
|
||||
|
||||
|
||||
def get_linux_version(release_str):
    """Parse a kernel release string (e.g. ``'4.15.0-20-generic'``) into a
    tuple of up to three ints, e.g. ``(4, 15, 0)``.
    """
    base = release_str.split('-')[0]
    return tuple(to_digit(part) for part in base.split('.')[:3])
|
||||
|
||||
|
||||
# signal handling is only reliably available on POSIX platforms
HAVE_SIGNAL = os.name == 'posix'

# Before Python 3.5 (PEP 475), interrupted system calls surfaced EINTR
# to the caller instead of being retried automatically.
EINTR_IS_EXPOSED = _sys.version_info[:2] <= (3, 4)

# (major, minor, patch) of the running Linux kernel, or None elsewhere
LINUX_VERSION = None
if platform.system() == 'Linux':
    LINUX_VERSION = get_linux_version(platform.release())

# Loopback addresses used by _nonblocking_socketpair() below
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
|
||||
|
||||
|
||||
def _nonblocking_socketpair(family=socket.AF_INET,
                            socket_type=socket.SOCK_STREAM,
                            proto=0):
    """Create a connected pair of non-blocking stream sockets over loopback.

    Works like ``socket.socketpair`` but guarantees both ends are
    non-blocking; prior to Python 3.5, socketpair did not exist on Windows
    at all.

    :raises ValueError: on an unsupported family, type, or protocol
    :returns: tuple of (accepted socket, connecting socket)
    """
    if family == socket.AF_INET:
        host = _LOCALHOST
    elif family == socket.AF_INET6:
        host = _LOCALHOST_V6
    else:
        raise ValueError('Only AF_INET and AF_INET6 socket address families '
                         'are supported')
    if socket_type != socket.SOCK_STREAM:
        raise ValueError('Only SOCK_STREAM socket socket_type is supported')
    if proto != 0:
        raise ValueError('Only protocol zero is supported')

    listener = socket.socket(family, socket_type, proto)
    try:
        listener.bind((host, 0))
        listener.listen(min(socket.SOMAXCONN, 128))
        # On IPv6, getsockname() returns a 4-tuple; ignore flow_info and
        # scope_id
        bound_addr, bound_port = listener.getsockname()[:2]
        client = socket.socket(family, socket_type, proto)
        try:
            client.connect((bound_addr, bound_port))
            server, _ = listener.accept()
        except Exception:
            client.close()
            raise
    finally:
        # The listener served its single accept; close it regardless
        listener.close()

    # Make both ends non-blocking to prevent deadlocks
    # See https://github.com/pika/pika/issues/917
    client.setblocking(False)
    server.setblocking(False)

    return server, client
|
||||
2332
backend/venv/Lib/site-packages/pika/connection.py
Normal file
2332
backend/venv/Lib/site-packages/pika/connection.py
Normal file
File diff suppressed because it is too large
Load Diff
129
backend/venv/Lib/site-packages/pika/credentials.py
Normal file
129
backend/venv/Lib/site-packages/pika/credentials.py
Normal file
@@ -0,0 +1,129 @@
|
||||
"""The credentials classes are used to encapsulate all authentication
|
||||
information for the :class:`~pika.connection.ConnectionParameters` class.
|
||||
|
||||
The :class:`~pika.credentials.PlainCredentials` class returns the properly
|
||||
formatted username and password to the :class:`~pika.connection.Connection`.
|
||||
|
||||
To authenticate with Pika, create a :class:`~pika.credentials.PlainCredentials`
|
||||
object passing in the username and password and pass it as the credentials
|
||||
argument value to the :class:`~pika.connection.ConnectionParameters` object.
|
||||
|
||||
If you are using :class:`~pika.connection.URLParameters` you do not need a
|
||||
credentials object, one will automatically be created for you.
|
||||
|
||||
If you are looking to implement SSL certificate style authentication, you would
|
||||
extend the :class:`~pika.credentials.ExternalCredentials` class implementing
|
||||
the required behavior.
|
||||
|
||||
"""
|
||||
import logging
|
||||
from .compat import as_bytes
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PlainCredentials(object):
    """A credentials object for the default authentication methodology with
    RabbitMQ.

    If you do not pass in credentials to the ConnectionParameters object, it
    will create credentials for 'guest' with the password of 'guest'.

    If you pass True to erase_on_connect the credentials will not be stored
    in memory after the Connection attempt has been made.

    :param str username: The username to authenticate with
    :param str password: The password to authenticate with
    :param bool erase_on_connect: erase credentials on connect.

    """
    TYPE = 'PLAIN'

    def __init__(self, username, password, erase_on_connect=False):
        """Create a new instance of PlainCredentials

        :param str username: The username to authenticate with
        :param str password: The password to authenticate with
        :param bool erase_on_connect: erase credentials on connect.

        """
        self.username = username
        self.password = password
        self.erase_on_connect = erase_on_connect

    def __eq__(self, other):
        if not isinstance(other, PlainCredentials):
            return NotImplemented
        return (self.username == other.username and
                self.password == other.password and
                self.erase_on_connect == other.erase_on_connect)

    def __ne__(self, other):
        outcome = self.__eq__(other)
        return outcome if outcome is NotImplemented else not outcome

    def response_for(self, start):
        """Validate that this type of authentication is supported

        :param spec.Connection.Start start: Connection.Start method
        :rtype: tuple(str|None, str|None)

        """
        offered = as_bytes(start.mechanisms).split()
        if as_bytes(PlainCredentials.TYPE) not in offered:
            return None, None
        # SASL PLAIN response: NUL authcid NUL password
        token = (b'\0' + as_bytes(self.username) +
                 b'\0' + as_bytes(self.password))
        return PlainCredentials.TYPE, token

    def erase_credentials(self):
        """Called by Connection when it no longer needs the credentials"""
        if self.erase_on_connect:
            LOGGER.info("Erasing stored credential values")
            self.username = None
            self.password = None
|
||||
|
||||
|
||||
class ExternalCredentials(object):
    """The ExternalCredentials class allows the connection to use EXTERNAL
    authentication, generally with a client SSL certificate.

    """
    TYPE = 'EXTERNAL'

    def __init__(self):
        """Create a new instance of ExternalCredentials"""
        self.erase_on_connect = False

    def __eq__(self, other):
        if not isinstance(other, ExternalCredentials):
            return NotImplemented
        return self.erase_on_connect == other.erase_on_connect

    def __ne__(self, other):
        outcome = self.__eq__(other)
        return outcome if outcome is NotImplemented else not outcome

    def response_for(self, start):  # pylint: disable=R0201
        """Validate that this type of authentication is supported

        :param spec.Connection.Start start: Connection.Start method
        :rtype: tuple(str or None, str or None)

        """
        offered = as_bytes(start.mechanisms).split()
        if as_bytes(ExternalCredentials.TYPE) not in offered:
            return None, None
        # EXTERNAL carries no response payload; identity comes from TLS
        return ExternalCredentials.TYPE, b''

    def erase_credentials(self):  # pylint: disable=R0201
        """Called by Connection when it no longer needs the credentials"""
        LOGGER.debug('Not supported by this Credentials type')
|
||||
|
||||
|
||||
# Credential classes recognized by connection-parameter validation.
# Append custom credential types to this list for validation support
VALID_TYPES = [PlainCredentials, ExternalCredentials]
|
||||
328
backend/venv/Lib/site-packages/pika/data.py
Normal file
328
backend/venv/Lib/site-packages/pika/data.py
Normal file
@@ -0,0 +1,328 @@
|
||||
"""AMQP Table Encoding/Decoding"""
|
||||
import struct
|
||||
import decimal
|
||||
import calendar
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
from pika import exceptions
|
||||
from pika.compat import PY2, basestring
|
||||
from pika.compat import unicode_type, long, as_bytes
|
||||
|
||||
|
||||
def encode_short_string(pieces, value):
    """Encode a string value as short string and append it to pieces list
    returning the size of the encoded value.

    :param list pieces: Already encoded values
    :param str value: String value to encode
    :rtype: int

    """
    data = as_bytes(value)
    size = len(data)

    # 4.2.5.3
    # Short strings, stored as an 8-bit unsigned integer length followed by
    # zero or more octets of data. Short strings can carry up to 255 octets
    # of UTF-8 data, but may not contain binary zero octets.
    # ...
    # 4.2.5.5
    # The server SHOULD validate field names and upon receiving an invalid
    # field name, it SHOULD signal a connection exception with reply code 503
    # (syntax error).
    # -> validate length (avoid truncated utf-8 / corrupted data), but skip
    # null byte check.
    if size > 255:
        raise exceptions.ShortStringTooLong(data)

    pieces.append(struct.pack('B', size))
    pieces.append(data)
    return size + 1
|
||||
|
||||
|
||||
# decode_short_string has two implementations: the Python 2 variant keeps
# the raw str/bytes value, while the Python 3 variant attempts UTF-8 decode.
if PY2:

    def decode_short_string(encoded, offset):
        """Decode a short string value from ``encoded`` data at ``offset``.

        Returns a tuple of (value, new offset just past the string).
        """
        # The length prefix is a single unsigned byte
        length = struct.unpack_from('B', encoded, offset)[0]
        offset += 1
        # Purely for compatibility with original python2 code. No idea what
        # and why this does.
        value = encoded[offset:offset + length]
        try:
            value = bytes(value)
        except UnicodeEncodeError:
            pass
        offset += length
        return value, offset

else:

    def decode_short_string(encoded, offset):
        """Decode a short string value from ``encoded`` data at ``offset``.

        Returns a tuple of (value, new offset just past the string).
        """
        # The length prefix is a single unsigned byte
        length = struct.unpack_from('B', encoded, offset)[0]
        offset += 1
        value = encoded[offset:offset + length]
        # Best-effort UTF-8 decode; undecodable payloads are kept as bytes
        try:
            value = value.decode('utf8')
        except UnicodeDecodeError:
            pass
        offset += length
        return value, offset
|
||||
|
||||
|
||||
def encode_table(pieces, table):
    """Encode a dict as an AMQP table appending the encoded table to the
    pieces list passed in.

    :param list pieces: Already encoded frame pieces
    :param dict table: The dict to encode
    :rtype: int

    """
    table = table or {}
    # Reserve a slot for the 4-byte table-length prefix; it is patched in
    # once the total payload size is known.
    size_slot = len(pieces)
    pieces.append(None)  # placeholder
    payload_size = 0
    for key, value in table.items():
        payload_size += encode_short_string(pieces, key)
        payload_size += encode_value(pieces, value)

    pieces[size_slot] = struct.pack('>I', payload_size)
    return payload_size + 4
|
||||
|
||||
|
||||
def encode_value(pieces, value):  # pylint: disable=R0911
    """Encode the value passed in and append it to the pieces list returning
    the size of the encoded value.

    :param list pieces: Already encoded values
    :param any value: The value to encode
    :rtype: int

    """

    # Text -> 'S' (long string); Python 2 must first normalize unicode
    if PY2:
        if isinstance(value, basestring):
            if isinstance(value, unicode_type):
                value = value.encode('utf-8')
            pieces.append(struct.pack('>cI', b'S', len(value)))
            pieces.append(value)
            return 5 + len(value)
    else:
        # support only str on Python 3
        if isinstance(value, basestring):
            value = value.encode('utf-8')
            pieces.append(struct.pack('>cI', b'S', len(value)))
            pieces.append(value)
            return 5 + len(value)

    # Raw bytes -> 'x' (byte array)
    if isinstance(value, bytes):
        pieces.append(struct.pack('>cI', b'x', len(value)))
        pieces.append(value)
        return 5 + len(value)

    # bool -> 't'; must precede the int checks because bool is an int subclass
    if isinstance(value, bool):
        pieces.append(struct.pack('>cB', b't', int(value)))
        return 2
    # The compat `long` marker forces 64-bit encoding ('L' signed/'l' unsigned)
    if isinstance(value, long):
        if value < 0:
            pieces.append(struct.pack('>cq', b'L', value))
        else:
            pieces.append(struct.pack('>cQ', b'l', value))
        return 9
    elif isinstance(value, int):
        # Try 32-bit 'I' first; fall back to 64-bit on overflow
        try:
            packed = struct.pack('>ci', b'I', value)
            pieces.append(packed)
            return 5
        except struct.error:
            if value < 0:
                packed = struct.pack('>cq', b'L', long(value))
            else:
                packed = struct.pack('>cQ', b'l', long(value))
            pieces.append(packed)
            return 9
    elif isinstance(value, decimal.Decimal):
        # Decimal -> 'D': one unsigned scale octet plus a signed 32-bit raw
        value = value.normalize()
        if value.as_tuple().exponent < 0:
            decimals = -value.as_tuple().exponent
            raw = int(value * (decimal.Decimal(10)**decimals))
            pieces.append(struct.pack('>cBi', b'D', decimals, raw))
        else:
            # per spec, the "decimals" octet is unsigned (!)
            pieces.append(struct.pack('>cBi', b'D', 0, int(value)))
        return 6
    elif isinstance(value, datetime):
        # datetime -> 'T': POSIX timestamp from the UTC time tuple
        pieces.append(
            struct.pack('>cQ', b'T', calendar.timegm(value.utctimetuple())))
        return 9
    elif isinstance(value, dict):
        # dict -> 'F': a nested field table
        pieces.append(struct.pack('>c', b'F'))
        return 1 + encode_table(pieces, value)
    elif isinstance(value, list):
        # list -> 'A': field array; elements encoded recursively, then
        # prefixed with the total byte length
        list_pieces = []
        for val in value:
            encode_value(list_pieces, val)
        piece = b''.join(list_pieces)
        pieces.append(struct.pack('>cI', b'A', len(piece)))
        pieces.append(piece)
        return 5 + len(piece)
    elif value is None:
        # None -> 'V' (void)
        pieces.append(struct.pack('>c', b'V'))
        return 1
    else:
        raise exceptions.UnsupportedAMQPFieldException(pieces, value)
|
||||
|
||||
|
||||
def decode_table(encoded, offset):
    """Decode the AMQP table passed in from the encoded value returning the
    decoded result and the number of bytes read plus the offset.

    :param str encoded: The binary encoded data to decode
    :param int offset: The starting byte offset
    :rtype: tuple

    """
    decoded = {}
    # The table payload is prefixed with its byte length
    table_len = struct.unpack_from('>I', encoded, offset)[0]
    offset += 4
    end = offset + table_len
    while offset < end:
        key, offset = decode_short_string(encoded, offset)
        decoded[key], offset = decode_value(encoded, offset)
    return decoded, offset
|
||||
|
||||
|
||||
def decode_value(encoded, offset):  # pylint: disable=R0912,R0915
    """Decode one AMQP field-table value, returning the decoded value and the
    offset just past the bytes consumed.

    :param bytes encoded: The binary encoded data to decode
    :param int offset: The starting byte offset
    :rtype: tuple(value, int)
    :raises: pika.exceptions.InvalidFieldTypeException

    """
    # slice to get bytes in Python 3 and str in Python 2
    kind = encoded[offset:offset + 1]
    offset += 1

    # Bool
    if kind == b't':
        value = struct.unpack_from('>B', encoded, offset)[0]
        value = bool(value)
        offset += 1

    # Short-Short Int
    elif kind == b'b':
        # BUGFIX: per the AMQP 0-9-1 errata, 'b' is the *signed* octet and
        # 'B' the unsigned one; the previous code had the two struct
        # formats swapped (contradicting its own comments).
        value = struct.unpack_from('>b', encoded, offset)[0]
        offset += 1

    # Short-Short Unsigned Int
    elif kind == b'B':
        value = struct.unpack_from('>B', encoded, offset)[0]
        offset += 1

    # Short Int
    elif kind == b'U':
        value = struct.unpack_from('>h', encoded, offset)[0]
        offset += 2

    # Short Unsigned Int
    elif kind == b'u':
        value = struct.unpack_from('>H', encoded, offset)[0]
        offset += 2

    # Long Int
    elif kind == b'I':
        value = struct.unpack_from('>i', encoded, offset)[0]
        offset += 4

    # Long Unsigned Int
    elif kind == b'i':
        value = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4

    # Long-Long Int
    # Note: builtin int replaces the py2 `long` compat shim; on Python 3
    # the two are the same type.
    elif kind == b'L':
        value = int(struct.unpack_from('>q', encoded, offset)[0])
        offset += 8

    # Long-Long Unsigned Int
    elif kind == b'l':
        value = int(struct.unpack_from('>Q', encoded, offset)[0])
        offset += 8

    # Float
    elif kind == b'f':
        # BUGFIX: the decoded float was previously passed through long(),
        # silently truncating the fractional part; return the float as-is.
        value = struct.unpack_from('>f', encoded, offset)[0]
        offset += 4

    # Double
    elif kind == b'd':
        # BUGFIX: same truncation bug as the float case above.
        value = struct.unpack_from('>d', encoded, offset)[0]
        offset += 8

    # Decimal: one unsigned "decimals" octet, then a signed 32-bit raw value
    elif kind == b'D':
        decimals = struct.unpack_from('B', encoded, offset)[0]
        offset += 1
        raw = struct.unpack_from('>i', encoded, offset)[0]
        offset += 4
        value = decimal.Decimal(raw) * (decimal.Decimal(10)**-decimals)

    # https://github.com/pika/pika/issues/1205
    # Short Signed Int
    elif kind == b's':
        value = struct.unpack_from('>h', encoded, offset)[0]
        offset += 2

    # Long String
    elif kind == b'S':
        length = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4
        value = encoded[offset:offset + length]
        # Long strings are usually UTF-8 text; fall back to raw bytes when
        # the payload is not valid UTF-8.
        try:
            value = value.decode('utf8')
        except UnicodeDecodeError:
            pass
        offset += length

    # Byte array: like 'S' but never decoded to text
    elif kind == b'x':
        length = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4
        value = encoded[offset:offset + length]
        offset += length

    # Field Array
    elif kind == b'A':
        length = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4
        offset_end = offset + length
        value = []
        while offset < offset_end:
            val, offset = decode_value(encoded, offset)
            value.append(val)

    # Timestamp (seconds since epoch, unsigned 64-bit)
    elif kind == b'T':
        value = datetime.utcfromtimestamp(
            struct.unpack_from('>Q', encoded, offset)[0])
        offset += 8

    # Field Table
    elif kind == b'F':
        (value, offset) = decode_table(encoded, offset)

    # Null / Void
    elif kind == b'V':
        value = None
    else:
        raise exceptions.InvalidFieldTypeException(kind)

    return value, offset
|
||||
6
backend/venv/Lib/site-packages/pika/delivery_mode.py
Normal file
6
backend/venv/Lib/site-packages/pika/delivery_mode.py
Normal file
@@ -0,0 +1,6 @@
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class DeliveryMode(Enum):
    """Values for `BasicProperties.delivery_mode` (AMQP 0-9-1).

    Transient (1): the broker may drop the message on restart.
    Persistent (2): the broker is asked to persist the message --
    NOTE(review): effective persistence also depends on queue durability;
    confirm against broker docs.
    """
    Transient = 1
    Persistent = 2
|
||||
62
backend/venv/Lib/site-packages/pika/diagnostic_utils.py
Normal file
62
backend/venv/Lib/site-packages/pika/diagnostic_utils.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
Diagnostic utilities
|
||||
"""
|
||||
|
||||
import functools
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
def create_log_exception_decorator(logger):
    """Create a decorator that logs and re-raises any exception that escapes
    the decorated function.

    :param logging.Logger logger: logger to which the traceback is reported
    :returns: the decorator
    :rtype: callable

    Usage example::

        import logging

        from pika.diagnostic_utils import create_log_exception_decorator

        _log_exception = create_log_exception_decorator(
            logging.getLogger(__name__))

        @_log_exception
        def my_func_or_method():
            raise Exception('Oops!')

    """

    def log_exception(func):
        """The decorator returned by the parent function.

        :param func: function to be wrapped
        :returns: the function wrapper
        :rtype: callable
        """

        @functools.wraps(func)
        def log_exception_func_wrap(*args, **kwargs):
            """Invoke the wrapped function with the given args/kwargs and
            return its result. If the function exits with an exception, log
            the exception traceback and re-raise.

            :param args: positional args passed to wrapped function
            :param kwargs: keyword args passed to wrapped function
            :returns: whatever the wrapped function returns
            :rtype: object
            """
            try:
                return func(*args, **kwargs)
            # BUGFIX(lint): was a bare `except:`. `except BaseException:`
            # is behaviorally identical (still catches KeyboardInterrupt/
            # SystemExit, which is intentional since we re-raise) but makes
            # the catch-everything intent explicit.
            except BaseException:
                logger.exception(
                    'Wrapped func exited with exception. Caller\'s stack:\n%s',
                    ''.join(traceback.format_exception(*sys.exc_info())))
                raise

        return log_exception_func_wrap

    return log_exception
|
||||
355
backend/venv/Lib/site-packages/pika/exceptions.py
Normal file
355
backend/venv/Lib/site-packages/pika/exceptions.py
Normal file
@@ -0,0 +1,355 @@
|
||||
"""Pika specific exceptions"""
|
||||
# pylint: disable=C0111,E1136
|
||||
|
||||
|
||||
class AMQPError(Exception):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: An unspecified AMQP error has occurred; %s' % (
|
||||
self.__class__.__name__, self.args)
|
||||
|
||||
|
||||
class AMQPConnectionError(AMQPError):
|
||||
|
||||
def __repr__(self):
|
||||
if len(self.args) == 2:
|
||||
return '{}: ({}) {}'.format(self.__class__.__name__, self.args[0],
|
||||
self.args[1])
|
||||
else:
|
||||
return '{}: {}'.format(self.__class__.__name__, self.args)
|
||||
|
||||
|
||||
class ConnectionOpenAborted(AMQPConnectionError):
|
||||
"""Client closed connection while opening."""
|
||||
|
||||
|
||||
class StreamLostError(AMQPConnectionError):
|
||||
"""Stream (TCP) connection lost."""
|
||||
|
||||
|
||||
class IncompatibleProtocolError(AMQPConnectionError):
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'%s: The protocol returned by the server is not supported: %s' % (
|
||||
self.__class__.__name__,
|
||||
self.args,
|
||||
))
|
||||
|
||||
|
||||
class AuthenticationError(AMQPConnectionError):
|
||||
|
||||
def __repr__(self):
|
||||
return ('%s: Server and client could not negotiate use of the %s '
|
||||
'authentication mechanism' % (self.__class__.__name__,
|
||||
self.args[0]))
|
||||
|
||||
|
||||
class ProbableAuthenticationError(AMQPConnectionError):
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'%s: Client was disconnected at a connection stage indicating a '
|
||||
'probable authentication error: %s' % (
|
||||
self.__class__.__name__,
|
||||
self.args,
|
||||
))
|
||||
|
||||
|
||||
class ProbableAccessDeniedError(AMQPConnectionError):
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'%s: Client was disconnected at a connection stage indicating a '
|
||||
'probable denial of access to the specified virtual host: %s' % (
|
||||
self.__class__.__name__,
|
||||
self.args,
|
||||
))
|
||||
|
||||
|
||||
class NoFreeChannels(AMQPConnectionError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: The connection has run out of free channels' % (
|
||||
self.__class__.__name__)
|
||||
|
||||
|
||||
class ConnectionWrongStateError(AMQPConnectionError):
|
||||
"""Connection is in wrong state for the requested operation."""
|
||||
|
||||
def __repr__(self):
|
||||
if self.args:
|
||||
return super(ConnectionWrongStateError, self).__repr__()
|
||||
else:
|
||||
return ('%s: The connection is in wrong state for the requested '
|
||||
'operation.' % self.__class__.__name__)
|
||||
|
||||
|
||||
class ConnectionClosed(AMQPConnectionError):
|
||||
|
||||
def __init__(self, reply_code, reply_text):
|
||||
"""
|
||||
|
||||
:param int reply_code: reply-code that was used in user's or broker's
|
||||
`Connection.Close` method. NEW in v1.0.0
|
||||
:param str reply_text: reply-text that was used in user's or broker's
|
||||
`Connection.Close` method. Human-readable string corresponding to
|
||||
`reply_code`. NEW in v1.0.0
|
||||
"""
|
||||
super(ConnectionClosed, self).__init__(int(reply_code), str(reply_text))
|
||||
|
||||
def __repr__(self):
|
||||
return '{}: ({}) {!r}'.format(self.__class__.__name__, self.reply_code,
|
||||
self.reply_text)
|
||||
|
||||
@property
|
||||
def reply_code(self):
|
||||
""" NEW in v1.0.0
|
||||
:rtype: int
|
||||
|
||||
"""
|
||||
return self.args[0]
|
||||
|
||||
@property
|
||||
def reply_text(self):
|
||||
""" NEW in v1.0.0
|
||||
:rtype: str
|
||||
|
||||
"""
|
||||
return self.args[1]
|
||||
|
||||
|
||||
class ConnectionClosedByBroker(ConnectionClosed):
|
||||
"""Connection.Close from broker."""
|
||||
|
||||
|
||||
class ConnectionClosedByClient(ConnectionClosed):
|
||||
"""Connection was closed at request of Pika client."""
|
||||
|
||||
|
||||
class ConnectionBlockedTimeout(AMQPConnectionError):
|
||||
"""RabbitMQ-specific: timed out waiting for connection.unblocked."""
|
||||
|
||||
|
||||
class AMQPHeartbeatTimeout(AMQPConnectionError):
|
||||
"""Connection was dropped as result of heartbeat timeout."""
|
||||
|
||||
|
||||
class AMQPChannelError(AMQPError):
|
||||
|
||||
def __repr__(self):
|
||||
return '{}: {!r}'.format(self.__class__.__name__, self.args)
|
||||
|
||||
|
||||
class ChannelWrongStateError(AMQPChannelError):
|
||||
"""Channel is in wrong state for the requested operation."""
|
||||
|
||||
|
||||
class ChannelClosed(AMQPChannelError):
|
||||
"""The channel closed by client or by broker
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, reply_code, reply_text):
|
||||
"""
|
||||
|
||||
:param int reply_code: reply-code that was used in user's or broker's
|
||||
`Channel.Close` method. One of the AMQP-defined Channel Errors.
|
||||
NEW in v1.0.0
|
||||
:param str reply_text: reply-text that was used in user's or broker's
|
||||
`Channel.Close` method. Human-readable string corresponding to
|
||||
`reply_code`;
|
||||
NEW in v1.0.0
|
||||
|
||||
"""
|
||||
super(ChannelClosed, self).__init__(int(reply_code), str(reply_text))
|
||||
|
||||
def __repr__(self):
|
||||
return '{}: ({}) {!r}'.format(self.__class__.__name__, self.reply_code,
|
||||
self.reply_text)
|
||||
|
||||
@property
|
||||
def reply_code(self):
|
||||
""" NEW in v1.0.0
|
||||
:rtype: int
|
||||
|
||||
"""
|
||||
return self.args[0]
|
||||
|
||||
@property
|
||||
def reply_text(self):
|
||||
""" NEW in v1.0.0
|
||||
:rtype: str
|
||||
|
||||
"""
|
||||
return self.args[1]
|
||||
|
||||
|
||||
class ChannelClosedByBroker(ChannelClosed):
|
||||
"""`Channel.Close` from broker; may be passed as reason to channel's
|
||||
on-closed callback of non-blocking connection adapters or raised by
|
||||
`BlockingConnection`.
|
||||
|
||||
NEW in v1.0.0
|
||||
"""
|
||||
|
||||
|
||||
class ChannelClosedByClient(ChannelClosed):
|
||||
"""Channel closed by client upon receipt of `Channel.CloseOk`; may be passed
|
||||
as reason to channel's on-closed callback of non-blocking connection
|
||||
adapters, but not raised by `BlockingConnection`.
|
||||
|
||||
NEW in v1.0.0
|
||||
"""
|
||||
|
||||
|
||||
class DuplicateConsumerTag(AMQPChannelError):
|
||||
|
||||
def __repr__(self):
|
||||
return ('%s: The consumer tag specified already exists for this '
|
||||
'channel: %s' % (self.__class__.__name__, self.args[0]))
|
||||
|
||||
|
||||
class ConsumerCancelled(AMQPChannelError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: Server cancelled consumer' % self.__class__.__name__
|
||||
|
||||
|
||||
class UnroutableError(AMQPChannelError):
|
||||
"""Exception containing one or more unroutable messages returned by broker
|
||||
via Basic.Return.
|
||||
|
||||
Used by BlockingChannel.
|
||||
|
||||
In publisher-acknowledgements mode, this is raised upon receipt of Basic.Ack
|
||||
from broker; in the event of Basic.Nack from broker, `NackError` is raised
|
||||
instead
|
||||
"""
|
||||
|
||||
def __init__(self, messages):
|
||||
"""
|
||||
:param sequence(blocking_connection.ReturnedMessage) messages: Sequence
|
||||
of returned unroutable messages
|
||||
"""
|
||||
super(UnroutableError, self).__init__(
|
||||
"%s unroutable message(s) returned" % (len(messages)))
|
||||
|
||||
self.messages = messages
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: %i unroutable messages returned by broker' % (
|
||||
self.__class__.__name__, len(self.messages))
|
||||
|
||||
|
||||
class NackError(AMQPChannelError):
|
||||
"""This exception is raised when a message published in
|
||||
publisher-acknowledgements mode is Nack'ed by the broker.
|
||||
|
||||
Used by BlockingChannel.
|
||||
"""
|
||||
|
||||
def __init__(self, messages):
|
||||
"""
|
||||
:param sequence(blocking_connection.ReturnedMessage) messages: Sequence
|
||||
of returned unroutable messages
|
||||
"""
|
||||
super(NackError,
|
||||
self).__init__("%s message(s) NACKed" % (len(messages)))
|
||||
|
||||
self.messages = messages
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: %i unroutable messages returned by broker' % (
|
||||
self.__class__.__name__, len(self.messages))
|
||||
|
||||
|
||||
class InvalidChannelNumber(AMQPError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: An invalid channel number has been specified: %s' % (
|
||||
self.__class__.__name__, self.args[0])
|
||||
|
||||
|
||||
class ProtocolSyntaxError(AMQPError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: An unspecified protocol syntax error occurred' % (
|
||||
self.__class__.__name__)
|
||||
|
||||
|
||||
class UnexpectedFrameError(ProtocolSyntaxError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: Received a frame out of sequence: %r' % (
|
||||
self.__class__.__name__, self.args[0])
|
||||
|
||||
|
||||
class ProtocolVersionMismatch(ProtocolSyntaxError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: Protocol versions did not match: %r vs %r' % (
|
||||
self.__class__.__name__, self.args[0], self.args[1])
|
||||
|
||||
|
||||
class BodyTooLongError(ProtocolSyntaxError):
|
||||
|
||||
def __repr__(self):
|
||||
return ('%s: Received too many bytes for a message delivery: '
|
||||
'Received %i, expected %i' % (self.__class__.__name__,
|
||||
self.args[0], self.args[1]))
|
||||
|
||||
|
||||
class InvalidFrameError(ProtocolSyntaxError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: Invalid frame received: %r' % (self.__class__.__name__,
|
||||
self.args[0])
|
||||
|
||||
|
||||
class InvalidFieldTypeException(ProtocolSyntaxError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: Unsupported field kind %s' % (self.__class__.__name__,
|
||||
self.args[0])
|
||||
|
||||
|
||||
class UnsupportedAMQPFieldException(ProtocolSyntaxError):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: Unsupported field kind %s' % (self.__class__.__name__,
|
||||
type(self.args[1]))
|
||||
|
||||
|
||||
class MethodNotImplemented(AMQPError):
|
||||
pass
|
||||
|
||||
|
||||
class ChannelError(Exception):
|
||||
|
||||
def __repr__(self):
|
||||
return '%s: An unspecified error occurred with the Channel' % (
|
||||
self.__class__.__name__)
|
||||
|
||||
|
||||
class ReentrancyError(Exception):
|
||||
"""The requested operation would result in unsupported recursion or
|
||||
reentrancy.
|
||||
|
||||
Used by BlockingConnection/BlockingChannel
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class ShortStringTooLong(AMQPError):
|
||||
|
||||
def __repr__(self):
|
||||
return ('%s: AMQP Short String can contain up to 255 bytes: '
|
||||
'%.300s' % (self.__class__.__name__, self.args[0]))
|
||||
|
||||
|
||||
class DuplicateGetOkCallback(ChannelError):
|
||||
|
||||
def __repr__(self):
|
||||
return ('%s: basic_get can only be called again after the callback for '
|
||||
'the previous basic_get is executed' % self.__class__.__name__)
|
||||
18
backend/venv/Lib/site-packages/pika/exchange_type.py
Normal file
18
backend/venv/Lib/site-packages/pika/exchange_type.py
Normal file
@@ -0,0 +1,18 @@
|
||||
try:
    from enum import StrEnum  # available from Python 3.11


    class ExchangeType(StrEnum):
        """Names of the built-in AMQP exchange types (str-valued enum)."""
        direct = 'direct'
        fanout = 'fanout'
        headers = 'headers'
        topic = 'topic'
except ImportError:
    # Python < 3.11: approximate StrEnum by mixing str into Enum so that
    # members can be used where plain strings are expected.
    from enum import Enum


    class ExchangeType(str, Enum):
        """Names of the built-in AMQP exchange types (str-valued enum)."""
        direct = 'direct'
        fanout = 'fanout'
        headers = 'headers'
        topic = 'topic'
|
||||
264
backend/venv/Lib/site-packages/pika/frame.py
Normal file
264
backend/venv/Lib/site-packages/pika/frame.py
Normal file
@@ -0,0 +1,264 @@
|
||||
"""Frame objects that do the frame demarshaling and marshaling."""
|
||||
import logging
|
||||
import struct
|
||||
|
||||
from pika import amqp_object
|
||||
from pika import exceptions
|
||||
from pika import spec
|
||||
from pika.compat import byte
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Frame(amqp_object.AMQPObject):
    """Common base for all AMQP frame types.

    Holds the frame type and channel number and provides ``_marshal``,
    which subclasses use to wrap their payload in the wire-level framing.

    """
    NAME = 'Frame'

    def __init__(self, frame_type, channel_number):
        """Create a new instance of a frame.

        :param int frame_type: The frame type
        :param int channel_number: The channel number for the frame

        """
        self.frame_type = frame_type
        self.channel_number = channel_number

    def _marshal(self, pieces):
        """Wrap the joined payload pieces in the AMQP frame envelope:
        type/channel/size header, payload, then the frame-end octet.

        :rtype: bytes

        """
        payload = b''.join(pieces)
        header = struct.pack('>BHI', self.frame_type, self.channel_number,
                             len(payload))
        return header + payload + byte(spec.FRAME_END)

    def marshal(self):
        """Subclasses must provide their own marshaling.

        :raises NotImplementedError

        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class Method(Frame):
    """Frame wrapping an AMQP Class.Method payload."""
    NAME = 'METHOD'

    def __init__(self, channel_number, method):
        """Create a new method frame.

        :param int channel_number: The channel number for the frame
        :param pika.Spec.Class.Method method: The AMQP Class.Method

        """
        Frame.__init__(self, spec.FRAME_METHOD, channel_number)
        self.method = method

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: bytes

        """
        # Method index first, then the encoded method arguments
        pieces = [struct.pack('>I', self.method.INDEX)]
        pieces.extend(self.method.encode())
        return self._marshal(pieces)
|
||||
|
||||
|
||||
class Header(Frame):
    """Frame carrying an AMQP content header (body size plus properties)."""
    NAME = 'Header'

    def __init__(self, channel_number, body_size, props):
        """Create a new content-header frame.

        :param int channel_number: The channel number for the frame
        :param int body_size: The number of bytes for the body
        :param pika.spec.BasicProperties props: Basic.Properties object

        """
        Frame.__init__(self, spec.FRAME_HEADER, channel_number)
        self.body_size = body_size
        self.properties = props

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: bytes

        """
        # Class index, two padding bytes, body size -- then the encoded
        # property list.
        prefix = struct.pack('>HxxQ', self.properties.INDEX, self.body_size)
        return self._marshal([prefix] + self.properties.encode())
|
||||
|
||||
|
||||
class Body(Frame):
    """Frame carrying a fragment of message content."""
    NAME = 'Body'

    def __init__(self, channel_number, fragment):
        """Create a new body frame.

        :param int channel_number: channel the content belongs to
        :param fragment: body bytes carried by this frame

        """
        Frame.__init__(self, spec.FRAME_BODY, channel_number)
        self.fragment = fragment

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: bytes

        """
        return self._marshal([self.fragment])
|
||||
|
||||
|
||||
class Heartbeat(Frame):
    """Heartbeat frame; always sent on channel 0 with an empty payload."""
    NAME = 'Heartbeat'

    def __init__(self):
        """Create a new instance of the Heartbeat frame."""
        Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: bytes

        """
        return self._marshal([])
|
||||
|
||||
|
||||
class ProtocolHeader(amqp_object.AMQPObject):
    """The AMQP protocol handshake header sent as the first bytes of a
    connection.

    """
    NAME = 'ProtocolHeader'

    def __init__(self, major=None, minor=None, revision=None):
        """Construct a Protocol Header frame object for the specified AMQP
        version; any component left unset (or zero) falls back to the
        corresponding entry of spec.PROTOCOL_VERSION.

        :param int major: Major version number
        :param int minor: Minor version number
        :param int revision: Revision

        """
        self.frame_type = -1
        self.major = major if major else spec.PROTOCOL_VERSION[0]
        self.minor = minor if minor else spec.PROTOCOL_VERSION[1]
        self.revision = revision if revision else spec.PROTOCOL_VERSION[2]

    def marshal(self):
        """Return the wire bytes: b'AMQP', a zero octet, then the three
        version octets.

        :rtype: bytes

        """
        version = struct.pack('BBBB', 0, self.major, self.minor,
                              self.revision)
        return b'AMQP' + version
|
||||
|
||||
|
||||
def decode_frame(data_in):  # pylint: disable=R0911,R0914
    """Receive raw socket data and attempt to turn it into a frame.

    Returns the number of bytes consumed and the decoded frame; ``(0, None)``
    means more data is required before a frame can be decoded.

    :param bytes data_in: The raw data stream
    :rtype: tuple(bytes consumed, frame)
    :raises: pika.exceptions.InvalidFrameError

    """
    # Look to see if it's a protocol header frame
    try:
        if data_in[0:4] == b'AMQP':
            major, minor, revision = struct.unpack_from('BBB', data_in, 5)
            return 8, ProtocolHeader(major, minor, revision)
    except (IndexError, struct.error):
        return 0, None

    # Get the Frame Type, Channel Number and Frame Size
    try:
        (frame_type, channel_number, frame_size) = struct.unpack(
            '>BHL', data_in[0:7])
    except struct.error:
        return 0, None

    # Get the frame data
    frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE

    # We don't have all of the frame yet
    if frame_end > len(data_in):
        return 0, None

    # The Frame termination chr is wrong
    if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END):
        raise exceptions.InvalidFrameError("Invalid FRAME_END marker")

    # Get the raw frame data
    frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1]

    if frame_type == spec.FRAME_METHOD:

        # Get the Method ID from the frame data
        method_id = struct.unpack_from('>I', frame_data)[0]

        # Get a Method object for this method_id
        method = spec.methods[method_id]()

        # Decode the content
        method.decode(frame_data, 4)

        # Return the amount of data consumed and the Method object
        return frame_end, Method(channel_number, method)

    elif frame_type == spec.FRAME_HEADER:

        # Class id, weight (unused here), and declared body size
        class_id, _weight, body_size = struct.unpack_from('>HHQ', frame_data)

        # Get the Properties type
        properties = spec.props[class_id]()

        # Decode the properties in place; the returned byte count was
        # previously bound to an unused local, so it is discarded here.
        properties.decode(frame_data[12:])

        # Return a Header frame
        return frame_end, Header(channel_number, body_size, properties)

    elif frame_type == spec.FRAME_BODY:

        # Return the amount of data consumed and the Body frame w/ data
        return frame_end, Body(channel_number, frame_data)

    elif frame_type == spec.FRAME_HEARTBEAT:

        # Return the amount of data and a Heartbeat frame
        return frame_end, Heartbeat()

    raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type)
|
||||
209
backend/venv/Lib/site-packages/pika/heartbeat.py
Normal file
209
backend/venv/Lib/site-packages/pika/heartbeat.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""Handle AMQP Heartbeats"""
|
||||
import logging
|
||||
|
||||
import pika.exceptions
|
||||
from pika import frame
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HeartbeatChecker(object):
|
||||
"""Sends heartbeats to the broker. The provided timeout is used to
|
||||
determine if the connection is stale - no received heartbeats or
|
||||
other activity will close the connection. See the parameter list for more
|
||||
details.
|
||||
|
||||
"""
|
||||
_STALE_CONNECTION = "No activity or too many missed heartbeats in the last %i seconds"
|
||||
|
||||
def __init__(self, connection, timeout):
|
||||
"""Create an object that will check for activity on the provided
|
||||
connection as well as receive heartbeat frames from the broker. The
|
||||
timeout parameter defines a window within which this activity must
|
||||
happen. If not, the connection is considered dead and closed.
|
||||
|
||||
The value passed for timeout is also used to calculate an interval
|
||||
at which a heartbeat frame is sent to the broker. The interval is
|
||||
equal to the timeout value divided by two.
|
||||
|
||||
:param pika.connection.Connection: Connection object
|
||||
:param int timeout: Connection idle timeout. If no activity occurs on the
|
||||
connection nor heartbeat frames received during the
|
||||
timeout window the connection will be closed. The
|
||||
interval used to send heartbeats is calculated from
|
||||
this value by dividing it by two.
|
||||
|
||||
"""
|
||||
if timeout < 1:
|
||||
raise ValueError('timeout must >= 0, but got %r' % (timeout,))
|
||||
|
||||
self._connection = connection
|
||||
|
||||
# Note: see the following documents:
|
||||
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
|
||||
# https://github.com/pika/pika/pull/1072
|
||||
# https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion
|
||||
# There is a certain amount of confusion around how client developers
|
||||
# interpret the spec. The spec talks about 2 missed heartbeats as a
|
||||
# *timeout*, plus that any activity on the connection counts for a
|
||||
# heartbeat. This is to avoid edge cases and not to depend on network
|
||||
# latency.
|
||||
self._timeout = timeout
|
||||
|
||||
self._send_interval = float(timeout) / 2
|
||||
|
||||
# Note: Pika will calculate the heartbeat / connectivity check interval
|
||||
# by adding 5 seconds to the negotiated timeout to leave a bit of room
|
||||
# for broker heartbeats that may be right at the edge of the timeout
|
||||
# window. This is different behavior from the RabbitMQ Java client and
|
||||
# the spec that suggests a check interval equivalent to two times the
|
||||
# heartbeat timeout value. But, one advantage of adding a small amount
|
||||
# is that bad connections will be detected faster.
|
||||
# https://github.com/pika/pika/pull/1072#issuecomment-397850795
|
||||
# https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
|
||||
# https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
|
||||
self._check_interval = timeout + 5
|
||||
|
||||
LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
|
||||
self._timeout, self._send_interval, self._check_interval)
|
||||
|
||||
# Initialize counters
|
||||
self._bytes_received = 0
|
||||
self._bytes_sent = 0
|
||||
self._heartbeat_frames_received = 0
|
||||
self._heartbeat_frames_sent = 0
|
||||
self._idle_byte_intervals = 0
|
||||
|
||||
self._send_timer = None
|
||||
self._check_timer = None
|
||||
self._start_send_timer()
|
||||
self._start_check_timer()
|
||||
|
||||
@property
|
||||
def bytes_received_on_connection(self):
|
||||
"""Return the number of bytes received by the connection bytes object.
|
||||
|
||||
:rtype int
|
||||
|
||||
"""
|
||||
return self._connection.bytes_received
|
||||
|
||||
@property
|
||||
def connection_is_idle(self):
|
||||
"""Returns true if the byte count hasn't changed in enough intervals
|
||||
to trip the max idle threshold.
|
||||
|
||||
"""
|
||||
return self._idle_byte_intervals > 0
|
||||
|
||||
def received(self):
|
||||
"""Called when a heartbeat is received"""
|
||||
LOGGER.debug('Received heartbeat frame')
|
||||
self._heartbeat_frames_received += 1
|
||||
|
||||
def _send_heartbeat(self):
|
||||
"""Invoked by a timer to send a heartbeat when we need to.
|
||||
|
||||
"""
|
||||
LOGGER.debug('Sending heartbeat frame')
|
||||
self._send_heartbeat_frame()
|
||||
self._start_send_timer()
|
||||
|
||||
def _check_heartbeat(self):
|
||||
"""Invoked by a timer to check for broker heartbeats. Checks to see
|
||||
if we've missed any heartbeats and disconnect our connection if it's
|
||||
been idle too long.
|
||||
|
||||
"""
|
||||
if self._has_received_data:
|
||||
self._idle_byte_intervals = 0
|
||||
else:
|
||||
# Connection has not received any data, increment the counter
|
||||
self._idle_byte_intervals += 1
|
||||
|
||||
LOGGER.debug(
|
||||
'Received %i heartbeat frames, sent %i, '
|
||||
'idle intervals %i', self._heartbeat_frames_received,
|
||||
self._heartbeat_frames_sent, self._idle_byte_intervals)
|
||||
|
||||
if self.connection_is_idle:
|
||||
self._close_connection()
|
||||
return
|
||||
|
||||
self._start_check_timer()
|
||||
|
||||
def stop(self):
|
||||
"""Stop the heartbeat checker"""
|
||||
if self._send_timer:
|
||||
LOGGER.debug('Removing timer for next heartbeat send interval')
|
||||
self._connection._adapter_remove_timeout(self._send_timer) # pylint: disable=W0212
|
||||
self._send_timer = None
|
||||
if self._check_timer:
|
||||
LOGGER.debug('Removing timer for next heartbeat check interval')
|
||||
self._connection._adapter_remove_timeout(self._check_timer) # pylint: disable=W0212
|
||||
self._check_timer = None
|
||||
|
||||
def _close_connection(self):
    """Tear down the stream with an AMQP heartbeat-timeout error."""
    LOGGER.info('Connection is idle, %i stale byte intervals',
                self._idle_byte_intervals)
    reason = HeartbeatChecker._STALE_CONNECTION % self._timeout

    # Abort the stream outright: there is no point attempting a graceful
    # AMQP close, since missed heartbeats imply the transport is dead.
    error = pika.exceptions.AMQPHeartbeatTimeout(reason)
    self._connection._terminate_stream(error)  # pylint: disable=W0212
|
||||
|
||||
@property
def _has_received_data(self):
    """Whether any bytes arrived since the counters were last sampled.

    Compares the snapshot taken by ``_update_counters`` against the
    connection's live received-byte count.

    :rtype: bool
    """
    return self.bytes_received_on_connection != self._bytes_received
|
||||
|
||||
@staticmethod
def _new_heartbeat_frame():
    """Construct and return a fresh heartbeat frame.

    :rtype: pika.frame.Heartbeat
    """
    return frame.Heartbeat()
|
||||
|
||||
def _send_heartbeat_frame(self):
    """Write one heartbeat frame to the connection and count it."""
    LOGGER.debug('Sending heartbeat frame')
    outbound = self._new_heartbeat_frame()
    self._connection._send_frame(outbound)  # pylint: disable=W0212
    self._heartbeat_frames_sent += 1
|
||||
|
||||
def _start_send_timer(self):
    """Arm the timer that fires the next heartbeat send."""
    schedule = self._connection._adapter_call_later  # pylint: disable=W0212
    self._send_timer = schedule(self._send_interval, self._send_heartbeat)
|
||||
|
||||
def _start_check_timer(self):
    """Arm the timer for the next heartbeat check.

    Byte counters are sampled now, at the start of the timeout window,
    so the check fired at the end of the window can compare them against
    the connection's live counts.
    """
    self._update_counters()

    schedule = self._connection._adapter_call_later  # pylint: disable=W0212
    self._check_timer = schedule(self._check_interval, self._check_heartbeat)
|
||||
|
||||
def _update_counters(self):
    """Snapshot the connection's sent/received byte counts.

    The snapshots are later compared against the live counts to decide
    whether any traffic occurred during a check interval.
    """
    conn = self._connection
    self._bytes_sent = conn.bytes_sent
    self._bytes_received = conn.bytes_received
|
||||
2381
backend/venv/Lib/site-packages/pika/spec.py
Normal file
2381
backend/venv/Lib/site-packages/pika/spec.py
Normal file
File diff suppressed because it is too large
Load Diff
47
backend/venv/Lib/site-packages/pika/tcp_socket_opts.py
Normal file
47
backend/venv/Lib/site-packages/pika/tcp_socket_opts.py
Normal file
@@ -0,0 +1,47 @@
|
||||
# pylint: disable=C0111
|
||||
|
||||
import logging
|
||||
import socket
|
||||
import pika.compat
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# Map of TCP option names to the platform's socket-level constants.
# Only options discoverable on this platform are registered; set_sock_opts()
# looks requested options up here and warns about anything unsupported.
_SUPPORTED_TCP_OPTIONS = {}

try:
    # Preferred path: the Python build exposes the constant directly.
    _SUPPORTED_TCP_OPTIONS['TCP_USER_TIMEOUT'] = socket.TCP_USER_TIMEOUT
except AttributeError:
    # Fallback for Linux kernels that support the option even though this
    # Python build's socket module does not expose the constant.
    if pika.compat.LINUX_VERSION and pika.compat.LINUX_VERSION >= (2, 6, 37):
        # this is not the timeout value, but the number corresponding
        # to the constant in tcp.h
        # https://github.com/torvalds/linux/blob/master/include/uapi/linux/tcp.h#
        # #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
        _SUPPORTED_TCP_OPTIONS['TCP_USER_TIMEOUT'] = 18

try:
    # TCP keepalive tuning knobs; absent on platforms that do not expose
    # them, in which case they are simply left unregistered.
    _SUPPORTED_TCP_OPTIONS['TCP_KEEPIDLE'] = socket.TCP_KEEPIDLE
    _SUPPORTED_TCP_OPTIONS['TCP_KEEPCNT'] = socket.TCP_KEEPCNT
    _SUPPORTED_TCP_OPTIONS['TCP_KEEPINTVL'] = socket.TCP_KEEPINTVL
except AttributeError:
    pass
|
||||
|
||||
|
||||
def socket_requires_keepalive(tcp_options):
    """Return True if any TCP keepalive tuning option was requested.

    SO_KEEPALIVE must be enabled on the socket before the per-probe
    options (TCP_KEEPIDLE / TCP_KEEPCNT / TCP_KEEPINTVL) take effect.

    :param dict tcp_options: requested TCP option names mapped to values
    :rtype: bool
    """
    keepalive_opts = ('TCP_KEEPIDLE', 'TCP_KEEPCNT', 'TCP_KEEPINTVL')
    return any(name in tcp_options for name in keepalive_opts)
|
||||
|
||||
|
||||
def set_sock_opts(tcp_options, sock):
    """Apply the requested TCP options to *sock*.

    Enables SO_KEEPALIVE first when any keepalive tuning option is
    present; unsupported option names are logged and skipped.

    :param dict tcp_options: option names mapped to values (may be falsy)
    :param socket.socket sock: the socket to configure
    """
    if not tcp_options:
        return

    if socket_requires_keepalive(tcp_options):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    for name, value in tcp_options.items():
        opt = _SUPPORTED_TCP_OPTIONS.get(name)
        if opt:
            sock.setsockopt(pika.compat.SOL_TCP, opt, value)
        else:
            LOGGER.warning('Unsupported TCP option %s:%s', name, value)
|
||||
63
backend/venv/Lib/site-packages/pika/validators.py
Normal file
63
backend/venv/Lib/site-packages/pika/validators.py
Normal file
@@ -0,0 +1,63 @@
|
||||
"""
|
||||
Common validation functions
|
||||
"""
|
||||
|
||||
from pika.compat import basestring
|
||||
|
||||
|
||||
def require_string(value, value_name):
    """Ensure *value* is a text string.

    :param value: the object to validate
    :param str value_name: name used in the error message
    :raises TypeError: if *value* is not a str (or py2 unicode) string
    """
    if isinstance(value, basestring):
        return
    raise TypeError('%s must be a str or unicode str, but got %r' % (
        value_name,
        value,
    ))
|
||||
|
||||
|
||||
def require_callback(callback, callback_name='callback'):
    """Ensure *callback* is callable (and therefore not None).

    :param callback: the object to validate
    :param str callback_name: name used in the error message
    :raises TypeError: if *callback* is not callable
    """
    if callable(callback):
        return
    raise TypeError('callback %s must be callable, but got %r' % (
        callback_name,
        callback,
    ))
|
||||
|
||||
|
||||
def rpc_completion_callback(callback):
    """Validate an optional RPC completion callback.

    ``None`` means the caller expects no broker response (``nowait=True``);
    anything else must be callable.

    :returns: boolean indicating nowait
    :rtype: bool
    :raises TypeError: if *callback* is neither None nor callable
    """
    if callback is None:
        # No callback: no response will be awaited, i.e. nowait=True
        return True
    if not callable(callback):
        raise TypeError('completion callback must be callable if not None')
    # A response will be awaited, i.e. nowait=False
    return False
|
||||
|
||||
|
||||
def zero_or_greater(name, value):
    """Ensure *value* is non-negative, citing *name* in any error.

    :param str name: label used in the error message
    :param value: a numeric value (anything ``int()`` accepts)
    :raises ValueError: if ``int(value)`` is negative
    """
    if int(value) >= 0:
        return
    raise ValueError('{} must be >= 0, but got {}'.format(name, value))
|
||||
Reference in New Issue
Block a user