diff --git a/.coveragerc-py2 b/.coveragerc-py2 new file mode 100644 index 0000000..1bf7d6d --- /dev/null +++ b/.coveragerc-py2 @@ -0,0 +1,8 @@ +[report] +show_missing = True + +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + pragma: python=3\.5 diff --git a/.coveragerc-py35 b/.coveragerc-py35 new file mode 100644 index 0000000..013dd20 --- /dev/null +++ b/.coveragerc-py35 @@ -0,0 +1,2 @@ +[report] +show_missing = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..15a54e2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +__pycache__/ +*.pyc +.coverage +virtualenv +MANIFEST +build/ +dist/ +.cache/ +*.egg-info +poetry.lock +.vscode diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..f3f043a --- /dev/null +++ b/.travis.yml @@ -0,0 +1,26 @@ +language: python +python: + - "2.7" + - "3.5" + - "3.6" + - "3.7" + - "3.8" +matrix: + include: + - python: "3.5" + env: PYTHONASYNCIODEBUG=x + - python: "3.6" + env: PYTHONASYNCIODEBUG=x + - python: "3.7" + env: PYTHONASYNCIODEBUG=x + - python: "3.8" + env: PYTHONASYNCIODEBUG=x + +before_install: + - pip install poetry more-itertools +install: + - poetry install +script: poetry run make check +after_success: coveralls +notifications: + email: false diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..4432a3b --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,148 @@ +# Change Log + +## [v1.0.3] - 2014-06-05 +### Changed +- Make logging unicode safe +- Log on_predicate backoff as INFO rather than ERROR + +## [v1.0.4] - 2014-08-12 +### Added +- Python 2.6 support from @Bonko +- Python 3.0 support from @robyoung +- Run tests in Travis from @robyoung + +## [v1.0.5] - 2015-02-03 +### Changed +- Add a default interval of 1 second for the constant generator +- Improve on_predicate stop condition avoiding extra sleep + +## [v1.0.6] - 2015-02-10 +### Added +- Coveralls.io integration from 
@singingwolfboy + +### Changed +- Fix logging bug for function calls with tuple params + +## [v1.0.7] - 2015-02-10 + +### Changed +- Fix string formatting for python 2.6 + +## [v1.1.0] - 2015-12-08 +### Added +- Event handling for success, backoff, and giveup +- Change log + +### Changed +- Docs and test for multi exception invocations +- Update dev environment test dependencies + +## [v1.2.0] - 2016-05-26 +### Added +- 'Full jitter' algorithm from @jonascheng + +### Changed +- Jitter function now accepts raw value and returns jittered value +- Change README to reST for the benefit of pypi :( +- Remove docstring doc generation and make README canonical + +## [v1.2.1] - 2016-05-27 +### Changed +- Documentation fixes + +## [v1.3.0] - 2016-08-08 +### Added +- Support runtime configuration with optional callable kwargs +- Add giveup kwarg for exception inspection + +### Changed +- Documentation fixes + +## [v1.3.1] - 2016-08-08 +### Changed +- Include README.rst in source distribution (fixes package) + +## [v1.3.2] - 2016-11-18 +### Changed +- Don't log retried args and kwargs by default +- README.rst syntax highlighting from @dethi + +## [v1.4.0] - 2017-02-05 +### Added +- Async support via `asyncio` coroutines (Python 3.4) from @rutsky + +### Changed +- Refactor `backoff` module into package with identical API + +## [v1.4.1] - 2017-04-21 +### Added +- Expose __version__ at package root + +### Changed +- Fix checking for running sync version in coroutine in case when event + loop is not set from @rutsky + +## [v1.4.2] - 2017-04-25 +### Changed + +- Use documented logger name https://github.com/litl/backoff/pull/32 + from @pquentin + +## [v1.4.3] - 2017-05-22 +### Changed + +- Add license to source distribution + +## [v1.5.0] - 2018-04-11 +### Changed + +- Add max_time keyword argument + +## [v1.6.0] - 2018-07-14 +### Changed + +- Change default log level from ERROR to INFO +- Log retries on exception as INFO + +## [v1.7.0] - 2018-11-23 +### Changed + +- Support Python 
3.7 +- Drop support for async in Python 3.4 +- Drop support for Python 2.6 +- Update development dependencies +- Use poetry for dependencies and packaging + +## [v1.8.0] - 2018-12-20 +### Changed + +- Give up on StopIteration raised in wait generators +- Iterable intervals for constant wait_gen for predefined wait sequences +- Nullary jitter signature deprecation warning +- Custom loggers + +## [v1.8.1] - 2019-10-11 +### Changed + +- Use arguments in log messages rather than fully formatting log + https://github.com/litl/backoff/pull/82 from @lbernick + +## [v1.9.0] 2019-11-16 +### Changed + +- Support python 3.8 + +## [v1.9.1] 2019-11-18 +### Changed + +- Include tests and changelog in distribution + +## [v1.9.2] 2019-11-19 +### Changed + +- Don't include tests and changelog in distribution + +## [v1.10.0] 2019-12-7 +### Changed + +- Allow sync decorator call from async function diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9dc3cea --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 litl, LLC. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..9d5d250 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include LICENSE +include README.rst diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..9daaf0b --- /dev/null +++ b/Makefile @@ -0,0 +1,35 @@ +PY_VERSION := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1))) +PY_MAJOR := $(word 1,${PY_VERSION}) +PY_MINOR := $(word 2,${PY_VERSION}) +PY_GTE_35 = $(shell echo $(PY_MAJOR).$(PY_MINOR)\>=3.5 | bc) + + +.PHONY: all flake8 clean test check + +all: + @echo 'flake8 check flake8 compliance' + @echo 'clean cleanup the source tree' + @echo 'test run the unit tests' + @echo 'check make sure you are ready to commit' + +flake8: +ifeq ($(PY_GTE_35),1) + @flake8 backoff tests +else + @flake8 --exclude tests/python35,backoff/_async.py backoff tests +endif + +clean: + @find . -name "*.pyc" -delete + @find . -name "__pycache__" -delete + @rm -rf build dist .coverage MANIFEST + +test: clean +ifeq ($(PY_GTE_35),1) + @PYTHONPATH=. py.test --cov-config .coveragerc-py35 --cov backoff tests +else + @PYTHONPATH=. py.test --cov-config .coveragerc-py2 --cov backoff tests/test_*.py +endif + +check: flake8 test + @coverage report | grep 100% >/dev/null || { echo 'Unit tests coverage is incomplete.'; exit 1; } diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..30ddc32 --- /dev/null +++ b/README.rst @@ -0,0 +1,336 @@ +backoff +======= + +.. image:: https://travis-ci.org/litl/backoff.svg?branch=master + :target: https://travis-ci.org/litl/backoff?branch=master +.. image:: https://coveralls.io/repos/litl/backoff/badge.svg?branch=master + :target: https://coveralls.io/r/litl/backoff?branch=master +.. 
image:: https://img.shields.io/pypi/v/backoff.svg + :target: https://pypi.python.org/pypi/backoff + +**Function decoration for backoff and retry** + +This module provides function decorators which can be used to wrap a +function such that it will be retried until some condition is met. It +is meant to be of use when accessing unreliable resources with the +potential for intermittent failures i.e. network resources and external +APIs. Somewhat more generally, it may also be of use for dynamically +polling resources for externally generated content. + +Decorators support both regular functions for synchronous code and +`asyncio `_'s coroutines +for asynchronous code. + +Examples +======== + +Since Kenneth Reitz's `requests `_ module +has become a defacto standard for synchronous HTTP clients in Python, +networking examples below are written using it, but it is in no way required +by the backoff module. + +@backoff.on_exception +--------------------- + +The ``on_exception`` decorator is used to retry when a specified exception +is raised. Here's an example using exponential backoff when any +``requests`` exception is raised: + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException) + def get_url(url): + return requests.get(url) + +The decorator will also accept a tuple of exceptions for cases where +the same backoff behavior is desired for more than one exception type: + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + (requests.exceptions.Timeout, + requests.exceptions.ConnectionError)) + def get_url(url): + return requests.get(url) + +**Give Up Conditions** + +Optional keyword arguments can specify conditions under which to give +up. + +The keyword argument ``max_time`` specifies the maximum amount +of total time in seconds that can elapse before giving up. + +.. 
code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + max_time=60) + def get_url(url): + return requests.get(url) + + +Keyword argument ``max_tries`` specifies the maximum number of calls +to make to the target function before giving up. + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + max_tries=8, + jitter=None) + def get_url(url): + return requests.get(url) + + +In some cases the raised exception instance itself may need to be +inspected in order to determine if it is a retryable condition. The +``giveup`` keyword arg can be used to specify a function which accepts +the exception and returns a truthy value if the exception should not +be retried: + +.. code-block:: python + + def fatal_code(e): + return 400 <= e.response.status_code < 500 + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + max_time=300, + giveup=fatal_code) + def get_url(url): + return requests.get(url) + +When a give up event occurs, the exception in question is reraised +and so code calling an `on_exception`-decorated function may still +need to do exception handling. + +@backoff.on_predicate +--------------------- + +The ``on_predicate`` decorator is used to retry when a particular +condition is true of the return value of the target function. This may +be useful when polling a resource for externally generated content. + +Here's an example which uses a fibonacci sequence backoff when the +return value of the target function is the empty list: + +.. code-block:: python + + @backoff.on_predicate(backoff.fibo, lambda x: x == [], max_value=13) + def poll_for_messages(queue): + return queue.get() + +Extra keyword arguments are passed when initializing the +wait generator, so the ``max_value`` param above is passed as a keyword +arg when initializing the fibo generator. 
+ +When not specified, the predicate param defaults to the falsey test, +so the above can more concisely be written: + +.. code-block:: python + + @backoff.on_predicate(backoff.fibo, max_value=13) + def poll_for_message(queue) + return queue.get() + +More simply, a function which continues polling every second until it +gets a non-falsey result could be defined like like this: + +.. code-block:: python + + @backoff.on_predicate(backoff.constant, interval=1) + def poll_for_message(queue) + return queue.get() + +Jitter +------ + +A jitter algorithm can be supplied with the ``jitter`` keyword arg to +either of the backoff decorators. This argument should be a function +accepting the original unadulterated backoff value and returning it's +jittered counterpart. + +As of version 1.2, the default jitter function ``backoff.full_jitter`` +implements the 'Full Jitter' algorithm as defined in the AWS +Architecture Blog's `Exponential Backoff And Jitter +`_ post. +Note that with this algorithm, the time yielded by the wait generator +is actually the *maximum* amount of time to wait. + +Previous versions of backoff defaulted to adding some random number of +milliseconds (up to 1s) to the raw sleep value. If desired, this +behavior is now available as ``backoff.random_jitter``. + +Using multiple decorators +------------------------- + +The backoff decorators may also be combined to specify different +backoff behavior for different cases: + +.. code-block:: python + + @backoff.on_predicate(backoff.fibo, max_value=13) + @backoff.on_exception(backoff.expo, + requests.exceptions.HTTPError, + max_time=60) + @backoff.on_exception(backoff.expo, + requests.exceptions.Timeout, + max_time=300) + def poll_for_message(queue): + return queue.get() + +Runtime Configuration +--------------------- + +The decorator functions ``on_exception`` and ``on_predicate`` are +generally evaluated at import time. 
This is fine when the keyword args +are passed as constant values, but suppose we want to consult a +dictionary with configuration options that only become available at +runtime. The relevant values are not available at import time. Instead, +decorator functions can be passed callables which are evaluated at +runtime to obtain the value: + +.. code-block:: python + + def lookup_max_time(): + # pretend we have a global reference to 'app' here + # and that it has a dictionary-like 'config' property + return app.config["BACKOFF_MAX_TIME"] + + @backoff.on_exception(backoff.expo, + ValueError, + max_time=lookup_max_time) + +Event handlers +-------------- + +Both backoff decorators optionally accept event handler functions +using the keyword arguments ``on_success``, ``on_backoff``, and ``on_giveup``. +This may be useful in reporting statistics or performing other custom +logging. + +Handlers must be callables with a unary signature accepting a dict +argument. This dict contains the details of the invocation. Valid keys +include: + +* *target*: reference to the function or method being invoked +* *args*: positional arguments to func +* *kwargs*: keyword arguments to func +* *tries*: number of invocation tries so far +* *elapsed*: elapsed time in seconds so far +* *wait*: seconds to wait (``on_backoff`` handler only) +* *value*: value triggering backoff (``on_predicate`` decorator only) + +A handler which prints the details of the backoff event could be +implemented like so: + +.. code-block:: python + + def backoff_hdlr(details): + print ("Backing off {wait:0.1f} seconds afters {tries} tries " + "calling function {target} with args {args} and kwargs " + "{kwargs}".format(**details)) + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + on_backoff=backoff_hdlr) + def get_url(url): + return requests.get(url) + +**Multiple handlers per event type** + +In all cases, iterables of handler functions are also accepted, which +are called in turn. 
For example, you might provide a simple list of +handler functions as the value of the ``on_backoff`` keyword arg: + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exceptions.RequestException, + on_backoff=[backoff_hdlr1, backoff_hdlr2]) + def get_url(url): + return requests.get(url) + +**Getting exception info** + +In the case of the ``on_exception`` decorator, all ``on_backoff`` and +``on_giveup`` handlers are called from within the except block for the +exception being handled. Therefore exception info is available to the +handler functions via the python standard library, specifically +``sys.exc_info()`` or the ``traceback`` module. + +Asynchronous code +----------------- + +Backoff supports asynchronous execution in Python 3.5 and above. + +To use backoff in asynchronous code based on +`asyncio `_ +you simply need to apply ``backoff.on_exception`` or ``backoff.on_predicate`` +to coroutines. +You can also use coroutines for the ``on_success``, ``on_backoff``, and +``on_giveup`` event handlers, with the interface otherwise being identical. + +The following examples use `aiohttp `_ +asynchronous HTTP client/server library. + +.. code-block:: python + + @backoff.on_exception(backoff.expo, aiohttp.ClientError, max_time=60) + async def get_url(url): + async with aiohttp.ClientSession() as session: + async with session.get(url) as response: + return await response.text() + +Logging configuration +--------------------- + +By default, backoff and retry attempts are logged to the 'backoff' +logger. By default, this logger is configured with a NullHandler, so +there will be nothing output unless you configure a handler. +Programmatically, this might be accomplished with something as simple +as: + +.. code-block:: python + + logging.getLogger('backoff').addHandler(logging.StreamHandler()) + +The default logging level is INFO, which corresponds to logging +anytime a retry event occurs. 
If you would instead like to log +only when a giveup event occurs, set the logger level to ERROR. + +.. code-block:: python + + logging.getLogger('backoff').setLevel(logging.ERROR) + +It is also possible to specify an alternate logger with the ``logger`` +keyword argument. If a string value is specified the logger will be +looked up by name. + +.. code-block:: python + + @backoff.on_exception(backoff.expo, + requests.exception.RequestException, + logger='my_logger') + # ... + +It is also supported to specify a Logger (or LoggerAdapter) object +directly. + +.. code-block:: python + + my_logger = logging.getLogger('my_logger') + my_handler = logging.StreamHandler() + my_logger.add_handler(my_handler) + my_logger.setLevel(logging.ERROR) + + @backoff.on_exception(backoff.expo, + requests.exception.RequestException, + logger=my_logger) + # ... + +Default logging can be disabled all together by specifying +``logger=None``. In this case, if desired alternative logging behavior +could be defined by using custom event handlers. diff --git a/backoff/__init__.py b/backoff/__init__.py new file mode 100644 index 0000000..fc00001 --- /dev/null +++ b/backoff/__init__.py @@ -0,0 +1,29 @@ +# coding:utf-8 +""" +Function decoration for backoff and retry + +This module provides function decorators which can be used to wrap a +function such that it will be retried until some condition is met. It +is meant to be of use when accessing unreliable resources with the +potential for intermittent failures i.e. network resources and external +APIs. Somewhat more generally, it may also be of use for dynamically +polling resources for externally generated content. 
+ +For examples and full documentation see the README at +https://github.com/litl/backoff +""" +from backoff._decorator import on_predicate, on_exception +from backoff._jitter import full_jitter, random_jitter +from backoff._wait_gen import constant, expo, fibo + +__all__ = [ + 'on_predicate', + 'on_exception', + 'constant', + 'expo', + 'fibo', + 'full_jitter', + 'random_jitter' +] + +__version__ = '1.10.0' diff --git a/backoff/_async.py b/backoff/_async.py new file mode 100644 index 0000000..38cde8e --- /dev/null +++ b/backoff/_async.py @@ -0,0 +1,166 @@ +# coding:utf-8 +import datetime +import functools +import asyncio # Python 3.5 code and syntax is allowed in this file +from datetime import timedelta + +from backoff._common import (_init_wait_gen, _maybe_call, _next_wait) + + +def _ensure_coroutine(coro_or_func): + if asyncio.iscoroutinefunction(coro_or_func): + return coro_or_func + else: + @functools.wraps(coro_or_func) + async def f(*args, **kwargs): + return coro_or_func(*args, **kwargs) + return f + + +def _ensure_coroutines(coros_or_funcs): + return [_ensure_coroutine(f) for f in coros_or_funcs] + + +async def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra): + details = { + 'target': target, + 'args': args, + 'kwargs': kwargs, + 'tries': tries, + 'elapsed': elapsed, + } + details.update(extra) + for hdlr in hdlrs: + await hdlr(details) + + +def retry_predicate(target, wait_gen, predicate, + max_tries, max_time, jitter, + on_success, on_backoff, on_giveup, + wait_gen_kwargs): + on_success = _ensure_coroutines(on_success) + on_backoff = _ensure_coroutines(on_backoff) + on_giveup = _ensure_coroutines(on_giveup) + + # Easy to implement, please report if you need this. 
+ assert not asyncio.iscoroutinefunction(max_tries) + assert not asyncio.iscoroutinefunction(jitter) + + assert asyncio.iscoroutinefunction(target) + + @functools.wraps(target) + async def retry(*args, **kwargs): + + # change names because python 2.x doesn't have nonlocal + max_tries_ = _maybe_call(max_tries) + max_time_ = _maybe_call(max_time) + + tries = 0 + start = datetime.datetime.now() + wait = _init_wait_gen(wait_gen, wait_gen_kwargs) + while True: + tries += 1 + elapsed = timedelta.total_seconds(datetime.datetime.now() - start) + details = (target, args, kwargs, tries, elapsed) + + ret = await target(*args, **kwargs) + if predicate(ret): + max_tries_exceeded = (tries == max_tries_) + max_time_exceeded = (max_time_ is not None and + elapsed >= max_time_) + + if max_tries_exceeded or max_time_exceeded: + await _call_handlers(on_giveup, *details, value=ret) + break + + try: + seconds = _next_wait(wait, jitter, elapsed, max_time_) + except StopIteration: + await _call_handlers(on_giveup, *details, value=ret) + break + + await _call_handlers(on_backoff, *details, value=ret, + wait=seconds) + + # Note: there is no convenient way to pass explicit event + # loop to decorator, so here we assume that either default + # thread event loop is set and correct (it mostly is + # by default), or Python >= 3.5.3 or Python >= 3.6 is used + # where loop.get_event_loop() in coroutine guaranteed to + # return correct value. + # See for details: + # + # + await asyncio.sleep(seconds) + continue + else: + await _call_handlers(on_success, *details, value=ret) + break + + return ret + + return retry + + +def retry_exception(target, wait_gen, exception, + max_tries, max_time, jitter, giveup, + on_success, on_backoff, on_giveup, + wait_gen_kwargs): + on_success = _ensure_coroutines(on_success) + on_backoff = _ensure_coroutines(on_backoff) + on_giveup = _ensure_coroutines(on_giveup) + giveup = _ensure_coroutine(giveup) + + # Easy to implement, please report if you need this. 
+ assert not asyncio.iscoroutinefunction(max_tries) + assert not asyncio.iscoroutinefunction(jitter) + + @functools.wraps(target) + async def retry(*args, **kwargs): + # change names because python 2.x doesn't have nonlocal + max_tries_ = _maybe_call(max_tries) + max_time_ = _maybe_call(max_time) + + tries = 0 + start = datetime.datetime.now() + wait = _init_wait_gen(wait_gen, wait_gen_kwargs) + while True: + tries += 1 + elapsed = timedelta.total_seconds(datetime.datetime.now() - start) + details = (target, args, kwargs, tries, elapsed) + + try: + ret = await target(*args, **kwargs) + except exception as e: + giveup_result = await giveup(e) + max_tries_exceeded = (tries == max_tries_) + max_time_exceeded = (max_time_ is not None and + elapsed >= max_time_) + + if giveup_result or max_tries_exceeded or max_time_exceeded: + await _call_handlers(on_giveup, *details) + raise + + try: + seconds = _next_wait(wait, jitter, elapsed, max_time_) + except StopIteration: + await _call_handlers(on_giveup, *details) + raise e + + await _call_handlers(on_backoff, *details, wait=seconds) + + # Note: there is no convenient way to pass explicit event + # loop to decorator, so here we assume that either default + # thread event loop is set and correct (it mostly is + # by default), or Python >= 3.5.3 or Python >= 3.6 is used + # where loop.get_event_loop() in coroutine guaranteed to + # return correct value. + # See for details: + # + # + await asyncio.sleep(seconds) + else: + await _call_handlers(on_success, *details) + + return ret + return retry diff --git a/backoff/_common.py b/backoff/_common.py new file mode 100644 index 0000000..efd13f1 --- /dev/null +++ b/backoff/_common.py @@ -0,0 +1,101 @@ +# coding:utf-8 + +import functools +import logging +import sys +import traceback +import warnings + + +# Use module-specific logger with a default null handler. 
+_logger = logging.getLogger('backoff') +_logger.addHandler(logging.NullHandler()) # pragma: no cover +_logger.setLevel(logging.INFO) + + +# Evaluate arg that can be either a fixed value or a callable. +def _maybe_call(f, *args, **kwargs): + return f(*args, **kwargs) if callable(f) else f + + +def _init_wait_gen(wait_gen, wait_gen_kwargs): + kwargs = {k: _maybe_call(v) for k, v in wait_gen_kwargs.items()} + return wait_gen(**kwargs) + + +def _next_wait(wait, jitter, elapsed, max_time): + value = next(wait) + try: + if jitter is not None: + seconds = jitter(value) + else: + seconds = value + except TypeError: + warnings.warn( + "Nullary jitter function signature is deprecated. Use " + "unary signature accepting a wait value in seconds and " + "returning a jittered version of it.", + DeprecationWarning, + stacklevel=2, + ) + + seconds = value + jitter() + + # don't sleep longer than remaining alloted max_time + if max_time is not None: + seconds = min(seconds, max_time - elapsed) + + return seconds + + +# Configure handler list with user specified handler and optionally +# with a default handler bound to the specified logger. +def _config_handlers(user_handlers, default_handler=None, logger=None): + handlers = [] + if logger is not None: + # bind the specified logger to the default log handler + log_handler = functools.partial(default_handler, logger=logger) + handlers.append(log_handler) + + if user_handlers is None: + return handlers + + # user specified handlers can either be an iterable of handlers + # or a single handler. either way append them to the list. + if hasattr(user_handlers, '__iter__'): + # add all handlers in the iterable + handlers += list(user_handlers) + else: + # append a single handler + handlers.append(user_handlers) + + return handlers + + +# Default backoff handler +def _log_backoff(details, logger): + msg = "Backing off %s(...) 
for %.1fs (%s)" + log_args = [details['target'].__name__, details['wait']] + + exc_typ, exc, _ = sys.exc_info() + if exc is not None: + exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1] + log_args.append(exc_fmt.rstrip("\n")) + else: + log_args.append(details['value']) + logger.info(msg, *log_args) + + +# Default giveup handler +def _log_giveup(details, logger): + msg = "Giving up %s(...) after %d tries (%s)" + log_args = [details['target'].__name__, details['tries']] + + exc_typ, exc, _ = sys.exc_info() + if exc is not None: + exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1] + log_args.append(exc_fmt.rstrip("\n")) + else: + log_args.append(details['value']) + + logger.error(msg, *log_args) diff --git a/backoff/_decorator.py b/backoff/_decorator.py new file mode 100644 index 0000000..e541904 --- /dev/null +++ b/backoff/_decorator.py @@ -0,0 +1,178 @@ +# coding:utf-8 +from __future__ import unicode_literals + +import logging +import operator +import sys + +from backoff._common import (_config_handlers, _log_backoff, _log_giveup) +from backoff._jitter import full_jitter +from backoff import _sync + + +# python 2.7 -> 3.x compatibility for str and unicode +try: + basestring +except NameError: # pragma: python=3.5 + basestring = str + + +def on_predicate(wait_gen, + predicate=operator.not_, + max_tries=None, + max_time=None, + jitter=full_jitter, + on_success=None, + on_backoff=None, + on_giveup=None, + logger='backoff', + **wait_gen_kwargs): + """Returns decorator for backoff and retry triggered by predicate. + + Args: + wait_gen: A generator yielding successive wait times in + seconds. + predicate: A function which when called on the return value of + the target function will trigger backoff when considered + truthily. If not specified, the default behavior is to + backoff on falsey return values. + max_tries: The maximum number of attempts to make before giving + up. In the case of failure, the result of the last attempt + will be returned. 
The default value of None means there + is no limit to the number of tries. If a callable is passed, + it will be evaluated at runtime and its return value used. + max_time: The maximum total amount of time to try for before + giving up. If this time expires, the result of the last + attempt will be returned. If a callable is passed, it will + be evaluated at runtime and its return value used. + jitter: A function of the value yielded by wait_gen returning + the actual time to wait. This distributes wait times + stochastically in order to avoid timing collisions across + concurrent clients. Wait times are jittered by default + using the full_jitter function. Jittering may be disabled + altogether by passing jitter=None. + on_success: Callable (or iterable of callables) with a unary + signature to be called in the event of success. The + parameter is a dict containing details about the invocation. + on_backoff: Callable (or iterable of callables) with a unary + signature to be called in the event of a backoff. The + parameter is a dict containing details about the invocation. + on_giveup: Callable (or iterable of callables) with a unary + signature to be called in the event that max_tries + is exceeded. The parameter is a dict containing details + about the invocation. + logger: Name of logger or Logger object to log to. Defaults to + 'backoff'. + **wait_gen_kwargs: Any additional keyword args specified will be + passed to wait_gen when it is initialized. Any callable + args will first be evaluated and their return values passed. + This is useful for runtime configuration. 
+ """ + def decorate(target): + # change names because python 2.x doesn't have nonlocal + logger_ = logger + if isinstance(logger_, basestring): + logger_ = logging.getLogger(logger_) + on_success_ = _config_handlers(on_success) + on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_) + on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_) + + retry = None + if sys.version_info >= (3, 5): # pragma: python=3.5 + import asyncio + + if asyncio.iscoroutinefunction(target): + import backoff._async + retry = backoff._async.retry_predicate + + if retry is None: + retry = _sync.retry_predicate + + return retry(target, wait_gen, predicate, + max_tries, max_time, jitter, + on_success_, on_backoff_, on_giveup_, + wait_gen_kwargs) + + # Return a function which decorates a target with a retry loop. + return decorate + + +def on_exception(wait_gen, + exception, + max_tries=None, + max_time=None, + jitter=full_jitter, + giveup=lambda e: False, + on_success=None, + on_backoff=None, + on_giveup=None, + logger='backoff', + **wait_gen_kwargs): + """Returns decorator for backoff and retry triggered by exception. + + Args: + wait_gen: A generator yielding successive wait times in + seconds. + exception: An exception type (or tuple of types) which triggers + backoff. + max_tries: The maximum number of attempts to make before giving + up. Once exhausted, the exception will be allowed to escape. + The default value of None means their is no limit to the + number of tries. If a callable is passed, it will be + evaluated at runtime and its return value used. + max_time: The maximum total amount of time to try for before + giving up. Once expired, the exception will be allowed to + escape. If a callable is passed, it will be + evaluated at runtime and its return value used. + jitter: A function of the value yielded by wait_gen returning + the actual time to wait. This distributes wait times + stochastically in order to avoid timing collisions across + concurrent clients. 
Wait times are jittered by default + using the full_jitter function. Jittering may be disabled + altogether by passing jitter=None. + giveup: Function accepting an exception instance and + returning whether or not to give up. Optional. The default + is to always continue. + on_success: Callable (or iterable of callables) with a unary + signature to be called in the event of success. The + parameter is a dict containing details about the invocation. + on_backoff: Callable (or iterable of callables) with a unary + signature to be called in the event of a backoff. The + parameter is a dict containing details about the invocation. + on_giveup: Callable (or iterable of callables) with a unary + signature to be called in the event that max_tries + is exceeded. The parameter is a dict containing details + about the invocation. + logger: Name or Logger object to log to. Defaults to 'backoff'. + **wait_gen_kwargs: Any additional keyword args specified will be + passed to wait_gen when it is initialized. Any callable + args will first be evaluated and their return values passed. + This is useful for runtime configuration. + """ + def decorate(target): + # change names because python 2.x doesn't have nonlocal + logger_ = logger + if isinstance(logger_, basestring): + logger_ = logging.getLogger(logger_) + on_success_ = _config_handlers(on_success) + on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_) + on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_) + + retry = None + if sys.version_info[:2] >= (3, 5): # pragma: python=3.5 + import asyncio + + if asyncio.iscoroutinefunction(target): + import backoff._async + retry = backoff._async.retry_exception + + if retry is None: + retry = _sync.retry_exception + + return retry(target, wait_gen, exception, + max_tries, max_time, jitter, giveup, + on_success_, on_backoff_, on_giveup_, + wait_gen_kwargs) + + # Return a function which decorates a target with a retry loop. 
# coding:utf-8

import random


def random_jitter(value):
    """Add up to one second of random jitter to *value*.

    The returned wait is the original value plus a uniformly random
    fraction of a second.  Prior to backoff version 1.2 this was the
    default jitter behavior.

    Args:
        value: The unadulterated backoff value.
    """
    return random.random() + value


def full_jitter(value):
    """Spread the wait uniformly over the interval [0, value].

    This corresponds to the "Full Jitter" algorithm specified in the
    AWS blog's post on the performance of various jitter algorithms.
    (http://www.awsarchitectureblog.com/2015/03/backoff.html)

    Args:
        value: The unadulterated backoff value.
    """
    return random.random() * value
def retry_exception(target, wait_gen, exception,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* in a retry loop triggered by *exception*.

    Returns a callable with the same signature as *target* that
    re-invokes it whenever one of the configured exception types is
    raised, sleeping between attempts according to *wait_gen* and
    *jitter*, until *giveup* returns true or the *max_tries* /
    *max_time* limits are reached, at which point the exception is
    allowed to escape.
    """
    @functools.wraps(target)
    def retry(*args, **kwargs):

        # Evaluate possibly-callable limits once per invocation.
        # New names because python 2.x doesn't have nonlocal.
        tries_limit = _maybe_call(max_tries)
        time_limit = _maybe_call(max_time)

        attempt = 0
        started = datetime.datetime.now()
        waiter = _init_wait_gen(wait_gen, wait_gen_kwargs)

        while True:
            attempt += 1
            elapsed = timedelta.total_seconds(
                datetime.datetime.now() - started)
            details = (target, args, kwargs, attempt, elapsed)

            try:
                result = target(*args, **kwargs)
            except exception as e:
                out_of_tries = (attempt == tries_limit)
                out_of_time = (time_limit is not None and
                               elapsed >= time_limit)

                if giveup(e) or out_of_tries or out_of_time:
                    _call_handlers(on_giveup, *details)
                    raise

                try:
                    seconds = _next_wait(waiter, jitter, elapsed, time_limit)
                except StopIteration:
                    # Wait generator exhausted: no further retry possible.
                    _call_handlers(on_giveup, *details)
                    raise e

                _call_handlers(on_backoff, *details, wait=seconds)

                time.sleep(seconds)
            else:
                _call_handlers(on_success, *details)

                return result

    return retry
# coding:utf-8

import itertools


def expo(base=2, factor=1, max_value=None):
    """Generator for exponential decay.

    Args:
        base: The mathematical base of the exponentiation operation.
        factor: Factor to multiply the exponentiation by.
        max_value: The maximum value to yield.  Once the value in the
            true exponential sequence exceeds this, max_value will
            forever after be yielded.
    """
    exponent = 0
    while True:
        value = factor * base ** exponent
        if max_value is not None and value >= max_value:
            # Sequence has saturated; pin to the ceiling from now on.
            yield max_value
        else:
            yield value
            exponent += 1


def fibo(max_value=None):
    """Generator for fibonaccial decay.

    Args:
        max_value: The maximum value to yield.  Once the value in the
            true fibonacci sequence exceeds this, max_value will
            forever after be yielded.
    """
    current, following = 1, 1
    while True:
        if max_value is not None and current >= max_value:
            # Sequence has saturated; pin to the ceiling from now on.
            yield max_value
        else:
            yield current
            current, following = following, current + following


def constant(interval=1):
    """Generator for constant intervals.

    Args:
        interval: A constant value to yield, or an iterable of such
            values.
    """
    # A scalar interval repeats forever; an iterable is consumed as-is.
    try:
        values = iter(interval)
    except TypeError:
        values = itertools.repeat(interval)

    for value in values:
        yield value
def log_hdlr(event, details): + log[event].append(details) + + log_success = functools.partial(log_hdlr, 'success') + log_backoff = functools.partial(log_hdlr, 'backoff') + log_giveup = functools.partial(log_hdlr, 'giveup') + + return log, log_success, log_backoff, log_giveup + + +# decorator that that saves the target as +# an attribute of the decorated function +def _save_target(f): + f._target = f + return f diff --git a/tests/python35/test_backoff_async.py b/tests/python35/test_backoff_async.py new file mode 100644 index 0000000..ca62c4a --- /dev/null +++ b/tests/python35/test_backoff_async.py @@ -0,0 +1,664 @@ +# coding:utf-8 + +import asyncio # Python 3.5 code and syntax is allowed in this file +import backoff +import pytest +import random + +from tests.common import _log_hdlrs, _save_target + + +async def _await_none(x): + return None + + +@pytest.mark.asyncio +async def test_on_predicate(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + @backoff.on_predicate(backoff.expo) + async def return_true(log, n): + val = (len(log) == n - 1) + log.append(val) + return val + + log = [] + ret = await return_true(log, 3) + assert ret is True + assert 3 == len(log) + + +@pytest.mark.asyncio +async def test_on_predicate_max_tries(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + @backoff.on_predicate(backoff.expo, jitter=None, max_tries=3) + async def return_true(log, n): + val = (len(log) == n) + log.append(val) + return val + + log = [] + ret = await return_true(log, 10) + assert ret is False + assert 3 == len(log) + + +@pytest.mark.asyncio +async def test_on_exception(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + @backoff.on_exception(backoff.expo, KeyError) + async def keyerror_then_true(log, n): + if len(log) == n: + return True + e = KeyError() + log.append(e) + raise e + + log = [] + assert (await keyerror_then_true(log, 3)) is True + assert 3 == len(log) + + +@pytest.mark.asyncio +async def 
test_on_exception_tuple(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + @backoff.on_exception(backoff.expo, (KeyError, ValueError)) + async def keyerror_valueerror_then_true(log): + if len(log) == 2: + return True + if len(log) == 0: + e = KeyError() + if len(log) == 1: + e = ValueError() + log.append(e) + raise e + + log = [] + assert (await keyerror_valueerror_then_true(log)) is True + assert 2 == len(log) + assert isinstance(log[0], KeyError) + assert isinstance(log[1], ValueError) + + +@pytest.mark.asyncio +async def test_on_exception_max_tries(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + @backoff.on_exception(backoff.expo, KeyError, jitter=None, max_tries=3) + async def keyerror_then_true(log, n, foo=None): + if len(log) == n: + return True + e = KeyError() + log.append(e) + raise e + + log = [] + with pytest.raises(KeyError): + await keyerror_then_true(log, 10, foo="bar") + + assert 3 == len(log) + + +@pytest.mark.asyncio +async def test_on_exception_constant_iterable(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + backoffs = [] + giveups = [] + successes = [] + + @backoff.on_exception( + backoff.constant, + KeyError, + interval=(1, 2, 3), + on_backoff=backoffs.append, + on_giveup=giveups.append, + on_success=successes.append, + ) + async def endless_exceptions(): + raise KeyError('foo') + + with pytest.raises(KeyError): + await endless_exceptions() + + assert len(backoffs) == 3 + assert len(giveups) == 1 + assert len(successes) == 0 + + +@pytest.mark.asyncio +async def test_on_exception_success_random_jitter(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_exception(backoff.expo, + Exception, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + jitter=backoff.random_jitter, + factor=0.5) + @_save_target + async def succeeder(*args, **kwargs): + # succeed after we've backed 
off twice + if len(log['backoff']) < 2: + raise ValueError("catch me") + + await succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(log['success']) == 1 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 0 + + for i in range(2): + details = log['backoff'][i] + assert details['wait'] >= 0.5 * 2 ** i + + +@pytest.mark.asyncio +async def test_on_exception_success_full_jitter(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_exception(backoff.expo, + Exception, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + jitter=backoff.full_jitter, + factor=0.5) + @_save_target + async def succeeder(*args, **kwargs): + # succeed after we've backed off twice + if len(log['backoff']) < 2: + raise ValueError("catch me") + + await succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(log['success']) == 1 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 0 + + for i in range(2): + details = log['backoff'][i] + assert details['wait'] <= 0.5 * 2 ** i + + +@pytest.mark.asyncio +async def test_on_exception_success(): + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_exception(backoff.constant, + Exception, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + jitter=None, + interval=0) + @_save_target + async def succeeder(*args, **kwargs): + # succeed after we've backed off twice + if len(log['backoff']) < 2: + raise ValueError("catch me") + + await succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(log['success']) == 1 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 0 + + for i in range(2): + details = log['backoff'][i] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 
2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': i + 1, + 'wait': 0} + + details = log['success'][0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': 3} + + +@pytest.mark.asyncio +async def test_on_exception_giveup(): + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_exception(backoff.constant, + ValueError, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + max_tries=3, + jitter=None, + interval=0) + @_save_target + async def exceptor(*args, **kwargs): + raise ValueError("catch me") + + with pytest.raises(ValueError): + await exceptor(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice and giving up once + assert len(log['success']) == 0 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 1 + + details = log['giveup'][0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': exceptor._target, + 'tries': 3} + + +@pytest.mark.asyncio +async def test_on_exception_giveup_predicate(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + def on_baz(e): + return str(e) == "baz" + + vals = ["baz", "bar", "foo"] + + @backoff.on_exception(backoff.constant, + ValueError, + giveup=on_baz) + async def foo_bar_baz(): + raise ValueError(vals.pop()) + + with pytest.raises(ValueError): + await foo_bar_baz() + + assert not vals + + +@pytest.mark.asyncio +async def test_on_exception_giveup_coro(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + async def on_baz(e): + return str(e) == "baz" + + vals = ["baz", "bar", "foo"] + + @backoff.on_exception(backoff.constant, + ValueError, + giveup=on_baz) + async def foo_bar_baz(): + raise ValueError(vals.pop()) + + with pytest.raises(ValueError): + await 
foo_bar_baz() + + assert not vals + + +@pytest.mark.asyncio +async def test_on_predicate_success(): + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_predicate(backoff.constant, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + jitter=None, + interval=0) + @_save_target + async def success(*args, **kwargs): + # succeed after we've backed off twice + return len(log['backoff']) == 2 + + await success(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(log['success']) == 1 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 0 + + for i in range(2): + details = log['backoff'][i] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': i + 1, + 'value': False, + 'wait': 0} + + details = log['success'][0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': 3, + 'value': True} + + +@pytest.mark.asyncio +async def test_on_predicate_giveup(): + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_predicate(backoff.constant, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + max_tries=3, + jitter=None, + interval=0) + @_save_target + async def emptiness(*args, **kwargs): + pass + + await emptiness(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice and giving up once + assert len(log['success']) == 0 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 1 + + details = log['giveup'][0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': emptiness._target, + 'tries': 3, + 'value': None} + + +@pytest.mark.asyncio +async def 
test_on_predicate_iterable_handlers(): + hdlrs = [_log_hdlrs() for _ in range(3)] + + @backoff.on_predicate(backoff.constant, + on_success=(h[1] for h in hdlrs), + on_backoff=(h[2] for h in hdlrs), + on_giveup=(h[3] for h in hdlrs), + max_tries=3, + jitter=None, + interval=0) + @_save_target + async def emptiness(*args, **kwargs): + pass + + await emptiness(1, 2, 3, foo=1, bar=2) + + for i in range(3): + assert len(hdlrs[i][0]['success']) == 0 + assert len(hdlrs[i][0]['backoff']) == 2 + assert len(hdlrs[i][0]['giveup']) == 1 + + details = dict(hdlrs[i][0]['giveup'][0]) + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': emptiness._target, + 'tries': 3, + 'value': None} + + +@pytest.mark.asyncio +async def test_on_predicate_constant_iterable(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + waits = [1, 2, 3, 6, 9] + backoffs = [] + giveups = [] + successes = [] + + @backoff.on_predicate( + backoff.constant, + interval=waits, + on_backoff=backoffs.append, + on_giveup=giveups.append, + on_success=successes.append, + jitter=None, + ) + async def falsey(): + return False + + assert not await falsey() + + assert len(backoffs) == len(waits) + for i, wait in enumerate(waits): + assert backoffs[i]['wait'] == wait + + assert len(giveups) == 1 + assert len(successes) == 0 + + +# To maintain backward compatibility, +# on_predicate should support 0-argument jitter function. 
+@pytest.mark.asyncio +async def test_on_exception_success_0_arg_jitter(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + monkeypatch.setattr('random.random', lambda: 0) + + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_exception(backoff.constant, + Exception, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + jitter=random.random, + interval=0) + @_save_target + async def succeeder(*args, **kwargs): + # succeed after we've backed off twice + if len(log['backoff']) < 2: + raise ValueError("catch me") + + with pytest.deprecated_call(): + await succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(log['success']) == 1 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 0 + + for i in range(2): + details = log['backoff'][i] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': i + 1, + 'wait': 0} + + details = log['success'][0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': 3} + + +# To maintain backward compatibility, +# on_predicate should support 0-argument jitter function. 
+@pytest.mark.asyncio +async def test_on_predicate_success_0_arg_jitter(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + monkeypatch.setattr('random.random', lambda: 0) + + log, log_success, log_backoff, log_giveup = _log_hdlrs() + + @backoff.on_predicate(backoff.constant, + on_success=log_success, + on_backoff=log_backoff, + on_giveup=log_giveup, + jitter=random.random, + interval=0) + @_save_target + async def success(*args, **kwargs): + # succeed after we've backed off twice + return len(log['backoff']) == 2 + + with pytest.deprecated_call(): + await success(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(log['success']) == 1 + assert len(log['backoff']) == 2 + assert len(log['giveup']) == 0 + + for i in range(2): + details = log['backoff'][i] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': i + 1, + 'value': False, + 'wait': 0} + + details = log['success'][0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': 3, + 'value': True} + + +@pytest.mark.asyncio +async def test_on_exception_callable_max_tries(monkeypatch): + monkeypatch.setattr('asyncio.sleep', _await_none) + + def lookup_max_tries(): + return 3 + + log = [] + + @backoff.on_exception(backoff.constant, + ValueError, + max_tries=lookup_max_tries) + async def exceptor(): + log.append(True) + raise ValueError() + + with pytest.raises(ValueError): + await exceptor() + + assert len(log) == 3 + + +@pytest.mark.asyncio +async def test_on_exception_callable_gen_kwargs(): + + def lookup_foo(): + return "foo" + + def wait_gen(foo=None, bar=None): + assert foo == "foo" + assert bar == "bar" + + while True: + yield 0 + + @backoff.on_exception(wait_gen, + ValueError, + max_tries=2, + 
foo=lookup_foo, + bar="bar") + async def exceptor(): + raise ValueError("aah") + + with pytest.raises(ValueError): + await exceptor() + + +@pytest.mark.asyncio +async def test_on_exception_coro_cancelling(event_loop): + sleep_started_event = asyncio.Event() + + @backoff.on_predicate(backoff.expo) + async def coro(): + sleep_started_event.set() + + try: + await asyncio.sleep(10) + except asyncio.CancelledError: + return True + + return False + + task = event_loop.create_task(coro()) + + await sleep_started_event.wait() + + task.cancel() + + assert (await task) + + +def test_on_predicate_on_regular_function_without_event_loop(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + # Set default event loop to None. + loop = asyncio.get_event_loop() + asyncio.set_event_loop(None) + + try: + @backoff.on_predicate(backoff.expo) + def return_true(log, n): + val = (len(log) == n - 1) + log.append(val) + return val + + log = [] + ret = return_true(log, 3) + assert ret is True + assert 3 == len(log) + + finally: + # Restore event loop. + asyncio.set_event_loop(loop) + + +def test_on_exception_on_regular_function_without_event_loop(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + # Set default event loop to None. + loop = asyncio.get_event_loop() + asyncio.set_event_loop(None) + + try: + @backoff.on_exception(backoff.expo, KeyError) + def keyerror_then_true(log, n): + if len(log) == n: + return True + e = KeyError() + log.append(e) + raise e + + log = [] + assert keyerror_then_true(log, 3) is True + assert 3 == len(log) + + finally: + # Restore event loop. 
+ asyncio.set_event_loop(loop) diff --git a/tests/test_backoff.py b/tests/test_backoff.py new file mode 100644 index 0000000..20734f5 --- /dev/null +++ b/tests/test_backoff.py @@ -0,0 +1,735 @@ +# coding:utf-8 +import datetime +import logging +import random +import sys +import threading + +import pytest + +import backoff +from tests.common import _save_target + + +def test_on_predicate(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + @backoff.on_predicate(backoff.expo) + def return_true(log, n): + val = (len(log) == n - 1) + log.append(val) + return val + + log = [] + ret = return_true(log, 3) + assert ret is True + assert 3 == len(log) + + +def test_on_predicate_max_tries(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + @backoff.on_predicate(backoff.expo, jitter=None, max_tries=3) + def return_true(log, n): + val = (len(log) == n) + log.append(val) + return val + + log = [] + ret = return_true(log, 10) + assert ret is False + assert 3 == len(log) + + +def test_on_predicate_max_time(monkeypatch): + nows = [ + datetime.datetime(2018, 1, 1, 12, 0, 10, 5), + datetime.datetime(2018, 1, 1, 12, 0, 9, 0), + datetime.datetime(2018, 1, 1, 12, 0, 1, 0), + datetime.datetime(2018, 1, 1, 12, 0, 0, 0), + ] + + class Datetime: + @staticmethod + def now(): + return nows.pop() + + monkeypatch.setattr('time.sleep', lambda x: None) + monkeypatch.setattr('datetime.datetime', Datetime) + + def giveup(details): + assert details['tries'] == 3 + assert details['elapsed'] == 10.000005 + + @backoff.on_predicate(backoff.expo, jitter=None, max_time=10, + on_giveup=giveup) + def return_true(log, n): + val = (len(log) == n) + log.append(val) + return val + + log = [] + ret = return_true(log, 10) + assert ret is False + assert len(log) == 3 + + +def test_on_exception(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + @backoff.on_exception(backoff.expo, KeyError) + def keyerror_then_true(log, n): + if len(log) == n: + return True + e = 
KeyError() + log.append(e) + raise e + + log = [] + assert keyerror_then_true(log, 3) is True + assert 3 == len(log) + + +def test_on_exception_tuple(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + @backoff.on_exception(backoff.expo, (KeyError, ValueError)) + def keyerror_valueerror_then_true(log): + if len(log) == 2: + return True + if len(log) == 0: + e = KeyError() + if len(log) == 1: + e = ValueError() + log.append(e) + raise e + + log = [] + assert keyerror_valueerror_then_true(log) is True + assert 2 == len(log) + assert isinstance(log[0], KeyError) + assert isinstance(log[1], ValueError) + + +def test_on_exception_max_tries(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + @backoff.on_exception(backoff.expo, KeyError, jitter=None, max_tries=3) + def keyerror_then_true(log, n, foo=None): + if len(log) == n: + return True + e = KeyError() + log.append(e) + raise e + + log = [] + with pytest.raises(KeyError): + keyerror_then_true(log, 10, foo="bar") + + assert 3 == len(log) + + +def test_on_exception_constant_iterable(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + backoffs = [] + giveups = [] + successes = [] + + @backoff.on_exception( + backoff.constant, + KeyError, + interval=(1, 2, 3), + on_backoff=backoffs.append, + on_giveup=giveups.append, + on_success=successes.append, + ) + def endless_exceptions(): + raise KeyError('foo') + + with pytest.raises(KeyError): + endless_exceptions() + + assert len(backoffs) == 3 + assert len(giveups) == 1 + assert len(successes) == 0 + + +def test_on_exception_success_random_jitter(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + backoffs, giveups, successes = [], [], [] + + @backoff.on_exception(backoff.expo, + Exception, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + jitter=backoff.random_jitter, + factor=0.5) + @_save_target + def succeeder(*args, **kwargs): + # succeed after we've backed off 
twice + if len(backoffs) < 2: + raise ValueError("catch me") + + succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(successes) == 1 + assert len(backoffs) == 2 + assert len(giveups) == 0 + + for i in range(2): + details = backoffs[i] + assert details['wait'] >= 0.5 * 2 ** i + + +def test_on_exception_success_full_jitter(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + backoffs, giveups, successes = [], [], [] + + @backoff.on_exception(backoff.expo, + Exception, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + jitter=backoff.full_jitter, + factor=0.5) + @_save_target + def succeeder(*args, **kwargs): + # succeed after we've backed off twice + if len(backoffs) < 2: + raise ValueError("catch me") + + succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(successes) == 1 + assert len(backoffs) == 2 + assert len(giveups) == 0 + + for i in range(2): + details = backoffs[i] + assert details['wait'] <= 0.5 * 2 ** i + + +def test_on_exception_success(): + backoffs, giveups, successes = [], [], [] + + @backoff.on_exception(backoff.constant, + Exception, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + jitter=None, + interval=0) + @_save_target + def succeeder(*args, **kwargs): + # succeed after we've backed off twice + if len(backoffs) < 2: + raise ValueError("catch me") + + succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(successes) == 1 + assert len(backoffs) == 2 + assert len(giveups) == 0 + + for i in range(2): + details = backoffs[i] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': i + 1, + 'wait': 0} + + details = successes[0] + elapsed = details.pop('elapsed') + assert 
isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': 3} + + +def test_on_exception_giveup(): + backoffs, giveups, successes = [], [], [] + + @backoff.on_exception(backoff.constant, + ValueError, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + max_tries=3, + jitter=None, + interval=0) + @_save_target + def exceptor(*args, **kwargs): + raise ValueError("catch me") + + with pytest.raises(ValueError): + exceptor(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice and giving up once + assert len(successes) == 0 + assert len(backoffs) == 2 + assert len(giveups) == 1 + + details = giveups[0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': exceptor._target, + 'tries': 3} + + +def test_on_exception_giveup_predicate(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + def on_baz(e): + return str(e) == "baz" + + vals = ["baz", "bar", "foo"] + + @backoff.on_exception(backoff.constant, + ValueError, + giveup=on_baz) + def foo_bar_baz(): + raise ValueError(vals.pop()) + + with pytest.raises(ValueError): + foo_bar_baz() + + assert not vals + + +def test_on_predicate_success(): + backoffs, giveups, successes = [], [], [] + + @backoff.on_predicate(backoff.constant, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + jitter=None, + interval=0) + @_save_target + def success(*args, **kwargs): + # succeed after we've backed off twice + return len(backoffs) == 2 + + success(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(successes) == 1 + assert len(backoffs) == 2 + assert len(giveups) == 0 + + for i in range(2): + details = backoffs[i] + + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': 
(1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': i + 1, + 'value': False, + 'wait': 0} + + details = successes[0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': 3, + 'value': True} + + +def test_on_predicate_giveup(): + backoffs, giveups, successes = [], [], [] + + @backoff.on_predicate(backoff.constant, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + max_tries=3, + jitter=None, + interval=0) + @_save_target + def emptiness(*args, **kwargs): + pass + + emptiness(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice and giving up once + assert len(successes) == 0 + assert len(backoffs) == 2 + assert len(giveups) == 1 + + details = giveups[0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': emptiness._target, + 'tries': 3, + 'value': None} + + +def test_on_predicate_iterable_handlers(): + class Logger: + def __init__(self): + self.backoffs = [] + self.giveups = [] + self.successes = [] + + loggers = [Logger() for _ in range(3)] + + @backoff.on_predicate(backoff.constant, + on_backoff=(l.backoffs.append for l in loggers), + on_giveup=(l.giveups.append for l in loggers), + on_success=(l.successes.append for l in loggers), + max_tries=3, + jitter=None, + interval=0) + @_save_target + def emptiness(*args, **kwargs): + pass + + emptiness(1, 2, 3, foo=1, bar=2) + + for logger in loggers: + + assert len(logger.successes) == 0 + assert len(logger.backoffs) == 2 + assert len(logger.giveups) == 1 + + details = dict(logger.giveups[0]) + print(details) + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': emptiness._target, + 'tries': 3, + 'value': 
None} + + +# To maintain backward compatibility, +# on_predicate should support 0-argument jitter function. +def test_on_exception_success_0_arg_jitter(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + monkeypatch.setattr('random.random', lambda: 0) + + backoffs, giveups, successes = [], [], [] + + @backoff.on_exception(backoff.constant, + Exception, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + jitter=random.random, + interval=0) + @_save_target + def succeeder(*args, **kwargs): + # succeed after we've backed off twice + if len(backoffs) < 2: + raise ValueError("catch me") + + with pytest.deprecated_call(): + succeeder(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(successes) == 1 + assert len(backoffs) == 2 + assert len(giveups) == 0 + + for i in range(2): + details = backoffs[i] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': i + 1, + 'wait': 0} + + details = successes[0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': succeeder._target, + 'tries': 3} + + +# To maintain backward compatibility, +# on_predicate should support 0-argument jitter function. 
+def test_on_predicate_success_0_arg_jitter(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + monkeypatch.setattr('random.random', lambda: 0) + + backoffs, giveups, successes = [], [], [] + + @backoff.on_predicate(backoff.constant, + on_success=successes.append, + on_backoff=backoffs.append, + on_giveup=giveups.append, + jitter=random.random, + interval=0) + @_save_target + def success(*args, **kwargs): + # succeed after we've backed off twice + return len(backoffs) == 2 + + with pytest.deprecated_call(): + success(1, 2, 3, foo=1, bar=2) + + # we try 3 times, backing off twice before succeeding + assert len(successes) == 1 + assert len(backoffs) == 2 + assert len(giveups) == 0 + + for i in range(2): + details = backoffs[i] + print(details) + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': i + 1, + 'value': False, + 'wait': 0} + + details = successes[0] + elapsed = details.pop('elapsed') + assert isinstance(elapsed, float) + assert details == {'args': (1, 2, 3), + 'kwargs': {'foo': 1, 'bar': 2}, + 'target': success._target, + 'tries': 3, + 'value': True} + + +def test_on_exception_callable_max_tries(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + def lookup_max_tries(): + return 3 + + log = [] + + @backoff.on_exception(backoff.constant, + ValueError, + max_tries=lookup_max_tries) + def exceptor(): + log.append(True) + raise ValueError() + + with pytest.raises(ValueError): + exceptor() + + assert len(log) == 3 + + +def test_on_exception_callable_gen_kwargs(): + + def lookup_foo(): + return "foo" + + def wait_gen(foo=None, bar=None): + assert foo == "foo" + assert bar == "bar" + + while True: + yield 0 + + @backoff.on_exception(wait_gen, + ValueError, + max_tries=2, + foo=lookup_foo, + bar="bar") + def exceptor(): + raise ValueError("aah") + + with pytest.raises(ValueError): + exceptor() + + +def 
test_on_predicate_in_thread(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + result = [] + + def check(): + try: + @backoff.on_predicate(backoff.expo) + def return_true(log, n): + val = (len(log) == n - 1) + log.append(val) + return val + + log = [] + ret = return_true(log, 3) + assert ret is True + assert 3 == len(log) + + except Exception as ex: + result.append(ex) + else: + result.append('success') + + t = threading.Thread(target=check) + t.start() + t.join() + + assert len(result) == 1 + assert result[0] == 'success' + + +def test_on_predicate_constant_iterable(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + waits = [1, 2, 3, 6, 9] + backoffs = [] + giveups = [] + successes = [] + + @backoff.on_predicate( + backoff.constant, + interval=waits, + on_backoff=backoffs.append, + on_giveup=giveups.append, + on_success=successes.append, + jitter=None, + ) + def falsey(): + return False + + assert not falsey() + + assert len(backoffs) == len(waits) + for i, wait in enumerate(waits): + assert backoffs[i]['wait'] == wait + + assert len(giveups) == 1 + assert len(successes) == 0 + + +def test_on_exception_in_thread(monkeypatch): + monkeypatch.setattr('time.sleep', lambda x: None) + + result = [] + + def check(): + try: + @backoff.on_exception(backoff.expo, KeyError) + def keyerror_then_true(log, n): + if len(log) == n: + return True + e = KeyError() + log.append(e) + raise e + + log = [] + assert keyerror_then_true(log, 3) is True + assert 3 == len(log) + + except Exception as ex: + result.append(ex) + else: + result.append('success') + + t = threading.Thread(target=check) + t.start() + t.join() + + assert len(result) == 1 + assert result[0] == 'success' + + +def test_on_exception_logger_default(monkeypatch, caplog): + monkeypatch.setattr('time.sleep', lambda x: None) + + logger = logging.getLogger('backoff') + handler = logging.StreamHandler(sys.stdout) + logger.addHandler(handler) + + @backoff.on_exception(backoff.expo, 
KeyError, max_tries=3) + def key_error(): + raise KeyError() + + with caplog.at_level(logging.INFO): + with pytest.raises(KeyError): + key_error() + + assert len(caplog.records) == 3 # 2 backoffs and 1 giveup + for record in caplog.records: + assert record.name == 'backoff' + + +def test_on_exception_logger_none(monkeypatch, caplog): + monkeypatch.setattr('time.sleep', lambda x: None) + + logger = logging.getLogger('backoff') + handler = logging.StreamHandler(sys.stdout) + logger.addHandler(handler) + + @backoff.on_exception(backoff.expo, KeyError, max_tries=3, logger=None) + def key_error(): + raise KeyError() + + with caplog.at_level(logging.INFO): + with pytest.raises(KeyError): + key_error() + + assert not caplog.records + + +def test_on_exception_logger_user(monkeypatch, caplog): + monkeypatch.setattr('time.sleep', lambda x: None) + + logger = logging.getLogger('my-logger') + handler = logging.StreamHandler(sys.stdout) + logger.addHandler(handler) + + @backoff.on_exception(backoff.expo, KeyError, max_tries=3, logger=logger) + def key_error(): + raise KeyError() + + with caplog.at_level(logging.INFO): + with pytest.raises(KeyError): + key_error() + + assert len(caplog.records) == 3 # 2 backoffs and 1 giveup + for record in caplog.records: + assert record.name == 'my-logger' + + +def test_on_exception_logger_user_str(monkeypatch, caplog): + monkeypatch.setattr('time.sleep', lambda x: None) + + logger = logging.getLogger('my-logger') + handler = logging.StreamHandler(sys.stdout) + logger.addHandler(handler) + + @backoff.on_exception(backoff.expo, KeyError, max_tries=3, + logger='my-logger') + def key_error(): + raise KeyError() + + with caplog.at_level(logging.INFO): + with pytest.raises(KeyError): + key_error() + + assert len(caplog.records) == 3 # 2 backoffs and 1 giveup + for record in caplog.records: + assert record.name == 'my-logger' diff --git a/tests/test_jitter.py b/tests/test_jitter.py new file mode 100644 index 0000000..041b9fb --- /dev/null +++ 
b/tests/test_jitter.py @@ -0,0 +1,10 @@ +# coding:utf-8 +import backoff + + +def test_full_jitter(): + for input in range(100): + for i in range(100): + jitter = backoff.full_jitter(input) + assert jitter >= 0 + assert jitter <= input diff --git a/tests/test_wait_gen.py b/tests/test_wait_gen.py new file mode 100644 index 0000000..eb60416 --- /dev/null +++ b/tests/test_wait_gen.py @@ -0,0 +1,53 @@ +# coding:utf-8 +import backoff + + +def test_expo(): + gen = backoff.expo() + for i in range(9): + assert 2**i == next(gen) + + +def test_expo_base3(): + gen = backoff.expo(base=3) + for i in range(9): + assert 3**i == next(gen) + + +def test_expo_factor3(): + gen = backoff.expo(factor=3) + for i in range(9): + assert 3 * 2**i == next(gen) + + +def test_expo_base3_factor5(): + gen = backoff.expo(base=3, factor=5) + for i in range(9): + assert 5 * 3**i == next(gen) + + +def test_expo_max_value(): + gen = backoff.expo(max_value=2**4) + expected = [1, 2, 4, 8, 16, 16, 16] + for expect in expected: + assert expect == next(gen) + + +def test_fibo(): + gen = backoff.fibo() + expected = [1, 1, 2, 3, 5, 8, 13] + for expect in expected: + assert expect == next(gen) + + +def test_fibo_max_value(): + gen = backoff.fibo(max_value=8) + expected = [1, 1, 2, 3, 5, 8, 8, 8] + for expect in expected: + assert expect == next(gen) + + +def test_constant(): + gen = backoff.constant(interval=3) + for i in range(9): + assert 3 == next(gen)