New upstream version 1.10.0
Sophie Brun
3 years ago
0 | [report] | |
1 | show_missing = True | |
2 | ||
3 | # Regexes for lines to exclude from consideration | |
4 | exclude_lines = | |
5 | # Have to re-enable the standard pragma | |
6 | pragma: no cover | |
7 | pragma: python=3\.5 |
0 | __pycache__/ | |
1 | *.pyc | |
2 | .coverage | |
3 | virtualenv | |
4 | MANIFEST | |
5 | build/ | |
6 | dist/ | |
7 | .cache/ | |
8 | *.egg-info | |
9 | poetry.lock | |
10 | .vscode |
0 | language: python | |
1 | python: | |
2 | - "2.7" | |
3 | - "3.5" | |
4 | - "3.6" | |
5 | - "3.7" | |
6 | - "3.8" | |
7 | matrix: | |
8 | include: | |
9 | - python: "3.5" | |
10 | env: PYTHONASYNCIODEBUG=x | |
11 | - python: "3.6" | |
12 | env: PYTHONASYNCIODEBUG=x | |
13 | - python: "3.7" | |
14 | env: PYTHONASYNCIODEBUG=x | |
15 | - python: "3.8" | |
16 | env: PYTHONASYNCIODEBUG=x | |
17 | ||
18 | before_install: | |
19 | - pip install poetry more-itertools | |
20 | install: | |
21 | - poetry install | |
22 | script: poetry run make check | |
23 | after_success: coveralls | |
24 | notifications: | |
25 | email: false |
0 | # Change Log | |
1 | ||
2 | ## [v1.0.3] - 2014-06-05 | |
3 | ### Changed | |
4 | - Make logging unicode safe | |
5 | - Log on_predicate backoff as INFO rather than ERROR | |
6 | ||
7 | ## [v1.0.4] - 2014-08-12 | |
8 | ### Added | |
9 | - Python 2.6 support from @Bonko | |
10 | - Python 3.0 support from @robyoung | |
11 | - Run tests in Travis from @robyoung | |
12 | ||
13 | ## [v1.0.5] - 2015-02-03 | |
14 | ### Changed | |
15 | - Add a default interval of 1 second for the constant generator | |
16 | - Improve on_predicate stop condition avoiding extra sleep | |
17 | ||
18 | ## [v1.0.6] - 2015-02-10 | |
19 | ### Added | |
20 | - Coveralls.io integration from @singingwolfboy | |
21 | ||
22 | ### Changed | |
23 | - Fix logging bug for function calls with tuple params | |
24 | ||
25 | ## [v1.0.7] - 2015-02-10 | |
26 | ||
27 | ### Changed | |
28 | - Fix string formatting for python 2.6 | |
29 | ||
30 | ## [v1.1.0] - 2015-12-08 | |
31 | ### Added | |
32 | - Event handling for success, backoff, and giveup | |
33 | - Change log | |
34 | ||
35 | ### Changed | |
36 | - Docs and test for multi exception invocations | |
37 | - Update dev environment test dependencies | |
38 | ||
39 | ## [v1.2.0] - 2016-05-26 | |
40 | ### Added | |
41 | - 'Full jitter' algorithm from @jonascheng | |
42 | ||
43 | ### Changed | |
44 | - Jitter function now accepts raw value and returns jittered value | |
45 | - Change README to reST for the benefit of pypi :( | |
46 | - Remove docstring doc generation and make README canonical | |
47 | ||
48 | ## [v1.2.1] - 2016-05-27 | |
49 | ### Changed | |
50 | - Documentation fixes | |
51 | ||
52 | ## [v1.3.0] - 2016-08-08 | |
53 | ### Added | |
54 | - Support runtime configuration with optional callable kwargs | |
55 | - Add giveup kwarg for exception inspection | |
56 | ||
57 | ### Changed | |
58 | - Documentation fixes | |
59 | ||
60 | ## [v1.3.1] - 2016-08-08 | |
61 | ### Changed | |
62 | - Include README.rst in source distribution (fixes package) | |
63 | ||
64 | ## [v1.3.2] - 2016-11-18 | |
65 | ### Changed | |
66 | - Don't log retried args and kwargs by default | |
67 | - README.rst syntax highlighting from @dethi | |
68 | ||
69 | ## [v1.4.0] - 2017-02-05 | |
70 | ### Added | |
71 | - Async support via `asyncio` coroutines (Python 3.4) from @rutsky | |
72 | ||
73 | ### Changed | |
74 | - Refactor `backoff` module into package with identical API | |
75 | ||
76 | ## [v1.4.1] - 2017-04-21 | |
77 | ### Added | |
78 | - Expose __version__ at package root | |
79 | ||
80 | ### Changed | |
81 | - Fix checking for running sync version in coroutine in case when event | |
82 | loop is not set from @rutsky | |
83 | ||
84 | ## [v1.4.2] - 2017-04-25 | |
85 | ### Changed | |
86 | ||
87 | - Use documented logger name https://github.com/litl/backoff/pull/32 | |
88 | from @pquentin | |
89 | ||
90 | ## [v1.4.3] - 2017-05-22 | |
91 | ### Changed | |
92 | ||
93 | - Add license to source distribution | |
94 | ||
95 | ## [v1.5.0] - 2018-04-11 | |
96 | ### Changed | |
97 | ||
98 | - Add max_time keyword argument | |
99 | ||
100 | ## [v1.6.0] - 2018-07-14 | |
101 | ### Changed | |
102 | ||
103 | - Change default log level from ERROR to INFO | |
104 | - Log retries on exception as INFO | |
105 | ||
106 | ## [v1.7.0] - 2018-11-23 | |
107 | ### Changed | |
108 | ||
109 | - Support Python 3.7 | |
110 | - Drop support for async in Python 3.4 | |
111 | - Drop support for Python 2.6 | |
112 | - Update development dependencies | |
113 | - Use poetry for dependencies and packaging | |
114 | ||
115 | ## [v1.8.0] - 2018-12-20 | |
116 | ### Changed | |
117 | ||
118 | - Give up on StopIteration raised in wait generators | |
119 | - Iterable intervals for constant wait_gen for predefined wait sequences | |
120 | - Nullary jitter signature deprecation warning | |
121 | - Custom loggers | |
122 | ||
123 | ## [v1.8.1] - 2019-10-11 | |
124 | ### Changed | |
125 | ||
126 | - Use arguments in log messages rather than fully formatting log | |
127 | https://github.com/litl/backoff/pull/82 from @lbernick | |
128 | ||
129 | ## [v1.9.0] 2019-11-16 | |
130 | ### Changed | |
131 | ||
132 | - Support python 3.8 | |
133 | ||
134 | ## [v1.9.1] 2019-11-18 | |
135 | ### Changed | |
136 | ||
137 | - Include tests and changelog in distribution | |
138 | ||
139 | ## [v1.9.2] 2019-11-19 | |
140 | ### Changed | |
141 | ||
142 | - Don't include tests and changelog in distribution | |
143 | ||
144 | ## [v1.10.0] 2019-12-7 | |
145 | ### Changed | |
146 | ||
147 | - Allow sync decorator call from async function |
0 | The MIT License (MIT) | |
1 | ||
2 | Copyright (c) 2014 litl, LLC. | |
3 | ||
4 | Permission is hereby granted, free of charge, to any person obtaining a copy | |
5 | of this software and associated documentation files (the "Software"), to deal | |
6 | in the Software without restriction, including without limitation the rights | |
7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
8 | copies of the Software, and to permit persons to whom the Software is | |
9 | furnished to do so, subject to the following conditions: | |
10 | ||
11 | The above copyright notice and this permission notice shall be included in | |
12 | all copies or substantial portions of the Software. | |
13 | ||
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
20 | THE SOFTWARE. |
0 | PY_VERSION := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1))) | |
1 | PY_MAJOR := $(word 1,${PY_VERSION}) | |
2 | PY_MINOR := $(word 2,${PY_VERSION}) | |
3 | PY_GTE_35 = $(shell echo $(PY_MAJOR).$(PY_MINOR)\>=3.5 | bc) | |
4 | ||
5 | ||
6 | .PHONY: all flake8 clean test check | |
7 | ||
8 | all: | |
9 | @echo 'flake8 check flake8 compliance' | |
10 | @echo 'clean cleanup the source tree' | |
11 | @echo 'test run the unit tests' | |
12 | @echo 'check make sure you are ready to commit' | |
13 | ||
14 | flake8: | |
15 | ifeq ($(PY_GTE_35),1) | |
16 | @flake8 backoff tests | |
17 | else | |
18 | @flake8 --exclude tests/python35,backoff/_async.py backoff tests | |
19 | endif | |
20 | ||
21 | clean: | |
22 | @find . -name "*.pyc" -delete | |
23 | @find . -name "__pycache__" -delete | |
24 | @rm -rf build dist .coverage MANIFEST | |
25 | ||
26 | test: clean | |
27 | ifeq ($(PY_GTE_35),1) | |
28 | @PYTHONPATH=. py.test --cov-config .coveragerc-py35 --cov backoff tests | |
29 | else | |
30 | @PYTHONPATH=. py.test --cov-config .coveragerc-py2 --cov backoff tests/test_*.py | |
31 | endif | |
32 | ||
33 | check: flake8 test | |
34 | @coverage report | grep 100% >/dev/null || { echo 'Unit tests coverage is incomplete.'; exit 1; } |
0 | backoff | |
1 | ======= | |
2 | ||
3 | .. image:: https://travis-ci.org/litl/backoff.svg?branch=master | |
4 | :target: https://travis-ci.org/litl/backoff?branch=master | |
5 | .. image:: https://coveralls.io/repos/litl/backoff/badge.svg?branch=master | |
6 | :target: https://coveralls.io/r/litl/backoff?branch=master | |
7 | .. image:: https://img.shields.io/pypi/v/backoff.svg | |
8 | :target: https://pypi.python.org/pypi/backoff | |
9 | ||
10 | **Function decoration for backoff and retry** | |
11 | ||
12 | This module provides function decorators which can be used to wrap a | |
13 | function such that it will be retried until some condition is met. It | |
14 | is meant to be of use when accessing unreliable resources with the | |
15 | potential for intermittent failures i.e. network resources and external | |
16 | APIs. Somewhat more generally, it may also be of use for dynamically | |
17 | polling resources for externally generated content. | |
18 | ||
19 | Decorators support both regular functions for synchronous code and | |
20 | `asyncio <https://docs.python.org/3/library/asyncio.html>`_'s coroutines | |
21 | for asynchronous code. | |
22 | ||
23 | Examples | |
24 | ======== | |
25 | ||
26 | Since Kenneth Reitz's `requests <http://python-requests.org>`_ module | |
27 | has become a de facto standard for synchronous HTTP clients in Python, | |
28 | networking examples below are written using it, but it is in no way required | |
29 | by the backoff module. | |
30 | ||
31 | @backoff.on_exception | |
32 | --------------------- | |
33 | ||
34 | The ``on_exception`` decorator is used to retry when a specified exception | |
35 | is raised. Here's an example using exponential backoff when any | |
36 | ``requests`` exception is raised: | |
37 | ||
38 | .. code-block:: python | |
39 | ||
40 | @backoff.on_exception(backoff.expo, | |
41 | requests.exceptions.RequestException) | |
42 | def get_url(url): | |
43 | return requests.get(url) | |
44 | ||
45 | The decorator will also accept a tuple of exceptions for cases where | |
46 | the same backoff behavior is desired for more than one exception type: | |
47 | ||
48 | .. code-block:: python | |
49 | ||
50 | @backoff.on_exception(backoff.expo, | |
51 | (requests.exceptions.Timeout, | |
52 | requests.exceptions.ConnectionError)) | |
53 | def get_url(url): | |
54 | return requests.get(url) | |
55 | ||
56 | **Give Up Conditions** | |
57 | ||
58 | Optional keyword arguments can specify conditions under which to give | |
59 | up. | |
60 | ||
61 | The keyword argument ``max_time`` specifies the maximum amount | |
62 | of total time in seconds that can elapse before giving up. | |
63 | ||
64 | .. code-block:: python | |
65 | ||
66 | @backoff.on_exception(backoff.expo, | |
67 | requests.exceptions.RequestException, | |
68 | max_time=60) | |
69 | def get_url(url): | |
70 | return requests.get(url) | |
71 | ||
72 | ||
73 | Keyword argument ``max_tries`` specifies the maximum number of calls | |
74 | to make to the target function before giving up. | |
75 | ||
76 | .. code-block:: python | |
77 | ||
78 | @backoff.on_exception(backoff.expo, | |
79 | requests.exceptions.RequestException, | |
80 | max_tries=8, | |
81 | jitter=None) | |
82 | def get_url(url): | |
83 | return requests.get(url) | |
84 | ||
85 | ||
86 | In some cases the raised exception instance itself may need to be | |
87 | inspected in order to determine if it is a retryable condition. The | |
88 | ``giveup`` keyword arg can be used to specify a function which accepts | |
89 | the exception and returns a truthy value if the exception should not | |
90 | be retried: | |
91 | ||
92 | .. code-block:: python | |
93 | ||
94 | def fatal_code(e): | |
95 | return 400 <= e.response.status_code < 500 | |
96 | ||
97 | @backoff.on_exception(backoff.expo, | |
98 | requests.exceptions.RequestException, | |
99 | max_time=300, | |
100 | giveup=fatal_code) | |
101 | def get_url(url): | |
102 | return requests.get(url) | |
103 | ||
104 | When a give up event occurs, the exception in question is reraised | |
105 | and so code calling an `on_exception`-decorated function may still | |
106 | need to do exception handling. | |
107 | ||
108 | @backoff.on_predicate | |
109 | --------------------- | |
110 | ||
111 | The ``on_predicate`` decorator is used to retry when a particular | |
112 | condition is true of the return value of the target function. This may | |
113 | be useful when polling a resource for externally generated content. | |
114 | ||
115 | Here's an example which uses a fibonacci sequence backoff when the | |
116 | return value of the target function is the empty list: | |
117 | ||
118 | .. code-block:: python | |
119 | ||
120 | @backoff.on_predicate(backoff.fibo, lambda x: x == [], max_value=13) | |
121 | def poll_for_messages(queue): | |
122 | return queue.get() | |
123 | ||
124 | Extra keyword arguments are passed when initializing the | |
125 | wait generator, so the ``max_value`` param above is passed as a keyword | |
126 | arg when initializing the fibo generator. | |
127 | ||
128 | When not specified, the predicate param defaults to the falsey test, | |
129 | so the above can more concisely be written: | |
130 | ||
131 | .. code-block:: python | |
132 | ||
133 | @backoff.on_predicate(backoff.fibo, max_value=13) | |
134 | def poll_for_message(queue): | |
135 | return queue.get() | |
136 | ||
137 | More simply, a function which continues polling every second until it | |
138 | gets a non-falsey result could be defined like this: | |
139 | ||
140 | .. code-block:: python | |
141 | ||
142 | @backoff.on_predicate(backoff.constant, interval=1) | |
143 | def poll_for_message(queue): | |
144 | return queue.get() | |
145 | ||
146 | Jitter | |
147 | ------ | |
148 | ||
149 | A jitter algorithm can be supplied with the ``jitter`` keyword arg to | |
150 | either of the backoff decorators. This argument should be a function | |
151 | accepting the original unadulterated backoff value and returning its | |
152 | jittered counterpart. | |
153 | ||
154 | As of version 1.2, the default jitter function ``backoff.full_jitter`` | |
155 | implements the 'Full Jitter' algorithm as defined in the AWS | |
156 | Architecture Blog's `Exponential Backoff And Jitter | |
157 | <https://www.awsarchitectureblog.com/2015/03/backoff.html>`_ post. | |
158 | Note that with this algorithm, the time yielded by the wait generator | |
159 | is actually the *maximum* amount of time to wait. | |
160 | ||
161 | Previous versions of backoff defaulted to adding some random number of | |
162 | milliseconds (up to 1s) to the raw sleep value. If desired, this | |
163 | behavior is now available as ``backoff.random_jitter``. | |
164 | ||
165 | Using multiple decorators | |
166 | ------------------------- | |
167 | ||
168 | The backoff decorators may also be combined to specify different | |
169 | backoff behavior for different cases: | |
170 | ||
171 | .. code-block:: python | |
172 | ||
173 | @backoff.on_predicate(backoff.fibo, max_value=13) | |
174 | @backoff.on_exception(backoff.expo, | |
175 | requests.exceptions.HTTPError, | |
176 | max_time=60) | |
177 | @backoff.on_exception(backoff.expo, | |
178 | requests.exceptions.Timeout, | |
179 | max_time=300) | |
180 | def poll_for_message(queue): | |
181 | return queue.get() | |
182 | ||
183 | Runtime Configuration | |
184 | --------------------- | |
185 | ||
186 | The decorator functions ``on_exception`` and ``on_predicate`` are | |
187 | generally evaluated at import time. This is fine when the keyword args | |
188 | are passed as constant values, but suppose we want to consult a | |
189 | dictionary with configuration options that only become available at | |
190 | runtime. The relevant values are not available at import time. Instead, | |
191 | decorator functions can be passed callables which are evaluated at | |
192 | runtime to obtain the value: | |
193 | ||
194 | .. code-block:: python | |
195 | ||
196 | def lookup_max_time(): | |
197 | # pretend we have a global reference to 'app' here | |
198 | # and that it has a dictionary-like 'config' property | |
199 | return app.config["BACKOFF_MAX_TIME"] | |
200 | ||
201 | @backoff.on_exception(backoff.expo, | |
202 | ValueError, | |
203 | max_time=lookup_max_time) | |
204 | ||
205 | Event handlers | |
206 | -------------- | |
207 | ||
208 | Both backoff decorators optionally accept event handler functions | |
209 | using the keyword arguments ``on_success``, ``on_backoff``, and ``on_giveup``. | |
210 | This may be useful in reporting statistics or performing other custom | |
211 | logging. | |
212 | ||
213 | Handlers must be callables with a unary signature accepting a dict | |
214 | argument. This dict contains the details of the invocation. Valid keys | |
215 | include: | |
216 | ||
217 | * *target*: reference to the function or method being invoked | |
218 | * *args*: positional arguments to func | |
219 | * *kwargs*: keyword arguments to func | |
220 | * *tries*: number of invocation tries so far | |
221 | * *elapsed*: elapsed time in seconds so far | |
222 | * *wait*: seconds to wait (``on_backoff`` handler only) | |
223 | * *value*: value triggering backoff (``on_predicate`` decorator only) | |
224 | ||
225 | A handler which prints the details of the backoff event could be | |
226 | implemented like so: | |
227 | ||
228 | .. code-block:: python | |
229 | ||
230 | def backoff_hdlr(details): | |
231 | print ("Backing off {wait:0.1f} seconds after {tries} tries " | |
232 | "calling function {target} with args {args} and kwargs " | |
233 | "{kwargs}".format(**details)) | |
234 | ||
235 | @backoff.on_exception(backoff.expo, | |
236 | requests.exceptions.RequestException, | |
237 | on_backoff=backoff_hdlr) | |
238 | def get_url(url): | |
239 | return requests.get(url) | |
240 | ||
241 | **Multiple handlers per event type** | |
242 | ||
243 | In all cases, iterables of handler functions are also accepted, which | |
244 | are called in turn. For example, you might provide a simple list of | |
245 | handler functions as the value of the ``on_backoff`` keyword arg: | |
246 | ||
247 | .. code-block:: python | |
248 | ||
249 | @backoff.on_exception(backoff.expo, | |
250 | requests.exceptions.RequestException, | |
251 | on_backoff=[backoff_hdlr1, backoff_hdlr2]) | |
252 | def get_url(url): | |
253 | return requests.get(url) | |
254 | ||
255 | **Getting exception info** | |
256 | ||
257 | In the case of the ``on_exception`` decorator, all ``on_backoff`` and | |
258 | ``on_giveup`` handlers are called from within the except block for the | |
259 | exception being handled. Therefore exception info is available to the | |
260 | handler functions via the python standard library, specifically | |
261 | ``sys.exc_info()`` or the ``traceback`` module. | |
262 | ||
263 | Asynchronous code | |
264 | ----------------- | |
265 | ||
266 | Backoff supports asynchronous execution in Python 3.5 and above. | |
267 | ||
268 | To use backoff in asynchronous code based on | |
269 | `asyncio <https://docs.python.org/3/library/asyncio.html>`_ | |
270 | you simply need to apply ``backoff.on_exception`` or ``backoff.on_predicate`` | |
271 | to coroutines. | |
272 | You can also use coroutines for the ``on_success``, ``on_backoff``, and | |
273 | ``on_giveup`` event handlers, with the interface otherwise being identical. | |
274 | ||
275 | The following examples use `aiohttp <https://aiohttp.readthedocs.io/>`_ | |
276 | asynchronous HTTP client/server library. | |
277 | ||
278 | .. code-block:: python | |
279 | ||
280 | @backoff.on_exception(backoff.expo, aiohttp.ClientError, max_time=60) | |
281 | async def get_url(url): | |
282 | async with aiohttp.ClientSession() as session: | |
283 | async with session.get(url) as response: | |
284 | return await response.text() | |
285 | ||
286 | Logging configuration | |
287 | --------------------- | |
288 | ||
289 | By default, backoff and retry attempts are logged to the 'backoff' | |
290 | logger. By default, this logger is configured with a NullHandler, so | |
291 | there will be nothing output unless you configure a handler. | |
292 | Programmatically, this might be accomplished with something as simple | |
293 | as: | |
294 | ||
295 | .. code-block:: python | |
296 | ||
297 | logging.getLogger('backoff').addHandler(logging.StreamHandler()) | |
298 | ||
299 | The default logging level is INFO, which corresponds to logging | |
300 | anytime a retry event occurs. If you would instead like to log | |
301 | only when a giveup event occurs, set the logger level to ERROR. | |
302 | ||
303 | .. code-block:: python | |
304 | ||
305 | logging.getLogger('backoff').setLevel(logging.ERROR) | |
306 | ||
307 | It is also possible to specify an alternate logger with the ``logger`` | |
308 | keyword argument. If a string value is specified the logger will be | |
309 | looked up by name. | |
310 | ||
311 | .. code-block:: python | |
312 | ||
313 | @backoff.on_exception(backoff.expo, | |
314 | requests.exceptions.RequestException, | |
315 | logger='my_logger') | |
316 | # ... | |
317 | ||
318 | It is also supported to specify a Logger (or LoggerAdapter) object | |
319 | directly. | |
320 | ||
321 | .. code-block:: python | |
322 | ||
323 | my_logger = logging.getLogger('my_logger') | |
324 | my_handler = logging.StreamHandler() | |
325 | my_logger.addHandler(my_handler) | |
326 | my_logger.setLevel(logging.ERROR) | |
327 | ||
328 | @backoff.on_exception(backoff.expo, | |
329 | requests.exceptions.RequestException, | |
330 | logger=my_logger) | |
331 | # ... | |
332 | ||
333 | Default logging can be disabled all together by specifying | |
334 | ``logger=None``. In this case, if desired alternative logging behavior | |
335 | could be defined by using custom event handlers. |
# coding:utf-8
"""
Function decoration for backoff and retry

This module provides function decorators which can be used to wrap a
function such that it will be retried until some condition is met. It
is meant to be of use when accessing unreliable resources with the
potential for intermittent failures i.e. network resources and external
APIs. Somewhat more generally, it may also be of use for dynamically
polling resources for externally generated content.

For examples and full documentation see the README at
https://github.com/litl/backoff
"""
# Re-export the public API from the private implementation modules.
from backoff._decorator import on_predicate, on_exception
from backoff._jitter import full_jitter, random_jitter
from backoff._wait_gen import constant, expo, fibo

# Names exported by `from backoff import *`; also documents the public API.
__all__ = [
    'on_predicate',
    'on_exception',
    'constant',
    'expo',
    'fibo',
    'full_jitter',
    'random_jitter'
]

__version__ = '1.10.0'
0 | # coding:utf-8 | |
1 | import datetime | |
2 | import functools | |
3 | import asyncio # Python 3.5 code and syntax is allowed in this file | |
4 | from datetime import timedelta | |
5 | ||
6 | from backoff._common import (_init_wait_gen, _maybe_call, _next_wait) | |
7 | ||
8 | ||
9 | def _ensure_coroutine(coro_or_func): | |
10 | if asyncio.iscoroutinefunction(coro_or_func): | |
11 | return coro_or_func | |
12 | else: | |
13 | @functools.wraps(coro_or_func) | |
14 | async def f(*args, **kwargs): | |
15 | return coro_or_func(*args, **kwargs) | |
16 | return f | |
17 | ||
18 | ||
def _ensure_coroutines(coros_or_funcs):
    """Normalize every element of *coros_or_funcs* to a coroutine function."""
    return list(map(_ensure_coroutine, coros_or_funcs))
21 | ||
22 | ||
23 | async def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra): | |
24 | details = { | |
25 | 'target': target, | |
26 | 'args': args, | |
27 | 'kwargs': kwargs, | |
28 | 'tries': tries, | |
29 | 'elapsed': elapsed, | |
30 | } | |
31 | details.update(extra) | |
32 | for hdlr in hdlrs: | |
33 | await hdlr(details) | |
34 | ||
35 | ||
def retry_predicate(target, wait_gen, predicate,
                    max_tries, max_time, jitter,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap coroutine *target* to retry while *predicate*(return value) is truthy.

    Args:
        target: the coroutine function to retry.
        wait_gen: generator function yielding successive wait times (seconds).
        predicate: unary function on the target's return value; a truthy
            result triggers another backoff/retry cycle.
        max_tries: maximum number of calls before giving up (fixed value or
            nullary callable evaluated per invocation).
        max_time: maximum total elapsed seconds, or None (fixed value or
            nullary callable evaluated per invocation).
        jitter: unary function applied to each raw wait value, or None.
        on_success, on_backoff, on_giveup: iterables of event handlers;
            plain callables are promoted to coroutines.
        wait_gen_kwargs: keyword args used to initialize wait_gen.

    Returns:
        An async wrapper with the same call signature as *target*.
    """
    # Handlers may be plain callables; normalize them all to coroutines.
    on_success = _ensure_coroutines(on_success)
    on_backoff = _ensure_coroutines(on_backoff)
    on_giveup = _ensure_coroutines(on_giveup)

    # Easy to implement, please report if you need this.
    assert not asyncio.iscoroutinefunction(max_tries)
    assert not asyncio.iscoroutinefunction(jitter)

    assert asyncio.iscoroutinefunction(target)

    @functools.wraps(target)
    async def retry(*args, **kwargs):

        # change names because python 2.x doesn't have nonlocal;
        # these may be callables and are re-evaluated on every invocation
        max_tries_ = _maybe_call(max_tries)
        max_time_ = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = (target, args, kwargs, tries, elapsed)

            ret = await target(*args, **kwargs)
            if predicate(ret):
                # The call "failed" per the predicate: decide whether to
                # give up (returning the last value) or back off and retry.
                max_tries_exceeded = (tries == max_tries_)
                max_time_exceeded = (max_time_ is not None and
                                     elapsed >= max_time_)

                if max_tries_exceeded or max_time_exceeded:
                    await _call_handlers(on_giveup, *details, value=ret)
                    break

                try:
                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
                except StopIteration:
                    # An exhausted wait generator also means giving up.
                    await _call_handlers(on_giveup, *details, value=ret)
                    break

                await _call_handlers(on_backoff, *details, value=ret,
                                     wait=seconds)

                # Note: there is no convenient way to pass explicit event
                # loop to decorator, so here we assume that either default
                # thread event loop is set and correct (it mostly is
                # by default), or Python >= 3.5.3 or Python >= 3.6 is used
                # where loop.get_event_loop() in coroutine guaranteed to
                # return correct value.
                # See for details:
                # <https://groups.google.com/forum/#!topic/python-tulip/yF9C-rFpiKk>
                # <https://bugs.python.org/issue28613>
                await asyncio.sleep(seconds)
                continue
            else:
                await _call_handlers(on_success, *details, value=ret)
                break

        return ret

    return retry
102 | ||
103 | ||
def retry_exception(target, wait_gen, exception,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap coroutine *target* to retry when *exception* is raised.

    Args:
        target: the coroutine function to retry.
        wait_gen: generator function yielding successive wait times (seconds).
        exception: exception type (or tuple of types) that triggers a retry.
        max_tries: maximum number of calls before giving up (fixed value or
            nullary callable evaluated per invocation).
        max_time: maximum total elapsed seconds, or None (fixed value or
            nullary callable evaluated per invocation).
        jitter: unary function applied to each raw wait value, or None.
        giveup: predicate on the caught exception; a truthy result means the
            exception is not retryable (promoted to a coroutine if needed).
        on_success, on_backoff, on_giveup: iterables of event handlers;
            plain callables are promoted to coroutines.
        wait_gen_kwargs: keyword args used to initialize wait_gen.

    Returns:
        An async wrapper with the same call signature as *target*; on giveup
        the triggering exception is re-raised to the caller.
    """
    # Handlers may be plain callables; normalize them all to coroutines.
    on_success = _ensure_coroutines(on_success)
    on_backoff = _ensure_coroutines(on_backoff)
    on_giveup = _ensure_coroutines(on_giveup)
    giveup = _ensure_coroutine(giveup)

    # Easy to implement, please report if you need this.
    assert not asyncio.iscoroutinefunction(max_tries)
    assert not asyncio.iscoroutinefunction(jitter)

    @functools.wraps(target)
    async def retry(*args, **kwargs):
        # change names because python 2.x doesn't have nonlocal;
        # these may be callables and are re-evaluated on every invocation
        max_tries_ = _maybe_call(max_tries)
        max_time_ = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = (target, args, kwargs, tries, elapsed)

            try:
                ret = await target(*args, **kwargs)
            except exception as e:
                giveup_result = await giveup(e)
                max_tries_exceeded = (tries == max_tries_)
                max_time_exceeded = (max_time_ is not None and
                                     elapsed >= max_time_)

                if giveup_result or max_tries_exceeded or max_time_exceeded:
                    await _call_handlers(on_giveup, *details)
                    # bare raise re-raises the active exception unchanged
                    raise

                try:
                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
                except StopIteration:
                    # Wait generator exhausted: give up and re-raise the
                    # original exception (not the StopIteration).
                    await _call_handlers(on_giveup, *details)
                    raise e

                await _call_handlers(on_backoff, *details, wait=seconds)

                # Note: there is no convenient way to pass explicit event
                # loop to decorator, so here we assume that either default
                # thread event loop is set and correct (it mostly is
                # by default), or Python >= 3.5.3 or Python >= 3.6 is used
                # where loop.get_event_loop() in coroutine guaranteed to
                # return correct value.
                # See for details:
                # <https://groups.google.com/forum/#!topic/python-tulip/yF9C-rFpiKk>
                # <https://bugs.python.org/issue28613>
                await asyncio.sleep(seconds)
            else:
                await _call_handlers(on_success, *details)

                return ret
    return retry
0 | # coding:utf-8 | |
1 | ||
2 | import functools | |
3 | import logging | |
4 | import sys | |
5 | import traceback | |
6 | import warnings | |
7 | ||
8 | ||
# Use module-specific logger with a default null handler.
# Library convention: emit nothing unless the application attaches its own
# handler to the 'backoff' logger.
_logger = logging.getLogger('backoff')
_logger.addHandler(logging.NullHandler())  # pragma: no cover
_logger.setLevel(logging.INFO)
13 | ||
14 | ||
15 | # Evaluate arg that can be either a fixed value or a callable. | |
16 | def _maybe_call(f, *args, **kwargs): | |
17 | return f(*args, **kwargs) if callable(f) else f | |
18 | ||
19 | ||
def _init_wait_gen(wait_gen, wait_gen_kwargs):
    """Instantiate *wait_gen*, resolving any callable keyword values first."""
    resolved = {}
    for key, value in wait_gen_kwargs.items():
        resolved[key] = _maybe_call(value)
    return wait_gen(**resolved)
23 | ||
24 | ||
25 | def _next_wait(wait, jitter, elapsed, max_time): | |
26 | value = next(wait) | |
27 | try: | |
28 | if jitter is not None: | |
29 | seconds = jitter(value) | |
30 | else: | |
31 | seconds = value | |
32 | except TypeError: | |
33 | warnings.warn( | |
34 | "Nullary jitter function signature is deprecated. Use " | |
35 | "unary signature accepting a wait value in seconds and " | |
36 | "returning a jittered version of it.", | |
37 | DeprecationWarning, | |
38 | stacklevel=2, | |
39 | ) | |
40 | ||
41 | seconds = value + jitter() | |
42 | ||
43 | # don't sleep longer than remaining alloted max_time | |
44 | if max_time is not None: | |
45 | seconds = min(seconds, max_time - elapsed) | |
46 | ||
47 | return seconds | |
48 | ||
49 | ||
50 | # Configure handler list with user specified handler and optionally | |
51 | # with a default handler bound to the specified logger. | |
52 | def _config_handlers(user_handlers, default_handler=None, logger=None): | |
53 | handlers = [] | |
54 | if logger is not None: | |
55 | # bind the specified logger to the default log handler | |
56 | log_handler = functools.partial(default_handler, logger=logger) | |
57 | handlers.append(log_handler) | |
58 | ||
59 | if user_handlers is None: | |
60 | return handlers | |
61 | ||
62 | # user specified handlers can either be an iterable of handlers | |
63 | # or a single handler. either way append them to the list. | |
64 | if hasattr(user_handlers, '__iter__'): | |
65 | # add all handlers in the iterable | |
66 | handlers += list(user_handlers) | |
67 | else: | |
68 | # append a single handler | |
69 | handlers.append(user_handlers) | |
70 | ||
71 | return handlers | |
72 | ||
73 | ||
74 | # Default backoff handler | |
75 | def _log_backoff(details, logger): | |
76 | msg = "Backing off %s(...) for %.1fs (%s)" | |
77 | log_args = [details['target'].__name__, details['wait']] | |
78 | ||
79 | exc_typ, exc, _ = sys.exc_info() | |
80 | if exc is not None: | |
81 | exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1] | |
82 | log_args.append(exc_fmt.rstrip("\n")) | |
83 | else: | |
84 | log_args.append(details['value']) | |
85 | logger.info(msg, *log_args) | |
86 | ||
87 | ||
88 | # Default giveup handler | |
89 | def _log_giveup(details, logger): | |
90 | msg = "Giving up %s(...) after %d tries (%s)" | |
91 | log_args = [details['target'].__name__, details['tries']] | |
92 | ||
93 | exc_typ, exc, _ = sys.exc_info() | |
94 | if exc is not None: | |
95 | exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1] | |
96 | log_args.append(exc_fmt.rstrip("\n")) | |
97 | else: | |
98 | log_args.append(details['value']) | |
99 | ||
100 | logger.error(msg, *log_args) |
0 | # coding:utf-8 | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | import logging | |
4 | import operator | |
5 | import sys | |
6 | ||
7 | from backoff._common import (_config_handlers, _log_backoff, _log_giveup) | |
8 | from backoff._jitter import full_jitter | |
9 | from backoff import _sync | |
10 | ||
11 | ||
# python 2.7 -> 3.x compatibility for str and unicode:
# on Python 3 the name `basestring` no longer exists, so referencing it
# raises NameError and we alias it to `str`. The pragma below is matched
# by the coverage exclude regex so the Python-3-only branch is not
# counted as missed under Python 2.
try:
    basestring
except NameError:  # pragma: python=3.5
    basestring = str
17 | ||
18 | ||
def on_predicate(wait_gen,
                 predicate=operator.not_,
                 max_tries=None,
                 max_time=None,
                 jitter=full_jitter,
                 on_success=None,
                 on_backoff=None,
                 on_giveup=None,
                 logger='backoff',
                 **wait_gen_kwargs):
    """Returns decorator for backoff and retry triggered by predicate.

    Args:
        wait_gen: A generator yielding successive wait times in
            seconds.
        predicate: A function which when called on the return value of
            the target function will trigger backoff when considered
            truthy. If not specified, the default behavior is to
            backoff on falsey return values.
        max_tries: The maximum number of attempts to make before giving
            up. In the case of failure, the result of the last attempt
            will be returned. The default value of None means there
            is no limit to the number of tries. If a callable is passed,
            it will be evaluated at runtime and its return value used.
        max_time: The maximum total amount of time to try for before
            giving up. If this time expires, the result of the last
            attempt will be returned. If a callable is passed, it will
            be evaluated at runtime and its return value used.
        jitter: A function of the value yielded by wait_gen returning
            the actual time to wait. This distributes wait times
            stochastically in order to avoid timing collisions across
            concurrent clients. Wait times are jittered by default
            using the full_jitter function. Jittering may be disabled
            altogether by passing jitter=None.
        on_success: Callable (or iterable of callables) with a unary
            signature to be called in the event of success. The
            parameter is a dict containing details about the invocation.
        on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
        on_giveup: Callable (or iterable of callables) with a unary
            signature to be called in the event that max_tries
            is exceeded. The parameter is a dict containing details
            about the invocation.
        logger: Name of logger or Logger object to log to. Defaults to
            'backoff'.
        **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
    """
    def decorate(target):
        # change names because python 2.x doesn't have nonlocal
        logger_ = logger
        if isinstance(logger_, basestring):
            logger_ = logging.getLogger(logger_)
        on_success_ = _config_handlers(on_success)
        on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
        on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)

        retry = None
        # Compare (major, minor) explicitly, consistent with on_exception.
        if sys.version_info[:2] >= (3, 5):  # pragma: python=3.5
            import asyncio

            # coroutine targets get the asyncio-based retry loop
            if asyncio.iscoroutinefunction(target):
                import backoff._async
                retry = backoff._async.retry_predicate

        if retry is None:
            retry = _sync.retry_predicate

        return retry(target, wait_gen, predicate,
                     max_tries, max_time, jitter,
                     on_success_, on_backoff_, on_giveup_,
                     wait_gen_kwargs)

    # Return a function which decorates a target with a retry loop.
    return decorate
97 | ||
98 | ||
def on_exception(wait_gen,
                 exception,
                 max_tries=None,
                 max_time=None,
                 jitter=full_jitter,
                 giveup=lambda e: False,
                 on_success=None,
                 on_backoff=None,
                 on_giveup=None,
                 logger='backoff',
                 **wait_gen_kwargs):
    """Returns decorator for backoff and retry triggered by exception.

    Args:
        wait_gen: A generator yielding successive wait times in
            seconds.
        exception: An exception type (or tuple of types) which triggers
            backoff.
        max_tries: The maximum number of attempts to make before giving
            up. Once exhausted, the exception will be allowed to escape.
            The default value of None means there is no limit to the
            number of tries. If a callable is passed, it will be
            evaluated at runtime and its return value used.
        max_time: The maximum total amount of time to try for before
            giving up. Once expired, the exception will be allowed to
            escape. If a callable is passed, it will be
            evaluated at runtime and its return value used.
        jitter: A function of the value yielded by wait_gen returning
            the actual time to wait. This distributes wait times
            stochastically in order to avoid timing collisions across
            concurrent clients. Wait times are jittered by default
            using the full_jitter function. Jittering may be disabled
            altogether by passing jitter=None.
        giveup: Function accepting an exception instance and
            returning whether or not to give up. Optional. The default
            is to always continue.
        on_success: Callable (or iterable of callables) with a unary
            signature to be called in the event of success. The
            parameter is a dict containing details about the invocation.
        on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
        on_giveup: Callable (or iterable of callables) with a unary
            signature to be called in the event that max_tries
            is exceeded. The parameter is a dict containing details
            about the invocation.
        logger: Name or Logger object to log to. Defaults to 'backoff'.
        **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
    """
    def decorate(target):
        # change names because python 2.x doesn't have nonlocal
        logger_ = logger
        if isinstance(logger_, basestring):
            logger_ = logging.getLogger(logger_)
        on_success_ = _config_handlers(on_success)
        on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
        on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)

        retry = None
        if sys.version_info[:2] >= (3, 5):  # pragma: python=3.5
            import asyncio

            # coroutine targets get the asyncio-based retry loop
            if asyncio.iscoroutinefunction(target):
                import backoff._async
                retry = backoff._async.retry_exception

        if retry is None:
            # plain functions get the blocking (time.sleep) retry loop
            retry = _sync.retry_exception

        return retry(target, wait_gen, exception,
                     max_tries, max_time, jitter, giveup,
                     on_success_, on_backoff_, on_giveup_,
                     wait_gen_kwargs)

    # Return a function which decorates a target with a retry loop.
    return decorate
0 | # coding:utf-8 | |
1 | ||
2 | import random | |
3 | ||
4 | ||
def random_jitter(value):
    """Add a uniformly random fraction of a second to *value*.

    The result lies in [value, value + 1). Prior to backoff version 1.2
    this was the default jitter behavior.

    Args:
        value: The unadulterated backoff value.
    """
    jittered = value + random.random()
    return jittered
15 | ||
16 | ||
def full_jitter(value):
    """Draw a jittered wait time uniformly from [0, value].

    This corresponds to the "Full Jitter" algorithm from the AWS
    Architecture Blog post on the performance of jitter strategies
    (http://www.awsarchitectureblog.com/2015/03/backoff.html).

    Args:
        value: The unadulterated backoff value.
    """
    jittered = random.uniform(0, value)
    return jittered
0 | # coding:utf-8 | |
1 | import datetime | |
2 | import functools | |
3 | import time | |
4 | from datetime import timedelta | |
5 | ||
6 | from backoff._common import (_init_wait_gen, _maybe_call, _next_wait) | |
7 | ||
8 | ||
9 | def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra): | |
10 | details = { | |
11 | 'target': target, | |
12 | 'args': args, | |
13 | 'kwargs': kwargs, | |
14 | 'tries': tries, | |
15 | 'elapsed': elapsed, | |
16 | } | |
17 | details.update(extra) | |
18 | for hdlr in hdlrs: | |
19 | hdlr(details) | |
20 | ||
21 | ||
def retry_predicate(target, wait_gen, predicate,
                    max_tries, max_time, jitter,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* in a blocking retry loop driven by *predicate*.

    A truthy predicate(ret) triggers backoff; a falsey one is success.
    The result of the final attempt is returned in either case.
    Handler arguments are pre-configured lists (see _config_handlers).
    """

    @functools.wraps(target)
    def retry(*args, **kwargs):

        # change names because python 2.x doesn't have nonlocal
        # (callables are re-evaluated on every invocation)
        max_tries_ = _maybe_call(max_tries)
        max_time_ = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            # unbound-method style call; same as (...).total_seconds()
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = (target, args, kwargs, tries, elapsed)

            ret = target(*args, **kwargs)
            if predicate(ret):
                # equality check is safe: tries grows by exactly 1 per
                # iteration, so it cannot skip past max_tries_
                max_tries_exceeded = (tries == max_tries_)
                max_time_exceeded = (max_time_ is not None and
                                     elapsed >= max_time_)

                if max_tries_exceeded or max_time_exceeded:
                    _call_handlers(on_giveup, *details, value=ret)
                    break

                try:
                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
                except StopIteration:
                    # wait generator exhausted: give up with last result
                    _call_handlers(on_giveup, *details)
                    break

                _call_handlers(on_backoff, *details,
                               value=ret, wait=seconds)

                time.sleep(seconds)
                continue
            else:
                _call_handlers(on_success, *details, value=ret)
                break

        # result of the last attempt, whether success or giveup
        return ret

    return retry
70 | ||
71 | ||
def retry_exception(target, wait_gen, exception,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* in a blocking retry loop triggered by *exception*.

    Any raised instance of *exception* (type or tuple of types) causes
    a backoff unless giveup(e) is truthy or a limit is hit, in which
    case the exception propagates to the caller.
    """

    @functools.wraps(target)
    def retry(*args, **kwargs):

        # change names because python 2.x doesn't have nonlocal
        # (callables are re-evaluated on every invocation)
        max_tries_ = _maybe_call(max_tries)
        max_time_ = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            # unbound-method style call; same as (...).total_seconds()
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = (target, args, kwargs, tries, elapsed)

            try:
                ret = target(*args, **kwargs)
            except exception as e:
                # equality check is safe: tries grows by exactly 1 per
                # iteration, so it cannot skip past max_tries_
                max_tries_exceeded = (tries == max_tries_)
                max_time_exceeded = (max_time_ is not None and
                                     elapsed >= max_time_)

                if giveup(e) or max_tries_exceeded or max_time_exceeded:
                    _call_handlers(on_giveup, *details)
                    # re-raise the active exception unchanged
                    raise

                try:
                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
                except StopIteration:
                    # wait generator exhausted: surface the original error
                    _call_handlers(on_giveup, *details)
                    raise e

                _call_handlers(on_backoff, *details, wait=seconds)

                time.sleep(seconds)
            else:
                # target returned without raising: success
                _call_handlers(on_success, *details)

                return ret
    return retry
0 | # coding:utf-8 | |
1 | ||
2 | import itertools | |
3 | ||
4 | ||
def expo(base=2, factor=1, max_value=None):
    """Generator for exponential decay.

    Yields factor * base ** n for n = 0, 1, 2, ...; once a term in the
    true exponential sequence reaches max_value, max_value itself is
    yielded forever.

    Args:
        base: The mathematical base of the exponentiation operation
        factor: Factor to multiply the exponentiation by.
        max_value: The maximum value to yield. Once the value in the
            true exponential sequence exceeds this, the value
            of max_value will forever after be yielded.
    """
    exponent = 0
    while True:
        value = factor * base ** exponent
        if max_value is not None and value >= max_value:
            # cap reached: emit max_value from here on out
            while True:
                yield max_value
        yield value
        exponent += 1
24 | ||
def fibo(max_value=None):
    """Generator for fibonaccial decay.

    Args:
        max_value: The maximum value to yield. Once the value in the
            true fibonacci sequence exceeds this, the value
            of max_value will forever after be yielded.
    """
    current, following = 1, 1
    while max_value is None or current < max_value:
        yield current
        current, following = following, current + following
    # the sequence has reached the cap: emit max_value forever
    while True:
        yield max_value
41 | ||
42 | ||
def constant(interval=1):
    """Generator for constant intervals.

    Args:
        interval: A constant value to yield or an iterable of such values.
    """
    try:
        source = iter(interval)
    except TypeError:
        # a bare scalar: repeat it indefinitely
        source = itertools.repeat(interval)

    while True:
        try:
            yield next(source)
        except StopIteration:
            # a finite iterable of intervals was exhausted
            return
0 | [tool.poetry] | |
1 | name = "backoff" | |
2 | version = "1.10.0" | |
3 | description = "Function decoration for backoff and retry" | |
4 | authors = ["Bob Green <[email protected]>"] | |
5 | readme = "README.rst" | |
6 | repository = "https://github.com/litl/backoff" | |
7 | license = "MIT" | |
8 | keywords = ["retry", "backoff", "decorators"] | |
9 | classifiers = ['Development Status :: 5 - Production/Stable', | |
10 | 'Intended Audience :: Developers', | |
11 | 'Programming Language :: Python', | |
12 | 'License :: OSI Approved :: MIT License', | |
13 | 'Natural Language :: English', | |
14 | 'Operating System :: OS Independent', | |
15 | 'Programming Language :: Python', | |
16 | 'Programming Language :: Python :: 2', | |
17 | 'Programming Language :: Python :: 2.7', | |
18 | 'Programming Language :: Python :: 3', | |
19 | 'Programming Language :: Python :: 3.5', | |
20 | 'Programming Language :: Python :: 3.6', | |
21 | 'Programming Language :: Python :: 3.7', | |
22 | 'Programming Language :: Python :: 3.8', | |
23 | 'Topic :: Internet :: WWW/HTTP', | |
24 | 'Topic :: Software Development :: Libraries :: Python Modules', | |
25 | 'Topic :: Utilities'] | |
26 | packages = [ | |
27 | { include = "backoff" }, | |
28 | ] | |
29 | ||
30 | [tool.poetry.dependencies] | |
31 | python = "^2.7 || ^3.5" | |
32 | ||
33 | [tool.poetry.dev-dependencies] | |
34 | flake8 = "^3.6" | |
35 | pytest = "^4.0" | |
36 | pytest-cov = "^2.6" | |
37 | pytest-asyncio = {version = "^0.10.0",python = "^3.5"} | |
38 | ||
39 | [build-system] | |
40 | requires = ["poetry>=0.12"] | |
41 | build-backend = "poetry.masonry.api" |
0 | # coding:utf-8 | |
1 | import collections | |
2 | import functools | |
3 | ||
4 | ||
5 | # create event handler which log their invocations to a dict | |
6 | def _log_hdlrs(): | |
7 | log = collections.defaultdict(list) | |
8 | ||
9 | def log_hdlr(event, details): | |
10 | log[event].append(details) | |
11 | ||
12 | log_success = functools.partial(log_hdlr, 'success') | |
13 | log_backoff = functools.partial(log_hdlr, 'backoff') | |
14 | log_giveup = functools.partial(log_hdlr, 'giveup') | |
15 | ||
16 | return log, log_success, log_backoff, log_giveup | |
17 | ||
18 | ||
19 | # decorator that that saves the target as | |
20 | # an attribute of the decorated function | |
21 | def _save_target(f): | |
22 | f._target = f | |
23 | return f |
0 | # coding:utf-8 | |
1 | ||
2 | import asyncio # Python 3.5 code and syntax is allowed in this file | |
3 | import backoff | |
4 | import pytest | |
5 | import random | |
6 | ||
7 | from tests.common import _log_hdlrs, _save_target | |
8 | ||
9 | ||
10 | async def _await_none(x): | |
11 | return None | |
12 | ||
13 | ||
@pytest.mark.asyncio
async def test_on_predicate(monkeypatch):
    """Default on_predicate retries until the coroutine returns truthy."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    @backoff.on_predicate(backoff.expo)
    async def return_true(log, n):
        val = (len(log) == n - 1)
        log.append(val)
        return val

    log = []
    ret = await return_true(log, 3)
    assert ret is True
    assert 3 == len(log)
28 | ||
29 | ||
@pytest.mark.asyncio
async def test_on_predicate_max_tries(monkeypatch):
    """on_predicate stops after max_tries and returns the last value."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    @backoff.on_predicate(backoff.expo, jitter=None, max_tries=3)
    async def return_true(log, n):
        val = (len(log) == n)
        log.append(val)
        return val

    log = []
    ret = await return_true(log, 10)
    assert ret is False
    assert 3 == len(log)
44 | ||
45 | ||
@pytest.mark.asyncio
async def test_on_exception(monkeypatch):
    """on_exception retries a coroutine until it stops raising."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    @backoff.on_exception(backoff.expo, KeyError)
    async def keyerror_then_true(log, n):
        if len(log) == n:
            return True
        e = KeyError()
        log.append(e)
        raise e

    log = []
    assert (await keyerror_then_true(log, 3)) is True
    assert 3 == len(log)
61 | ||
62 | ||
@pytest.mark.asyncio
async def test_on_exception_tuple(monkeypatch):
    """on_exception accepts a tuple of exception types to retry on."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    @backoff.on_exception(backoff.expo, (KeyError, ValueError))
    async def keyerror_valueerror_then_true(log):
        if len(log) == 2:
            return True
        if len(log) == 0:
            e = KeyError()
        if len(log) == 1:
            e = ValueError()
        log.append(e)
        raise e

    log = []
    assert (await keyerror_valueerror_then_true(log)) is True
    assert 2 == len(log)
    assert isinstance(log[0], KeyError)
    assert isinstance(log[1], ValueError)
83 | ||
84 | ||
@pytest.mark.asyncio
async def test_on_exception_max_tries(monkeypatch):
    """on_exception re-raises once max_tries attempts are exhausted."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    @backoff.on_exception(backoff.expo, KeyError, jitter=None, max_tries=3)
    async def keyerror_then_true(log, n, foo=None):
        if len(log) == n:
            return True
        e = KeyError()
        log.append(e)
        raise e

    log = []
    with pytest.raises(KeyError):
        await keyerror_then_true(log, 10, foo="bar")

    assert 3 == len(log)
102 | ||
103 | ||
@pytest.mark.asyncio
async def test_on_exception_constant_iterable(monkeypatch):
    """An iterable interval gives one backoff per element, then giveup."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    backoffs = []
    giveups = []
    successes = []

    @backoff.on_exception(
        backoff.constant,
        KeyError,
        interval=(1, 2, 3),
        on_backoff=backoffs.append,
        on_giveup=giveups.append,
        on_success=successes.append,
    )
    async def endless_exceptions():
        raise KeyError('foo')

    with pytest.raises(KeyError):
        await endless_exceptions()

    assert len(backoffs) == 3
    assert len(giveups) == 1
    assert len(successes) == 0
129 | ||
130 | ||
@pytest.mark.asyncio
async def test_on_exception_success_random_jitter(monkeypatch):
    """random_jitter waits are at least the un-jittered expo value."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_exception(backoff.expo,
                          Exception,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          jitter=backoff.random_jitter,
                          factor=0.5)
    @_save_target
    async def succeeder(*args, **kwargs):
        # succeed after we've backed off twice
        if len(log['backoff']) < 2:
            raise ValueError("catch me")

    await succeeder(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice before succeeding
    assert len(log['success']) == 1
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 0

    for i in range(2):
        details = log['backoff'][i]
        assert details['wait'] >= 0.5 * 2 ** i
160 | ||
161 | ||
@pytest.mark.asyncio
async def test_on_exception_success_full_jitter(monkeypatch):
    """full_jitter waits never exceed the un-jittered expo value."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_exception(backoff.expo,
                          Exception,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          jitter=backoff.full_jitter,
                          factor=0.5)
    @_save_target
    async def succeeder(*args, **kwargs):
        # succeed after we've backed off twice
        if len(log['backoff']) < 2:
            raise ValueError("catch me")

    await succeeder(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice before succeeding
    assert len(log['success']) == 1
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 0

    for i in range(2):
        details = log['backoff'][i]
        assert details['wait'] <= 0.5 * 2 ** i
191 | ||
192 | ||
@pytest.mark.asyncio
async def test_on_exception_success():
    """Handlers receive the full details dict on backoff and success."""
    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_exception(backoff.constant,
                          Exception,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          jitter=None,
                          interval=0)
    @_save_target
    async def succeeder(*args, **kwargs):
        # succeed after we've backed off twice
        if len(log['backoff']) < 2:
            raise ValueError("catch me")

    await succeeder(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice before succeeding
    assert len(log['success']) == 1
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 0

    for i in range(2):
        details = log['backoff'][i]
        elapsed = details.pop('elapsed')
        assert isinstance(elapsed, float)
        assert details == {'args': (1, 2, 3),
                           'kwargs': {'foo': 1, 'bar': 2},
                           'target': succeeder._target,
                           'tries': i + 1,
                           'wait': 0}

    details = log['success'][0]
    elapsed = details.pop('elapsed')
    assert isinstance(elapsed, float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': succeeder._target,
                       'tries': 3}
234 | ||
235 | ||
@pytest.mark.asyncio
async def test_on_exception_giveup():
    """The giveup handler fires with full details when tries run out."""
    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          max_tries=3,
                          jitter=None,
                          interval=0)
    @_save_target
    async def exceptor(*args, **kwargs):
        raise ValueError("catch me")

    with pytest.raises(ValueError):
        await exceptor(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice and giving up once
    assert len(log['success']) == 0
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 1

    details = log['giveup'][0]
    elapsed = details.pop('elapsed')
    assert isinstance(elapsed, float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': exceptor._target,
                       'tries': 3}
267 | ||
268 | ||
@pytest.mark.asyncio
async def test_on_exception_giveup_predicate(monkeypatch):
    """A plain-function giveup predicate stops the retries when truthy."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    def on_baz(e):
        return str(e) == "baz"

    vals = ["baz", "bar", "foo"]

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          giveup=on_baz)
    async def foo_bar_baz():
        raise ValueError(vals.pop())

    with pytest.raises(ValueError):
        await foo_bar_baz()

    assert not vals
288 | ||
289 | ||
@pytest.mark.asyncio
async def test_on_exception_giveup_coro(monkeypatch):
    """A coroutine giveup predicate is also honored by the async path."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    async def on_baz(e):
        return str(e) == "baz"

    vals = ["baz", "bar", "foo"]

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          giveup=on_baz)
    async def foo_bar_baz():
        raise ValueError(vals.pop())

    with pytest.raises(ValueError):
        await foo_bar_baz()

    assert not vals
309 | ||
310 | ||
@pytest.mark.asyncio
async def test_on_predicate_success():
    """on_predicate handlers receive details including the return value."""
    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_predicate(backoff.constant,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          jitter=None,
                          interval=0)
    @_save_target
    async def success(*args, **kwargs):
        # succeed after we've backed off twice
        return len(log['backoff']) == 2

    await success(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice before succeeding
    assert len(log['success']) == 1
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 0

    for i in range(2):
        details = log['backoff'][i]
        elapsed = details.pop('elapsed')
        assert isinstance(elapsed, float)
        assert details == {'args': (1, 2, 3),
                           'kwargs': {'foo': 1, 'bar': 2},
                           'target': success._target,
                           'tries': i + 1,
                           'value': False,
                           'wait': 0}

    details = log['success'][0]
    elapsed = details.pop('elapsed')
    assert isinstance(elapsed, float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': success._target,
                       'tries': 3,
                       'value': True}
352 | ||
353 | ||
@pytest.mark.asyncio
async def test_on_predicate_giveup():
    """on_predicate gives up after max_tries of falsey return values."""
    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_predicate(backoff.constant,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          max_tries=3,
                          jitter=None,
                          interval=0)
    @_save_target
    async def emptiness(*args, **kwargs):
        pass

    await emptiness(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice and giving up once
    assert len(log['success']) == 0
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 1

    details = log['giveup'][0]
    elapsed = details.pop('elapsed')
    assert isinstance(elapsed, float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': emptiness._target,
                       'tries': 3,
                       'value': None}
384 | ||
385 | ||
@pytest.mark.asyncio
async def test_on_predicate_iterable_handlers():
    """Iterables (here: generators) of handlers are all invoked."""
    hdlrs = [_log_hdlrs() for _ in range(3)]

    @backoff.on_predicate(backoff.constant,
                          on_success=(h[1] for h in hdlrs),
                          on_backoff=(h[2] for h in hdlrs),
                          on_giveup=(h[3] for h in hdlrs),
                          max_tries=3,
                          jitter=None,
                          interval=0)
    @_save_target
    async def emptiness(*args, **kwargs):
        pass

    await emptiness(1, 2, 3, foo=1, bar=2)

    for i in range(3):
        assert len(hdlrs[i][0]['success']) == 0
        assert len(hdlrs[i][0]['backoff']) == 2
        assert len(hdlrs[i][0]['giveup']) == 1

        details = dict(hdlrs[i][0]['giveup'][0])
        elapsed = details.pop('elapsed')
        assert isinstance(elapsed, float)
        assert details == {'args': (1, 2, 3),
                           'kwargs': {'foo': 1, 'bar': 2},
                           'target': emptiness._target,
                           'tries': 3,
                           'value': None}
416 | ||
417 | ||
@pytest.mark.asyncio
async def test_on_predicate_constant_iterable(monkeypatch):
    """An iterable interval yields each wait in order before giving up."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    waits = [1, 2, 3, 6, 9]
    backoffs = []
    giveups = []
    successes = []

    @backoff.on_predicate(
        backoff.constant,
        interval=waits,
        on_backoff=backoffs.append,
        on_giveup=giveups.append,
        on_success=successes.append,
        jitter=None,
    )
    async def falsey():
        return False

    assert not await falsey()

    assert len(backoffs) == len(waits)
    for i, wait in enumerate(waits):
        assert backoffs[i]['wait'] == wait

    assert len(giveups) == 1
    assert len(successes) == 0
446 | ||
447 | ||
# To maintain backward compatibility,
# on_exception should support 0-argument jitter function.
@pytest.mark.asyncio
async def test_on_exception_success_0_arg_jitter(monkeypatch):
    """A nullary jitter still works but emits a DeprecationWarning."""
    monkeypatch.setattr('asyncio.sleep', _await_none)
    monkeypatch.setattr('random.random', lambda: 0)

    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_exception(backoff.constant,
                          Exception,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          jitter=random.random,
                          interval=0)
    @_save_target
    async def succeeder(*args, **kwargs):
        # succeed after we've backed off twice
        if len(log['backoff']) < 2:
            raise ValueError("catch me")

    with pytest.deprecated_call():
        await succeeder(1, 2, 3, foo=1, bar=2)

    # we try 3 times, backing off twice before succeeding
    assert len(log['success']) == 1
    assert len(log['backoff']) == 2
    assert len(log['giveup']) == 0

    for i in range(2):
        details = log['backoff'][i]
        elapsed = details.pop('elapsed')
        assert isinstance(elapsed, float)
        assert details == {'args': (1, 2, 3),
                           'kwargs': {'foo': 1, 'bar': 2},
                           'target': succeeder._target,
                           'tries': i + 1,
                           'wait': 0}

    details = log['success'][0]
    elapsed = details.pop('elapsed')
    assert isinstance(elapsed, float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': succeeder._target,
                       'tries': 3}
495 | ||
496 | ||
497 | # To maintain backward compatibility, | |
498 | # on_predicate should support 0-argument jitter function. | |
@pytest.mark.asyncio
async def test_on_predicate_success_0_arg_jitter(monkeypatch):
    """A legacy zero-argument jitter callable still works for on_predicate."""
    monkeypatch.setattr('asyncio.sleep', _await_none)
    monkeypatch.setattr('random.random', lambda: 0)

    log, log_success, log_backoff, log_giveup = _log_hdlrs()

    @backoff.on_predicate(backoff.constant,
                          on_success=log_success,
                          on_backoff=log_backoff,
                          on_giveup=log_giveup,
                          jitter=random.random,
                          interval=0)
    @_save_target
    async def success(*args, **kwargs):
        # truthy only once two backoffs have been recorded
        return len(log['backoff']) == 2

    with pytest.deprecated_call():
        await success(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs then a success, never a giveup
    assert (len(log['success']), len(log['backoff']), len(log['giveup'])) \
        == (1, 2, 0)

    common = {'args': (1, 2, 3),
              'kwargs': {'foo': 1, 'bar': 2},
              'target': success._target}

    for attempt, details in enumerate(log['backoff'], start=1):
        assert isinstance(details.pop('elapsed'), float)
        assert details == dict(common, tries=attempt, value=False, wait=0)

    details = log['success'][0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == dict(common, tries=3, value=True)
545 | ||
@pytest.mark.asyncio
async def test_on_exception_callable_max_tries(monkeypatch):
    """max_tries may be a callable, resolved when the call starts."""
    monkeypatch.setattr('asyncio.sleep', _await_none)

    attempts = []

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          max_tries=lambda: 3)
    async def exceptor():
        attempts.append(True)
        raise ValueError()

    with pytest.raises(ValueError):
        await exceptor()

    assert len(attempts) == 3
566 | ||
567 | ||
@pytest.mark.asyncio
async def test_on_exception_callable_gen_kwargs():
    """Callable wait_gen kwargs are resolved before reaching the generator."""

    def wait_gen(foo=None, bar=None):
        # both kwargs must arrive as plain values, not callables
        assert foo == "foo"
        assert bar == "bar"
        while True:
            yield 0

    @backoff.on_exception(wait_gen,
                          ValueError,
                          max_tries=2,
                          foo=lambda: "foo",
                          bar="bar")
    async def exceptor():
        raise ValueError("aah")

    with pytest.raises(ValueError):
        await exceptor()
591 | ||
592 | ||
@pytest.mark.asyncio
async def test_on_exception_coro_cancelling(event_loop):
    """Cancelling the retrying task propagates into the wrapped coroutine."""
    # NOTE(review): despite the name this decorates with on_predicate — confirm intent.
    sleep_started_event = asyncio.Event()

    @backoff.on_predicate(backoff.expo)
    async def coro():
        sleep_started_event.set()
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            return True
        return False

    task = event_loop.create_task(coro())
    await sleep_started_event.wait()
    task.cancel()

    # the coroutine observed the cancellation and returned True
    assert await task
615 | ||
616 | ||
def test_on_predicate_on_regular_function_without_event_loop(monkeypatch):
    """The sync retry path must work even with no default event loop set."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    # Detach the default event loop for the duration of the test.
    loop = asyncio.get_event_loop()
    asyncio.set_event_loop(None)

    try:
        @backoff.on_predicate(backoff.expo)
        def return_true(log, n):
            result = len(log) == n - 1
            log.append(result)
            return result

        attempts = []
        assert return_true(attempts, 3) is True
        assert len(attempts) == 3
    finally:
        # Put the original loop back no matter what happened above.
        asyncio.set_event_loop(loop)
639 | ||
640 | ||
def test_on_exception_on_regular_function_without_event_loop(monkeypatch):
    """Exception-based retry on a plain function needs no event loop."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    # Detach the default event loop for the duration of the test.
    loop = asyncio.get_event_loop()
    asyncio.set_event_loop(None)

    try:
        @backoff.on_exception(backoff.expo, KeyError)
        def keyerror_then_true(log, n):
            if len(log) == n:
                return True
            err = KeyError()
            log.append(err)
            raise err

        errors = []
        assert keyerror_then_true(errors, 3) is True
        assert len(errors) == 3
    finally:
        # Put the original loop back no matter what happened above.
        asyncio.set_event_loop(loop)
0 | # coding:utf-8 | |
1 | import datetime | |
2 | import logging | |
3 | import random | |
4 | import sys | |
5 | import threading | |
6 | ||
7 | import pytest | |
8 | ||
9 | import backoff | |
10 | from tests.common import _save_target | |
11 | ||
12 | ||
def test_on_predicate(monkeypatch):
    """The wrapped function is retried until its return value is truthy."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    @backoff.on_predicate(backoff.expo)
    def return_true(log, n):
        result = len(log) == n - 1
        log.append(result)
        return result

    attempts = []
    assert return_true(attempts, 3) is True
    assert len(attempts) == 3
26 | ||
27 | ||
def test_on_predicate_max_tries(monkeypatch):
    """Retrying gives up after max_tries attempts even while still falsey."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    @backoff.on_predicate(backoff.expo, jitter=None, max_tries=3)
    def return_true(log, n):
        result = len(log) == n
        log.append(result)
        return result

    attempts = []
    assert return_true(attempts, 10) is False
    assert len(attempts) == 3
41 | ||
42 | ||
def test_on_predicate_max_time(monkeypatch):
    """Retrying stops once the (mocked) elapsed time exceeds max_time."""
    # Timestamps are pop()ed from the end, so they are consumed oldest-first.
    nows = [
        datetime.datetime(2018, 1, 1, 12, 0, 10, 5),
        datetime.datetime(2018, 1, 1, 12, 0, 9, 0),
        datetime.datetime(2018, 1, 1, 12, 0, 1, 0),
        datetime.datetime(2018, 1, 1, 12, 0, 0, 0),
    ]

    class Datetime:
        @staticmethod
        def now():
            return nows.pop()

    monkeypatch.setattr('time.sleep', lambda x: None)
    monkeypatch.setattr('datetime.datetime', Datetime)

    def giveup(details):
        # the giveup fires on the third try, 10.000005s after the start
        assert details['tries'] == 3
        assert details['elapsed'] == 10.000005

    @backoff.on_predicate(backoff.expo, jitter=None, max_time=10,
                          on_giveup=giveup)
    def return_true(log, n):
        result = len(log) == n
        log.append(result)
        return result

    attempts = []
    assert return_true(attempts, 10) is False
    assert len(attempts) == 3
74 | ||
75 | ||
def test_on_exception(monkeypatch):
    """The wrapped function is retried until it stops raising."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    @backoff.on_exception(backoff.expo, KeyError)
    def keyerror_then_true(log, n):
        if len(log) == n:
            return True
        err = KeyError()
        log.append(err)
        raise err

    errors = []
    assert keyerror_then_true(errors, 3) is True
    assert len(errors) == 3
90 | ||
91 | ||
def test_on_exception_tuple(monkeypatch):
    """A tuple of exception types is retried for each member type."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    @backoff.on_exception(backoff.expo, (KeyError, ValueError))
    def keyerror_valueerror_then_true(log):
        if len(log) == 2:
            return True
        # first attempt raises KeyError, second raises ValueError
        err = KeyError() if not log else ValueError()
        log.append(err)
        raise err

    errors = []
    assert keyerror_valueerror_then_true(errors) is True
    assert len(errors) == 2
    assert isinstance(errors[0], KeyError)
    assert isinstance(errors[1], ValueError)
111 | ||
112 | ||
def test_on_exception_max_tries(monkeypatch):
    """Once max_tries raises have happened, the final exception propagates."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    @backoff.on_exception(backoff.expo, KeyError, jitter=None, max_tries=3)
    def keyerror_then_true(log, n, foo=None):
        if len(log) == n:
            return True
        err = KeyError()
        log.append(err)
        raise err

    errors = []
    with pytest.raises(KeyError):
        keyerror_then_true(errors, 10, foo="bar")

    assert len(errors) == 3
129 | ||
130 | ||
def test_on_exception_constant_iterable(monkeypatch):
    """A finite interval iterable bounds the number of retries."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    backoff_log, giveup_log, success_log = [], [], []

    @backoff.on_exception(
        backoff.constant,
        KeyError,
        interval=(1, 2, 3),
        on_backoff=backoff_log.append,
        on_giveup=giveup_log.append,
        on_success=success_log.append,
    )
    def endless_exceptions():
        raise KeyError('foo')

    with pytest.raises(KeyError):
        endless_exceptions()

    # one backoff per interval entry, then exactly one giveup
    assert len(backoff_log) == 3
    assert len(giveup_log) == 1
    assert not success_log
155 | ||
156 | ||
def test_on_exception_success_random_jitter(monkeypatch):
    """random_jitter waits are never below the un-jittered expo value."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    backoffs, giveups, successes = [], [], []

    @backoff.on_exception(backoff.expo,
                          Exception,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          jitter=backoff.random_jitter,
                          factor=0.5)
    @_save_target
    def succeeder(*args, **kwargs):
        # keep raising until two backoffs have been recorded
        if len(backoffs) < 2:
            raise ValueError("catch me")

    succeeder(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs, one success, no giveups
    assert (len(successes), len(backoffs), len(giveups)) == (1, 2, 0)

    # each jittered wait must be at least factor * 2**i
    for i, details in enumerate(backoffs):
        assert details['wait'] >= 0.5 * 2 ** i
185 | ||
186 | ||
def test_on_exception_success_full_jitter(monkeypatch):
    """full_jitter waits never exceed the un-jittered expo value."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    backoffs, giveups, successes = [], [], []

    @backoff.on_exception(backoff.expo,
                          Exception,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          jitter=backoff.full_jitter,
                          factor=0.5)
    @_save_target
    def succeeder(*args, **kwargs):
        # keep raising until two backoffs have been recorded
        if len(backoffs) < 2:
            raise ValueError("catch me")

    succeeder(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs, one success, no giveups
    assert (len(successes), len(backoffs), len(giveups)) == (1, 2, 0)

    # each jittered wait must be at most factor * 2**i
    for i, details in enumerate(backoffs):
        assert details['wait'] <= 0.5 * 2 ** i
215 | ||
216 | ||
def test_on_exception_success():
    """A success after two backoffs reports full details to each handler."""
    backoffs, giveups, successes = [], [], []

    @backoff.on_exception(backoff.constant,
                          Exception,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          jitter=None,
                          interval=0)
    @_save_target
    def succeeder(*args, **kwargs):
        # keep raising until two backoffs have been recorded
        if len(backoffs) < 2:
            raise ValueError("catch me")

    succeeder(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs, one success, no giveups
    assert (len(successes), len(backoffs), len(giveups)) == (1, 2, 0)

    common = {'args': (1, 2, 3),
              'kwargs': {'foo': 1, 'bar': 2},
              'target': succeeder._target}

    for attempt, details in enumerate(backoffs, start=1):
        assert isinstance(details.pop('elapsed'), float)
        assert details == dict(common, tries=attempt, wait=0)

    details = successes[0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == dict(common, tries=3)
257 | ||
258 | ||
def test_on_exception_giveup():
    """Exhausting max_tries invokes on_giveup and re-raises the error."""
    backoffs, giveups, successes = [], [], []

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          max_tries=3,
                          jitter=None,
                          interval=0)
    @_save_target
    def exceptor(*args, **kwargs):
        raise ValueError("catch me")

    with pytest.raises(ValueError):
        exceptor(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs and exactly one giveup, never a success
    assert (len(successes), len(backoffs), len(giveups)) == (0, 2, 1)

    details = giveups[0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': exceptor._target,
                       'tries': 3}
289 | ||
290 | ||
def test_on_exception_giveup_predicate(monkeypatch):
    """A giveup predicate stops retrying as soon as it returns True."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    vals = ["baz", "bar", "foo"]

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          giveup=lambda e: str(e) == "baz")
    def foo_bar_baz():
        raise ValueError(vals.pop())

    with pytest.raises(ValueError):
        foo_bar_baz()

    # "foo" and "bar" were retried; "baz" triggered the giveup
    assert not vals
309 | ||
310 | ||
def test_on_predicate_success():
    """A truthy result after two backoffs reports full details to handlers."""
    backoffs, giveups, successes = [], [], []

    @backoff.on_predicate(backoff.constant,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          jitter=None,
                          interval=0)
    @_save_target
    def success(*args, **kwargs):
        # truthy only once two backoffs have been recorded
        return len(backoffs) == 2

    success(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs, one success, no giveups
    assert (len(successes), len(backoffs), len(giveups)) == (1, 2, 0)

    common = {'args': (1, 2, 3),
              'kwargs': {'foo': 1, 'bar': 2},
              'target': success._target}

    for attempt, details in enumerate(backoffs, start=1):
        assert isinstance(details.pop('elapsed'), float)
        assert details == dict(common, tries=attempt, value=False, wait=0)

    details = successes[0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == dict(common, tries=3, value=True)
352 | ||
353 | ||
def test_on_predicate_giveup():
    """A never-truthy predicate gives up after max_tries attempts."""
    backoffs, giveups, successes = [], [], []

    @backoff.on_predicate(backoff.constant,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          max_tries=3,
                          jitter=None,
                          interval=0)
    @_save_target
    def emptiness(*args, **kwargs):
        pass

    emptiness(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs and exactly one giveup, never a success
    assert (len(successes), len(backoffs), len(giveups)) == (0, 2, 1)

    details = giveups[0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == {'args': (1, 2, 3),
                       'kwargs': {'foo': 1, 'bar': 2},
                       'target': emptiness._target,
                       'tries': 3,
                       'value': None}
383 | ||
384 | ||
def test_on_predicate_iterable_handlers():
    """Each handler argument may be an iterable of callables; all are invoked.

    Fixes: removed a stray debug ``print(details)`` and renamed the
    ambiguous single-letter generator variable ``l`` (flake8 E741).
    """
    class Logger:
        def __init__(self):
            self.backoffs = []
            self.giveups = []
            self.successes = []

    loggers = [Logger() for _ in range(3)]

    @backoff.on_predicate(backoff.constant,
                          on_backoff=(lg.backoffs.append for lg in loggers),
                          on_giveup=(lg.giveups.append for lg in loggers),
                          on_success=(lg.successes.append for lg in loggers),
                          max_tries=3,
                          jitter=None,
                          interval=0)
    @_save_target
    def emptiness(*args, **kwargs):
        pass

    emptiness(1, 2, 3, foo=1, bar=2)

    for logger in loggers:
        # every logger saw the same events: 2 backoffs and 1 giveup
        assert len(logger.successes) == 0
        assert len(logger.backoffs) == 2
        assert len(logger.giveups) == 1

        # copy so popping 'elapsed' does not mutate the shared details dict
        details = dict(logger.giveups[0])
        elapsed = details.pop('elapsed')
        assert isinstance(elapsed, float)
        assert details == {'args': (1, 2, 3),
                           'kwargs': {'foo': 1, 'bar': 2},
                           'target': emptiness._target,
                           'tries': 3,
                           'value': None}
422 | ||
423 | ||
# To maintain backward compatibility,
# on_exception should support 0-argument jitter function.
def test_on_exception_success_0_arg_jitter(monkeypatch):
    """A legacy zero-argument jitter callable still works (with a deprecation warning)."""
    monkeypatch.setattr('time.sleep', lambda x: None)
    monkeypatch.setattr('random.random', lambda: 0)

    backoffs, giveups, successes = [], [], []

    @backoff.on_exception(backoff.constant,
                          Exception,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          jitter=random.random,
                          interval=0)
    @_save_target
    def succeeder(*args, **kwargs):
        # keep raising until two backoffs have been recorded
        if len(backoffs) < 2:
            raise ValueError("catch me")

    with pytest.deprecated_call():
        succeeder(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs, one success, no giveups
    assert (len(successes), len(backoffs), len(giveups)) == (1, 2, 0)

    common = {'args': (1, 2, 3),
              'kwargs': {'foo': 1, 'bar': 2},
              'target': succeeder._target}

    for attempt, details in enumerate(backoffs, start=1):
        assert isinstance(details.pop('elapsed'), float)
        assert details == dict(common, tries=attempt, wait=0)

    details = successes[0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == dict(common, tries=3)
470 | ||
471 | ||
472 | # To maintain backward compatibility, | |
473 | # on_predicate should support 0-argument jitter function. | |
def test_on_predicate_success_0_arg_jitter(monkeypatch):
    """A legacy zero-argument jitter callable still works for on_predicate.

    Fixes: removed a stray debug ``print(details)`` left in the assert loop.
    """
    monkeypatch.setattr('time.sleep', lambda x: None)
    monkeypatch.setattr('random.random', lambda: 0)

    backoffs, giveups, successes = [], [], []

    @backoff.on_predicate(backoff.constant,
                          on_success=successes.append,
                          on_backoff=backoffs.append,
                          on_giveup=giveups.append,
                          jitter=random.random,
                          interval=0)
    @_save_target
    def success(*args, **kwargs):
        # truthy only once two backoffs have been recorded
        return len(backoffs) == 2

    with pytest.deprecated_call():
        success(1, 2, 3, foo=1, bar=2)

    # three tries: two backoffs then a success, never a giveup
    assert (len(successes), len(backoffs), len(giveups)) == (1, 2, 0)

    common = {'args': (1, 2, 3),
              'kwargs': {'foo': 1, 'bar': 2},
              'target': success._target}

    for attempt, details in enumerate(backoffs, start=1):
        assert isinstance(details.pop('elapsed'), float)
        assert details == dict(common, tries=attempt, value=False, wait=0)

    details = successes[0]
    assert isinstance(details.pop('elapsed'), float)
    assert details == dict(common, tries=3, value=True)
519 | ||
520 | ||
def test_on_exception_callable_max_tries(monkeypatch):
    """max_tries may be a callable, resolved when the call starts."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    attempts = []

    @backoff.on_exception(backoff.constant,
                          ValueError,
                          max_tries=lambda: 3)
    def exceptor():
        attempts.append(True)
        raise ValueError()

    with pytest.raises(ValueError):
        exceptor()

    assert len(attempts) == 3
540 | ||
541 | ||
def test_on_exception_callable_gen_kwargs():
    """Callable wait_gen kwargs are resolved before reaching the generator."""

    def wait_gen(foo=None, bar=None):
        # both kwargs must arrive as plain values, not callables
        assert foo == "foo"
        assert bar == "bar"
        while True:
            yield 0

    @backoff.on_exception(wait_gen,
                          ValueError,
                          max_tries=2,
                          foo=lambda: "foo",
                          bar="bar")
    def exceptor():
        raise ValueError("aah")

    with pytest.raises(ValueError):
        exceptor()
564 | ||
565 | ||
def test_on_predicate_in_thread(monkeypatch):
    """on_predicate works inside a worker thread (assertions are reported back)."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    result = []

    def check():
        # exceptions cannot propagate out of the thread, so record the outcome
        try:
            @backoff.on_predicate(backoff.expo)
            def return_true(log, n):
                outcome = len(log) == n - 1
                log.append(outcome)
                return outcome

            attempts = []
            assert return_true(attempts, 3) is True
            assert len(attempts) == 3
        except Exception as ex:
            result.append(ex)
        else:
            result.append('success')

    worker = threading.Thread(target=check)
    worker.start()
    worker.join()

    assert result == ['success']
595 | ||
596 | ||
def test_on_predicate_constant_iterable(monkeypatch):
    """A finite interval iterable drives one backoff per wait, then a giveup."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    intervals = [1, 2, 3, 6, 9]
    backoff_log, giveup_log, success_log = [], [], []

    @backoff.on_predicate(
        backoff.constant,
        interval=intervals,
        on_backoff=backoff_log.append,
        on_giveup=giveup_log.append,
        on_success=success_log.append,
        jitter=None,
    )
    def falsey():
        return False

    assert not falsey()

    # One backoff per configured wait, in the configured order.
    assert [details['wait'] for details in backoff_log] == intervals

    assert len(giveup_log) == 1
    assert not success_log
624 | ||
625 | ||
def test_on_exception_in_thread(monkeypatch):
    """on_exception works inside a worker thread (assertions are reported back)."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    result = []

    def check():
        # exceptions cannot propagate out of the thread, so record the outcome
        try:
            @backoff.on_exception(backoff.expo, KeyError)
            def keyerror_then_true(log, n):
                if len(log) == n:
                    return True
                err = KeyError()
                log.append(err)
                raise err

            errors = []
            assert keyerror_then_true(errors, 3) is True
            assert len(errors) == 3
        except Exception as ex:
            result.append(ex)
        else:
            result.append('success')

    worker = threading.Thread(target=check)
    worker.start()
    worker.join()

    assert result == ['success']
656 | ||
657 | ||
def test_on_exception_logger_default(monkeypatch, caplog):
    """By default, retry events are logged through the 'backoff' logger."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    logger = logging.getLogger('backoff')
    logger.addHandler(logging.StreamHandler(sys.stdout))

    @backoff.on_exception(backoff.expo, KeyError, max_tries=3)
    def key_error():
        raise KeyError()

    with caplog.at_level(logging.INFO):
        with pytest.raises(KeyError):
            key_error()

    # 2 backoffs and 1 giveup, all attributed to the 'backoff' logger
    assert len(caplog.records) == 3
    assert all(record.name == 'backoff' for record in caplog.records)
676 | ||
677 | ||
def test_on_exception_logger_none(monkeypatch, caplog):
    """logger=None disables all event logging."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    logger = logging.getLogger('backoff')
    logger.addHandler(logging.StreamHandler(sys.stdout))

    @backoff.on_exception(backoff.expo, KeyError, max_tries=3, logger=None)
    def key_error():
        raise KeyError()

    with caplog.at_level(logging.INFO):
        with pytest.raises(KeyError):
            key_error()

    # nothing at all should have been logged
    assert not caplog.records
694 | ||
695 | ||
def test_on_exception_logger_user(monkeypatch, caplog):
    """A user-supplied Logger instance receives the retry events."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    logger = logging.getLogger('my-logger')
    logger.addHandler(logging.StreamHandler(sys.stdout))

    @backoff.on_exception(backoff.expo, KeyError, max_tries=3, logger=logger)
    def key_error():
        raise KeyError()

    with caplog.at_level(logging.INFO):
        with pytest.raises(KeyError):
            key_error()

    # 2 backoffs and 1 giveup, all attributed to the custom logger
    assert len(caplog.records) == 3
    assert all(record.name == 'my-logger' for record in caplog.records)
714 | ||
715 | ||
def test_on_exception_logger_user_str(monkeypatch, caplog):
    """A logger given by name (str) receives the retry events."""
    monkeypatch.setattr('time.sleep', lambda x: None)

    logger = logging.getLogger('my-logger')
    logger.addHandler(logging.StreamHandler(sys.stdout))

    @backoff.on_exception(backoff.expo, KeyError, max_tries=3,
                          logger='my-logger')
    def key_error():
        raise KeyError()

    with caplog.at_level(logging.INFO):
        with pytest.raises(KeyError):
            key_error()

    # 2 backoffs and 1 giveup, all attributed to the named logger
    assert len(caplog.records) == 3
    assert all(record.name == 'my-logger' for record in caplog.records)
0 | # coding:utf-8 | |
1 | import backoff | |
2 | ||
3 | ||
def test_full_jitter():
    """full_jitter(value) always lands within [0, value].

    Fixes: renamed the loop variable ``input`` (shadowed the builtin) and
    replaced the unused inner index with ``_``.
    """
    for value in range(100):
        for _ in range(100):
            jitter = backoff.full_jitter(value)
            assert 0 <= jitter <= value
0 | # coding:utf-8 | |
1 | import backoff | |
2 | ||
3 | ||
def test_expo():
    """Default expo yields successive powers of two."""
    gen = backoff.expo()
    assert [next(gen) for _ in range(9)] == [2 ** i for i in range(9)]
8 | ||
9 | ||
def test_expo_base3():
    """base=3 yields successive powers of three."""
    gen = backoff.expo(base=3)
    assert [next(gen) for _ in range(9)] == [3 ** i for i in range(9)]
14 | ||
15 | ||
def test_expo_factor3():
    """factor scales every term of the series."""
    gen = backoff.expo(factor=3)
    assert [next(gen) for _ in range(9)] == [3 * 2 ** i for i in range(9)]
20 | ||
21 | ||
def test_expo_base3_factor5():
    """base and factor combine multiplicatively."""
    gen = backoff.expo(base=3, factor=5)
    assert [next(gen) for _ in range(9)] == [5 * 3 ** i for i in range(9)]
26 | ||
27 | ||
def test_expo_max_value():
    """The series is clamped once it reaches max_value."""
    gen = backoff.expo(max_value=2 ** 4)
    assert [next(gen) for _ in range(7)] == [1, 2, 4, 8, 16, 16, 16]
33 | ||
34 | ||
def test_fibo():
    """fibo yields the Fibonacci sequence starting 1, 1."""
    gen = backoff.fibo()
    assert [next(gen) for _ in range(7)] == [1, 1, 2, 3, 5, 8, 13]
40 | ||
41 | ||
def test_fibo_max_value():
    """fibo values are clamped once they reach max_value."""
    gen = backoff.fibo(max_value=8)
    assert [next(gen) for _ in range(8)] == [1, 1, 2, 3, 5, 8, 8, 8]
47 | ||
48 | ||
def test_constant():
    """constant yields its interval forever."""
    gen = backoff.constant(interval=3)
    assert all(next(gen) == 3 for _ in range(9))