diff --git a/.coveragerc-py2 b/.coveragerc-py2
deleted file mode 100644
index 1bf7d6d..0000000
--- a/.coveragerc-py2
+++ /dev/null
@@ -1,8 +0,0 @@
-[report]
-show_missing = True
-
-# Regexes for lines to exclude from consideration
-exclude_lines =
-    # Have to re-enable the standard pragma
-    pragma: no cover
-    pragma: python=3\.5
diff --git a/.coveragerc-py35 b/.coveragerc-py35
deleted file mode 100644
index 013dd20..0000000
--- a/.coveragerc-py35
+++ /dev/null
@@ -1,2 +0,0 @@
-[report]
-show_missing = True
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000..91041ed
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,71 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+name: "CodeQL"
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [master]
+  schedule:
+    - cron: '0 5 * * 6'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        # Override automatic language detection by changing the below list
+        # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
+        language: ['python']
+        # Learn more...
+        # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+      with:
+        # We must fetch at least the immediate parents so that if this is
+        # a pull request then we can checkout the head.
+        fetch-depth: 2
+
+    # If this run was triggered by a pull request event, then checkout
+    # the head of the pull request instead of the merge commit.
+    - run: git checkout HEAD^2
+      if: ${{ github.event_name == 'pull_request' }}
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹī¸ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
diff --git a/.gitignore b/.gitignore
index 15a54e2..c17741c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@ dist/
 *.egg-info
 poetry.lock
 .vscode
+.python-version
diff --git a/.travis.yml b/.travis.yml
index f3f043a..b168b08 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,21 +1,9 @@
 language: python
 python:
-  - "2.7"
-  - "3.5"
-  - "3.6"
   - "3.7"
   - "3.8"
-matrix:
-  include:
-  - python: "3.5"
-    env: PYTHONASYNCIODEBUG=x
-  - python: "3.6"
-    env: PYTHONASYNCIODEBUG=x
-  - python: "3.7"
-    env: PYTHONASYNCIODEBUG=x
-  - python: "3.8"
-    env: PYTHONASYNCIODEBUG=x
-
+  - "3.9"
+  - "3.10"
 before_install:
   - pip install poetry more-itertools
 install:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4432a3b..10368e0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,148 +1,161 @@
-# Change Log
+# Changelog
 
-## [v1.0.3] - 2014-06-05
+## [v2.0.1] - 2022-04-27
 ### Changed
-- Make logging unicode safe
-- Log on_predicate backoff as INFO rather than ERROR
+- Allow None for jitter keyword arg (typing)
 
-## [v1.0.4] - 2014-08-12
-### Added
-- Python 2.6 support from @Bonko
-- Python 3.0 support from @robyoung
-- Run tests in Travis from @robyoung
 
-## [v1.0.5] - 2015-02-03
-### Changed
-- Add a default interval of 1 second for the constant generator
-- Improve on_predicate stop condition avoiding extra sleep
-
-## [v1.0.6] - 2015-02-10
+## [v2.0.0] - 2022-04-26
 ### Added
-- Coveralls.io integration from @singingwolfboy
-
+- Add raise_on_giveup keyword arg for decorators
+- Add backoff.runtime wait generator for dynamically setting wait times based
+  on target function return value or exception details
 ### Changed
-- Fix logging bug for function calls with tuple params
+- Improve type hints for on_success, on_backoff, on_giveup handlers
+- Use decorator-specific detail and handler type hints
+- Optionally use typing_extensions for python 3.7 type hinting
+- Drop python 3.6 support
+- Add python 3.10 support
 
-## [v1.0.7] - 2015-02-10
+## [v1.11.1] - 2021-07-14
+### Fixed
+- Update __version__ in backoff module
 
+## [v1.11.0] - 2021-07-12
+### Added
+- Configurable logging levels for backoff and giveup events
 ### Changed
-- Fix string formatting for python 2.6
+- Minor documentation fixes
 
-## [v1.1.0] - 2015-12-08
-### Added
-- Event handling for success, backoff, and giveup
-- Change log
+## [v1.10.0] - 2019-12-07
+### Changed
+- Allow sync decorator call from async function
+- NOTE: THIS WILL BE THE FINAL PYTHON 2.7 COMPATIBLE RELEASE.
 
+## [v1.9.2] - 2019-11-19
 ### Changed
-- Docs and test for multi exception invocations
-- Update dev environment test dependencies
+- Don't include tests and changelog in distribution
 
-## [v1.2.0] - 2016-05-26
-### Added
-- 'Full jitter' algorithm from @jonascheng
+## [v1.9.1] - 2019-11-18
+### Changed
+- Include tests and changelog in distribution
 
+## [v1.9.0] - 2019-11-16
 ### Changed
-- Jitter function now accepts raw value and returns jittered value
-- Change README to reST for the benefit of pypi :(
-- Remove docstring doc generation and make README canonical
+- Support python 3.8
 
-## [v1.2.1] - 2016-05-27
+## [v1.8.1] - 2019-10-11
 ### Changed
-- Documentation fixes
+- Use arguments in log messages rather than fully formatting log
+  https://github.com/litl/backoff/pull/82 from @lbernick
 
-## [v1.3.0] - 2016-08-08
+## [v1.8.0] - 2018-12-20
 ### Added
-- Support runtime configuration with optional callable kwargs
-- Add giveup kwarg for exception inspection
-
+- Custom loggers
+- Iterable intervals for constant wait_gen for predefined wait sequences
 ### Changed
-- Documentation fixes
+- Give up on StopIteration raised in wait generators
+- Nullary jitter signature deprecation warning
 
-## [v1.3.1] - 2016-08-08
+## [v1.7.0] - 2018-11-23
 ### Changed
-- Include README.rst in source distribution (fixes package)
+- Support Python 3.7
+- Drop support for async in Python 3.4
+- Drop support for Python 2.6
+- Update development dependencies
+- Use poetry for dependencies and packaging
 
-## [v1.3.2] - 2016-11-18
+## [v1.6.0] - 2018-07-14
 ### Changed
-- Don't log retried args and kwargs by default
-- README.rst syntax highlighting from @dethi
+- Change default log level from ERROR to INFO
+- Log retries on exception as INFO
 
-## [v1.4.0] - 2017-02-05
+## [v1.5.0] - 2018-04-11
 ### Added
-- Async support via `asyncio` coroutines (Python 3.4) from @rutsky
+- Add max_time keyword argument
+
+## [v1.4.3] - 2017-05-22
+### Changed
+- Add license to source distribution
 
+## [v1.4.2] - 2017-04-25
 ### Changed
-- Refactor `backoff` module into package with identical API
+- Use documented logger name https://github.com/litl/backoff/pull/32
+  from @pquentin
 
 ## [v1.4.1] - 2017-04-21
 ### Added
 - Expose __version__ at package root
-
 ### Changed
 - Fix checking for running sync version in coroutine in case when event
   loop is not set from @rutsky
 
-## [v1.4.2] - 2017-04-25
+## [v1.4.0] - 2017-02-05
+### Added
+- Async support via `asyncio` coroutines (Python 3.4) from @rutsky
 ### Changed
+- Refactor `backoff` module into package with identical API
 
-- Use documented logger name https://github.com/litl/backoff/pull/32
-  from @pquentin
-
-## [v1.4.3] - 2017-05-22
+## [v1.3.2] - 2016-11-18
 ### Changed
+- Don't log retried args and kwargs by default
+- README.rst syntax highlighting from @dethi
 
-- Add license to source distribution
-
-## [v1.5.0] - 2018-04-11
+## [v1.3.1] - 2016-08-08
 ### Changed
+- Include README.rst in source distribution (fixes package)
 
-- Add max_time keyword argument
-
-## [v1.6.0] - 2018-07-14
+## [v1.3.0] - 2016-08-08
+### Added
+- Support runtime configuration with optional callable kwargs
+- Add giveup kwarg for exception inspection
 ### Changed
+- Documentation fixes
 
-- Change default log level from ERROR to INFO
-- Log retries on exception as INFO
-
-## [v1.7.0] - 2018-11-23
+## [v1.2.1] - 2016-05-27
 ### Changed
+- Documentation fixes
 
-- Support Python 3.7
-- Drop support for async in Python 3.4
-- Drop support for Python 2.6
-- Update development dependencies
-- Use poetry for dependencies and packaging
-
-## [v1.8.0] - 2018-12-20
-### Changed
 
-- Give up on StopIteration raised in wait generators
-- Iterable intervals for constant wait_gen for predefined wait sequences
-- Nullary jitter signature deprecation warning
-- Custom loggers
+## [v1.2.0] - 2016-05-26
+### Added
+- 'Full jitter' algorithm from @jonascheng
 
-## [v1.8.1] - 2019-10-11
 ### Changed
+- Jitter function now accepts raw value and returns jittered value
+- Change README to reST for the benefit of pypi :(
+- Remove docstring doc generation and make README canonical
 
-- Use arguments in log messages rather than fully formatting log
-  https://github.com/litl/backoff/pull/82 from @lbernick
-
-## [v1.9.0] 2019-11-16
+## [v1.1.0] - 2015-12-08
+### Added
+- Event handling for success, backoff, and giveup
+- Change log
 ### Changed
+- Docs and test for multi exception invocations
+- Update dev environment test dependencies
 
-- Support python 3.8
-
-## [v1.9.1] 2019-11-18
+## [v1.0.7] - 2015-02-10
 ### Changed
+- Fix string formatting for python 2.6
 
-- Include tests and changelog in distribution
+## [v1.0.6] - 2015-02-10
+### Added
+- Coveralls.io integration from @singingwolfboy
+### Changed
+- Fix logging bug for function calls with tuple params
 
-## [v1.9.2] 2019-11-19
+## [v1.0.5] - 2015-02-03
 ### Changed
+- Add a default interval of 1 second for the constant generator
+- Improve on_predicate stop condition avoiding extra sleep
 
-- Don't include tests and changelog in distribution
+## [v1.0.4] - 2014-08-12
+### Added
+- Python 2.6 support from @Bonko
+- Python 3.0 support from @robyoung
+- Run tests in Travis from @robyoung
 
-## [v1.10.0] 2019-12-7
+## [v1.0.3] - 2014-06-05
 ### Changed
-
-- Allow sync decorator call from async function
+- Make logging unicode safe
+- Log on_predicate backoff as INFO rather than ERROR
diff --git a/Makefile b/Makefile
index 9daaf0b..b354ae4 100644
--- a/Makefile
+++ b/Makefile
@@ -13,11 +13,10 @@ all:
 	@echo 'check             make sure you are ready to commit'
 
 flake8:
-ifeq ($(PY_GTE_35),1)
-	@flake8 backoff tests
-else
-	@flake8 --exclude tests/python35,backoff/_async.py backoff tests
-endif
+	@flake8 --ignore=E741,W503,W504 backoff tests
+
+mypy:
+	@mypy --show-error-codes backoff tests
 
 clean:
 	@find . -name "*.pyc" -delete
@@ -25,11 +24,8 @@ clean:
 	@rm -rf build dist .coverage MANIFEST
 
 test: clean
-ifeq ($(PY_GTE_35),1)
-	@PYTHONPATH=. py.test --cov-config .coveragerc-py35 --cov backoff tests
-else
-	@PYTHONPATH=. py.test --cov-config .coveragerc-py2 --cov backoff tests/test_*.py
-endif
-
-check: flake8 test
-	@coverage report | grep 100% >/dev/null || { echo 'Unit tests coverage is incomplete.'; exit 1; }
+	@PYTHONPATH=. py.test --cov-report term-missing --cov backoff tests
+
+check: flake8 mypy test
+	@coverage report | grep ^TOTAL | grep 100% >/dev/null || \
+	{ echo 'Unit tests coverage is incomplete.'; exit 1; }
diff --git a/README.rst b/README.rst
index 30ddc32..0053c2b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,12 +1,16 @@
 backoff
 =======
 
-.. image:: https://travis-ci.org/litl/backoff.svg?branch=master
-    :target: https://travis-ci.org/litl/backoff?branch=master
-.. image:: https://coveralls.io/repos/litl/backoff/badge.svg?branch=master
-    :target: https://coveralls.io/r/litl/backoff?branch=master
+.. image:: https://travis-ci.org/litl/backoff.svg
+    :target: https://travis-ci.org/litl/backoff
+.. image:: https://coveralls.io/repos/litl/backoff/badge.svg
+    :target: https://coveralls.io/r/litl/backoff?branch=python-3
+.. image:: https://github.com/litl/backoff/workflows/CodeQL/badge.svg
+    :target: https://github.com/litl/backoff/actions/workflows/codeql-analysis.yml
 .. image:: https://img.shields.io/pypi/v/backoff.svg
     :target: https://pypi.python.org/pypi/backoff
+.. image:: https://img.shields.io/github/license/litl/backoff
+    :target: https://github.com/litl/backoff/blob/master/LICENSE
 
 **Function decoration for backoff and retry**
 
@@ -18,7 +22,7 @@ APIs. Somewhat more generally, it may also be of use for dynamically
 polling resources for externally generated content.
 
 Decorators support both regular functions for synchronous code and
-`asyncio <https://docs.python.org/3/library/asyncio.html>`_'s coroutines
+`asyncio <https://docs.python.org/3/library/asyncio.html>`__'s coroutines
 for asynchronous code.
 
 Examples
@@ -102,9 +106,31 @@ be retried:
     def get_url(url):
         return requests.get(url)
 
-When a give up event occurs, the exception in question is reraised
+By default, when a give up event occurs, the exception in question is reraised
 and so code calling an `on_exception`-decorated function may still
-need to do exception handling.
+need to do exception handling. This behavior can optionally be disabled
+using the `raise_on_giveup` keyword argument.
+
+In the code below, `requests.exceptions.RequestException` will not be raised
+when giveup occurs. Note that the decorated function will return `None` in this
+case, regardless of the logic in the `on_exception` handler.
+
+.. code-block:: python
+
+    def fatal_code(e):
+        return 400 <= e.response.status_code < 500
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException,
+                          max_time=300,
+                          raise_on_giveup=False,
+                          giveup=fatal_code)
+    def get_url(url):
+        return requests.get(url)
+
+This is useful for non-mission critical code where you still wish to retry
+the code inside of `backoff.on_exception` but wish to proceed with execution
+even if all retries fail.
 
 @backoff.on_predicate
 ---------------------
@@ -132,7 +158,7 @@ so the above can more concisely be written:
 .. code-block:: python
 
     @backoff.on_predicate(backoff.fibo, max_value=13)
-    def poll_for_message(queue)
+    def poll_for_message(queue):
         return queue.get()
 
 More simply, a function which continues polling every second until it
@@ -141,7 +167,7 @@ gets a non-falsey result could be defined like like this:
 .. code-block:: python
 
     @backoff.on_predicate(backoff.constant, interval=1)
-    def poll_for_message(queue)
+    def poll_for_message(queue):
         return queue.get()
 
 Jitter
@@ -181,6 +207,7 @@ backoff behavior for different cases:
     def poll_for_message(queue):
         return queue.get()
 
+
 Runtime Configuration
 ---------------------
 
@@ -229,7 +256,7 @@ implemented like so:
 .. code-block:: python
 
     def backoff_hdlr(details):
-        print ("Backing off {wait:0.1f} seconds afters {tries} tries "
+        print ("Backing off {wait:0.1f} seconds after {tries} tries "
                "calling function {target} with args {args} and kwargs "
                "{kwargs}".format(**details))
 
@@ -267,20 +294,20 @@ Asynchronous code
 Backoff supports asynchronous execution in Python 3.5 and above.
 
 To use backoff in asynchronous code based on
-`asyncio <https://docs.python.org/3/library/asyncio.html>`_
+`asyncio <https://docs.python.org/3/library/asyncio.html>`__
 you simply need to apply ``backoff.on_exception`` or ``backoff.on_predicate``
 to coroutines.
 You can also use coroutines for the ``on_success``, ``on_backoff``, and
 ``on_giveup`` event handlers, with the interface otherwise being identical.
 
-The following examples use `aiohttp <https://aiohttp.readthedocs.io/>`_
+The following examples use `aiohttp <https://aiohttp.readthedocs.io/>`__
 asynchronous HTTP client/server library.
 
 .. code-block:: python
 
     @backoff.on_exception(backoff.expo, aiohttp.ClientError, max_time=60)
     async def get_url(url):
-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(raise_for_status=True) as session:
             async with session.get(url) as response:
                 return await response.text()
 
@@ -312,7 +339,7 @@ looked up by name.
 .. code-block:: python
 
    @backoff.on_exception(backoff.expo,
-                         requests.exception.RequestException,
+                         requests.exceptions.RequestException,
 			 logger='my_logger')
    # ...
 
@@ -323,12 +350,12 @@ directly.
 
     my_logger = logging.getLogger('my_logger')
     my_handler = logging.StreamHandler()
-    my_logger.add_handler(my_handler)
+    my_logger.addHandler(my_handler)
     my_logger.setLevel(logging.ERROR)
 
     @backoff.on_exception(backoff.expo,
-                         requests.exception.RequestException,
-			 logger=my_logger)
+                          requests.exceptions.RequestException,
+			  logger=my_logger)
     # ...
 
 Default logging can be disabled all together by specifying
diff --git a/backoff/__init__.py b/backoff/__init__.py
index fc00001..0898599 100644
--- a/backoff/__init__.py
+++ b/backoff/__init__.py
@@ -14,7 +14,7 @@ https://github.com/litl/backoff
 """
 from backoff._decorator import on_predicate, on_exception
 from backoff._jitter import full_jitter, random_jitter
-from backoff._wait_gen import constant, expo, fibo
+from backoff._wait_gen import constant, expo, fibo, runtime
 
 __all__ = [
     'on_predicate',
@@ -22,8 +22,9 @@ __all__ = [
     'constant',
     'expo',
     'fibo',
+    'runtime',
     'full_jitter',
-    'random_jitter'
+    'random_jitter',
 ]
 
-__version__ = '1.10.0'
+__version__ = '2.0.1'
diff --git a/backoff/_async.py b/backoff/_async.py
index 38cde8e..14f1415 100644
--- a/backoff/_async.py
+++ b/backoff/_async.py
@@ -1,7 +1,7 @@
 # coding:utf-8
 import datetime
 import functools
-import asyncio  # Python 3.5 code and syntax is allowed in this file
+import asyncio
 from datetime import timedelta
 
 from backoff._common import (_init_wait_gen, _maybe_call, _next_wait)
@@ -21,7 +21,10 @@ def _ensure_coroutines(coros_or_funcs):
     return [_ensure_coroutine(f) for f in coros_or_funcs]
 
 
-async def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra):
+async def _call_handlers(handlers,
+                         *,
+                         target, args, kwargs, tries, elapsed,
+                         **extra):
     details = {
         'target': target,
         'args': args,
@@ -30,11 +33,12 @@ async def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra):
         'elapsed': elapsed,
     }
     details.update(extra)
-    for hdlr in hdlrs:
-        await hdlr(details)
+    for handler in handlers:
+        await handler(details)
 
 
 def retry_predicate(target, wait_gen, predicate,
+                    *,
                     max_tries, max_time, jitter,
                     on_success, on_backoff, on_giveup,
                     wait_gen_kwargs):
@@ -51,9 +55,10 @@ def retry_predicate(target, wait_gen, predicate,
     @functools.wraps(target)
     async def retry(*args, **kwargs):
 
-        # change names because python 2.x doesn't have nonlocal
-        max_tries_ = _maybe_call(max_tries)
-        max_time_ = _maybe_call(max_time)
+        # update variables from outer function args
+        nonlocal max_tries, max_time
+        max_tries = _maybe_call(max_tries)
+        max_time = _maybe_call(max_time)
 
         tries = 0
         start = datetime.datetime.now()
@@ -61,25 +66,31 @@ def retry_predicate(target, wait_gen, predicate,
         while True:
             tries += 1
             elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            details = {
+                "target": target,
+                "args": args,
+                "kwargs": kwargs,
+                "tries": tries,
+                "elapsed": elapsed,
+            }
 
             ret = await target(*args, **kwargs)
             if predicate(ret):
-                max_tries_exceeded = (tries == max_tries_)
-                max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                max_tries_exceeded = (tries == max_tries)
+                max_time_exceeded = (max_time is not None and
+                                     elapsed >= max_time)
 
                 if max_tries_exceeded or max_time_exceeded:
-                    await _call_handlers(on_giveup, *details, value=ret)
+                    await _call_handlers(on_giveup, **details, value=ret)
                     break
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(wait, ret, jitter, elapsed, max_time)
                 except StopIteration:
-                    await _call_handlers(on_giveup, *details, value=ret)
+                    await _call_handlers(on_giveup, **details, value=ret)
                     break
 
-                await _call_handlers(on_backoff, *details, value=ret,
+                await _call_handlers(on_backoff, **details, value=ret,
                                      wait=seconds)
 
                 # Note: there is no convenient way to pass explicit event
@@ -94,7 +105,7 @@ def retry_predicate(target, wait_gen, predicate,
                 await asyncio.sleep(seconds)
                 continue
             else:
-                await _call_handlers(on_success, *details, value=ret)
+                await _call_handlers(on_success, **details, value=ret)
                 break
 
         return ret
@@ -103,8 +114,9 @@ def retry_predicate(target, wait_gen, predicate,
 
 
 def retry_exception(target, wait_gen, exception,
+                    *,
                     max_tries, max_time, jitter, giveup,
-                    on_success, on_backoff, on_giveup,
+                    on_success, on_backoff, on_giveup, raise_on_giveup,
                     wait_gen_kwargs):
     on_success = _ensure_coroutines(on_success)
     on_backoff = _ensure_coroutines(on_backoff)
@@ -117,9 +129,11 @@ def retry_exception(target, wait_gen, exception,
 
     @functools.wraps(target)
     async def retry(*args, **kwargs):
-        # change names because python 2.x doesn't have nonlocal
-        max_tries_ = _maybe_call(max_tries)
-        max_time_ = _maybe_call(max_time)
+
+        # update variables from outer function args
+        nonlocal max_tries, max_time
+        max_tries = _maybe_call(max_tries)
+        max_time = _maybe_call(max_time)
 
         tries = 0
         start = datetime.datetime.now()
@@ -127,27 +141,35 @@ def retry_exception(target, wait_gen, exception,
         while True:
             tries += 1
             elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            details = {
+                "target": target,
+                "args": args,
+                "kwargs": kwargs,
+                "tries": tries,
+                "elapsed": elapsed,
+            }
 
             try:
                 ret = await target(*args, **kwargs)
             except exception as e:
                 giveup_result = await giveup(e)
-                max_tries_exceeded = (tries == max_tries_)
-                max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                max_tries_exceeded = (tries == max_tries)
+                max_time_exceeded = (max_time is not None and
+                                     elapsed >= max_time)
 
                 if giveup_result or max_tries_exceeded or max_time_exceeded:
-                    await _call_handlers(on_giveup, *details)
-                    raise
+                    await _call_handlers(on_giveup, **details)
+                    if raise_on_giveup:
+                        raise
+                    return None
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(wait, e, jitter, elapsed, max_time)
                 except StopIteration:
-                    await _call_handlers(on_giveup, *details)
+                    await _call_handlers(on_giveup, **details)
                     raise e
 
-                await _call_handlers(on_backoff, *details, wait=seconds)
+                await _call_handlers(on_backoff, **details, wait=seconds)
 
                 # Note: there is no convenient way to pass explicit event
                 # loop to decorator, so here we assume that either default
@@ -160,7 +182,7 @@ def retry_exception(target, wait_gen, exception,
                 #   <https://bugs.python.org/issue28613>
                 await asyncio.sleep(seconds)
             else:
-                await _call_handlers(on_success, *details)
+                await _call_handlers(on_success, **details)
 
                 return ret
     return retry
diff --git a/backoff/_common.py b/backoff/_common.py
index efd13f1..2b2e54e 100644
--- a/backoff/_common.py
+++ b/backoff/_common.py
@@ -15,16 +15,24 @@ _logger.setLevel(logging.INFO)
 
 # Evaluate arg that can be either a fixed value or a callable.
 def _maybe_call(f, *args, **kwargs):
-    return f(*args, **kwargs) if callable(f) else f
+    if callable(f):
+        try:
+            return f(*args, **kwargs)
+        except TypeError:
+            return f
+    else:
+        return f
 
 
 def _init_wait_gen(wait_gen, wait_gen_kwargs):
     kwargs = {k: _maybe_call(v) for k, v in wait_gen_kwargs.items()}
-    return wait_gen(**kwargs)
+    initialized = wait_gen(**kwargs)
+    initialized.send(None)  # Initialize with an empty send
+    return initialized
 
 
-def _next_wait(wait, jitter, elapsed, max_time):
-    value = next(wait)
+def _next_wait(wait, send_value, jitter, elapsed, max_time):
+    value = wait.send(send_value)
     try:
         if jitter is not None:
             seconds = jitter(value)
@@ -41,20 +49,31 @@ def _next_wait(wait, jitter, elapsed, max_time):
 
         seconds = value + jitter()
 
-    # don't sleep longer than remaining alloted max_time
+    # don't sleep longer than remaining allotted max_time
     if max_time is not None:
         seconds = min(seconds, max_time - elapsed)
 
     return seconds
 
 
+def _prepare_logger(logger):
+    if isinstance(logger, str):
+        logger = logging.getLogger(logger)
+    return logger
+
+
 # Configure handler list with user specified handler and optionally
 # with a default handler bound to the specified logger.
-def _config_handlers(user_handlers, default_handler=None, logger=None):
+def _config_handlers(
+    user_handlers, *, default_handler=None, logger=None, log_level=None
+):
     handlers = []
     if logger is not None:
+        assert log_level is not None, "Log level is not specified"
         # bind the specified logger to the default log handler
-        log_handler = functools.partial(default_handler, logger=logger)
+        log_handler = functools.partial(
+            default_handler, logger=logger, log_level=log_level
+        )
         handlers.append(log_handler)
 
     if user_handlers is None:
@@ -73,7 +92,7 @@ def _config_handlers(user_handlers, default_handler=None, logger=None):
 
 
 # Default backoff handler
-def _log_backoff(details, logger):
+def _log_backoff(details, logger, log_level):
     msg = "Backing off %s(...) for %.1fs (%s)"
     log_args = [details['target'].__name__, details['wait']]
 
@@ -83,11 +102,11 @@ def _log_backoff(details, logger):
         log_args.append(exc_fmt.rstrip("\n"))
     else:
         log_args.append(details['value'])
-    logger.info(msg, *log_args)
+    logger.log(log_level, msg, *log_args)
 
 
 # Default giveup handler
-def _log_giveup(details, logger):
+def _log_giveup(details, logger, log_level):
     msg = "Giving up %s(...) after %d tries (%s)"
     log_args = [details['target'].__name__, details['tries']]
 
@@ -98,4 +117,4 @@ def _log_giveup(details, logger):
     else:
         log_args.append(details['value'])
 
-    logger.error(msg, *log_args)
+    logger.log(log_level, msg, *log_args)
diff --git a/backoff/_decorator.py b/backoff/_decorator.py
index e541904..6cf9e17 100644
--- a/backoff/_decorator.py
+++ b/backoff/_decorator.py
@@ -1,32 +1,42 @@
 # coding:utf-8
-from __future__ import unicode_literals
-
+import asyncio
 import logging
 import operator
-import sys
-
-from backoff._common import (_config_handlers, _log_backoff, _log_giveup)
+from typing import Any, Callable, Iterable, Optional, Type, Union
+
+from backoff._common import (
+    _prepare_logger,
+    _config_handlers,
+    _log_backoff,
+    _log_giveup
+)
 from backoff._jitter import full_jitter
-from backoff import _sync
-
-
-# python 2.7 -> 3.x compatibility for str and unicode
-try:
-    basestring
-except NameError:  # pragma: python=3.5
-    basestring = str
-
-
-def on_predicate(wait_gen,
-                 predicate=operator.not_,
-                 max_tries=None,
-                 max_time=None,
-                 jitter=full_jitter,
-                 on_success=None,
-                 on_backoff=None,
-                 on_giveup=None,
-                 logger='backoff',
-                 **wait_gen_kwargs):
+from backoff import _async, _sync
+from backoff._typing import (
+    _CallableT,
+    _Handler,
+    _Jitterer,
+    _MaybeCallable,
+    _MaybeLogger,
+    _MaybeSequence,
+    _Predicate,
+    _WaitGenerator,
+)
+
+
+def on_predicate(wait_gen: _WaitGenerator,
+                 predicate: _Predicate[Any] = operator.not_,
+                 *,
+                 max_tries: Optional[_MaybeCallable[int]] = None,
+                 max_time: Optional[_MaybeCallable[float]] = None,
+                 jitter: Union[_Jitterer, None] = full_jitter,
+                 on_success: Union[_Handler, Iterable[_Handler]] = None,
+                 on_backoff: Union[_Handler, Iterable[_Handler]] = None,
+                 on_giveup: Union[_Handler, Iterable[_Handler]] = None,
+                 logger: _MaybeLogger = 'backoff',
+                 backoff_log_level: int = logging.INFO,
+                 giveup_log_level: int = logging.ERROR,
+                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
     """Returns decorator for backoff and retry triggered by predicate.
 
     Args:
@@ -63,51 +73,68 @@ def on_predicate(wait_gen,
             about the invocation.
         logger: Name of logger or Logger object to log to. Defaults to
             'backoff'.
+        backoff_log_level: log level for the backoff event. Defaults to "INFO"
+        giveup_log_level: log level for the give up event. Defaults to "ERROR"
         **wait_gen_kwargs: Any additional keyword args specified will be
             passed to wait_gen when it is initialized.  Any callable
             args will first be evaluated and their return values passed.
             This is useful for runtime configuration.
     """
     def decorate(target):
-        # change names because python 2.x doesn't have nonlocal
-        logger_ = logger
-        if isinstance(logger_, basestring):
-            logger_ = logging.getLogger(logger_)
-        on_success_ = _config_handlers(on_success)
-        on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
-        on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
-
-        retry = None
-        if sys.version_info >= (3, 5):  # pragma: python=3.5
-            import asyncio
-
-            if asyncio.iscoroutinefunction(target):
-                import backoff._async
-                retry = backoff._async.retry_predicate
-
-        if retry is None:
+        nonlocal logger, on_success, on_backoff, on_giveup
+
+        logger = _prepare_logger(logger)
+        on_success = _config_handlers(on_success)
+        on_backoff = _config_handlers(
+            on_backoff,
+            default_handler=_log_backoff,
+            logger=logger,
+            log_level=backoff_log_level
+        )
+        on_giveup = _config_handlers(
+            on_giveup,
+            default_handler=_log_giveup,
+            logger=logger,
+            log_level=giveup_log_level
+        )
+
+        if asyncio.iscoroutinefunction(target):
+            retry = _async.retry_predicate
+        else:
             retry = _sync.retry_predicate
 
-        return retry(target, wait_gen, predicate,
-                     max_tries, max_time, jitter,
-                     on_success_, on_backoff_, on_giveup_,
-                     wait_gen_kwargs)
+        return retry(
+            target,
+            wait_gen,
+            predicate,
+            max_tries=max_tries,
+            max_time=max_time,
+            jitter=jitter,
+            on_success=on_success,
+            on_backoff=on_backoff,
+            on_giveup=on_giveup,
+            wait_gen_kwargs=wait_gen_kwargs
+        )
 
     # Return a function which decorates a target with a retry loop.
     return decorate
 
 
-def on_exception(wait_gen,
-                 exception,
-                 max_tries=None,
-                 max_time=None,
-                 jitter=full_jitter,
-                 giveup=lambda e: False,
-                 on_success=None,
-                 on_backoff=None,
-                 on_giveup=None,
-                 logger='backoff',
-                 **wait_gen_kwargs):
+def on_exception(wait_gen: _WaitGenerator,
+                 exception: _MaybeSequence[Type[Exception]],
+                 *,
+                 max_tries: Optional[_MaybeCallable[int]] = None,
+                 max_time: Optional[_MaybeCallable[float]] = None,
+                 jitter: Union[_Jitterer, None] = full_jitter,
+                 giveup: _Predicate[Exception] = lambda e: False,
+                 on_success: Union[_Handler, Iterable[_Handler]] = None,
+                 on_backoff: Union[_Handler, Iterable[_Handler]] = None,
+                 on_giveup: Union[_Handler, Iterable[_Handler]] = None,
+                 raise_on_giveup: bool = True,
+                 logger: _MaybeLogger = 'backoff',
+                 backoff_log_level: int = logging.INFO,
+                 giveup_log_level: int = logging.ERROR,
+                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
     """Returns decorator for backoff and retry triggered by exception.
 
     Args:
@@ -117,7 +144,7 @@ def on_exception(wait_gen,
             backoff.
         max_tries: The maximum number of attempts to make before giving
             up. Once exhausted, the exception will be allowed to escape.
-            The default value of None means their is no limit to the
+            The default value of None means there is no limit to the
             number of tries. If a callable is passed, it will be
             evaluated at runtime and its return value used.
         max_time: The maximum total amount of time to try for before
@@ -143,36 +170,53 @@ def on_exception(wait_gen,
             signature to be called in the event that max_tries
             is exceeded.  The parameter is a dict containing details
             about the invocation.
+        raise_on_giveup: Boolean indicating whether the registered exceptions
+            should be raised on giveup. Defaults to `True`
         logger: Name or Logger object to log to. Defaults to 'backoff'.
+        backoff_log_level: log level for the backoff event. Defaults to "INFO"
+        giveup_log_level: log level for the give up event. Defaults to "ERROR"
         **wait_gen_kwargs: Any additional keyword args specified will be
             passed to wait_gen when it is initialized.  Any callable
             args will first be evaluated and their return values passed.
             This is useful for runtime configuration.
     """
     def decorate(target):
-        # change names because python 2.x doesn't have nonlocal
-        logger_ = logger
-        if isinstance(logger_, basestring):
-            logger_ = logging.getLogger(logger_)
-        on_success_ = _config_handlers(on_success)
-        on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
-        on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
-
-        retry = None
-        if sys.version_info[:2] >= (3, 5):   # pragma: python=3.5
-            import asyncio
-
-            if asyncio.iscoroutinefunction(target):
-                import backoff._async
-                retry = backoff._async.retry_exception
-
-        if retry is None:
+        nonlocal logger, on_success, on_backoff, on_giveup
+
+        logger = _prepare_logger(logger)
+        on_success = _config_handlers(on_success)
+        on_backoff = _config_handlers(
+            on_backoff,
+            default_handler=_log_backoff,
+            logger=logger,
+            log_level=backoff_log_level,
+        )
+        on_giveup = _config_handlers(
+            on_giveup,
+            default_handler=_log_giveup,
+            logger=logger,
+            log_level=giveup_log_level,
+        )
+
+        if asyncio.iscoroutinefunction(target):
+            retry = _async.retry_exception
+        else:
             retry = _sync.retry_exception
 
-        return retry(target, wait_gen, exception,
-                     max_tries, max_time, jitter, giveup,
-                     on_success_, on_backoff_, on_giveup_,
-                     wait_gen_kwargs)
+        return retry(
+            target,
+            wait_gen,
+            exception,
+            max_tries=max_tries,
+            max_time=max_time,
+            jitter=jitter,
+            giveup=giveup,
+            on_success=on_success,
+            on_backoff=on_backoff,
+            on_giveup=on_giveup,
+            raise_on_giveup=raise_on_giveup,
+            wait_gen_kwargs=wait_gen_kwargs
+        )
 
     # Return a function which decorates a target with a retry loop.
     return decorate
diff --git a/backoff/_jitter.py b/backoff/_jitter.py
index 19f079b..be7e389 100644
--- a/backoff/_jitter.py
+++ b/backoff/_jitter.py
@@ -3,7 +3,7 @@
 import random
 
 
-def random_jitter(value):
+def random_jitter(value: float) -> float:
     """Jitter the value a random number of milliseconds.
 
     This adds up to 1 second of additional time to the original value.
@@ -15,7 +15,7 @@ def random_jitter(value):
     return value + random.random()
 
 
-def full_jitter(value):
+def full_jitter(value: float) -> float:
     """Jitter the value across the full range (0 to value).
 
     This corresponds to the "Full Jitter" algorithm specified in the
diff --git a/backoff/_sync.py b/backoff/_sync.py
index 477765d..ecc592d 100644
--- a/backoff/_sync.py
+++ b/backoff/_sync.py
@@ -21,6 +21,7 @@ def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra):
 
 
 def retry_predicate(target, wait_gen, predicate,
+                    *,
                     max_tries, max_time, jitter,
                     on_success, on_backoff, on_giveup,
                     wait_gen_kwargs):
@@ -28,9 +29,10 @@ def retry_predicate(target, wait_gen, predicate,
     @functools.wraps(target)
     def retry(*args, **kwargs):
 
-        # change names because python 2.x doesn't have nonlocal
-        max_tries_ = _maybe_call(max_tries)
-        max_time_ = _maybe_call(max_time)
+        # update variables from outer function args
+        nonlocal max_tries, max_time
+        max_tries = _maybe_call(max_tries)
+        max_time = _maybe_call(max_time)
 
         tries = 0
         start = datetime.datetime.now()
@@ -38,31 +40,37 @@ def retry_predicate(target, wait_gen, predicate,
         while True:
             tries += 1
             elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            details = {
+                "target": target,
+                "args": args,
+                "kwargs": kwargs,
+                "tries": tries,
+                "elapsed": elapsed,
+            }
 
             ret = target(*args, **kwargs)
             if predicate(ret):
-                max_tries_exceeded = (tries == max_tries_)
-                max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                max_tries_exceeded = (tries == max_tries)
+                max_time_exceeded = (max_time is not None and
+                                     elapsed >= max_time)
 
                 if max_tries_exceeded or max_time_exceeded:
-                    _call_handlers(on_giveup, *details, value=ret)
+                    _call_handlers(on_giveup, **details, value=ret)
                     break
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(wait, ret, jitter, elapsed, max_time)
                 except StopIteration:
-                    _call_handlers(on_giveup, *details)
+                    _call_handlers(on_giveup, **details)
                     break
 
-                _call_handlers(on_backoff, *details,
+                _call_handlers(on_backoff, **details,
                                value=ret, wait=seconds)
 
                 time.sleep(seconds)
                 continue
             else:
-                _call_handlers(on_success, *details, value=ret)
+                _call_handlers(on_success, **details, value=ret)
                 break
 
         return ret
@@ -71,16 +79,18 @@ def retry_predicate(target, wait_gen, predicate,
 
 
 def retry_exception(target, wait_gen, exception,
+                    *,
                     max_tries, max_time, jitter, giveup,
-                    on_success, on_backoff, on_giveup,
+                    on_success, on_backoff, on_giveup, raise_on_giveup,
                     wait_gen_kwargs):
 
     @functools.wraps(target)
     def retry(*args, **kwargs):
 
-        # change names because python 2.x doesn't have nonlocal
-        max_tries_ = _maybe_call(max_tries)
-        max_time_ = _maybe_call(max_time)
+        # update variables from outer function args
+        nonlocal max_tries, max_time
+        max_tries = _maybe_call(max_tries)
+        max_time = _maybe_call(max_time)
 
         tries = 0
         start = datetime.datetime.now()
@@ -88,30 +98,38 @@ def retry_exception(target, wait_gen, exception,
         while True:
             tries += 1
             elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            details = {
+                "target": target,
+                "args": args,
+                "kwargs": kwargs,
+                "tries": tries,
+                "elapsed": elapsed,
+            }
 
             try:
                 ret = target(*args, **kwargs)
             except exception as e:
-                max_tries_exceeded = (tries == max_tries_)
-                max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                max_tries_exceeded = (tries == max_tries)
+                max_time_exceeded = (max_time is not None and
+                                     elapsed >= max_time)
 
                 if giveup(e) or max_tries_exceeded or max_time_exceeded:
-                    _call_handlers(on_giveup, *details)
-                    raise
+                    _call_handlers(on_giveup, **details)
+                    if raise_on_giveup:
+                        raise
+                    return None
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(wait, e, jitter, elapsed, max_time)
                 except StopIteration:
-                    _call_handlers(on_giveup, *details)
+                    _call_handlers(on_giveup, **details)
                     raise e
 
-                _call_handlers(on_backoff, *details, wait=seconds)
+                _call_handlers(on_backoff, **details, wait=seconds)
 
                 time.sleep(seconds)
             else:
-                _call_handlers(on_success, *details)
+                _call_handlers(on_success, **details)
 
                 return ret
     return retry
diff --git a/backoff/_typing.py b/backoff/_typing.py
new file mode 100644
index 0000000..67a624d
--- /dev/null
+++ b/backoff/_typing.py
@@ -0,0 +1,43 @@
+# coding:utf-8
+import logging
+import sys
+from typing import (Any, Callable, Dict, Generator, Sequence, Tuple, Union,
+                    TypeVar)
+
+
+details_kwargs = {"total": False}
+
+if sys.version_info >= (3, 8):  # pragma: no cover
+    from typing import TypedDict
+else:  # pragma: no cover
+    # use typing_extensions if installed but don't require it
+    try:
+        from typing_extensions import TypedDict
+    except ImportError:
+        TypedDict = Dict[str, Any]
+        del details_kwargs["total"]
+
+
+class _Details(TypedDict):
+    target: Callable[..., Any]
+    args: Tuple[Any, ...]
+    kwargs: Dict[str, Any]
+    tries: int
+    elapsed: float
+
+
+class Details(_Details, **details_kwargs):
+    wait: float  # present in the on_backoff handler case for either decorator
+    value: Any  # present in the on_predicate decorator case
+
+
+T = TypeVar("T")
+
+_CallableT = TypeVar('_CallableT', bound=Callable[..., Any])
+_Handler = Callable[[Details], None]
+_Jitterer = Callable[[float], float]
+_MaybeCallable = Union[T, Callable[[], T]]
+_MaybeLogger = Union[str, logging.Logger, None]
+_MaybeSequence = Union[T, Sequence[T]]
+_Predicate = Callable[[T], bool]
+_WaitGenerator = Callable[..., Generator[float, None, None]]
diff --git a/backoff/_wait_gen.py b/backoff/_wait_gen.py
index 49dbbca..aee05c5 100644
--- a/backoff/_wait_gen.py
+++ b/backoff/_wait_gen.py
@@ -1,18 +1,26 @@
 # coding:utf-8
 
 import itertools
+from typing import Any, Callable, Generator, Iterable, Optional, Union
 
 
-def expo(base=2, factor=1, max_value=None):
+def expo(
+    base: int = 2,
+    factor: int = 1,
+    max_value: Optional[int] = None
+) -> Generator[int, Any, None]:
+
     """Generator for exponential decay.
 
     Args:
         base: The mathematical base of the exponentiation operation
-        factor: Factor to multiply the exponentation by.
+        factor: Factor to multiply the exponentiation by.
         max_value: The maximum value to yield. Once the value in the
              true exponential sequence exceeds this, the value
              of max_value will forever after be yielded.
     """
+    # Advance past initial .send() call
+    yield  # type: ignore[misc]
     n = 0
     while True:
         a = factor * base ** n
@@ -23,7 +31,7 @@ def expo(base=2, factor=1, max_value=None):
             yield max_value
 
 
-def fibo(max_value=None):
+def fibo(max_value: Optional[int] = None) -> Generator[int, None, None]:
     """Generator for fibonaccial decay.
 
     Args:
@@ -31,6 +39,9 @@ def fibo(max_value=None):
              true fibonacci sequence exceeds this, the value
              of max_value will forever after be yielded.
     """
+    # Advance past initial .send() call
+    yield  # type: ignore[misc]
+
     a = 1
     b = 1
     while True:
@@ -41,16 +52,35 @@ def fibo(max_value=None):
             yield max_value
 
 
-def constant(interval=1):
+def constant(
+    interval: Union[int, Iterable[int]] = 1
+) -> Generator[int, None, None]:
     """Generator for constant intervals.
 
     Args:
         interval: A constant value to yield or an iterable of such values.
     """
+    # Advance past initial .send() call
+    yield  # type: ignore[misc]
+
     try:
-        itr = iter(interval)
+        itr = iter(interval)  # type: ignore
     except TypeError:
-        itr = itertools.repeat(interval)
+        itr = itertools.repeat(interval)  # type: ignore
 
     for val in itr:
         yield val
+
+
+def runtime(*, value: Callable[[Any], int]) -> Generator[int, None, None]:
+    """Generator that is based on parsing the return value or thrown
+        exception of the decorated method
+
+    Args:
+        value: a callable which takes as input the decorated
+            function's return value or thrown exception and
+            determines how long to wait
+    """
+    ret_or_exc = yield  # type: ignore[misc]
+    while True:
+        ret_or_exc = yield value(ret_or_exc)
diff --git a/backoff/py.typed b/backoff/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/backoff/types.py b/backoff/types.py
new file mode 100644
index 0000000..25f20a4
--- /dev/null
+++ b/backoff/types.py
@@ -0,0 +1,6 @@
+# coding:utf-8
+from ._typing import Details
+
+__all__ = [
+    'Details'
+]
diff --git a/debian/changelog b/debian/changelog
index 6ce55a3..6bf5f4b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+backoff (2.0.1-0kali1) UNRELEASED; urgency=low
+
+  * New upstream release.
+  * New upstream release.
+
+ -- Kali Janitor <janitor@kali.org>  Mon, 16 May 2022 04:06:15 -0000
+
 backoff (1.10.0-0kali1) kali-dev; urgency=medium
 
   * Initial release
diff --git a/debian/patches/Add-missing-setup.py.patch b/debian/patches/Add-missing-setup.py.patch
index 6d9f89c..0d67892 100644
--- a/debian/patches/Add-missing-setup.py.patch
+++ b/debian/patches/Add-missing-setup.py.patch
@@ -11,11 +11,10 @@ tarball)
  1 file changed, 24 insertions(+)
  create mode 100644 setup.py
 
-diff --git a/setup.py b/setup.py
-new file mode 100644
-index 0000000..6e4f961
+Index: backoff/setup.py
+===================================================================
 --- /dev/null
-+++ b/setup.py
++++ backoff/setup.py
 @@ -0,0 +1,24 @@
 +# -*- coding: utf-8 -*-
 +from distutils.core import setup
diff --git a/pyproject.toml b/pyproject.toml
index 3469bf2..337df62 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,8 @@
 [tool.poetry]
 name = "backoff"
-version = "1.10.0"
+version = "2.0.1"
 description = "Function decoration for backoff and retry"
-authors = ["Bob Green <rgreen@goscoutgo.com>"]
+authors = ["Bob Green <rgreen@aquent.com>"]
 readme = "README.rst"
 repository = "https://github.com/litl/backoff"
 license = "MIT"
@@ -14,13 +14,11 @@ classifiers = ['Development Status :: 5 - Production/Stable',
                'Natural Language :: English',
                'Operating System :: OS Independent',
                'Programming Language :: Python',
-               'Programming Language :: Python :: 2',
-               'Programming Language :: Python :: 2.7',
                'Programming Language :: Python :: 3',
-               'Programming Language :: Python :: 3.5',
-               'Programming Language :: Python :: 3.6',
                'Programming Language :: Python :: 3.7',
                'Programming Language :: Python :: 3.8',
+               'Programming Language :: Python :: 3.9',
+               'Programming Language :: Python :: 3.10',
                'Topic :: Internet :: WWW/HTTP',
                'Topic :: Software Development :: Libraries :: Python Modules',
                'Topic :: Utilities']
@@ -29,14 +27,18 @@ packages = [
 ]
 
 [tool.poetry.dependencies]
-python = "^2.7 || ^3.5"
+python = "^3.7"
 
 [tool.poetry.dev-dependencies]
-flake8 = "^3.6"
-pytest = "^4.0"
-pytest-cov = "^2.6"
-pytest-asyncio = {version = "^0.10.0",python = "^3.5"}
+flake8 = "^4.0.1"
+mypy = "^0.942"
+pytest = "^7.1.2"
+pytest-asyncio = "^0.18.3"
+pytest-cov = "^3.0.0"
+requests = "^2.26.0"
+responses = "^0.20.0"
+types-requests = "^2.27.20"
 
 [build-system]
-requires = ["poetry>=0.12"]
-build-backend = "poetry.masonry.api"
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
diff --git a/tests/test_backoff.py b/tests/test_backoff.py
index 20734f5..e6b3657 100644
--- a/tests/test_backoff.py
+++ b/tests/test_backoff.py
@@ -1,9 +1,12 @@
 # coding:utf-8
 import datetime
+import itertools
 import logging
 import random
+import re
 import sys
 import threading
+import unittest.mock
 
 import pytest
 
@@ -257,7 +260,8 @@ def test_on_exception_success():
                        'tries': 3}
 
 
-def test_on_exception_giveup():
+@pytest.mark.parametrize('raise_on_giveup', [True, False])
+def test_on_exception_giveup(raise_on_giveup):
     backoffs, giveups, successes = [], [], []
 
     @backoff.on_exception(backoff.constant,
@@ -267,12 +271,16 @@ def test_on_exception_giveup():
                           on_giveup=giveups.append,
                           max_tries=3,
                           jitter=None,
+                          raise_on_giveup=raise_on_giveup,
                           interval=0)
     @_save_target
     def exceptor(*args, **kwargs):
         raise ValueError("catch me")
 
-    with pytest.raises(ValueError):
+    if raise_on_giveup:
+        with pytest.raises(ValueError):
+            exceptor(1, 2, 3, foo=1, bar=2)
+    else:
         exceptor(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice and giving up once
@@ -733,3 +741,82 @@ def test_on_exception_logger_user_str(monkeypatch, caplog):
     assert len(caplog.records) == 3  # 2 backoffs and 1 giveup
     for record in caplog.records:
         assert record.name == 'my-logger'
+
+
+def _on_exception_factory(
+    backoff_log_level, giveup_log_level, max_tries
+):
+    @backoff.on_exception(
+        backoff.expo,
+        ValueError,
+        max_tries=max_tries,
+        backoff_log_level=backoff_log_level,
+        giveup_log_level=giveup_log_level,
+    )
+    def value_error():
+        raise ValueError
+
+    def func():
+        with pytest.raises(ValueError):
+            value_error()
+
+    return func
+
+
+def _on_predicate_factory(
+    backoff_log_level, giveup_log_level, max_tries
+):
+    @backoff.on_predicate(
+        backoff.expo,
+        max_tries=max_tries,
+        backoff_log_level=backoff_log_level,
+        giveup_log_level=giveup_log_level,
+    )
+    def func():
+        return False
+
+    return func
+
+
+@pytest.mark.parametrize(
+    ("func_factory", "backoff_log_level", "giveup_log_level"),
+    (
+        (factory, backoff_log_level, giveup_log_level)
+        for backoff_log_level, giveup_log_level in itertools.product(
+            (
+                logging.DEBUG,
+                logging.INFO,
+                logging.WARNING,
+                logging.ERROR,
+                logging.CRITICAL,
+            ),
+            repeat=2,
+        )
+        for factory in (_on_predicate_factory, _on_exception_factory)
+    )
+)
+def test_event_log_levels(
+    caplog, func_factory, backoff_log_level, giveup_log_level
+):
+    max_tries = 3
+    func = func_factory(backoff_log_level, giveup_log_level, max_tries)
+
+    with unittest.mock.patch('time.sleep', return_value=None):
+        with caplog.at_level(
+            min(backoff_log_level, giveup_log_level), logger="backoff"
+        ):
+            func()
+
+    backoff_re = re.compile("backing off", re.IGNORECASE)
+    giveup_re = re.compile("giving up", re.IGNORECASE)
+
+    backoff_log_count = 0
+    giveup_log_count = 0
+    for logger_name, level, message in caplog.record_tuples:
+        if level == backoff_log_level and backoff_re.match(message):
+            backoff_log_count += 1
+        elif level == giveup_log_level and giveup_re.match(message):
+            giveup_log_count += 1
+
+    assert backoff_log_count == max_tries - 1
+    assert giveup_log_count == 1
diff --git a/tests/python35/test_backoff_async.py b/tests/test_backoff_async.py
similarity index 98%
rename from tests/python35/test_backoff_async.py
rename to tests/test_backoff_async.py
index ca62c4a..d8219fd 100644
--- a/tests/python35/test_backoff_async.py
+++ b/tests/test_backoff_async.py
@@ -235,7 +235,8 @@ async def test_on_exception_success():
 
 
 @pytest.mark.asyncio
-async def test_on_exception_giveup():
+@pytest.mark.parametrize('raise_on_giveup', [True, False])
+async def test_on_exception_giveup(raise_on_giveup):
     log, log_success, log_backoff, log_giveup = _log_hdlrs()
 
     @backoff.on_exception(backoff.constant,
@@ -243,6 +244,7 @@ async def test_on_exception_giveup():
                           on_success=log_success,
                           on_backoff=log_backoff,
                           on_giveup=log_giveup,
+                          raise_on_giveup=raise_on_giveup,
                           max_tries=3,
                           jitter=None,
                           interval=0)
@@ -250,7 +252,10 @@ async def test_on_exception_giveup():
     async def exceptor(*args, **kwargs):
         raise ValueError("catch me")
 
-    with pytest.raises(ValueError):
+    if raise_on_giveup:
+        with pytest.raises(ValueError):
+            await exceptor(1, 2, 3, foo=1, bar=2)
+    else:
         await exceptor(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice and giving up once
diff --git a/tests/test_integration.py b/tests/test_integration.py
new file mode 100644
index 0000000..2dcade8
--- /dev/null
+++ b/tests/test_integration.py
@@ -0,0 +1,78 @@
+"""Integration tests
+
+Higher-level tests integrating with 3rd party modules using idiomatic
+backoff patterns.
+"""
+
+import backoff
+
+
+import requests
+from requests import HTTPError
+import responses
+
+
+@responses.activate
+def test_on_predicate_runtime(monkeypatch):
+
+    log = []
+
+    def sleep(seconds):
+        log.append(seconds)
+
+    monkeypatch.setattr("time.sleep", sleep)
+
+    url = "http://example.com"
+
+    responses.add(responses.GET, url, status=429, headers={"Retry-After": "1"})
+    responses.add(responses.GET, url, status=429, headers={"Retry-After": "3"})
+    responses.add(responses.GET, url, status=429, headers={"Retry-After": "7"})
+    responses.add(responses.GET, url, status=200)
+
+    @backoff.on_predicate(
+        backoff.runtime,
+        predicate=lambda r: r.status_code == 429,
+        value=lambda r: int(r.headers.get("Retry-After")),
+        jitter=None,
+    )
+    def get_url():
+        return requests.get(url)
+
+    resp = get_url()
+    assert resp.status_code == 200
+
+    assert log == [1, 3, 7]
+
+
+@responses.activate
+def test_on_exception_runtime(monkeypatch):
+
+    log = []
+
+    def sleep(seconds):
+        log.append(seconds)
+
+    monkeypatch.setattr("time.sleep", sleep)
+
+    url = "http://example.com"
+
+    responses.add(responses.GET, url, status=429, headers={"Retry-After": "1"})
+    responses.add(responses.GET, url, status=429, headers={"Retry-After": "3"})
+    responses.add(responses.GET, url, status=429, headers={"Retry-After": "7"})
+    responses.add(responses.GET, url, status=200)
+
+    @backoff.on_exception(
+        backoff.runtime,
+        HTTPError,
+        value=lambda e: int(e.response.headers.get("Retry-After")),
+        jitter=None,
+    )
+    def get_url():
+        resp = requests.get(url)
+        resp.raise_for_status()
+        return resp
+
+    resp = get_url()
+    assert resp.status_code == 200
+
+    assert log == [1, 3, 7]
diff --git a/tests/test_types.py b/tests/test_types.py
new file mode 100644
index 0000000..4e4b8cf
--- /dev/null
+++ b/tests/test_types.py
@@ -0,0 +1,6 @@
+# coding:utf-8
+
+from backoff.types import Details
+
+
+assert Details
diff --git a/tests/test_wait_gen.py b/tests/test_wait_gen.py
index eb60416..b72482d 100644
--- a/tests/test_wait_gen.py
+++ b/tests/test_wait_gen.py
@@ -4,30 +4,35 @@ import backoff
 
 def test_expo():
     gen = backoff.expo()
+    gen.send(None)
     for i in range(9):
-        assert 2**i == next(gen)
+        assert 2 ** i == next(gen)
 
 
 def test_expo_base3():
     gen = backoff.expo(base=3)
+    gen.send(None)
     for i in range(9):
-        assert 3**i == next(gen)
+        assert 3 ** i == next(gen)
 
 
 def test_expo_factor3():
     gen = backoff.expo(factor=3)
+    gen.send(None)
     for i in range(9):
-        assert 3 * 2**i == next(gen)
+        assert 3 * 2 ** i == next(gen)
 
 
 def test_expo_base3_factor5():
     gen = backoff.expo(base=3, factor=5)
+    gen.send(None)
     for i in range(9):
-        assert 5 * 3**i == next(gen)
+        assert 5 * 3 ** i == next(gen)
 
 
 def test_expo_max_value():
-    gen = backoff.expo(max_value=2**4)
+    gen = backoff.expo(max_value=2 ** 4)
+    gen.send(None)
     expected = [1, 2, 4, 8, 16, 16, 16]
     for expect in expected:
         assert expect == next(gen)
@@ -35,6 +40,7 @@ def test_expo_max_value():
 
 def test_fibo():
     gen = backoff.fibo()
+    gen.send(None)
     expected = [1, 1, 2, 3, 5, 8, 13]
     for expect in expected:
         assert expect == next(gen)
@@ -42,6 +48,7 @@ def test_fibo():
 
 def test_fibo_max_value():
     gen = backoff.fibo(max_value=8)
+    gen.send(None)
     expected = [1, 1, 2, 3, 5, 8, 8, 8]
     for expect in expected:
         assert expect == next(gen)
@@ -49,5 +56,13 @@ def test_fibo_max_value():
 
 def test_constant():
     gen = backoff.constant(interval=3)
+    gen.send(None)
     for i in range(9):
         assert 3 == next(gen)
+
+
+def test_runtime():
+    gen = backoff.runtime(value=lambda x: x)
+    gen.send(None)
+    for i in range(20):
+        assert i == gen.send(i)