Edit docstring and comments

Author: Carol Willing
Date: 2017-07-26 12:38:21 -07:00
Committed by: yuvipanda
Parent: 8de38b1708
Commit: da10a8e7dd
3 changed files with 51 additions and 18 deletions

View File

@@ -597,6 +597,7 @@ class UserSpawnHandler(BaseHandler):
         # record redirect count in query parameter
         if redirects:
             self.log.warning("Redirect loop detected on %s", self.request.uri)
+            # add capped exponential backoff where cap is 10s
             yield gen.sleep(min(1 * (2 ** redirects), 10))
             # rewrite target url with new `redirects` query value
             url_parts = urlparse(target)
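
The added comment documents the formula on the `gen.sleep` line above: the sleep doubles with each recorded redirect and is capped at 10 seconds. A tiny standalone sketch of that arithmetic (the helper name is made up for illustration):

    def capped_backoff(redirects, cap=10):
        # 1 * 2**redirects seconds, never more than `cap` seconds
        return min(1 * (2 ** redirects), cap)

    # redirects 1, 2, 3, 4, 5 -> sleeps of 2, 4, 8, 10, 10 seconds
    print([capped_backoff(n) for n in range(1, 6)])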

View File

@@ -674,6 +674,7 @@ class Spawner(LoggingConfigurable):
         def _wait_for_death():
             status = yield self.poll()
             return status is not None
+
         try:
             r = yield exponential_backoff(
                 _wait_for_death,
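
The hunk above hands a small polling check to `exponential_backoff`, which retries it with growing, jittered sleeps until it returns true. A minimal sketch of that shape, assuming the signature documented in the last file of this commit (the function names, the 10-second timeout, and the `jupyterhub.utils` import path are illustrative assumptions, not the Spawner's actual code):

    from tornado import gen
    # the import path below is an assumption; the helper itself is the
    # `exponential_backoff` documented in the last file of this commit
    from jupyterhub.utils import exponential_backoff


    @gen.coroutine
    def wait_for_exit(proc):
        def _has_exited():
            # subprocess.Popen.poll() returns None while the process is running
            return proc.poll() is not None

        # retry _has_exited with growing, jittered sleeps; raise TimeoutError
        # if it is still False after 10 seconds
        yield exponential_backoff(
            _has_exited,
            "Process did not exit in 10 seconds",
            timeout=10,
        )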

View File

@@ -60,25 +60,59 @@ def exponential_backoff(
         timeout_tolerance=0.1,
         *args, **kwargs):
     """
-    Exponentially backoff until pass_func is true.
+    Exponentially backoff until `pass_func` is true.

-    This function will wait with exponential backoff + random jitter for as
-    many iterations as needed, with maximum timeout timeout. If pass_func is
-    still returning false at the end of timeout, a TimeoutError will be raised.
+    The `pass_func` function will wait with **exponential backoff** and
+    **random jitter** for as many needed iterations of the Tornado loop,
+    until reaching maximum `timeout` or truthiness. If `pass_func` is still
+    returning false at `timeout`, a `TimeoutError` will be raised.

-    It'll start waiting at start_wait, scaling up by continuously multiplying itself
-    by scale_factor until pass_func returns true. It'll never wait for more than
-    max_wait seconds per iteration.
+    The first iteration will begin with a wait time of `start_wait` seconds.
+    Each subsequent iteration's wait time will scale up by continuously
+    multiplying itself by `scale_factor`. This continues for each iteration
+    until `pass_func` returns true or an iteration's wait time has reached
+    the `max_wait` seconds per iteration.

-    *args and **kwargs are passed to pass_func. pass_func maybe a future, although
-    that is not entirely recommended.
+    `pass_func` may be a future, although that is not entirely recommended.

-    It'll return the value of pass_func when it's truthy!
+    Parameters
+    ----------
+    pass_func
+        function that is to be run
+    fail_message : str
+        message for a `TimeoutError`
+    start_wait : optional
+        initial wait time for the first iteration in seconds
+    scale_factor : optional
+        a multiplier to increase the wait time for each iteration
+    max_wait : optional
+        maximum wait time per iteration in seconds
+    timeout : optional
+        maximum time of total wait in seconds
+    timeout_tolerance : optional
+        a small multiplier used to add jitter to `timeout`'s deadline
+    *args, **kwargs
+        passed to `pass_func(*args, **kwargs)`
+
+    Returns
+    -------
+    value of `pass_func(*args, **kwargs)`
+
+    Raises
+    ------
+    TimeoutError
+        If `pass_func` is still false at the end of the `timeout` period.
+
+    Notes
+    -----
+    See https://www.awsarchitectureblog.com/2015/03/backoff.html
+    for information about the algorithm and examples. We're using their
+    full Jitter implementation equivalent.
     """
     loop = ioloop.IOLoop.current()
     deadline = loop.time() + timeout
-    # add some jitter to the deadline itself, so that we don't
-    # re-align a bunch of timing out calls once the deadline is reached.
+    # add jitter to the deadline itself to prevent re-align of a bunch of
+    # timing out calls once the deadline is reached.
     if timeout_tolerance:
         tol = timeout_tolerance * timeout
         deadline = random.uniform(deadline - tol, deadline + tol)
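
As a quick illustration of the parameters documented above, here is a hedged usage sketch: only the `exponential_backoff` signature comes from the docstring, while the `server_is_up` check, the wait values, and the import path are assumptions made for the example:

    import socket

    from tornado import gen

    # `exponential_backoff` is the helper documented above; the import path
    # below is an assumption, since the file name is not shown in this view.
    from jupyterhub.utils import exponential_backoff


    @gen.coroutine
    def wait_until_reachable(host, port):
        def server_is_up(host, port):
            # True once a TCP connection to host:port succeeds
            try:
                socket.create_connection((host, port), timeout=1).close()
                return True
            except OSError:
                return False

        try:
            result = yield exponential_backoff(
                server_is_up,
                "Server at {}:{} never came up".format(host, port),
                start_wait=0.2,    # first sleep is drawn from [0, 0.2) seconds
                scale_factor=2,    # that upper bound doubles every iteration
                max_wait=5,        # but a single sleep never exceeds 5 seconds
                timeout=30,        # give up with TimeoutError after ~30 seconds
                host=host,         # unrecognized keyword arguments are forwarded
                port=port,         # to server_is_up(**kwargs), per the docstring
            )
            return result
        except TimeoutError:
            return False

Keyword arguments that `exponential_backoff` does not recognize are forwarded to the check, per the `*args, **kwargs` entry in the docstring above.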
@@ -92,12 +126,9 @@ def exponential_backoff(
         if remaining < 0:
             # timeout exceeded
             break
-        # Add some random jitter to improve performance
-        # This makes sure that we don't overload any single iteration
-        # of the tornado loop with too many things
-        # See https://www.awsarchitectureblog.com/2015/03/backoff.html
-        # for a good example of why and how this helps. We're using their
-        # full Jitter implementation equivalent.
+        # add some random jitter to improve performance
+        # this prevents overloading any single tornado loop iteration with
+        # too many things
         dt = min(max_wait, remaining, random.uniform(0, start_wait * scale))
         scale *= scale_factor
         yield gen.sleep(dt)
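
To make the "full Jitter" idea referenced in those comments concrete, here is a small synchronous sketch of the same wait computation, with plain `time.sleep` standing in for `gen.sleep` (the `check` callable and the constants are placeholders):

    import random
    import time

    def full_jitter_retry(check, start_wait=0.2, scale_factor=2, max_wait=5, timeout=10):
        # blocking illustration of the loop above: each iteration sleeps a random
        # amount drawn from [0, start_wait * scale), and that upper bound grows
        # geometrically by scale_factor, capped by max_wait and the time remaining
        deadline = time.monotonic() + timeout
        scale = 1
        while True:
            ret = check()
            if ret:
                return ret
            remaining = deadline - time.monotonic()
            if remaining < 0:
                raise TimeoutError("check never passed within {}s".format(timeout))
            dt = min(max_wait, remaining, random.uniform(0, start_wait * scale))
            scale *= scale_factor
            time.sleep(dt)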