Merge branch 'keyclient_retry_scheme' of github.com:matrix-org/synapse into develop
commit 1be67eca8a

4 changed files with 246 additions and 101 deletions
@@ -22,6 +22,8 @@ from syutil.crypto.signing_key import (
 from syutil.base64util import decode_base64, encode_base64
 from synapse.api.errors import SynapseError, Codes
 
+from synapse.util.retryutils import get_retry_limiter
+
 from OpenSSL import crypto
 
 import logging
@@ -87,8 +89,14 @@ class Keyring(object):
             return
 
         # Try to fetch the key from the remote server.
-        # TODO(markjh): Ratelimit requests to a given server.
 
-        (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_context_factory
-        )
+        limiter = yield get_retry_limiter(
+            server_name,
+            self.clock,
+            self.store,
+        )
+
+        with limiter:
+            (response, tls_certificate) = yield fetch_server_key(
+                server_name, self.hs.tls_context_factory
+            )
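Every call site in this branch follows the same two-step shape: get_retry_limiter() raises NotRetryingDestination if the server is still inside its backoff window, and the "with limiter:" block records the outcome of the request. The recording works purely through the context-manager protocol; here is a toy illustration of that mechanism (hypothetical names, no Twisted or synapse imports needed), not the real limiter:

    class FailureRecorder(object):
        # Stands in for RetryDestinationLimiter: __exit__ sees any
        # exception raised inside the block and returns a falsy value,
        # so the exception still propagates after being recorded.
        def __init__(self):
            self.failures = 0

        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                self.failures += 1
            return False  # never swallow the exception

    recorder = FailureRecorder()
    try:
        with recorder:
            raise IOError("connection refused")
    except IOError:
        pass
    assert recorder.failures == 1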
@@ -24,6 +24,8 @@ from synapse.util.expiringcache import ExpiringCache
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
 
+from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
+
 import logging
 
 
@@ -183,6 +185,13 @@ class FederationClient(FederationBase):
         pdu = None
         for destination in destinations:
             try:
-                transaction_data = yield self.transport_layer.get_event(
-                    destination, event_id
-                )
+                limiter = yield get_retry_limiter(
+                    destination,
+                    self._clock,
+                    self.store,
+                )
+
+                with limiter:
+                    transaction_data = yield self.transport_layer.get_event(
+                        destination, event_id
+                    )
@@ -201,6 +210,7 @@ class FederationClient(FederationBase):
                         pdu = yield self._check_sigs_and_hash(pdu)
+
                         break
 
             except SynapseError:
                 logger.info(
                     "Failed to get PDU %s from %s because %s",
@@ -216,6 +226,9 @@ class FederationClient(FederationBase):
                     event_id, destination, e,
                 )
                 continue
+            except NotRetryingDestination as e:
+                logger.info(e.message)
+                continue
             except Exception as e:
                 logger.info(
                     "Failed to get PDU %s from %s because %s",
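The get_pdu() loop above treats NotRetryingDestination like any other per-destination failure: log it and move on to the next candidate server. A minimal sketch of that control flow (hypothetical stand-ins for the transport layer, not the real FederationClient API):

    class NotRetryingDestination(Exception):
        pass

    DOWN_HOSTS = {"down.example.com"}

    def get_event(destination, event_id):
        # Stand-in for transport_layer.get_event: hosts still in their
        # backoff window raise instead of being contacted at all.
        if destination in DOWN_HOSTS:
            raise NotRetryingDestination(destination)
        return {"event_id": event_id, "origin": destination}

    def get_pdu(destinations, event_id):
        pdu = None
        for destination in destinations:
            try:
                pdu = get_event(destination, event_id)
                break
            except NotRetryingDestination:
                # Mirrors the new `continue` branch above: one host in
                # backoff must not stop us trying the others.
                continue
        return pdu

    assert get_pdu(["down.example.com", "up.example.com"],
                   "$ev")["origin"] == "up.example.com"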
@@ -22,6 +22,9 @@ from .units import Transaction
 from synapse.api.errors import HttpResponseException
 from synapse.util.logutils import log_function
 from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.retryutils import (
+    get_retry_limiter, NotRetryingDestination,
+)
 
 import logging
 
@@ -147,25 +150,6 @@ class TransactionQueue(object):
     @defer.inlineCallbacks
     @log_function
     def _attempt_new_transaction(self, destination):
-
-        (retry_last_ts, retry_interval) = (0, 0)
-        retry_timings = yield self.store.get_destination_retry_timings(
-            destination
-        )
-        if retry_timings:
-            (retry_last_ts, retry_interval) = (
-                retry_timings.retry_last_ts, retry_timings.retry_interval
-            )
-            if retry_last_ts + retry_interval > int(self._clock.time_msec()):
-                logger.info(
-                    "TX [%s] not ready for retry yet - "
-                    "dropping transaction for now",
-                    destination,
-                )
-                return
-            else:
-                logger.info("TX [%s] is ready for retry", destination)
-
         if destination in self.pending_transactions:
             # XXX: pending_transactions can get stuck on by a never-ending
             # request at which point pending_pdus_by_dest just keeps growing.
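The nineteen deleted lines above are exactly the check that get_retry_limiter() now performs for every caller: compare the stored retry_last_ts plus retry_interval against the clock and bail out early if the destination is not yet due. In miniature (timestamps in milliseconds, as in the real retry tables):

    def ready_for_retry(retry_last_ts, retry_interval, now_ms):
        # A destination is due once at least retry_interval has elapsed
        # since the last recorded failure.
        return retry_last_ts + retry_interval <= now_ms

    assert ready_for_retry(0, 0, now_ms=1000)           # never failed
    assert not ready_for_retry(900, 5000, now_ms=1000)  # still backing off
    assert ready_for_retry(900, 5000, now_ms=6000)      # window has elapsed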
@@ -192,15 +176,6 @@ class TransactionQueue(object):
             logger.info("TX [%s] Nothing to send", destination)
             return
 
-        logger.debug(
-            "TX [%s] Attempting new transaction"
-            " (pdus: %d, edus: %d, failures: %d)",
-            destination,
-            len(pending_pdus),
-            len(pending_edus),
-            len(pending_failures)
-        )
-
         # Sort based on the order field
         pending_pdus.sort(key=lambda t: t[2])
 
@@ -213,6 +188,21 @@ class TransactionQueue(object):
         ]
 
         try:
+            limiter = yield get_retry_limiter(
+                destination,
+                self._clock,
+                self.store,
+            )
+
+            logger.debug(
+                "TX [%s] Attempting new transaction"
+                " (pdus: %d, edus: %d, failures: %d)",
+                destination,
+                len(pending_pdus),
+                len(pending_edus),
+                len(pending_failures)
+            )
+
             self.pending_transactions[destination] = 1
 
             logger.debug("TX [%s] Persisting transaction...", destination)
@@ -238,6 +228,7 @@ class TransactionQueue(object):
                 transaction.transaction_id,
             )
 
+            with limiter:
                 # Actually send the transaction
 
                 # FIXME (erikj): This is a bit of a hack to make the Pdu age
@@ -266,7 +257,6 @@ class TransactionQueue(object):
                         "Transaction returned error for %s: %s",
                         e_id, r,
                     )
-
             except HttpResponseException as e:
                 code = e.code
                 response = e.response
@@ -276,23 +266,19 @@ class TransactionQueue(object):
             logger.debug("TX [%s] Sent transaction", destination)
             logger.debug("TX [%s] Marking as delivered...", destination)
+
             yield self.transaction_actions.delivered(
                 transaction, code, response
             )
+
             logger.debug("TX [%s] Marked as delivered", destination)
 
             logger.debug("TX [%s] Yielding to callbacks...", destination)
 
             for deferred in deferreds:
                 if code == 200:
-                    if retry_last_ts:
-                        # this host is alive! reset retry schedule
-                        yield self.store.set_destination_retry_timings(
-                            destination, 0, 0
-                        )
                     deferred.callback(None)
                 else:
-                    self.set_retrying(destination, retry_interval)
                     deferred.errback(RuntimeError("Got status %d" % code))
 
                 # Ensures we don't continue until all callbacks on that
@@ -303,6 +289,12 @@ class TransactionQueue(object):
                 pass
 
             logger.debug("TX [%s] Yielded to callbacks", destination)
+        except NotRetryingDestination:
+            logger.info(
+                "TX [%s] not ready for retry yet - "
+                "dropping transaction for now",
+                destination,
+            )
         except RuntimeError as e:
             # We capture this here as there as nothing actually listens
             # for this finishing functions deferred.
@@ -320,8 +312,6 @@ class TransactionQueue(object):
                 e,
             )
 
-            self.set_retrying(destination, retry_interval)
-
             for deferred in deferreds:
                 if not deferred.called:
                     deferred.errback(e)
@@ -332,22 +322,3 @@ class TransactionQueue(object):
 
             # Check to see if there is anything else to send.
             self._attempt_new_transaction(destination)
-
-    @defer.inlineCallbacks
-    def set_retrying(self, destination, retry_interval):
-        # track that this destination is having problems and we should
-        # give it a chance to recover before trying it again
-
-        if retry_interval:
-            retry_interval *= 2
-            # plateau at hourly retries for now
-            if retry_interval >= 60 * 60 * 1000:
-                retry_interval = 60 * 60 * 1000
-        else:
-            retry_interval = 2000  # try again at first after 2 seconds
-
-        yield self.store.set_destination_retry_timings(
-            destination,
-            int(self._clock.time_msec()),
-            retry_interval
-        )
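With set_retrying() removed, the doubling schedule lives in RetryDestinationLimiter (new file below), and the constants shift slightly: the deleted code started at 2 seconds and doubled to an hourly plateau, while the limiter defaults to a 5-second minimum, a multiplier of 2, and the same 1-hour cap. A quick sketch of the default schedule the new code produces (constants copied from the defaults below):

    MIN_RETRY_MS = 5000
    MAX_RETRY_MS = 60 * 60 * 1000
    MULTIPLIER = 2

    def next_interval(current_ms):
        # Mirrors the failure branch of RetryDestinationLimiter.__exit__.
        if current_ms:
            return min(current_ms * MULTIPLIER, MAX_RETRY_MS)
        return MIN_RETRY_MS

    interval, schedule = 0, []
    for _ in range(11):
        interval = next_interval(interval)
        schedule.append(interval)

    # 5s, 10s, 20s, ... 2560s, then capped at one hour.
    assert schedule[0] == 5000
    assert schedule[-1] == MAX_RETRY_MS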
synapse/util/retryutils.py (new file, 153 additions)

@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import CodeMessageException
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class NotRetryingDestination(Exception):
+    def __init__(self, retry_last_ts, retry_interval, destination):
+        msg = "Not retrying server %s." % (destination,)
+        super(NotRetryingDestination, self).__init__(msg)
+
+        self.retry_last_ts = retry_last_ts
+        self.retry_interval = retry_interval
+        self.destination = destination
+
+
+@defer.inlineCallbacks
+def get_retry_limiter(destination, clock, store, **kwargs):
+    """For a given destination check if we have previously failed to
+    send a request there and are waiting before retrying the destination.
+    If we are not ready to retry the destination, this will raise a
+    NotRetryingDestination exception. Otherwise, will return a Context Manager
+    that will mark the destination as down if an exception is thrown (excluding
+    CodeMessageException with code < 500).
+
+    Example usage:
+
+        try:
+            limiter = yield get_retry_limiter(destination, clock, store)
+            with limiter:
+                response = yield do_request()
+        except NotRetryingDestination:
+            # We aren't ready to retry that destination.
+            raise
+    """
+    retry_last_ts, retry_interval = (0, 0)
+
+    retry_timings = yield store.get_destination_retry_timings(
+        destination
+    )
+
+    if retry_timings:
+        retry_last_ts, retry_interval = (
+            retry_timings.retry_last_ts, retry_timings.retry_interval
+        )
+
+    now = int(clock.time_msec())
+
+    if retry_last_ts + retry_interval > now:
+        raise NotRetryingDestination(
+            retry_last_ts=retry_last_ts,
+            retry_interval=retry_interval,
+            destination=destination,
+        )
+
+    defer.returnValue(
+        RetryDestinationLimiter(
+            destination,
+            clock,
+            store,
+            retry_interval,
+            **kwargs
+        )
+    )
+
+
+class RetryDestinationLimiter(object):
+    def __init__(self, destination, clock, store, retry_interval,
+                 min_retry_interval=5000, max_retry_interval=60 * 60 * 1000,
+                 multiplier_retry_interval=2,):
+        """Marks the destination as "down" if an exception is thrown in the
+        context, except for CodeMessageException with code < 500.
+
+        If no exception is raised, marks the destination as "up".
+
+        Args:
+            destination (str)
+            clock (Clock)
+            store (DataStore)
+            retry_interval (int): The next retry interval taken from the
+                database in milliseconds, or zero if the last request was
+                successful.
+            min_retry_interval (int): The minimum retry interval to use after
+                a failed request, in milliseconds.
+            max_retry_interval (int): The maximum retry interval to use after
+                a failed request, in milliseconds.
+            multiplier_retry_interval (int): The multiplier to use to increase
+                the retry interval after a failed request.
+        """
+        self.clock = clock
+        self.store = store
+        self.destination = destination
+
+        self.retry_interval = retry_interval
+        self.min_retry_interval = min_retry_interval
+        self.max_retry_interval = max_retry_interval
+        self.multiplier_retry_interval = multiplier_retry_interval
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        def err(failure):
+            logger.exception(
+                "Failed to store set_destination_retry_timings",
+                failure.value
+            )
+
+        valid_err_code = False
+        if exc_type is CodeMessageException:
+            valid_err_code = 0 <= exc_val.code < 500
+
+        if exc_type is None or valid_err_code:
+            # We connected successfully.
+            if not self.retry_interval:
+                return
+
+            retry_last_ts = 0
+            self.retry_interval = 0
+        else:
+            # We couldn't connect.
+            if self.retry_interval:
+                self.retry_interval *= self.multiplier_retry_interval
+
+                if self.retry_interval >= self.max_retry_interval:
+                    self.retry_interval = self.max_retry_interval
+            else:
+                self.retry_interval = self.min_retry_interval
+
+            retry_last_ts = int(self.clock.time_msec())
+
+        self.store.set_destination_retry_timings(
+            self.destination, retry_last_ts, self.retry_interval
+        ).addErrback(err)
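One subtlety of __exit__ worth calling out: a CodeMessageException with a code below 500 counts as a successful connection (the remote server answered, even if it rejected the request), so only 5xx responses and transport-level failures feed the backoff. A self-contained check of that policy (pure-Python stand-in; the real CodeMessageException lives in synapse.api.errors):

    class CodeMessageException(Exception):
        def __init__(self, code, msg):
            super(CodeMessageException, self).__init__(msg)
            self.code = code

    def should_mark_down(exc_type, exc_val):
        # Mirrors the valid_err_code test in RetryDestinationLimiter.__exit__.
        valid_err_code = False
        if exc_type is CodeMessageException:
            valid_err_code = 0 <= exc_val.code < 500
        return not (exc_type is None or valid_err_code)

    assert not should_mark_down(None, None)  # clean exit: host is up
    assert not should_mark_down(CodeMessageException,
                                CodeMessageException(404, "not found"))
    assert should_mark_down(CodeMessageException,
                            CodeMessageException(502, "bad gateway"))
    assert should_mark_down(IOError, IOError("connection refused"))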