forked from MirrorHub/synapse
Time out filtered room dir queries after 60s
parent 58f6c48183
commit 068aa1d228
1 changed file with 12 additions and 2 deletions
@@ -31,6 +31,7 @@ from synapse.util.caches.descriptors import cachedInlineCallbacks
 from synapse.util.caches.response_cache import ResponseCache
 
 from ._base import BaseHandler
+from datetime import datetime, timedelta
 
 logger = logging.getLogger(__name__)
 
@@ -73,8 +74,13 @@ class RoomListHandler(BaseHandler):
             # We explicitly don't bother caching searches or requests for
             # appservice specific lists.
             logger.info("Bypassing cache as search request.")
+
+            # XXX: Quick hack to stop room directory queries taking too long.
+            # Timeout request after 60s. Probably want a more fundamental
+            # solution at some point
+            timeout = datetime.now() + timedelta(seconds=60)
             return self._get_public_room_list(
-                limit, since_token, search_filter, network_tuple=network_tuple,
+                limit, since_token, search_filter, network_tuple=network_tuple, timeout=timeout,
             )
 
         key = (limit, since_token, network_tuple)
@@ -87,7 +93,8 @@ class RoomListHandler(BaseHandler):
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
-                              network_tuple=EMPTY_THIRD_PARTY_ID,):
+                              network_tuple=EMPTY_THIRD_PARTY_ID,
+                              timeout=None,):
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:
@@ -202,6 +209,9 @@ class RoomListHandler(BaseHandler):
 
         chunk = []
         for i in range(0, len(rooms_to_scan), step):
+            if timeout and datetime.now() > timeout:
+                raise Exception("Timed out searching room directory")
+
             batch = rooms_to_scan[i:i + step]
             logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(
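
The change boils down to a wall-clock deadline that the caller computes once and the batch loop checks before processing each chunk. Below is a minimal, self-contained sketch of that pattern; the function name scan_rooms and its arguments are illustrative stand-ins, not the actual synapse helpers, and only the timeout check mirrors the diff.

# Sketch of the deadline pattern applied in the commit above.
# `scan_rooms`, `rooms_to_scan`, and `step` are hypothetical names.
from datetime import datetime, timedelta

def scan_rooms(rooms_to_scan, step=10, timeout=None):
    """Process rooms in fixed-size batches, aborting once the deadline passes."""
    results = []
    for i in range(0, len(rooms_to_scan), step):
        # The deadline is checked once per batch, not per room, so a single
        # slow batch can still push the total slightly past the 60s budget.
        if timeout and datetime.now() > timeout:
            raise Exception("Timed out searching room directory")
        results.extend(rooms_to_scan[i:i + step])
    return results

# The caller computes the deadline up front, as the handler now does.
deadline = datetime.now() + timedelta(seconds=60)
scan_rooms(["#room-a:example.org", "#room-b:example.org"], step=1, timeout=deadline)

Note that the deadline uses wall-clock time via datetime.now() and surfaces as a plain Exception rather than a partial result, which matches the "quick hack" framing in the commit's own comment.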