Add a load of logging to the room_list handler
So we can see what it gets up to.
This commit is contained in:
parent 7bd6c87eca
commit 1fc66c7460

1 changed file with 13 additions and 0 deletions
@@ -154,6 +154,8 @@ class RoomListHandler(BaseHandler):
             # We want larger rooms to be first, hence negating num_joined_users
             rooms_to_order_value[room_id] = (-num_joined_users, room_id)
 
+        logger.info("Getting ordering for %i rooms since %s",
+                    len(room_ids), stream_token)
         yield concurrently_execute(get_order_for_room, room_ids, 10)
 
         sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
@@ -181,17 +183,25 @@ class RoomListHandler(BaseHandler):
                 rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                 rooms_to_scan.reverse()
 
+        logger.info("After sorting and filtering, %i rooms remain",
+                    len(rooms_to_scan))
+
         # Actually generate the entries. _append_room_entry_to_chunk will append to
         # chunk but will stop if len(chunk) > limit
         chunk = []
         if limit and not search_filter:
             step = limit + 1
             for i in xrange(0, len(rooms_to_scan), step):
+                logger.info("Processing %i rooms for result", step)
                 # We iterate here because the vast majority of cases we'll stop
                 # at first iteration, but occaisonally _append_room_entry_to_chunk
                 # won't append to the chunk and so we need to loop again.
                 # We don't want to scan over the entire range either as that
                 # would potentially waste a lot of work.
+                #
+                # XXX why would that happen? _append_room_entry_to_chunk will
+                # only exclude rooms which don't match search_filter, but we
+                # know search_filter is None here.
                 yield concurrently_execute(
                     lambda r: self._append_room_entry_to_chunk(
                         r, rooms_to_num_joined[r],
@@ -199,9 +209,11 @@ class RoomListHandler(BaseHandler):
                     ),
                     rooms_to_scan[i:i + step], 10
                 )
+                logger.info("Now %i rooms in result", len(chunk))
                 if len(chunk) >= limit + 1:
                     break
         else:
+            logger.info("Processing %i rooms for result", len(rooms_to_scan))
             yield concurrently_execute(
                 lambda r: self._append_room_entry_to_chunk(
                     r, rooms_to_num_joined[r],
@@ -209,6 +221,7 @@ class RoomListHandler(BaseHandler):
                 ),
                 rooms_to_scan, 5
             )
+            logger.info("Now %i rooms in result", len(chunk))
 
         chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
 
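Note on the first hunk: the context comment explains why the ordering key negates the member count. Sorting the (-num_joined_users, room_id) tuples in ascending order puts the largest rooms first and breaks ties deterministically by room ID. A minimal standalone sketch of that ordering (Python 3; the sample room IDs and counts are invented for illustration, only the key shape and the sorted() call mirror the diff):

# Illustration only: negate the member count so an ascending sort puts the
# largest rooms first, with the room ID as a deterministic tie-breaker.
rooms_to_num_joined = {
    "!small:example.org": 3,
    "!big:example.org": 120,
    "!medium:example.org": 120,  # same size as !big, so ordered by room ID
}

rooms_to_order_value = {
    room_id: (-num_joined, room_id)
    for room_id, num_joined in rooms_to_num_joined.items()
}

sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
print([room_id for room_id, _ in sorted_entries])
# ['!big:example.org', '!medium:example.org', '!small:example.org']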
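Note on the second hunk: its comments describe the chunked scan. With a limit and no search filter, rooms are processed in slices of limit + 1, and the loop stops once the chunk holds more than limit entries, only scanning another slice when the append helper declined some rooms. A hedged, synchronous sketch of that control flow (Python 3; scan_in_chunks and should_include are hypothetical stand-ins, not Synapse's _append_room_entry_to_chunk or concurrently_execute):

def scan_in_chunks(rooms_to_scan, limit, should_include):
    """Hypothetical, synchronous stand-in for the chunked scan above.

    Rooms are processed in slices of ``limit + 1``; scanning stops once the
    chunk holds more than ``limit`` entries, and the surplus entry is what
    signals that another page of results exists.
    """
    chunk = []
    step = limit + 1
    for i in range(0, len(rooms_to_scan), step):
        for room_id in rooms_to_scan[i:i + step]:
            # Stand-in for the append helper: it may skip a room (e.g. one
            # filtered out), which is why one slice is not always enough to
            # fill the chunk and the outer loop may run again.
            if should_include(room_id) and len(chunk) <= limit:
                chunk.append(room_id)
        if len(chunk) >= limit + 1:
            break
    return chunk


rooms = ["!room%d:example.org" % i for i in range(20)]
# Pretend only every other room passes the filter: the first slice of six
# rooms yields only three entries, so a second slice is scanned.
page = scan_in_chunks(rooms, limit=5,
                      should_include=lambda r: rooms.index(r) % 2 == 0)
print(page)  # six entries; the surplus sixth marks "more results available"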