Use iter(items|values)
commit e71940aa64
parent 00957d1aa4
3 changed files with 48 additions and 51 deletions
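Why this change: on Python 2, `dict.items()`, `dict.values()`, and `dict.keys()` each build a full list before iteration begins, whereas `iteritems()`, `itervalues()`, and `iterkeys()` return lazy iterators. For the large state-group and event maps these storage classes handle, the iterator forms avoid a transient list copy per call. A minimal sketch of the difference, under Python 2 semantics (illustrative only, not code from this commit):

```python
# Python 2: .items() materialises a list of (key, value) pairs up front;
# .iteritems() yields the same pairs lazily with no intermediate list.
d = dict((i, i * 2) for i in xrange(1000000))

pairs = d.items()      # allocates a million-element list
lazy = d.iteritems()   # constant-size iterator over the same pairs

total = 0
for k, v in d.iteritems():   # drives the loop identically to .items()
    total += v

# Trade-off: the iterator supports neither len() nor indexing, and the
# dict must not change size while it is being consumed (RuntimeError).
# Python 3 later made .items()/.values()/.keys() return lazy views.
```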
synapse/storage/_base.py
@@ -135,7 +135,7 @@ class PerformanceCounters(object):
 
     def interval(self, interval_duration, limit=3):
         counters = []
-        for name, (count, cum_time) in self.current_counters.items():
+        for name, (count, cum_time) in self.current_counters.iteritems():
             prev_count, prev_time = self.previous_counters.get(name, (0, 0))
             counters.append((
                 (cum_time - prev_time) / interval_duration,
@@ -568,7 +568,7 @@ class SQLBaseStore(object):
     @staticmethod
     def _simple_select_onecol_txn(txn, table, keyvalues, retcol):
         if keyvalues:
-            where = "WHERE %s" % " AND ".join("%s = ?" % k for k in keyvalues.keys())
+            where = "WHERE %s" % " AND ".join("%s = ?" % k for k in keyvalues.iterkeys())
         else:
             where = ""
 
@@ -715,7 +715,7 @@ class SQLBaseStore(object):
         )
         values.extend(iterable)
 
-        for key, value in keyvalues.items():
+        for key, value in keyvalues.iteritems():
             clauses.append("%s = ?" % (key,))
             values.append(value)
 
@@ -756,7 +756,7 @@ class SQLBaseStore(object):
     @staticmethod
     def _simple_update_one_txn(txn, table, keyvalues, updatevalues):
         if keyvalues:
-            where = "WHERE %s" % " AND ".join("%s = ?" % k for k in keyvalues.keys())
+            where = "WHERE %s" % " AND ".join("%s = ?" % k for k in keyvalues.iterkeys())
         else:
             where = ""
 
@@ -873,7 +873,7 @@ class SQLBaseStore(object):
         )
         values.extend(iterable)
 
-        for key, value in keyvalues.items():
+        for key, value in keyvalues.iteritems():
             clauses.append("%s = ?" % (key,))
             values.append(value)
 
@@ -913,7 +913,7 @@ class SQLBaseStore(object):
         txn.close()
 
         if cache:
-            min_val = min(cache.values())
+            min_val = min(cache.itervalues())
         else:
             min_val = max_value
 
synapse/storage/events.py
@@ -217,14 +217,14 @@ class EventsStore(SQLBaseStore):
             partitioned.setdefault(event.room_id, []).append((event, ctx))
 
         deferreds = []
-        for room_id, evs_ctxs in partitioned.items():
+        for room_id, evs_ctxs in partitioned.iteritems():
             d = preserve_fn(self._event_persist_queue.add_to_queue)(
                 room_id, evs_ctxs,
                 backfilled=backfilled,
             )
             deferreds.append(d)
 
-        for room_id in partitioned.keys():
+        for room_id in partitioned:
             self._maybe_start_persisting(room_id)
 
         return preserve_context_over_deferred(
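A note on the second change in this hunk: iterating a dict directly yields its keys, so `for room_id in partitioned:` is equivalent to iterating `partitioned.iterkeys()` and skips the key list that `.keys()` would build. A small sketch with hypothetical data:

```python
# Plain iteration over a dict walks its keys lazily; no list is built.
partitioned = {"!a:example.org": ["ev1"], "!b:example.org": ["ev2"]}

for room_id in partitioned:        # same keys as partitioned.keys()
    print(room_id)

# .keys() remains the safe choice when the loop body adds or removes
# dict entries, because the list is a snapshot taken before the loop.
```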
@@ -323,7 +323,7 @@ class EventsStore(SQLBaseStore):
                         (event, context)
                     )
 
-        for room_id, ev_ctx_rm in events_by_room.items():
+        for room_id, ev_ctx_rm in events_by_room.iteritems():
             # Work out new extremities by recursively adding and removing
             # the new events.
             latest_event_ids = yield self.get_latest_event_ids_in_room(
@@ -453,10 +453,10 @@ class EventsStore(SQLBaseStore):
                 missing_event_ids,
             )
 
-            groups = set(event_to_groups.values())
+            groups = set(event_to_groups.itervalues())
             group_to_state = yield self._get_state_for_groups(groups)
 
-            state_sets.extend(group_to_state.values())
+            state_sets.extend(group_to_state.itervalues())
 
         if not new_latest_event_ids:
             current_state = {}
@@ -718,7 +718,7 @@ class EventsStore(SQLBaseStore):
 
     def _update_forward_extremities_txn(self, txn, new_forward_extremities,
                                         max_stream_order):
-        for room_id, new_extrem in new_forward_extremities.items():
+        for room_id, new_extrem in new_forward_extremities.iteritems():
            self._simple_delete_txn(
                txn,
                table="event_forward_extremities",
@@ -736,7 +736,7 @@ class EventsStore(SQLBaseStore):
                     "event_id": ev_id,
                     "room_id": room_id,
                 }
-                for room_id, new_extrem in new_forward_extremities.items()
+                for room_id, new_extrem in new_forward_extremities.iteritems()
                 for ev_id in new_extrem
             ],
         )
@@ -753,7 +753,7 @@ class EventsStore(SQLBaseStore):
                     "event_id": event_id,
                     "stream_ordering": max_stream_order,
                 }
-                for room_id, new_extrem in new_forward_extremities.items()
+                for room_id, new_extrem in new_forward_extremities.iteritems()
                 for event_id in new_extrem
             ]
         )
@@ -807,7 +807,7 @@ class EventsStore(SQLBaseStore):
                 event.depth, depth_updates.get(event.room_id, event.depth)
             )
 
-        for room_id, depth in depth_updates.items():
+        for room_id, depth in depth_updates.iteritems():
             self._update_min_depth_for_room_txn(txn, room_id, depth)
 
     def _update_outliers_txn(self, txn, events_and_contexts):
@@ -958,14 +958,10 @@ class EventsStore(SQLBaseStore):
             return
 
         def event_dict(event):
-            return {
-                k: v
-                for k, v in event.get_dict().items()
-                if k not in [
-                    "redacted",
-                    "redacted_because",
-                ]
-            }
+            d = event.get_dict()
+            d.pop("redacted", None)
+            d.pop("redacted_because", None)
+            return d
 
         self._simple_insert_many_txn(
             txn,
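The `event_dict` rewrite above drops the dict comprehension that re-filtered every key in favour of two `dict.pop(key, None)` calls on the fresh copy returned by `event.get_dict()`; `pop` with a default removes a key if present and is a silent no-op otherwise. A standalone sketch of the same pattern (function name and data are hypothetical):

```python
# Removing a known, small set of keys: pop() touches only those keys,
# instead of rebuilding the entire dict to exclude them.
def strip_redaction_keys(event_json):
    d = dict(event_json)            # stand-in for event.get_dict()
    d.pop("redacted", None)         # default=None: no KeyError if absent
    d.pop("redacted_because", None)
    return d

assert strip_redaction_keys(
    {"type": "m.room.message", "redacted": True}
) == {"type": "m.room.message"}
```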
@@ -1998,7 +1994,7 @@ class EventsStore(SQLBaseStore):
                     "state_key": key[1],
                     "event_id": state_id,
                 }
-                for key, state_id in curr_state.items()
+                for key, state_id in curr_state.iteritems()
             ],
         )
 
synapse/storage/state.py
@@ -90,7 +90,7 @@ class StateStore(SQLBaseStore):
             event_ids,
         )
 
-        groups = set(event_to_groups.values())
+        groups = set(event_to_groups.itervalues())
         group_to_state = yield self._get_state_for_groups(groups)
 
         defer.returnValue(group_to_state)
@@ -108,17 +108,18 @@ class StateStore(SQLBaseStore):
 
         state_event_map = yield self.get_events(
             [
-                ev_id for group_ids in group_to_ids.values()
-                for ev_id in group_ids.values()
+                ev_id for group_ids in group_to_ids.itervalues()
+                for ev_id in group_ids.itervalues()
             ],
             get_prev_content=False
         )
 
         defer.returnValue({
             group: [
-                state_event_map[v] for v in event_id_map.values() if v in state_event_map
+                state_event_map[v] for v in event_id_map.itervalues()
+                if v in state_event_map
             ]
-            for group, event_id_map in group_to_ids.items()
+            for group, event_id_map in group_to_ids.iteritems()
         })
 
     def _have_persisted_state_group_txn(self, txn, state_group):
@@ -190,7 +191,7 @@ class StateStore(SQLBaseStore):
                         "state_key": key[1],
                         "event_id": state_id,
                     }
-                    for key, state_id in context.delta_ids.items()
+                    for key, state_id in context.delta_ids.iteritems()
                 ],
             )
         else:
@@ -205,7 +206,7 @@ class StateStore(SQLBaseStore):
                         "state_key": key[1],
                         "event_id": state_id,
                     }
-                    for key, state_id in context.current_state_ids.items()
+                    for key, state_id in context.current_state_ids.iteritems()
                 ],
             )
 
@@ -217,7 +218,7 @@ class StateStore(SQLBaseStore):
                     "state_group": state_group_id,
                     "event_id": event_id,
                 }
-                for event_id, state_group_id in state_groups.items()
+                for event_id, state_group_id in state_groups.iteritems()
             ],
         )
 
@@ -373,11 +374,11 @@ class StateStore(SQLBaseStore):
                     " WHERE state_group = ? %s" % (where_clause,),
                     args
                 )
-                results[group].update({
-                    (typ, state_key): event_id
+                results[group].update(
+                    ((typ, state_key), event_id)
                     for typ, state_key, event_id in txn
                     if (typ, state_key) not in results[group]
-                })
+                )
 
                 # If the lengths match then we must have all the types,
                 # so no need to go walk further down the tree.
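The `results[group].update(...)` change relies on `dict.update` accepting any iterable of `(key, value)` 2-tuples, so the generator expression feeds it directly rather than first materialising a temporary dict from a comprehension. A minimal sketch with hypothetical values:

```python
# dict.update() accepts an iterable of 2-tuples as well as a mapping,
# so a generator expression avoids building a throwaway dict first.
results = {("m.room.member", "@alice:example.org"): "$old"}
rows = [
    ("m.room.name", "", "$name"),
    ("m.room.member", "@alice:example.org", "$new"),
]

results.update(
    ((typ, state_key), event_id)
    for typ, state_key, event_id in rows
    if (typ, state_key) not in results   # existing entries win
)
# Adds the m.room.name entry; Alice's membership stays "$old".
```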
@@ -414,21 +415,21 @@ class StateStore(SQLBaseStore):
             event_ids,
         )
 
-        groups = set(event_to_groups.values())
+        groups = set(event_to_groups.itervalues())
         group_to_state = yield self._get_state_for_groups(groups, types)
 
         state_event_map = yield self.get_events(
-            [ev_id for sd in group_to_state.values() for ev_id in sd.values()],
+            [ev_id for sd in group_to_state.itervalues() for ev_id in sd.itervalues()],
             get_prev_content=False
         )
 
         event_to_state = {
             event_id: {
                 k: state_event_map[v]
-                for k, v in group_to_state[group].items()
+                for k, v in group_to_state[group].iteritems()
                 if v in state_event_map
             }
-            for event_id, group in event_to_groups.items()
+            for event_id, group in event_to_groups.iteritems()
         }
 
         defer.returnValue({event: event_to_state[event] for event in event_ids})
@@ -451,12 +452,12 @@ class StateStore(SQLBaseStore):
             event_ids,
         )
 
-        groups = set(event_to_groups.values())
+        groups = set(event_to_groups.itervalues())
         group_to_state = yield self._get_state_for_groups(groups, types)
 
         event_to_state = {
             event_id: group_to_state[group]
-            for event_id, group in event_to_groups.items()
+            for event_id, group in event_to_groups.iteritems()
         }
 
         defer.returnValue({event: event_to_state[event] for event in event_ids})
@@ -568,7 +569,7 @@ class StateStore(SQLBaseStore):
         got_all = not (missing_types or types is None)
 
         return {
-            k: v for k, v in state_dict_ids.items()
+            k: v for k, v in state_dict_ids.iteritems()
             if include(k[0], k[1])
         }, missing_types, got_all
 
@@ -627,7 +628,7 @@ class StateStore(SQLBaseStore):
 
         # Now we want to update the cache with all the things we fetched
         # from the database.
-        for group, group_state_dict in group_to_state_dict.items():
+        for group, group_state_dict in group_to_state_dict.iteritems():
             if types:
                 # We delibrately put key -> None mappings into the cache to
                 # cache absence of the key, on the assumption that if we've
@@ -642,10 +643,10 @@ class StateStore(SQLBaseStore):
             else:
                 state_dict = results[group]
 
-            state_dict.update({
-                (intern_string(k[0]), intern_string(k[1])): v
-                for k, v in group_state_dict.items()
-            })
+            state_dict.update(
+                ((intern_string(k[0]), intern_string(k[1])), v)
+                for k, v in group_state_dict.iteritems()
+            )
 
             self._state_group_cache.update(
                 cache_seq_num,
@@ -656,10 +657,10 @@ class StateStore(SQLBaseStore):
 
         # Remove all the entries with None values. The None values were just
         # used for bookkeeping in the cache.
-        for group, state_dict in results.items():
+        for group, state_dict in results.iteritems():
             results[group] = {
                 key: event_id
-                for key, event_id in state_dict.items()
+                for key, event_id in state_dict.iteritems()
                 if event_id
             }
 
@@ -748,7 +749,7 @@ class StateStore(SQLBaseStore):
         # of keys
 
         delta_state = {
-            key: value for key, value in curr_state.items()
+            key: value for key, value in curr_state.iteritems()
             if prev_state.get(key, None) != value
         }
 
@@ -788,7 +789,7 @@ class StateStore(SQLBaseStore):
                         "state_key": key[1],
                         "event_id": state_id,
                     }
-                    for key, state_id in delta_state.items()
+                    for key, state_id in delta_state.iteritems()
                 ],
             )
 