Mirror of https://mau.dev/maunium/synapse.git (synced 2024-11-15 22:42:23 +01:00)
[pyupgrade] synapse/ (#10348)
This PR is tantamount to running:

```
pyupgrade --py36-plus --keep-percent-format `find synapse/ -type f -name "*.py"`
```

Part of #9744.
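For context, a minimal sketch of what that one-liner does, expressed as a small Python helper. This is not part of the commit; the helper name and the use of `subprocess`/`pathlib` are illustrative, and it assumes `pyupgrade` is installed on the PATH.

```python
# Hypothetical helper mirroring the shell command above: run pyupgrade with
# --py36-plus over every .py file under synapse/.
import subprocess
from pathlib import Path


def run_pyupgrade(root: str = "synapse") -> int:
    files = [str(p) for p in Path(root).rglob("*.py")]
    if not files:
        return 0
    # --keep-percent-format leaves "%"-style formatting untouched, so lazy
    # logger calls such as logger.warning("...: %s", arg) are not rewritten.
    result = subprocess.run(
        ["pyupgrade", "--py36-plus", "--keep-percent-format", *files]
    )
    return result.returncode


if __name__ == "__main__":
    raise SystemExit(run_pyupgrade())
```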
commit 95e47b2e78 (parent 7387d6f624)
29 changed files with 86 additions and 102 deletions
changelog.d/10348.misc (new file, 1 line)

@@ -0,0 +1 @@
+Run `pyupgrade` on the codebase.
@@ -395,11 +395,9 @@ class GenericWorkerServer(HomeServer):
             elif listener.type == "metrics":
                 if not self.config.enable_metrics:
                     logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
+                        "Metrics listener configured, but "
+                        "enable_metrics is not True!"
                     )
                 else:
                     _base.listen_metrics(listener.bind_addresses, listener.port)
             else:
@@ -305,11 +305,9 @@ class SynapseHomeServer(HomeServer):
             elif listener.type == "metrics":
                 if not self.config.enable_metrics:
                     logger.warning(
-                        (
-                            "Metrics listener configured, but "
-                            "enable_metrics is not True!"
-                        )
+                        "Metrics listener configured, but "
+                        "enable_metrics is not True!"
                     )
                 else:
                     _base.listen_metrics(listener.bind_addresses, listener.port)
             else:
@@ -64,7 +64,7 @@ def load_appservices(hostname, config_files):

     for config_file in config_files:
         try:
-            with open(config_file, "r") as f:
+            with open(config_file) as f:
                 appservice = _load_appservice(hostname, yaml.safe_load(f), config_file)
                 if appservice.id in seen_ids:
                     raise ConfigError(
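A small aside on the `open(config_file, "r")` → `open(config_file)` rewrites in this diff: `"r"` (text read) is already the default mode, so dropping it is behaviour-preserving. A standalone sketch, using a throwaway temp file rather than a real config:

```python
import tempfile

# "r" is open()'s default mode, so both calls below read the same text.
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tmp:
    tmp.write("id: demo\n")
    path = tmp.name

with open(path, "r") as f:
    explicit = f.read()

with open(path) as f:
    implicit = f.read()

assert explicit == implicit == "id: demo\n"
```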
@@ -66,11 +66,9 @@ class TlsConfig(Config):
         if self.federation_client_minimum_tls_version == "1.3":
             if getattr(SSL, "OP_NO_TLSv1_3", None) is None:
                 raise ConfigError(
-                    (
-                        "federation_client_minimum_tls_version cannot be 1.3, "
-                        "your OpenSSL does not support it"
-                    )
+                    "federation_client_minimum_tls_version cannot be 1.3, "
+                    "your OpenSSL does not support it"
                 )

         # Whitelist of domains to not verify certificates for
         fed_whitelist_entries = config.get(
@@ -40,7 +40,7 @@ class CasError(Exception):

     def __str__(self):
         if self.error_description:
-            return "{}: {}".format(self.error, self.error_description)
+            return f"{self.error}: {self.error_description}"
         return self.error


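The `__str__` rewrites in this PR swap `str.format()` for an f-string. A minimal sketch with a toy exception class (not Synapse's `CasError`) showing the two spellings produce identical output:

```python
from typing import Optional


class DemoError(Exception):
    # Toy stand-in for error classes like CasError/OidcError.
    def __init__(self, error: str, error_description: Optional[str] = None):
        self.error = error
        self.error_description = error_description

    def __str__(self) -> str:
        if self.error_description:
            # f-string equivalent of "{}: {}".format(self.error, self.error_description)
            return f"{self.error}: {self.error_description}"
        return self.error


assert str(DemoError("access_denied", "user rejected")) == "access_denied: user rejected"
assert str(DemoError("access_denied")) == "access_denied"
```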
@@ -735,7 +735,7 @@ class FederationHandler(BaseHandler):
         # we need to make sure we re-load from the database to get the rejected
         # state correct.
         fetched_events.update(
-            (await self.store.get_events(missing_desired_events, allow_rejected=True))
+            await self.store.get_events(missing_desired_events, allow_rejected=True)
         )

         # check for events which were in the wrong room.
@@ -302,7 +302,7 @@ class IdentityHandler(BaseHandler):
         )

         url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
-        url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
+        url_bytes = b"/_matrix/identity/api/v1/3pid/unbind"

         content = {
             "mxid": mxid,
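For the `url_bytes` change above: encoding an ASCII-only string literal is just a longer way of writing a bytes literal. A quick standalone check (not Synapse code):

```python
# .encode("ascii") on an ASCII-only literal yields the same bytes value
# as the b"" literal that replaces it.
old_style = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
new_style = b"/_matrix/identity/api/v1/3pid/unbind"

assert old_style == new_style
assert isinstance(new_style, bytes)
```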
@@ -695,7 +695,7 @@ class IdentityHandler(BaseHandler):
             return data["mxid"]
         except RequestTimedOutError:
             raise SynapseError(500, "Timed out contacting identity server")
-        except IOError as e:
+        except OSError as e:
             logger.warning("Error from v1 identity server lookup: %s" % (e,))

             return None
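The `except IOError` → `except OSError` rewrites here and in later hunks are behaviour-preserving because `IOError` has been a plain alias of `OSError` since Python 3.3. A quick standalone check:

```python
# IOError is literally the same class as OSError on Python 3, so catching
# OSError catches everything IOError used to.
assert IOError is OSError

try:
    open("/nonexistent/path/for/demo")
except OSError as e:  # previously written as "except IOError"
    print(f"caught {type(e).__name__}: {e}")
```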
@@ -72,26 +72,26 @@ _SESSION_COOKIES = [
     (b"oidc_session_no_samesite", b"HttpOnly"),
 ]


 #: A token exchanged from the token endpoint, as per RFC6749 sec 5.1. and
 #: OpenID.Core sec 3.1.3.3.
-Token = TypedDict(
-    "Token",
-    {
-        "access_token": str,
-        "token_type": str,
-        "id_token": Optional[str],
-        "refresh_token": Optional[str],
-        "expires_in": int,
-        "scope": Optional[str],
-    },
-)
+class Token(TypedDict):
+    access_token: str
+    token_type: str
+    id_token: Optional[str]
+    refresh_token: Optional[str]
+    expires_in: int
+    scope: Optional[str]


 #: A JWK, as per RFC7517 sec 4. The type could be more precise than that, but
 #: there is no real point of doing this in our case.
 JWK = Dict[str, str]


 #: A JWK Set, as per RFC7517 sec 5.
-JWKS = TypedDict("JWKS", {"keys": List[JWK]})
+class JWKS(TypedDict):
+    keys: List[JWK]


 class OidcHandler:
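The `Token`/`JWKS` change above moves from the functional `TypedDict(...)` form to the class-based form; both describe the same dictionary shape. A minimal sketch with made-up fields (assuming Python 3.8+ so that `TypedDict` can be imported from `typing`):

```python
from typing import Optional, TypedDict

# Functional form: needed when keys are not valid identifiers.
TokenFunctional = TypedDict(
    "TokenFunctional",
    {"access_token": str, "expires_in": int, "scope": Optional[str]},
)


# Class-based form: the same type, easier to read and extend.
class TokenClassBased(TypedDict):
    access_token: str
    expires_in: int
    scope: Optional[str]


tok: TokenClassBased = {"access_token": "abc", "expires_in": 3600, "scope": None}
print(tok["access_token"])
```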
@@ -255,7 +255,7 @@ class OidcError(Exception):

     def __str__(self):
         if self.error_description:
-            return "{}: {}".format(self.error, self.error_description)
+            return f"{self.error}: {self.error_description}"
         return self.error


@@ -639,7 +639,7 @@ class OidcProvider:
             )
             logger.warning(description)
             # Body was still valid JSON. Might be useful to log it for debugging.
-            logger.warning("Code exchange response: {resp!r}".format(resp=resp))
+            logger.warning("Code exchange response: %r", resp)
             raise OidcError("server_error", description)

         return resp
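The logging change above also switches from eager `str.format()` to the logging module's lazy `%`-interpolation (the style `--keep-percent-format` deliberately leaves alone). A small sketch of the difference:

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

resp = {"error": "invalid_grant"}

# Eager: the message is fully built even if this logger were disabled.
logger.warning("Code exchange response: {resp!r}".format(resp=resp))

# Lazy: the %r interpolation only happens if the record is actually handled.
logger.warning("Code exchange response: %r", resp)
```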
@@ -1217,10 +1217,12 @@ class OidcSessionData:
     ui_auth_session_id = attr.ib(type=str)


-UserAttributeDict = TypedDict(
-    "UserAttributeDict",
-    {"localpart": Optional[str], "display_name": Optional[str], "emails": List[str]},
-)
+class UserAttributeDict(TypedDict):
+    localpart: Optional[str]
+    display_name: Optional[str]
+    emails: List[str]


 C = TypeVar("C")


@@ -55,15 +55,12 @@ login_counter = Counter(
     ["guest", "auth_provider"],
 )

-LoginDict = TypedDict(
-    "LoginDict",
-    {
-        "device_id": str,
-        "access_token": str,
-        "valid_until_ms": Optional[int],
-        "refresh_token": Optional[str],
-    },
-)
+
+class LoginDict(TypedDict):
+    device_id: str
+    access_token: str
+    valid_until_ms: Optional[int]
+    refresh_token: Optional[str]


 class RegistrationHandler(BaseHandler):
@@ -372,7 +372,7 @@ class SamlHandler(BaseHandler):


 DOT_REPLACE_PATTERN = re.compile(
-    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
+    "[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)
 )


@@ -1601,7 +1601,7 @@ class SyncHandler:
             logger.debug(
                 "Membership changes in %s: [%s]",
                 room_id,
-                ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
+                ", ".join("%s (%s)" % (e.event_id, e.membership) for e in events),
             )

             non_joins = [e for e in events if e.membership != Membership.JOIN]
@@ -172,7 +172,7 @@ class ProxyAgent(_AgentBase):
         """
         uri = uri.strip()
         if not _VALID_URI.match(uri):
-            raise ValueError("Invalid URI {!r}".format(uri))
+            raise ValueError(f"Invalid URI {uri!r}")

         parsed_uri = URI.fromBytes(uri)
         pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
@@ -384,7 +384,7 @@ class SynapseRequest(Request):
         # authenticated (e.g. and admin is puppetting a user) then we log both.
         requester, authenticated_entity = self.get_authenticated_entity()
         if authenticated_entity:
-            requester = "{}.{}".format(authenticated_entity, requester)
+            requester = f"{authenticated_entity}.{requester}"

         self.site.access_logger.log(
             log_level,
@@ -374,7 +374,7 @@ def init_tracer(hs: "HomeServer"):

     config = JaegerConfig(
         config=hs.config.jaeger_config,
-        service_name="{} {}".format(hs.config.server_name, hs.get_instance_name()),
+        service_name=f"{hs.config.server_name} {hs.get_instance_name()}",
         scope_manager=LogContextScopeManager(hs.config),
         metrics_factory=PrometheusMetricsFactory(),
     )
@@ -34,7 +34,7 @@ from twisted.web.resource import Resource

 from synapse.util import caches

-CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
+CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"


 INF = float("inf")
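On the `CONTENT_TYPE_LATEST` change above: wrapping a string literal in `str()` is a Python 2-era habit that does nothing on Python 3. A one-line check:

```python
# str("...") returns an equal str; the wrapper adds nothing on Python 3.
assert str("text/plain; version=0.0.4; charset=utf-8") == "text/plain; version=0.0.4; charset=utf-8"
```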
@@ -55,8 +55,8 @@ def floatToGoString(d):
         # Go switches to exponents sooner than Python.
         # We only need to care about positive values for le/quantile.
         if d > 0 and dot > 6:
-            mantissa = "{0}.{1}{2}".format(s[0], s[1:dot], s[dot + 1 :]).rstrip("0.")
-            return "{0}e+0{1}".format(mantissa, dot - 1)
+            mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
+            return f"{mantissa}e+0{dot - 1}"
         return s


@@ -65,7 +65,7 @@ def sample_line(line, name):
         labelstr = "{{{0}}}".format(
             ",".join(
                 [
-                    '{0}="{1}"'.format(
+                    '{}="{}"'.format(
                         k,
                         v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
                     )
@@ -78,10 +78,8 @@ def sample_line(line, name):
     timestamp = ""
     if line.timestamp is not None:
         # Convert to milliseconds.
-        timestamp = " {0:d}".format(int(float(line.timestamp) * 1000))
-    return "{0}{1} {2}{3}\n".format(
-        name, labelstr, floatToGoString(line.value), timestamp
-    )
+        timestamp = f" {int(float(line.timestamp) * 1000):d}"
+    return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)


 def generate_latest(registry, emit_help=False):
@@ -118,12 +116,12 @@ def generate_latest(registry, emit_help=False):
         # Output in the old format for compatibility.
         if emit_help:
             output.append(
-                "# HELP {0} {1}\n".format(
+                "# HELP {} {}\n".format(
                     mname,
                     metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                 )
             )
-        output.append("# TYPE {0} {1}\n".format(mname, mtype))
+        output.append(f"# TYPE {mname} {mtype}\n")

         om_samples: Dict[str, List[str]] = {}
         for s in metric.samples:
@@ -143,13 +141,13 @@ def generate_latest(registry, emit_help=False):
         for suffix, lines in sorted(om_samples.items()):
             if emit_help:
                 output.append(
-                    "# HELP {0}{1} {2}\n".format(
+                    "# HELP {}{} {}\n".format(
                         metric.name,
                         suffix,
                         metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                     )
                 )
-            output.append("# TYPE {0}{1} gauge\n".format(metric.name, suffix))
+            output.append(f"# TYPE {metric.name}{suffix} gauge\n")
             output.extend(lines)

         # Get rid of the weird colon things while we're at it
@@ -163,12 +161,12 @@ def generate_latest(registry, emit_help=False):
         # Also output in the new format, if it's different.
         if emit_help:
             output.append(
-                "# HELP {0} {1}\n".format(
+                "# HELP {} {}\n".format(
                     mnewname,
                     metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                 )
             )
-        output.append("# TYPE {0} {1}\n".format(mnewname, mtype))
+        output.append(f"# TYPE {mnewname} {mtype}\n")

         for s in metric.samples:
             # Get rid of the OpenMetrics specific samples (we should already have
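The `{0}`/`{1}` → `{}` changes in this file rely on `str.format()` auto-numbering, which is equivalent when each argument is used once, in order; the simpler cases go all the way to f-strings. A small sketch with made-up values:

```python
mname, mtype = "synapse_http_requests", "counter"

numbered = "# TYPE {0} {1}\n".format(mname, mtype)  # explicit indexes
auto = "# TYPE {} {}\n".format(mname, mtype)        # auto-numbered
fstring = f"# TYPE {mname} {mtype}\n"               # f-string form used in the diff

assert numbered == auto == fstring
```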
@@ -137,8 +137,7 @@ class _Collector:
             _background_process_db_txn_duration,
             _background_process_db_sched_duration,
         ):
-            for r in m.collect():
-                yield r
+            yield from m.collect()


 REGISTRY.register(_Collector())
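The `yield from` rewrites here (and in `enumerate_leaves` / `iterate_tree_cache_entry` further down) delegate to a sub-generator instead of looping over it; the values produced are the same. A standalone sketch with a toy tree and hypothetical names:

```python
from typing import Dict, Iterator, Union

Tree = Union[int, Dict[str, "Tree"]]


def leaves_with_loop(node: Tree) -> Iterator[int]:
    if isinstance(node, dict):
        for child in node.values():
            for leaf in leaves_with_loop(child):  # old style
                yield leaf
    else:
        yield node


def leaves_with_yield_from(node: Tree) -> Iterator[int]:
    if isinstance(node, dict):
        for child in node.values():
            yield from leaves_with_yield_from(child)  # rewritten style
    else:
        yield node


tree: Tree = {"a": 1, "b": {"c": 2, "d": 3}}
assert list(leaves_with_loop(tree)) == list(leaves_with_yield_from(tree)) == [1, 2, 3]
```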
@@ -44,19 +44,14 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)


-LoginResponse = TypedDict(
-    "LoginResponse",
-    {
-        "user_id": str,
-        "access_token": str,
-        "home_server": str,
-        "expires_in_ms": Optional[int],
-        "refresh_token": Optional[str],
-        "device_id": str,
-        "well_known": Optional[Dict[str, Any]],
-    },
-    total=False,
-)
+class LoginResponse(TypedDict, total=False):
+    user_id: str
+    access_token: str
+    home_server: str
+    expires_in_ms: Optional[int]
+    refresh_token: Optional[str]
+    device_id: str
+    well_known: Optional[Dict[str, Any]]


 class LoginRestServlet(RestServlet):
@@ -150,9 +145,7 @@ class LoginRestServlet(RestServlet):
             # login flow types returned.
             flows.append({"type": LoginRestServlet.TOKEN_TYPE})

-        flows.extend(
-            ({"type": t} for t in self.auth_handler.get_supported_login_types())
-        )
+        flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types())

         flows.append({"type": LoginRestServlet.APPSERVICE_TYPE})

@@ -17,7 +17,7 @@ import PIL.Image
 # check for JPEG support.
 try:
     PIL.Image._getdecoder("rgb", "jpeg", None)
-except IOError as e:
+except OSError as e:
     if str(e).startswith("decoder jpeg not available"):
         raise Exception(
             "FATAL: jpeg codec not supported. Install pillow correctly! "
@@ -32,7 +32,7 @@ except Exception:
 # check for PNG support.
 try:
     PIL.Image._getdecoder("rgb", "zip", None)
-except IOError as e:
+except OSError as e:
     if str(e).startswith("decoder zip not available"):
         raise Exception(
             "FATAL: zip codec not supported. Install pillow correctly! "
@@ -907,7 +907,7 @@ class DatabasePool:
         # The sort is to ensure that we don't rely on dictionary iteration
         # order.
         keys, vals = zip(
-            *[zip(*(sorted(i.items(), key=lambda kv: kv[0]))) for i in values if i]
+            *(zip(*(sorted(i.items(), key=lambda kv: kv[0]))) for i in values if i)
         )

         for k in keys:
@@ -203,9 +203,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             "delete_messages_for_device", delete_messages_for_device_txn
         )

-        log_kv(
-            {"message": "deleted {} messages for device".format(count), "count": count}
-        )
+        log_kv({"message": f"deleted {count} messages for device", "count": count})

         # Update the cache, ensuring that we only ever increase the value
         last_deleted_stream_id = self._last_device_delete_cache.get(
@@ -27,8 +27,11 @@ from synapse.util import json_encoder
 _DEFAULT_CATEGORY_ID = ""
 _DEFAULT_ROLE_ID = ""


 # A room in a group.
-_RoomInGroup = TypedDict("_RoomInGroup", {"room_id": str, "is_public": bool})
+class _RoomInGroup(TypedDict):
+    room_id: str
+    is_public: bool


 class GroupServerWorkerStore(SQLBaseStore):
@@ -92,6 +95,7 @@ class GroupServerWorkerStore(SQLBaseStore):
                 "is_public": False  # Whether this is a public room or not
             }
         """

         # TODO: Pagination

         def _get_rooms_in_group_txn(txn):
@@ -649,7 +649,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             event_to_memberships = await self._get_joined_profiles_from_event_ids(
                 missing_member_event_ids
             )
-            users_in_room.update((row for row in event_to_memberships.values() if row))
+            users_in_room.update(row for row in event_to_memberships.values() if row)

         if event is not None and event.type == EventTypes.Member:
             if event.membership == Membership.JOIN:
@@ -639,7 +639,7 @@ def get_statements(f: Iterable[str]) -> Generator[str, None, None]:


 def executescript(txn: Cursor, schema_path: str) -> None:
-    with open(schema_path, "r") as f:
+    with open(schema_path) as f:
         execute_statements_from_stream(txn, f)


@@ -577,10 +577,10 @@ class RoomStreamToken:
             entries = []
             for name, pos in self.instance_map.items():
                 instance_id = await store.get_id_for_instance(name)
-                entries.append("{}.{}".format(instance_id, pos))
+                entries.append(f"{instance_id}.{pos}")

             encoded_map = "~".join(entries)
-            return "m{}~{}".format(self.stream, encoded_map)
+            return f"m{self.stream}~{encoded_map}"
         else:
             return "s%d" % (self.stream,)

@@ -90,8 +90,7 @@ def enumerate_leaves(node, depth):
         yield node
     else:
         for n in node.values():
-            for m in enumerate_leaves(n, depth - 1):
-                yield m
+            yield from enumerate_leaves(n, depth - 1)


 P = TypeVar("P")
@@ -138,7 +138,6 @@ def iterate_tree_cache_entry(d):
     """
     if isinstance(d, TreeCacheNode):
         for value_d in d.values():
-            for value in iterate_tree_cache_entry(value_d):
-                yield value
+            yield from iterate_tree_cache_entry(value_d)
     else:
         yield d
@@ -31,13 +31,13 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -
     # If pidfile already exists, we should read pid from there; to overwrite it, if
     # locking will fail, because locking attempt somehow purges the file contents.
     if os.path.isfile(pid_file):
-        with open(pid_file, "r") as pid_fh:
+        with open(pid_file) as pid_fh:
             old_pid = pid_fh.read()

     # Create a lockfile so that only one instance of this daemon is running at any time.
     try:
         lock_fh = open(pid_file, "w")
-    except IOError:
+    except OSError:
         print("Unable to create the pidfile.")
         sys.exit(1)

@@ -45,7 +45,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -
         # Try to get an exclusive lock on the file. This will fail if another process
         # has the file locked.
         fcntl.flock(lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
-    except IOError:
+    except OSError:
         print("Unable to lock on the pidfile.")
         # We need to overwrite the pidfile if we got here.
         #
@@ -113,7 +113,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -
     try:
         lock_fh.write("%s" % (os.getpid()))
         lock_fh.flush()
-    except IOError:
+    except OSError:
         logger.error("Unable to write pid to the pidfile.")
         print("Unable to write pid to the pidfile.")
         sys.exit(1)
@@ -96,7 +96,7 @@ async def filter_events_for_client(
     if isinstance(ignored_users_dict, dict):
         ignore_list = frozenset(ignored_users_dict.keys())

-    erased_senders = await storage.main.are_users_erased((e.sender for e in events))
+    erased_senders = await storage.main.are_users_erased(e.sender for e in events)

    if filter_send_to_client:
        room_ids = {e.room_id for e in events}
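The `are_users_erased(...)` change above (and the matching one in the next hunk) drops a redundant pair of parentheses: a generator expression that is the sole argument of a call does not need its own parentheses. A tiny standalone check:

```python
events = ["@alice:example.org", "@bob:example.org", "@alice:example.org"]

# With redundant parentheses around the generator expression...
senders_old = frozenset((e.lower() for e in events))
# ...and without: the call's parentheses are enough.
senders_new = frozenset(e.lower() for e in events)

assert senders_old == senders_new
```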
@@ -353,7 +353,7 @@ async def filter_events_for_server(
     )

     if not check_history_visibility_only:
-        erased_senders = await storage.main.are_users_erased((e.sender for e in events))
+        erased_senders = await storage.main.are_users_erased(e.sender for e in events)
     else:
         # We don't want to check whether users are erased, which is equivalent
         # to no users having been erased.