# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2017 - 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import simplejson
import sys
from canonicaljson import encode_canonical_json
import six
from six import string_types, itervalues, iteritems
from twisted.internet import defer
from twisted.internet.defer import succeed
from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership, MAX_DEPTH
from synapse.api.errors import (
AuthError, Codes, SynapseError,
ConsentNotGivenError,
)
from synapse.api.urls import ConsentURIBuilder
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.types import (
UserID, RoomAlias, RoomStreamToken,
)
from synapse.util.async import ReadWriteLock, Limiter
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import measure_func
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client
from synapse.replication.http.send_event import send_event_to_master
from ._base import BaseHandler
logger = logging.getLogger(__name__)
class PurgeStatus(object):
"""Object tracking the status of a purge request
This class contains information on the progress of a purge request, for
return by get_purge_status.
Attributes:
status (int): Tracks whether this request has completed. One of
STATUS_{ACTIVE,COMPLETE,FAILED}
"""
STATUS_ACTIVE = 0
STATUS_COMPLETE = 1
STATUS_FAILED = 2
STATUS_TEXT = {
STATUS_ACTIVE: "active",
STATUS_COMPLETE: "complete",
STATUS_FAILED: "failed",
}
def __init__(self):
self.status = PurgeStatus.STATUS_ACTIVE
def asdict(self):
return {
"status": PurgeStatus.STATUS_TEXT[self.status]
}
class MessageHandler(BaseHandler):
def __init__(self, hs):
super(MessageHandler, self).__init__(hs)
self.hs = hs
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.pagination_lock = ReadWriteLock()
self._purges_in_progress_by_room = set()
# map from purge id to PurgeStatus
self._purges_by_id = {}
def start_purge_history(self, room_id, token,
delete_local_events=False):
"""Start off a history purge on a room.
Args:
room_id (str): The room to purge from
token (str): topological token to delete events before
delete_local_events (bool): True to delete local events as well as
remote ones
Returns:
str: unique ID for this purge transaction.
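Example (an illustrative sketch; ``handler``, the room id and the
token are hypothetical):
    purge_id = handler.start_purge_history(
        "!room:example.com", topo_token, delete_local_events=False,
    )
    # progress can later be polled via handler.get_purge_status(purge_id)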
"""
if room_id in self._purges_in_progress_by_room:
raise SynapseError(
400,
"History purge already in progress for %s" % (room_id, ),
)
purge_id = random_string(16)
# we log the purge_id here so that it can be tied back to the
# request id in the log lines.
logger.info("[purge] starting purge_id %s", purge_id)
self._purges_by_id[purge_id] = PurgeStatus()
run_in_background(
self._purge_history,
purge_id, room_id, token, delete_local_events,
)
return purge_id
@defer.inlineCallbacks
def _purge_history(self, purge_id, room_id, token,
delete_local_events):
"""Carry out a history purge on a room.
Args:
purge_id (str): The id for this purge
room_id (str): The room to purge from
token (str): topological token to delete events before
delete_local_events (bool): True to delete local events as well as
remote ones
Returns:
Deferred
"""
self._purges_in_progress_by_room.add(room_id)
try:
with (yield self.pagination_lock.write(room_id)):
yield self.store.purge_history(
room_id, token, delete_local_events,
)
logger.info("[purge] complete")
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
except Exception:
logger.error("[purge] failed: %s", Failure().getTraceback().rstrip())
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
finally:
self._purges_in_progress_by_room.discard(room_id)
# remove the purge from the list 24 hours after it completes
def clear_purge():
del self._purges_by_id[purge_id]
self.hs.get_reactor().callLater(24 * 3600, clear_purge)
def get_purge_status(self, purge_id):
"""Get the current status of an active purge
Args:
purge_id (str): purge_id returned by start_purge_history
Returns:
PurgeStatus|None
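Example (illustrative; ``handler`` and ``purge_id`` come from a
previous start_purge_history call):
    status = handler.get_purge_status(purge_id)
    if status is not None:
        print(status.asdict())  # e.g. {"status": "complete"}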
"""
return self._purges_by_id.get(purge_id)
@defer.inlineCallbacks
def get_messages(self, requester, room_id=None, pagin_config=None,
as_client_event=True, event_filter=None):
"""Get messages in a room.
Args:
requester (Requester): The user requesting messages.
room_id (str): The room they want messages from.
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config rules to apply, if any.
as_client_event (bool): True to get events in client-server format.
event_filter (Filter): Filter to apply to results or None
Returns:
dict: Pagination API results
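Example return value (shape only; the tokens and events are
placeholders rather than real values):
    {
        "chunk": [<serialized events>],
        "start": "<token the page started at>",
        "end": "<token to pass as from_token for the next page>",
    }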
"""
user_id = requester.user.to_string()
if pagin_config.from_token:
room_token = pagin_config.from_token.room_key
else:
pagin_config.from_token = (
yield self.hs.get_event_sources().get_current_token_for_room(
room_id=room_id
)
)
room_token = pagin_config.from_token.room_key
room_token = RoomStreamToken.parse(room_token)
pagin_config.from_token = pagin_config.from_token.copy_and_replace(
"room_key", str(room_token)
)
source_config = pagin_config.get_source_config("room")
with (yield self.pagination_lock.read(room_id)):
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id
)
if source_config.direction == 'b':
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
max_topo = room_token.topological
else:
max_topo = yield self.store.get_max_topological_token(
room_id, room_token.stream
)
if membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
leave_token = yield self.store.get_topological_token_for_event(
member_event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < max_topo:
source_config.from_key = str(leave_token)
yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, max_topo
)
events, next_key = yield self.store.paginate_room_events(
room_id=room_id,
from_key=source_config.from_key,
to_key=source_config.to_key,
direction=source_config.direction,
limit=source_config.limit,
event_filter=event_filter,
)
next_token = pagin_config.from_token.copy_and_replace(
"room_key", next_key
)
if not events:
defer.returnValue({
"chunk": [],
"start": pagin_config.from_token.to_string(),
"end": next_token.to_string(),
})
if event_filter:
events = event_filter.filter(events)
events = yield filter_events_for_client(
self.store,
user_id,
events,
is_peeking=(member_event_id is None),
)
time_now = self.clock.time_msec()
chunk = {
"chunk": [
serialize_event(e, time_now, as_client_event)
for e in events
],
"start": pagin_config.from_token.to_string(),
"end": next_token.to_string(),
}
defer.returnValue(chunk)
@defer.inlineCallbacks
def get_room_data(self, user_id=None, room_id=None,
event_type=None, state_key="", is_guest=False):
""" Get data from a room.
Args:
user_id (str): The user requesting the data.
room_id (str): The room the data is in.
event_type (str): The type of state event to fetch.
state_key (str): The state key of the state event.
is_guest (bool): Whether the requesting user is a guest.
Returns:
The content of the requested state event.
Raises:
SynapseError if something went wrong.
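Example (an illustrative sketch; the user and room ids are
hypothetical):
    content = yield handler.get_room_data(
        user_id="@alice:example.com",
        room_id="!room:example.com",
        event_type="m.room.topic",
        state_key="",
    )
    # content might then look like {"topic": "Weekly sync"}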
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id
)
if membership == Membership.JOIN:
data = yield self.state_handler.get_current_state(
room_id, event_type, state_key
)
elif membership == Membership.LEAVE:
key = (event_type, state_key)
room_state = yield self.store.get_state_for_events(
[membership_event_id], [key]
)
data = room_state[membership_event_id].get(key)
defer.returnValue(data)
@defer.inlineCallbacks
def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
return
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (
visibility and
visibility.content["history_visibility"] == "world_readable"
):
defer.returnValue((Membership.JOIN, None))
return
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@defer.inlineCallbacks
def get_state_events(self, user_id, room_id, is_guest=False):
"""Retrieve all state events for a given room. If the user is
joined to the room then return the current state. If the user has
left the room return the state events from when they left.
Args:
user_id(str): The user requesting state events.
room_id(str): The room ID to get all state events from.
Returns:
A list of dicts representing state events. [{}, {}, {}]
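Example element of the returned list (illustrative values):
    {
        "type": "m.room.name",
        "state_key": "",
        "sender": "@alice:example.com",
        "content": {"name": "Project chat"},
    }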
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id
)
if membership == Membership.JOIN:
room_state = yield self.state_handler.get_current_state(room_id)
elif membership == Membership.LEAVE:
room_state = yield self.store.get_state_for_events(
[membership_event_id], None
)
room_state = room_state[membership_event_id]
now = self.clock.time_msec()
defer.returnValue(
[serialize_event(c, now) for c in room_state.values()]
)
@defer.inlineCallbacks
def get_joined_members(self, requester, room_id):
"""Get all the joined members in the room and their profile information.
Currently this is only supported if the requester is joined to the room,
or is an application service interested in a user in the room.
Args:
requester(Requester): The user requesting state events.
room_id(str): The room ID to get all state events from.
Returns:
A dict of user_id to profile info
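Example return value (illustrative users):
    {
        "@alice:example.com": {
            "avatar_url": "mxc://example.com/abc123",
            "display_name": "Alice",
        },
    }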
"""
user_id = requester.user.to_string()
if not requester.app_service:
# We check AS auth after fetching the room membership, as it
# requires us to pull out all joined members anyway.
membership, _ = yield self._check_in_room_or_world_readable(
room_id, user_id
)
if membership != Membership.JOIN:
raise NotImplementedError(
"Getting joined members after leaving is not implemented"
)
users_with_profile = yield self.state.get_current_user_in_room(room_id)
# If this is an AS, double check that they are allowed to see the members.
# This can either be because the AS user is in the room or because there
# is a user in the room that the AS is "interested in"
if requester.app_service and user_id not in users_with_profile:
for uid in users_with_profile:
if requester.app_service.is_interested_in_user(uid):
break
else:
# Loop fell through, AS has no interested users in room
raise AuthError(403, "Appservice not in room")
defer.returnValue({
user_id: {
"avatar_url": profile.avatar_url,
"display_name": profile.display_name,
}
for user_id, profile in iteritems(users_with_profile)
})
class EventCreationHandler(object):
def __init__(self, hs):
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.profile_handler = hs.get_profile_handler()
self.event_builder_factory = hs.get_event_builder_factory()
self.server_name = hs.hostname
self.ratelimiter = hs.get_ratelimiter()
self.notifier = hs.get_notifier()
self.config = hs.config
self.http_client = hs.get_simple_http_client()
# This is only used to get at the ratelimit function and maybe_kick_guest_users
self.base_handler = BaseHandler(hs)
self.pusher_pool = hs.get_pusherpool()
# We arbitrarily limit concurrent event creation for a room to 5.
# This is to stop us from diverging history *too* much.
self.limiter = Limiter(max_count=5)
self.action_generator = hs.get_action_generator()
self.spam_checker = hs.get_spam_checker()
if self.config.block_events_without_consent_error is not None:
self._consent_uri_builder = ConsentURIBuilder(self.config)
@defer.inlineCallbacks
def create_event(self, requester, event_dict, token_id=None, txn_id=None,
prev_events_and_hashes=None):
"""
Given a dict from a client, create a new event.
Creates a FrozenEvent object, filling out auth_events, prev_events,
etc.
Adds display names to Join membership events.
Args:
requester
event_dict (dict): An entire event
token_id (str)
txn_id (str)
prev_events_and_hashes (list[(str, dict[str, str], int)]|None):
the forward extremities to use as the prev_events for the
new event. For each event, a tuple of (event_id, hashes, depth)
where *hashes* is a map from algorithm to hash.
If None, they will be requested from the database.
Returns:
Tuple of created event (FrozenEvent), Context
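Example event_dict for a plain text message (the sender and room id
are hypothetical):
    {
        "type": "m.room.message",
        "room_id": "!room:example.com",
        "sender": "@alice:example.com",
        "content": {"msgtype": "m.text", "body": "hello"},
    }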
"""
builder = self.event_builder_factory.new(event_dict)
self.validator.validate_new(builder)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)
if membership in {Membership.JOIN, Membership.INVITE}:
# If event doesn't include a display name, add one.
profile = self.profile_handler
content = builder.content
try:
if "displayname" not in content:
content["displayname"] = yield profile.get_displayname(target)
if "avatar_url" not in content:
content["avatar_url"] = yield profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s",
target, e
)
is_exempt = yield self._is_exempt_from_privacy_policy(builder)
if not is_exempt:
yield self.assert_accepted_privacy_policy(requester)
if token_id is not None:
builder.internal_metadata.token_id = token_id
if txn_id is not None:
builder.internal_metadata.txn_id = txn_id
event, context = yield self.create_new_client_event(
builder=builder,
requester=requester,
prev_events_and_hashes=prev_events_and_hashes,
)
defer.returnValue((event, context))
def _is_exempt_from_privacy_policy(self, builder):
""""Determine if an event to be sent is exempt from having to consent
to the privacy policy
Args:
builder (synapse.events.builder.EventBuilder): event being created
Returns:
Deferred[bool]: true if the event can be sent without the user
consenting
"""
# the only thing the user can do is join the server notices room.
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
return self._is_server_notices_room(builder.room_id)
return succeed(False)
@defer.inlineCallbacks
def _is_server_notices_room(self, room_id):
if self.config.server_notices_mxid is None:
defer.returnValue(False)
user_ids = yield self.store.get_users_in_room(room_id)
defer.returnValue(self.config.server_notices_mxid in user_ids)
@defer.inlineCallbacks
def assert_accepted_privacy_policy(self, requester):
"""Check if a user has accepted the privacy policy
Called when the given user is about to do something that requires
privacy consent. We see if the user is exempt and otherwise check that
they have given consent. If they have not, a ConsentNotGiven error is
raised.
Args:
requester (synapse.types.Requester):
The user making the request
Returns:
Deferred[None]: returns normally if the user has consented or is
exempt
Raises:
ConsentNotGivenError: if the user has not given consent yet
"""
if self.config.block_events_without_consent_error is None:
return
# exempt AS users from needing consent
if requester.app_service is not None:
return
user_id = requester.user.to_string()
# exempt the system notices user
if (
self.config.server_notices_mxid is not None and
user_id == self.config.server_notices_mxid
):
return
u = yield self.store.get_user_by_id(user_id)
assert u is not None
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
if u["consent_version"] == self.config.user_consent_version:
return
consent_uri = self._consent_uri_builder.build_user_consent_uri(
requester.user.localpart,
)
msg = self.config.block_events_without_consent_error % {
'consent_uri': consent_uri,
}
raise ConsentNotGivenError(
msg=msg,
consent_uri=consent_uri,
)
@defer.inlineCallbacks
def send_nonmember_event(self, requester, event, context, ratelimit=True):
"""
Persists and notifies local clients and federation of an event.
Args:
requester (Requester): The requester sending the event.
event (FrozenEvent): the event to send.
context (Context): the context of the event.
ratelimit (bool): Whether to rate limit this send.
"""
if event.type == EventTypes.Member:
raise SynapseError(
500,
"Tried to send member event through non-member codepath"
)
user = UserID.from_string(event.sender)
assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
if event.is_state():
prev_state = yield self.deduplicate_state_event(event, context)
if prev_state is not None:
defer.returnValue(prev_state)
yield self.handle_new_client_event(
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
)
@defer.inlineCallbacks
def deduplicate_state_event(self, event, context):
"""
Checks whether event is in the latest resolved state in context.
If so, returns the version of the event in context.
Otherwise, returns None.
"""
prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
if not prev_event:
return
if prev_event and event.user_id == prev_event.user_id:
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
defer.returnValue(prev_event)
return
@defer.inlineCallbacks
def create_and_send_nonmember_event(
self,
requester,
event_dict,
ratelimit=True,
txn_id=None
):
"""
Creates an event, then sends it.
See self.create_event and self.send_nonmember_event.
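A minimal usage sketch (``requester`` and the room id are hypothetical):
    event = yield self.create_and_send_nonmember_event(
        requester,
        {
            "type": "m.room.message",
            "room_id": "!room:example.com",
            "sender": requester.user.to_string(),
            "content": {"msgtype": "m.text", "body": "hello"},
        },
        ratelimit=True,
    )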
"""
# We limit the number of concurrent event sends in a room so that we
# don't fork the DAG too much. If we don't limit then we can end up in
# a situation where event persistence can't keep up, causing
# extremities to pile up, which in turn leads to state resolution
# taking longer.
with (yield self.limiter.queue(event_dict["room_id"])):
event, context = yield self.create_event(
requester,
event_dict,
token_id=requester.access_token_id,
txn_id=txn_id
)
spam_error = self.spam_checker.check_event_for_spam(event)
if spam_error:
if not isinstance(spam_error, string_types):
spam_error = "Spam is not permitted here"
raise SynapseError(
403, spam_error, Codes.FORBIDDEN
)
yield self.send_nonmember_event(
requester,
event,
context,
ratelimit=ratelimit,
)
defer.returnValue(event)
@measure_func("create_new_client_event")
@defer.inlineCallbacks
def create_new_client_event(self, builder, requester=None,
prev_events_and_hashes=None):
"""Create a new event for a local client
Args:
builder (EventBuilder):
requester (synapse.types.Requester|None):
prev_events_and_hashes (list[(str, dict[str, str], int)]|None):
the forward extremities to use as the prev_events for the
new event. For each event, a tuple of (event_id, hashes, depth)
where *hashes* is a map from algorithm to hash.
If None, they will be requested from the database.
Returns:
Deferred[(synapse.events.EventBase, synapse.events.snapshot.EventContext)]
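Example prev_events_and_hashes entry (shape only; the values are
placeholders):
    ("$placeholder_event_id:example.com", {"sha256": "<base64 hash>"}, 12)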
"""
if prev_events_and_hashes is not None:
assert len(prev_events_and_hashes) <= 10, \
"Attempting to create an event with %i prev_events" % (
len(prev_events_and_hashes),
)
else:
prev_events_and_hashes = \
yield self.store.get_prev_events_for_room(builder.room_id)
if prev_events_and_hashes:
depth = max([d for _, _, d in prev_events_and_hashes]) + 1
# we cap depth of generated events, to ensure that they are not
# rejected by other servers (and so that they can be persisted in
# the db)
depth = min(depth, MAX_DEPTH)
else:
depth = 1
prev_events = [
(event_id, prev_hashes)
for event_id, prev_hashes, _ in prev_events_and_hashes
]
builder.prev_events = prev_events
builder.depth = depth
context = yield self.state.compute_event_context(builder)
if requester:
context.app_service = requester.app_service
if builder.is_state():
builder.prev_state = yield self.store.add_event_hashes(
context.prev_state_events
)
yield self.auth.add_auth_events(builder, context)
signing_key = self.hs.config.signing_key[0]
add_hashes_and_signatures(
builder, self.server_name, signing_key
)
event = builder.build()
logger.debug(
"Created event %s with state: %s",
event.event_id, context.prev_state_ids,
)
defer.returnValue(
(event, context,)
)
@measure_func("handle_new_client_event")
@defer.inlineCallbacks
def handle_new_client_event(
self,
requester,
event,
context,
ratelimit=True,
extra_users=[],
):
"""Processes a new event. This includes checking auth, persisting it,
notifying users, sending to remote servers, etc.
If called from a worker, this will hit out to the master process for final
processing.
Args:
requester (Requester)
event (FrozenEvent)
context (EventContext)
ratelimit (bool)
extra_users (list(UserID)): Any extra users to notify about event
"""
try:
yield self.auth.check_from_context(event, context)
except AuthError as err:
logger.warn("Denying new event %r because %s", event, err)
raise err
# Ensure that we can round trip before trying to persist in db
try:
dump = frozendict_json_encoder.encode(event.content)
simplejson.loads(dump)
except Exception:
logger.exception("Failed to encode content: %r", event.content)
raise
yield self.action_generator.handle_push_actions_for_event(
event, context
)
try:
# If we're a worker we need to hit out to the master.
if self.config.worker_app:
yield send_event_to_master(
self.hs.get_clock(),
self.http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
extra_users=extra_users,
)
return
yield self.persist_and_notify_client_event(
requester,
event,
context,
ratelimit=ratelimit,
extra_users=extra_users,
)
except: # noqa: E722, as we reraise the exception this is fine.
# Ensure that we actually remove the entries in the push actions
# staging area, if we calculated them.
tp, value, tb = sys.exc_info()
run_in_background(
self.store.remove_push_actions_from_staging,
event.event_id,
)
six.reraise(tp, value, tb)
@defer.inlineCallbacks
def persist_and_notify_client_event(
self,
requester,
event,
context,
ratelimit=True,
extra_users=[],
):
"""Called when we have fully built the event, have already
calculated the push actions for the event, and checked auth.
This should only be run on master.
"""
assert not self.config.worker_app
if ratelimit:
yield self.base_handler.ratelimit(requester)
yield self.base_handler.maybe_kick_guest_users(event, context)
if event.type == EventTypes.CanonicalAlias:
# Check the alias is actually valid (at this time at least)
room_alias_str = event.content.get("alias", None)
if room_alias_str:
room_alias = RoomAlias.from_string(room_alias_str)
directory_handler = self.hs.get_handlers().directory_handler
mapping = yield directory_handler.get_association(room_alias)
if mapping["room_id"] != event.room_id:
raise SynapseError(
400,
"Room alias %s does not point to the room" % (
room_alias_str,
)
)
federation_handler = self.hs.get_handlers().federation_handler
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE:
def is_inviter_member_event(e):
return (
e.type == EventTypes.Member and
e.sender == event.sender
)
state_to_include_ids = [
e_id
for k, e_id in iteritems(context.current_state_ids)
if k[0] in self.hs.config.room_invite_state_types
or k == (EventTypes.Member, event.sender)
]
state_to_include = yield self.store.get_events(state_to_include_ids)
event.unsigned["invite_room_state"] = [
{
"type": e.type,
"state_key": e.state_key,
"content": e.content,
"sender": e.sender,
}
for e in itervalues(state_to_include)
]
invitee = UserID.from_string(event.state_key)
if not self.hs.is_mine(invitee):
# TODO: Can we add signature from remote server in a nicer
# way? If we have been invited by a remote server, we need
# to get them to sign the event.
returned_invite = yield federation_handler.send_invite(
invitee.domain,
event,
)
event.unsigned.pop("room_state", None)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(
returned_invite.signatures
)
if event.type == EventTypes.Redaction:
auth_events_ids = yield self.auth.compute_auth_events(
event, context.prev_state_ids, for_verification=True,
)
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {
(e.type, e.state_key): e for e in auth_events.values()
}
if self.auth.check_redaction(event, auth_events=auth_events):
original_event = yield self.store.get_event(
event.redacts,
check_redacted=False,
get_prev_content=False,
allow_rejected=False,
allow_none=False
)
if event.user_id != original_event.user_id:
raise AuthError(
403,
"You don't have permission to redact events"
)
if event.type == EventTypes.Create and context.prev_state_ids:
raise AuthError(
403,
"Changing the room create event is forbidden",
)
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)
# this intentionally does not yield: we don't care about the result
# and don't need to wait for it.
run_in_background(
self.pusher_pool.on_new_notifications,
event_stream_id, max_stream_id
)
def _notify():
try:
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
except Exception:
logger.exception("Error notifying about new room event")
run_in_background(_notify)
if event.type == EventTypes.Message:
# We don't want to block sending messages on any presence code. This
# matters as sometimes presence code can take a while.
run_in_background(self._bump_active_time, requester.user)
@defer.inlineCallbacks
def _bump_active_time(self, user):
try:
presence = self.hs.get_presence_handler()
yield presence.bump_presence_active_time(user)
except Exception:
logger.exception("Error bumping presence active time")