|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Contains handlers for federation events.""" |
|
|
|
import itertools |
|
import logging |
|
from collections.abc import Container |
|
from http import HTTPStatus |
|
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union |
|
|
|
import attr |
|
from signedjson.key import decode_verify_key_bytes |
|
from signedjson.sign import verify_signed_json |
|
from unpaddedbase64 import decode_base64 |
|
|
|
from twisted.internet import defer |
|
|
|
from synapse import event_auth |
|
from synapse.api.constants import ( |
|
EventTypes, |
|
Membership, |
|
RejectedReason, |
|
RoomEncryptionAlgorithms, |
|
) |
|
from synapse.api.errors import ( |
|
AuthError, |
|
CodeMessageException, |
|
Codes, |
|
FederationDeniedError, |
|
FederationError, |
|
HttpResponseException, |
|
NotFoundError, |
|
RequestSendFailed, |
|
SynapseError, |
|
) |
|
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions |
|
from synapse.crypto.event_signing import compute_event_signature |
|
from synapse.event_auth import auth_types_for_event |
|
from synapse.events import EventBase |
|
from synapse.events.snapshot import EventContext |
|
from synapse.events.validator import EventValidator |
|
from synapse.handlers._base import BaseHandler |
|
from synapse.http.servlet import assert_params_in_dict |
|
from synapse.logging.context import ( |
|
make_deferred_yieldable, |
|
nested_logging_context, |
|
preserve_fn, |
|
run_in_background, |
|
) |
|
from synapse.logging.utils import log_function |
|
from synapse.metrics.background_process_metrics import run_as_background_process |
|
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet |
|
from synapse.replication.http.federation import ( |
|
ReplicationCleanRoomRestServlet, |
|
ReplicationFederationSendEventsRestServlet, |
|
ReplicationStoreRoomOnOutlierMembershipRestServlet, |
|
) |
|
from synapse.state import StateResolutionStore |
|
from synapse.storage.databases.main.events_worker import EventRedactBehaviour |
|
from synapse.types import ( |
|
JsonDict, |
|
MutableStateMap, |
|
PersistedEventPosition, |
|
RoomStreamToken, |
|
StateMap, |
|
UserID, |
|
get_domain_from_id, |
|
) |
|
from synapse.util.async_helpers import Linearizer, concurrently_execute |
|
from synapse.util.retryutils import NotRetryingDestination |
|
from synapse.util.stringutils import shortstr |
|
from synapse.visibility import filter_events_for_server |
|
|
|
if TYPE_CHECKING: |
|
from synapse.server import HomeServer |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
@attr.s(slots=True, auto_attribs=True)
class _NewEventInfo:
    """Holds information about a received event, ready for passing to _handle_new_events

    Attributes:
        event: the received event

        state: the state at that event, if we are handling a state gap
            (otherwise None)

        auth_events: the auth_event map for that event (otherwise None)
    """

    event: EventBase
    state: Optional[Sequence[EventBase]] = None
    auth_events: Optional[MutableStateMap[EventBase]] = None
|
|
|
|
|
class FederationHandler(BaseHandler): |
|
"""Handles events that originated from federation. |
|
Responsible for: |
|
a) handling received Pdus before handing them on as Events to the rest |
|
of the homeserver (including auth and state conflict resolutions) |
|
b) converting events that were produced by local clients that may need |
|
to be sent to remote homeservers. |
|
c) doing the necessary dances to invite remote users and join remote |
|
rooms. |
|
""" |
|
|
|
    def __init__(self, hs: "HomeServer"):
        """Wire up the handler's dependencies from the homeserver.

        Args:
            hs: the homeserver this handler is part of; used to look up
                storage, federation, replication and config components.
        """
        super().__init__(hs)

        self.hs = hs

        # Storage and state-resolution plumbing.
        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state
        self.federation_client = hs.get_federation_client()
        self.state_handler = hs.get_state_handler()
        self._state_resolution_handler = hs.get_state_resolution_handler()
        self.server_name = hs.hostname
        self.keyring = hs.get_keyring()
        self.action_generator = hs.get_action_generator()
        self.is_mine_id = hs.is_mine_id
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        self._message_handler = hs.get_message_handler()
        self._server_notices_mxid = hs.config.server_notices_mxid
        self.config = hs.config

        # Blacklisting-aware HTTP client, used for outbound federation-adjacent
        # requests.
        self.http_client = hs.get_proxied_blacklisted_http_client()
        self._instance_name = hs.get_instance_name()
        self._replication = hs.get_replication_data_handler()

        # Replication clients for sending events / cleaning rooms via the
        # main process.
        self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
        self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
            hs
        )

        # Device resync and outlier-membership storage happen directly on the
        # main process, but must go over replication from a worker.
        if hs.config.worker_app:
            self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client(
                hs
            )
            self._maybe_store_room_on_outlier_membership = ReplicationStoreRoomOnOutlierMembershipRestServlet.make_client(
                hs
            )
        else:
            self._device_list_updater = hs.get_device_handler().device_list_updater
            self._maybe_store_room_on_outlier_membership = (
                self.store.maybe_store_room_on_outlier_membership
            )

        # While we are joining a room, incoming PDUs for that room are queued
        # here (see on_receive_pdu); maps room_id -> list of (pdu, origin).
        self.room_queues = {}
        # Serialises fetching of missing prev_events on a per-room basis.
        self._room_pdu_linearizer = Linearizer("fed_room_pdu")

        self.third_party_event_rules = hs.get_third_party_event_rules()

        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
|
|
|
    async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None:
        """ Process a PDU received via a federation /send/ transaction, or
        via backfill of missing prev_events

        Args:
            origin (str): server which initiated the /send/ transaction. Will
                be used to fetch missing events or state.
            pdu (FrozenEvent): received PDU
            sent_to_us_directly (bool): True if this event was pushed to us; False if
                we pulled it as the result of a missing prev_event.
        """

        room_id = pdu.room_id
        event_id = pdu.event_id

        logger.info("handling received PDU: %s", pdu)

        # Short-circuit if we have already handled this event: an existing
        # non-outlier copy (or any existing copy when the incoming PDU is
        # itself an outlier) means there is nothing more to do.
        existing = await self.store.get_event(
            event_id, allow_none=True, allow_rejected=True
        )

        already_seen = existing and (
            not existing.internal_metadata.is_outlier()
            or pdu.internal_metadata.is_outlier()
        )
        if already_seen:
            logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
            return

        # Reject events with excessive prev_events/auth_events before doing any
        # further (potentially expensive) work on them.
        try:
            self._sanity_check_event(pdu)
        except SynapseError as err:
            logger.warning(
                "[%s %s] Received event failed sanity checks", room_id, event_id
            )
            raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)

        # If we're in the middle of joining this room, queue the PDU to be
        # processed once the join has completed.
        if room_id in self.room_queues:
            logger.info(
                "[%s %s] Queuing PDU from %s for now: join in progress",
                room_id,
                event_id,
                origin,
            )
            self.room_queues[room_id].append((pdu, origin))
            return

        # If we're not in the room, ditch the event entirely: we don't want to
        # process events for rooms we have left.
        is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
        if not is_in_room:
            logger.info(
                "[%s %s] Ignoring PDU from %s as we're not in the room",
                room_id,
                event_id,
                origin,
            )
            return None

        state = None

        # Outliers are exempt from the prev_event/state handling below: we do
        # not attempt to compute the state at them.
        if not pdu.internal_metadata.is_outlier():
            min_depth = await self.get_min_depth_for_context(pdu.room_id)

            # NOTE(review): min_depth may be None here (see the checks below);
            # this %d would raise at formatting time if debug logging is
            # enabled in that case — confirm.
            logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth)

            prevs = set(pdu.prev_event_ids())
            seen = await self.store.have_events_in_timeline(prevs)

            if min_depth is not None and pdu.depth < min_depth:
                # The event is below our minimum depth for this room, so mark
                # it as an outlier rather than trying to hook it into the DAG.
                pdu.internal_metadata.outlier = True
            elif min_depth is not None and pdu.depth > min_depth:
                missing_prevs = prevs - seen
                if sent_to_us_directly and missing_prevs:
                    # We only fetch missing prev_events for events that were
                    # pushed to us directly (not for ones we pulled ourselves,
                    # to avoid recursing indefinitely).
                    logger.info(
                        "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
                        room_id,
                        event_id,
                        len(missing_prevs),
                        shortstr(missing_prevs),
                    )
                    # Serialise per room so that concurrent transactions don't
                    # race to fetch the same missing events.
                    with (await self._room_pdu_linearizer.queue(pdu.room_id)):
                        logger.info(
                            "[%s %s] Acquired room lock to fetch %d missing prev_events",
                            room_id,
                            event_id,
                            len(missing_prevs),
                        )

                        try:
                            await self._get_missing_events_for_pdu(
                                origin, pdu, prevs, min_depth
                            )
                        except Exception as e:
                            raise Exception(
                                "Error fetching missing prev_events for %s: %s"
                                % (event_id, e)
                            ) from e

                        # Update the set of things we've seen after trying to
                        # fetch the missing stuff.
                        seen = await self.store.have_events_in_timeline(prevs)

                        if not prevs - seen:
                            logger.info(
                                "[%s %s] Found all missing prev_events",
                                room_id,
                                event_id,
                            )

            if prevs - seen:
                # We've still not been able to get all of the prev_events for
                # this event.
                #
                # If the sender pushed the event to us, reject it: they ought
                # to be able to tell us about its prev_events. Otherwise, fall
                # back to computing the state at the event by resolving the
                # state after each prev_event (fetching remote state for the
                # prev_events we don't have).
                if sent_to_us_directly:
                    logger.warning(
                        "[%s %s] Rejecting: failed to fetch %d prev events: %s",
                        room_id,
                        event_id,
                        len(prevs - seen),
                        shortstr(prevs - seen),
                    )
                    raise FederationError(
                        "ERROR",
                        403,
                        (
                            "Your server isn't divulging details about prev_events "
                            "referenced in this event."
                        ),
                        affected=pdu.event_id,
                    )

                logger.info(
                    "Event %s is missing prev_events: calculating state for a "
                    "backwards extremity",
                    event_id,
                )

                # Map of event_id -> event, built up as we fetch events, for
                # use by the state resolver below.
                event_map = {event_id: pdu}
                try:
                    # Get the state after each of the prev_events we *do* have.
                    ours = await self.state_store.get_state_groups_ids(room_id, seen)

                    # state_maps is a list of mappings from (type, state_key)
                    # to event_id.
                    state_maps = list(ours.values())

                    # we don't need this any more, let's delete it.
                    del ours

                    # Ask the remote server for the states we don't know about.
                    for p in prevs - seen:
                        # NOTE(review): this logs event_id rather than p, so
                        # every iteration logs the same id.
                        logger.info(
                            "Requesting state at missing prev_event %s", event_id,
                        )

                        with nested_logging_context(p):
                            (remote_state, _,) = await self._get_state_for_room(
                                origin, room_id, p, include_event_in_state=True
                            )

                            remote_state_map = {
                                (x.type, x.state_key): x.event_id for x in remote_state
                            }
                            state_maps.append(remote_state_map)

                            for x in remote_state:
                                event_map[x.event_id] = x

                    # Resolve all of the collected state maps to get the state
                    # at this event.
                    room_version = await self.store.get_room_version_id(room_id)
                    state_map = await self._state_resolution_handler.resolve_events_with_store(
                        room_id,
                        room_version,
                        state_maps,
                        event_map,
                        state_res_store=StateResolutionStore(self.store),
                    )

                    # We need to give _process_received_pdu the actual state
                    # events rather than event ids, so fetch whichever events
                    # in state_map we don't already have in event_map.
                    evs = await self.store.get_events(
                        list(state_map.values()),
                        get_prev_content=False,
                        redact_behaviour=EventRedactBehaviour.AS_IS,
                    )
                    event_map.update(evs)

                    state = [event_map[e] for e in state_map.values()]
                except Exception:
                    logger.warning(
                        "[%s %s] Error attempting to resolve state at missing "
                        "prev_events",
                        room_id,
                        event_id,
                        exc_info=True,
                    )
                    raise FederationError(
                        "ERROR",
                        403,
                        "We can't get valid state history.",
                        affected=event_id,
                    )

        await self._process_received_pdu(origin, pdu, state=state)
|
|
|
    async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
        """Try to fetch and process the missing prev_events of a received PDU.

        Asks `origin` for the events between our latest known events and the
        received PDU, then feeds each returned event back through
        on_receive_pdu (in depth order). Failures to reach the remote are
        logged and swallowed; history-check (403) failures on individual
        events are logged and skipped.

        Args:
            origin (str): Origin of the pdu. Will be called to get the missing events
            pdu: received pdu
            prevs (set(str)): List of event ids which we are missing
            min_depth (int): Minimum depth of events to return.
        """

        room_id = pdu.room_id
        event_id = pdu.event_id

        seen = await self.store.have_events_in_timeline(prevs)

        # Nothing to do if, by the time we get here, we already have all the
        # prev_events.
        if not prevs - seen:
            return

        latest_list = await self.store.get_latest_event_ids_in_room(room_id)

        # The "earliest events" we hand to the remote are our forward
        # extremities plus whichever of the prev_events we already have.
        latest = set(latest_list)
        latest |= seen

        logger.info(
            "[%s %s]: Requesting missing events between %s and %s",
            room_id,
            event_id,
            shortstr(latest),
            event_id,
        )

        try:
            # Note the limit of 10: we deliberately only fetch a bounded
            # window of missing events per incoming PDU.
            missing_events = await self.federation_client.get_missing_events(
                origin,
                room_id,
                earliest_events_ids=list(latest),
                latest_events=[pdu],
                limit=10,
                min_depth=min_depth,
                timeout=60000,
            )
        except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
            # Best-effort: if the remote won't (or can't) answer, we simply
            # carry on without the missing events.
            logger.warning(
                "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
            )
            return

        logger.info(
            "[%s %s]: Got %d prev_events: %s",
            room_id,
            event_id,
            len(missing_events),
            shortstr(missing_events),
        )

        # Process the earliest events first, so that each event's own
        # prev_events stand a chance of already being known when we reach it.
        missing_events.sort(key=lambda x: x.depth)

        for ev in missing_events:
            logger.info(
                "[%s %s] Handling received prev_event %s",
                room_id,
                event_id,
                ev.event_id,
            )
            with nested_logging_context(ev.event_id):
                try:
                    # sent_to_us_directly=False prevents recursive fetching of
                    # *these* events' missing prev_events.
                    await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
                except FederationError as e:
                    if e.code == 403:
                        # A single bad event shouldn't abort processing of the
                        # rest of the batch.
                        logger.warning(
                            "[%s %s] Received prev_event %s failed history check.",
                            room_id,
                            event_id,
                            ev.event_id,
                        )
                    else:
                        raise
|
|
|
async def _get_state_for_room( |
|
self, |
|
destination: str, |
|
room_id: str, |
|
event_id: str, |
|
include_event_in_state: bool = False, |
|
) -> Tuple[List[EventBase], List[EventBase]]: |
|
"""Requests all of the room state at a given event from a remote homeserver. |
|
|
|
Args: |
|
destination: The remote homeserver to query for the state. |
|
room_id: The id of the room we're interested in. |
|
event_id: The id of the event we want the state at. |
|
include_event_in_state: if true, the event itself will be included in the |
|
returned state event list. |
|
|
|
Returns: |
|
A list of events in the state, possibly including the event itself, and |
|
a list of events in the auth chain for the given event. |
|
""" |
|
( |
|
state_event_ids, |
|
auth_event_ids, |
|
) = await self.federation_client.get_room_state_ids( |
|
destination, room_id, event_id=event_id |
|
) |
|
|
|
desired_events = set(state_event_ids + auth_event_ids) |
|
|
|
if include_event_in_state: |
|
desired_events.add(event_id) |
|
|
|
event_map = await self._get_events_from_store_or_dest( |
|
destination, room_id, desired_events |
|
) |
|
|
|
failed_to_fetch = desired_events - event_map.keys() |
|
if failed_to_fetch: |
|
logger.warning( |
|
"Failed to fetch missing state/auth events for %s %s", |
|
event_id, |
|
failed_to_fetch, |
|
) |
|
|
|
remote_state = [ |
|
event_map[e_id] for e_id in state_event_ids if e_id in event_map |
|
] |
|
|
|
if include_event_in_state: |
|
remote_event = event_map.get(event_id) |
|
if not remote_event: |
|
raise Exception("Unable to get missing prev_event %s" % (event_id,)) |
|
if remote_event.is_state() and remote_event.rejected_reason is None: |
|
remote_state.append(remote_event) |
|
|
|
auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] |
|
auth_chain.sort(key=lambda e: e.depth) |
|
|
|
return remote_state, auth_chain |
|
|
|
async def _get_events_from_store_or_dest( |
|
self, destination: str, room_id: str, event_ids: Iterable[str] |
|
) -> Dict[str, EventBase]: |
|
"""Fetch events from a remote destination, checking if we already have them. |
|
|
|
Persists any events we don't already have as outliers. |
|
|
|
If we fail to fetch any of the events, a warning will be logged, and the event |
|
will be omitted from the result. Likewise, any events which turn out not to |
|
be in the given room. |
|
|
|
This function *does not* automatically get missing auth events of the |
|
newly fetched events. Callers must include the full auth chain of |
|
of the missing events in the `event_ids` argument, to ensure that any |
|
missing auth events are correctly fetched. |
|
|
|
Returns: |
|
map from event_id to event |
|
""" |
|
fetched_events = await self.store.get_events(event_ids, allow_rejected=True) |
|
|
|
missing_events = set(event_ids) - fetched_events.keys() |
|
|
|
if missing_events: |
|
logger.debug( |
|
"Fetching unknown state/auth events %s for room %s", |
|
missing_events, |
|
room_id, |
|
) |
|
|
|
await self._get_events_and_persist( |
|
destination=destination, room_id=room_id, events=missing_events |
|
) |
|
|
|
|
|
|
|
fetched_events.update( |
|
(await self.store.get_events(missing_events, allow_rejected=True)) |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
bad_events = [ |
|
(event_id, event.room_id) |
|
for event_id, event in fetched_events.items() |
|
if event.room_id != room_id |
|
] |
|
|
|
for bad_event_id, bad_room_id in bad_events: |
|
|
|
|
|
|
|
logger.warning( |
|
"Remote server %s claims event %s in room %s is an auth/state " |
|
"event in room %s", |
|
destination, |
|
bad_event_id, |
|
bad_room_id, |
|
room_id, |
|
) |
|
|
|
del fetched_events[bad_event_id] |
|
|
|
return fetched_events |
|
|
|
    async def _process_received_pdu(
        self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]],
    ):
        """ Called when we have a new pdu. We need to do auth checks and put it
        through the StateHandler.

        Args:
            origin: server sending the event

            event: event to be persisted

            state: Normally None, but if we are handling a gap in the graph
                (ie, we are missing one or more prev_events), the resolved state at the
                event
        """
        room_id = event.room_id
        event_id = event.event_id

        logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)

        try:
            await self._handle_new_event(origin, event, state=state)
        except AuthError as e:
            # Surface auth failures to the sending server as a federation
            # error rather than an internal one.
            raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)

        # For encrypted messages, check that the sending device is one we know
        # about; if not (or if its claimed sender_key doesn't match our cached
        # keys), schedule a resync of the sender's device list.
        if event.type == EventTypes.Encrypted:
            device_id = event.content.get("device_id")
            sender_key = event.content.get("sender_key")

            cached_devices = await self.store.get_cached_devices_for_user(event.sender)

            resync = False  # Whether we should resync device lists.

            device = None
            if device_id is not None:
                device = cached_devices.get(device_id)
                if device is None:
                    logger.info(
                        "Received event from remote device not in our cache: %s %s",
                        event.sender,
                        device_id,
                    )
                    resync = True

            # We also check if the `sender_key` matches what we expect.
            if sender_key is not None:
                # The set of keys we consider acceptable for this sender;
                # which keys those are depends on how much we know about the
                # claimed device.
                current_keys = []

                if device:
                    keys = device.get("keys", {}).get("keys", {})

                    if (
                        event.content.get("algorithm")
                        == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
                    ):
                        # For this algorithm we expect a curve25519 key for
                        # the claimed device.
                        key_name = "curve25519:%s" % (device_id,)
                        current_keys = [keys.get(key_name)]
                    else:
                        # For other algorithms, accept any of the device's keys.
                        current_keys = keys.values()
                elif device_id:
                    # We don't have any keys for the device ID: the
                    # unknown-device branch above has already set resync, so
                    # leave current_keys empty.
                    pass
                else:
                    # No device_id was claimed; check the key against all keys
                    # we have cached for this user.
                    current_keys = [
                        key
                        for device in cached_devices.values()
                        for key in device.get("keys", {}).get("keys", {}).values()
                    ]

                if sender_key not in current_keys:
                    logger.info(
                        "Received event from remote device with unexpected sender key: %s %s: %s",
                        event.sender,
                        device_id or "<no device_id>",
                        sender_key,
                    )
                    resync = True

            if resync:
                # Fire-and-forget: the resync must not block PDU processing.
                run_as_background_process(
                    "resync_device_due_to_pdu", self._resync_device, event.sender
                )
|
|
|
async def _resync_device(self, sender: str) -> None: |
|
"""We have detected that the device list for the given user may be out |
|
of sync, so we try and resync them. |
|
""" |
|
|
|
try: |
|
await self.store.mark_remote_user_device_cache_as_stale(sender) |
|
|
|
|
|
if self.config.worker_app: |
|
await self._user_device_resync(user_id=sender) |
|
else: |
|
await self._device_list_updater.user_device_resync(sender) |
|
except Exception: |
|
logger.exception("Failed to resync device for %s", sender) |
|
|
|
@log_function |
|
async def backfill(self, dest, room_id, limit, extremities): |
|
""" Trigger a backfill request to `dest` for the given `room_id` |
|
|
|
This will attempt to get more events from the remote. If the other side |
|
has no new events to offer, this will return an empty list. |
|
|
|
As the events are received, we check their signatures, and also do some |
|
sanity-checking on them. If any of the backfilled events are invalid, |
|
this method throws a SynapseError. |
|
|
|
TODO: make this more useful to distinguish failures of the remote |
|
server from invalid events (there is probably no point in trying to |
|
re-fetch invalid events from every other HS in the room.) |
|
""" |
|
if dest == self.server_name: |
|
raise SynapseError(400, "Can't backfill from self.") |
|
|
|
events = await self.federation_client.backfill( |
|
dest, room_id, limit=limit, extremities=extremities |
|
) |
|
|
|
if not events: |
|
return [] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
seen_events = await self.store.have_events_in_timeline( |
|
{e.event_id for e in events} |
|
) |
|
|
|
events = [e for e in events if e.event_id not in seen_events] |
|
|
|
if not events: |
|
return [] |
|
|
|
event_map = {e.event_id: e for e in events} |
|
|
|
event_ids = {e.event_id for e in events} |
|
|
|
|
|
|
|
|
|
edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids] |
|
|
|
logger.info("backfill: Got %d events with %d edges", len(events), len(edges)) |
|
|
|
|
|
|
|
auth_events = {} |
|
state_events = {} |
|
events_to_state = {} |
|
for e_id in edges: |
|
state, auth = await self._get_state_for_room( |
|
destination=dest, |
|
room_id=room_id, |
|
event_id=e_id, |
|
include_event_in_state=False, |
|
) |
|
auth_events.update({a.event_id: a for a in auth}) |
|
auth_events.update({s.event_id: s for s in state}) |
|
state_events.update({s.event_id: s for s in state}) |
|
events_to_state[e_id] = state |
|
|
|
required_auth = { |
|
a_id |
|
for event in events |
|
+ list(state_events.values()) |
|
+ list(auth_events.values()) |
|
for a_id in event.auth_event_ids() |
|
} |
|
auth_events.update( |
|
{e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} |
|
) |
|
|
|
ev_infos = [] |
|
|
|
|
|
|
|
for e_id in events_to_state: |
|
|
|
|
|
ev = event_map[e_id] |
|
assert not ev.internal_metadata.is_outlier() |
|
|
|
ev_infos.append( |
|
_NewEventInfo( |
|
event=ev, |
|
state=events_to_state[e_id], |
|
auth_events={ |
|
( |
|
auth_events[a_id].type, |
|
auth_events[a_id].state_key, |
|
): auth_events[a_id] |
|
for a_id in ev.auth_event_ids() |
|
if a_id in auth_events |
|
}, |
|
) |
|
) |
|
|
|
if ev_infos: |
|
await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) |
|
|
|
|
|
events.sort(key=lambda e: e.depth) |
|
|
|
for event in events: |
|
if event in events_to_state: |
|
continue |
|
|
|
|
|
|
|
assert not event.internal_metadata.is_outlier() |
|
|
|
|
|
|
|
|
|
await self._handle_new_event(dest, event, backfilled=True) |
|
|
|
return events |
|
|
|
    async def maybe_backfill(
        self, room_id: str, current_depth: int, limit: int
    ) -> bool:
        """Checks the database to see if we should backfill before paginating,
        and if so do.

        Args:
            room_id
            current_depth: The depth from which we're paginating from. This is
                used to decide if we should backfill and what extremities to
                use.
            limit: The number of events that the pagination request will
                return. This is used as part of the heuristic to decide if we
                should back paginate.

        Returns:
            True if we successfully backfilled from some server, False
            otherwise.
        """
        # Maps event_id -> depth for the backwards extremities of the room.
        extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)

        if not extremities:
            logger.debug("Not backfilling as no extremeties found.")
            return False

        # Check that we actually have visibility of at least one successor of
        # the extremities: there's no point backfilling history we wouldn't be
        # allowed to see anyway.
        forward_events = await self.store.get_successor_events(list(extremities))

        extremities_events = await self.store.get_events(
            forward_events,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            get_prev_content=False,
        )

        # We unset `filter_out_erased_senders` as we might otherwise get false
        # positives from users having been erased.
        filtered_extremities = await filter_events_for_server(
            self.storage,
            self.server_name,
            list(extremities_events.values()),
            redact=False,
            check_history_visibility_only=True,
        )

        if not filtered_extremities:
            return False

        # Sort extremities deepest-first.
        sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
        max_depth = sorted_extremeties_tuple[0][1]

        # Heuristic: if the deepest extremity is well beyond what this
        # pagination request needs (more than 2 * limit deeper than
        # current_depth), don't bother backfilling yet.
        if current_depth - 2 * limit > max_depth:
            logger.debug(
                "Not backfilling as we don't need to. %d < %d - 2 * %d",
                max_depth,
                current_depth,
                limit,
            )
            return False

        logger.debug(
            "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
            room_id,
            current_depth,
            max_depth,
            sorted_extremeties_tuple,
        )

        # Prefer extremities that are at or before the point we're paginating
        # from; extremities deeper than current_depth wouldn't help this
        # request.
        filtered_sorted_extremeties_tuple = [
            t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
        ]

        # If no extremity qualifies, fall back to the unfiltered list rather
        # than doing nothing.
        if filtered_sorted_extremeties_tuple:
            sorted_extremeties_tuple = filtered_sorted_extremeties_tuple

        # We only ask for the five deepest extremities at a time.
        extremities = dict(sorted_extremeties_tuple[:5])

        # Work out which other servers are likely to be in the room, based on
        # the current room state, so we know who to ask.
        curr_state = await self.state_handler.get_current_state(room_id)

        def get_domains_from_state(state):
            """Get joined domains from state

            Args:
                state (dict[tuple, FrozenEvent]): State map from type/state
                    key to event.

            Returns:
                list[tuple[str, int]]: Returns a list of servers with the
                    lowest depth of their joins. Sorted by lowest depth first.
            """
            joined_users = [
                (state_key, int(event.depth))
                for (e_type, state_key), event in state.items()
                if e_type == EventTypes.Member and event.membership == Membership.JOIN
            ]

            joined_domains = {}
            for u, d in joined_users:
                try:
                    dom = get_domain_from_id(u)
                    old_d = joined_domains.get(dom)
                    if old_d:
                        joined_domains[dom] = min(d, old_d)
                    else:
                        joined_domains[dom] = d
                except Exception:
                    # Ignore malformed user IDs: they shouldn't abort the
                    # whole domain calculation.
                    pass

            return sorted(joined_domains.items(), key=lambda d: d[1])

        curr_domains = get_domains_from_state(curr_state)

        # Never try to backfill from ourselves.
        likely_domains = [
            domain for domain, depth in curr_domains if domain != self.server_name
        ]

        async def try_backfill(domains):
            """Try each domain in turn until one backfill succeeds.

            4xx responses are re-raised (retrying other servers won't help);
            everything else is logged and we move on to the next domain.
            """
            for dom in domains:
                try:
                    await self.backfill(
                        dom, room_id, limit=100, extremities=extremities
                    )
                    # If this succeeded then we probably already have the
                    # appropriate stuff.
                    return True
                except SynapseError as e:
                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except HttpResponseException as e:
                    if 400 <= e.code < 500:
                        raise e.to_synapse_error()

                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except CodeMessageException as e:
                    if 400 <= e.code < 500:
                        raise

                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except NotRetryingDestination as e:
                    logger.info(str(e))
                    continue
                except RequestSendFailed as e:
                    logger.info("Failed to get backfill from %s because %s", dom, e)
                    continue
                except FederationDeniedError as e:
                    logger.info(e)
                    continue
                except Exception as e:
                    logger.exception("Failed to backfill from %s because %s", dom, e)
                    continue

            return False

        success = await try_backfill(likely_domains)
        if success:
            return True

        # Huh, well *those* domains didn't work out. Lets try some domains
        # from the time of the backwards extremities instead.
        tried_domains = set(likely_domains)
        tried_domains.add(self.server_name)

        event_ids = list(extremities.keys())

        logger.debug("calling resolve_state_groups in _maybe_backfill")
        resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
        states = await make_deferred_yieldable(
            defer.gatherResults(
                [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
            )
        )

        # A map of extremity event_id -> (state map of event_ids).
        states = dict(zip(event_ids, [s.state for s in states]))

        # Resolve the state-map event ids into actual events so we can read
        # membership off them.
        state_map = await self.store.get_events(
            [e_id for ids in states.values() for e_id in ids.values()],
            get_prev_content=False,
        )
        states = {
            key: {
                k: state_map[e_id]
                for k, e_id in state_dict.items()
                if e_id in state_map
            }
            for key, state_dict in states.items()
        }

        for e_id, _ in sorted_extremeties_tuple:
            likely_domains = get_domains_from_state(states[e_id])

            success = await try_backfill(
                [dom for dom, _ in likely_domains if dom not in tried_domains]
            )
            if success:
                return True

            tried_domains.update(dom for dom, _ in likely_domains)

        return False
|
|
|
    async def _get_events_and_persist(
        self, destination: str, room_id: str, events: Iterable[str]
    ):
        """Fetch the given events from a server, and persist them as outliers.

        This function *does not* recursively get missing auth events of the
        newly fetched events. Callers must include in the `events` argument
        any missing events from the auth chain.

        Logs a warning if we can't find the given event.
        """

        room_version = await self.store.get_room_version(room_id)

        # Map of event_id -> successfully fetched event, filled in by the
        # concurrent fetches below.
        event_map = {}

        async def get_event(event_id: str):
            """Fetch a single event from `destination`; failures are logged
            and the event is simply omitted from event_map."""
            with nested_logging_context(event_id):
                try:
                    event = await self.federation_client.get_pdu(
                        [destination], event_id, room_version, outlier=True,
                    )
                    if event is None:
                        logger.warning(
                            "Server %s didn't return event %s", destination, event_id,
                        )
                        return

                    event_map[event.event_id] = event

                except Exception as e:
                    logger.warning(
                        "Error fetching missing state/auth event %s: %s %s",
                        event_id,
                        type(e),
                        e,
                    )

        # Fetch up to 5 events at a time.
        await concurrently_execute(get_event, events, 5)

        # Build the auth maps for the fetched events. Auth events may be among
        # the events we just fetched, or already persisted locally.
        auth_events = [
            aid
            for event in event_map.values()
            for aid in event.auth_event_ids()
            if aid not in event_map
        ]
        persisted_events = await self.store.get_events(
            auth_events, allow_rejected=True,
        )

        event_infos = []
        for event in event_map.values():
            auth = {}
            for auth_event_id in event.auth_event_ids():
                ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
                if ae:
                    auth[(ae.type, ae.state_key)] = ae
                else:
                    # Missing auth events are logged but don't block
                    # persistence of the event itself.
                    logger.info("Missing auth event %s", auth_event_id)

            event_infos.append(_NewEventInfo(event, None, auth))

        await self._handle_new_events(
            destination, room_id, event_infos,
        )
|
|
|
def _sanity_check_event(self, ev): |
|
""" |
|
Do some early sanity checks of a received event |
|
|
|
In particular, checks it doesn't have an excessive number of |
|
prev_events or auth_events, which could cause a huge state resolution |
|
or cascade of event fetches. |
|
|
|
Args: |
|
ev (synapse.events.EventBase): event to be checked |
|
|
|
Returns: None |
|
|
|
Raises: |
|
SynapseError if the event does not pass muster |
|
""" |
|
if len(ev.prev_event_ids()) > 20: |
|
logger.warning( |
|
"Rejecting event %s which has %i prev_events", |
|
ev.event_id, |
|
len(ev.prev_event_ids()), |
|
) |
|
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events") |
|
|
|
if len(ev.auth_event_ids()) > 10: |
|
logger.warning( |
|
"Rejecting event %s which has %i auth_events", |
|
ev.event_id, |
|
len(ev.auth_event_ids()), |
|
) |
|
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events") |
|
|
|
async def send_invite(self, target_host, event): |
|
""" Sends the invite to the remote server for signing. |
|
|
|
Invites must be signed by the invitee's server before distribution. |
|
""" |
|
pdu = await self.federation_client.send_invite( |
|
destination=target_host, |
|
room_id=event.room_id, |
|
event_id=event.event_id, |
|
pdu=event, |
|
) |
|
|
|
return pdu |
|
|
|
async def on_event_auth(self, event_id: str) -> List[EventBase]: |
|
event = await self.store.get_event(event_id) |
|
auth = await self.store.get_auth_chain( |
|
list(event.auth_event_ids()), include_given=True |
|
) |
|
return list(auth) |
|
|
|
    async def do_invite_join(
        self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict
    ) -> Tuple[str, int]:
        """ Attempts to join the `joinee` to the room `room_id` via the
        servers contained in `target_hosts`.

        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the state at that
        event and the auth_chains.

        We suspend processing of any received events from this room until we
        have finished processing the join.

        Args:
            target_hosts: List of servers to attempt to join the room with.

            room_id: The ID of the room to join.

            joinee: The User ID of the joining user.

            content: The event content to use for the join event.

        Returns:
            The event ID of the persisted join event and its stream position.
        """
        # Joins must be processed on the main process (this mutates
        # self.room_queues and persists events directly).
        assert self.config.worker.worker_app is None

        logger.debug("Joining %s to %s", joinee, room_id)

        # /make_join/: ask a resident server to build a partial join event
        # for us, advertising every room version we know about.
        origin, event, room_version_obj = await self._make_and_verify_event(
            target_hosts,
            room_id,
            joinee,
            "join",
            content,
            params={"ver": KNOWN_ROOM_VERSIONS},
        )

        # Queue up any PDUs that arrive for this room while the join is in
        # flight; they are replayed in the `finally` block below.
        assert room_id not in self.room_queues
        self.room_queues[room_id] = []

        await self._clean_room_for_join(room_id)

        # NOTE(review): `handled_events` is populated below but never read in
        # this method — appears to be vestigial.
        handled_events = set()

        try:
            # Prefer the server that built the event for us: move `origin` to
            # the front of the candidate list (no-op if it isn't listed).
            host_list = list(target_hosts)
            try:
                host_list.remove(origin)
                host_list.insert(0, origin)
            except ValueError:
                pass

            # /send_join/: returns the room state and auth chain at the join.
            ret = await self.federation_client.send_join(
                host_list, event, room_version_obj
            )

            # `origin` may differ from the make_join server if send_join was
            # answered by another host.
            origin = ret["origin"]
            state = ret["state"]
            auth_chain = ret["auth_chain"]
            auth_chain.sort(key=lambda e: e.depth)

            handled_events.update([s.event_id for s in state])
            handled_events.update([a.event_id for a in auth_chain])
            handled_events.add(event.event_id)

            logger.debug("do_invite_join auth_chain: %s", auth_chain)
            logger.debug("do_invite_join state: %s", state)

            logger.debug("do_invite_join event: %s", event)

            # Make sure a row for the room exists before we persist events
            # into it.
            await self.store.upsert_room_on_join(
                room_id=room_id, room_version=room_version_obj,
            )

            max_stream_id = await self._persist_auth_tree(
                origin, room_id, auth_chain, state, event, room_version_obj
            )

            # Wait for the event-persister worker for this room to catch up,
            # so the join is visible before we return.
            await self._replication.wait_for_stream_position(
                self.config.worker.events_shard_config.get_instance(room_id),
                "events",
                max_stream_id,
            )

            # If this room upgrades an older room, migrate relevant state
            # (e.g. push rules, tags) across.
            predecessor = await self.store.get_room_predecessor(room_id)
            if not predecessor or not isinstance(predecessor.get("room_id"), str):
                return event.event_id, max_stream_id
            old_room_id = predecessor["room_id"]
            logger.debug(
                "Found predecessor for %s during remote join: %s", room_id, old_room_id
            )

            member_handler = self.hs.get_room_member_handler()
            await member_handler.transfer_room_state_on_room_upgrade(
                old_room_id, room_id
            )

            logger.debug("Finished joining %s to %s", joinee, room_id)
            return event.event_id, max_stream_id
        finally:
            # Whatever happened, release the queue and replay any PDUs that
            # arrived while we were joining (in the background).
            room_queue = self.room_queues[room_id]
            del self.room_queues[room_id]

            run_in_background(self._handle_queued_pdus, room_queue)
|
|
|
async def _handle_queued_pdus(self, room_queue): |
|
"""Process PDUs which got queued up while we were busy send_joining. |
|
|
|
Args: |
|
room_queue (list[FrozenEvent, str]): list of PDUs to be processed |
|
and the servers that sent them |
|
""" |
|
for p, origin in room_queue: |
|
try: |
|
logger.info( |
|
"Processing queued PDU %s which was received " |
|
"while we were joining %s", |
|
p.event_id, |
|
p.room_id, |
|
) |
|
with nested_logging_context(p.event_id): |
|
await self.on_receive_pdu(origin, p, sent_to_us_directly=True) |
|
except Exception as e: |
|
logger.warning( |
|
"Error handling queued PDU %s from %s: %s", p.event_id, origin, e |
|
) |
|
|
|
    async def on_make_join_request(
        self, origin: str, room_id: str, user_id: str
    ) -> EventBase:
        """ We've received a /make_join/ request, so we create a partial
        join event for the room and return that. We do *not* persist or
        process it until the other server has signed it and sent it back.

        Args:
            origin: The (verified) server name of the requesting server.
            room_id: Room to create join event in
            user_id: The user to create the join for

        Raises:
            SynapseError: if the user is not on the requesting server, or
                event creation/auth fails.
            NotFoundError: if we are no longer resident in the room.
        """
        # Only the user's own homeserver may request a join on their behalf.
        if get_domain_from_id(user_id) != origin:
            logger.info(
                "Got /make_join request for user %r from different origin %s, ignoring",
                user_id,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        room_version = await self.store.get_room_version_id(room_id)

        # Refuse to hand out join events for rooms we have left; the
        # requester should retry against a server that is still resident.
        is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
        if not is_in_room:
            logger.info(
                "Got /make_join request for room %s we are no longer in", room_id,
            )
            raise NotFoundError("Not an active room on this server")

        event_content = {"membership": Membership.JOIN}

        builder = self.event_builder_factory.new(
            room_version,
            {
                "type": EventTypes.Member,
                "content": event_content,
                "room_id": room_id,
                "sender": user_id,
                "state_key": user_id,
            },
        )

        try:
            event, context = await self.event_creation_handler.create_new_client_event(
                builder=builder
            )
        except SynapseError as e:
            logger.warning("Failed to create join to %s because %s", room_id, e)
            raise

        # Check the event passes auth against the current room state.
        # Signature checks are skipped: the event is not signed yet — that is
        # the whole point of /make_join/.
        await self.auth.check_from_context(
            room_version, event, context, do_sig_check=False
        )

        return event
|
|
|
    async def on_send_join_request(self, origin, pdu):
        """ We have received a join event for a room. Fully process it and
        respond with the current state and auth chains.

        Args:
            origin (str): the server that sent us the signed join event.
            pdu (EventBase): the join event itself.

        Returns:
            dict: with "state" and "auth_chain" entries for the joining
                server (the state *before* the join, plus its auth chain).

        Raises:
            SynapseError: if the event's sender is not on `origin`.
        """
        event = pdu

        logger.debug(
            "on_send_join_request from %s: Got event: %s, signatures: %s",
            origin,
            event.event_id,
            event.signatures,
        )

        # The joining user must belong to the server sending the event.
        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got /send_join request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        # This event is being pushed to us directly, so it is not an outlier.
        event.internal_metadata.outlier = False

        # Record which server the event arrived from so it can be treated as
        # sent on that server's behalf.
        event.internal_metadata.send_on_behalf_of = origin

        context = await self._handle_new_event(origin, event)

        logger.debug(
            "on_send_join_request: After _handle_new_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )

        # Return the state *before* the join (hence prev_state_ids, not
        # current) plus its auth chain, for the joining server to verify.
        prev_state_ids = await context.get_prev_state_ids()

        state_ids = list(prev_state_ids.values())
        auth_chain = await self.store.get_auth_chain(state_ids)

        state = await self.store.get_events(list(prev_state_ids.values()))

        return {"state": list(state.values()), "auth_chain": auth_chain}
|
|
|
    async def on_invite_request(
        self, origin: str, event: EventBase, room_version: RoomVersion
    ):
        """ We've got an invite event. Process and persist it. Sign it.

        Respond with the now signed event.

        Args:
            origin: the (verified) server that sent the invite.
            event: the invite event.
            room_version: the version of the room the invite is for.

        Raises:
            SynapseError: if any of the validity/policy checks below fail.
        """
        # --- Validity and policy checks, cheapest first -------------------
        if event.state_key is None:
            raise SynapseError(400, "The invite event did not have a state key")

        is_blocked = await self.store.is_room_blocked(event.room_id)
        if is_blocked:
            raise SynapseError(403, "This room has been blocked on this server")

        # Server-wide config switch: reject all federated invites.
        if self.hs.config.block_non_admin_invites:
            raise SynapseError(403, "This server does not accept room invites")

        if not self.spam_checker.user_may_invite(
            event.sender, event.state_key, event.room_id
        ):
            raise SynapseError(
                403, "This user is not permitted to send invites to this server/user"
            )

        membership = event.content.get("membership")
        if event.type != EventTypes.Member or membership != Membership.INVITE:
            raise SynapseError(400, "The event was not an m.room.member invite event")

        # The inviter must belong to the server that sent us the event.
        sender_domain = get_domain_from_id(event.sender)
        if sender_domain != origin:
            raise SynapseError(
                400, "The invite event was not from the server sending it"
            )

        # The invitee must be one of our users.
        if not self.is_mine_id(event.state_key):
            raise SynapseError(400, "The invite event must be for this server")

        # The server-notices user can never be invited from elsewhere.
        if event.state_key == self._server_notices_mxid:
            raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")

        # Make sure we have a room row before persisting the membership.
        await self._maybe_store_room_on_outlier_membership(
            room_id=event.room_id, room_version=room_version
        )

        # The invite stands alone — we have no state for the room — so it is
        # stored as an out-of-band outlier membership.
        event.internal_metadata.outlier = True
        event.internal_metadata.out_of_band_membership = True

        # Sign the event so the inviting server can distribute it.
        event.signatures.update(
            compute_event_signature(
                room_version,
                event.get_pdu_json(),
                self.hs.hostname,
                self.hs.signing_key,
            )
        )

        context = await self.state_handler.compute_event_context(event)
        await self.persist_events_and_notify(event.room_id, [(event, context)])

        return event
|
|
|
async def do_remotely_reject_invite( |
|
self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict |
|
) -> Tuple[EventBase, int]: |
|
origin, event, room_version = await self._make_and_verify_event( |
|
target_hosts, room_id, user_id, "leave", content=content |
|
) |
|
|
|
|
|
event.internal_metadata.outlier = True |
|
event.internal_metadata.out_of_band_membership = True |
|
|
|
|
|
|
|
host_list = list(target_hosts) |
|
try: |
|
host_list.remove(origin) |
|
host_list.insert(0, origin) |
|
except ValueError: |
|
pass |
|
|
|
await self.federation_client.send_leave(host_list, event) |
|
|
|
context = await self.state_handler.compute_event_context(event) |
|
stream_id = await self.persist_events_and_notify( |
|
event.room_id, [(event, context)] |
|
) |
|
|
|
return event, stream_id |
|
|
|
async def _make_and_verify_event( |
|
self, |
|
target_hosts: Iterable[str], |
|
room_id: str, |
|
user_id: str, |
|
membership: str, |
|
content: JsonDict = {}, |
|
params: Optional[Dict[str, Union[str, Iterable[str]]]] = None, |
|
) -> Tuple[str, EventBase, RoomVersion]: |
|
( |
|
origin, |
|
event, |
|
room_version, |
|
) = await self.federation_client.make_membership_event( |
|
target_hosts, room_id, user_id, membership, content, params=params |
|
) |
|
|
|
logger.debug("Got response to make_%s: %s", membership, event) |
|
|
|
|
|
|
|
assert event.type == EventTypes.Member |
|
assert event.user_id == user_id |
|
assert event.state_key == user_id |
|
assert event.room_id == room_id |
|
return origin, event, room_version |
|
|
|
    async def on_make_leave_request(
        self, origin: str, room_id: str, user_id: str
    ) -> EventBase:
        """ We've received a /make_leave/ request, so we create a partial
        leave event for the room and return that. We do *not* persist or
        process it until the other server has signed it and sent it back.

        Args:
            origin: The (verified) server name of the requesting server.
            room_id: Room to create leave event in
            user_id: The user to create the leave for

        Raises:
            SynapseError: if the user is not on the requesting server.
            AuthError: if the leave event fails auth against current state.
        """
        # Only the user's own homeserver may request a leave on their behalf.
        if get_domain_from_id(user_id) != origin:
            logger.info(
                "Got /make_leave request for user %r from different origin %s, ignoring",
                user_id,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        room_version = await self.store.get_room_version_id(room_id)
        builder = self.event_builder_factory.new(
            room_version,
            {
                "type": EventTypes.Member,
                "content": {"membership": Membership.LEAVE},
                "room_id": room_id,
                "sender": user_id,
                "state_key": user_id,
            },
        )

        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )

        try:
            # Check the event passes auth against the current room state.
            # Signature checks are skipped: the event is not signed yet.
            await self.auth.check_from_context(
                room_version, event, context, do_sig_check=False
            )
        except AuthError as e:
            logger.warning("Failed to create new leave %r because %s", event, e)
            raise e

        return event
|
|
|
async def on_send_leave_request(self, origin, pdu): |
|
""" We have received a leave event for a room. Fully process it.""" |
|
event = pdu |
|
|
|
logger.debug( |
|
"on_send_leave_request: Got event: %s, signatures: %s", |
|
event.event_id, |
|
event.signatures, |
|
) |
|
|
|
if get_domain_from_id(event.sender) != origin: |
|
logger.info( |
|
"Got /send_leave request for user %r from different origin %s", |
|
event.sender, |
|
origin, |
|
) |
|
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) |
|
|
|
event.internal_metadata.outlier = False |
|
|
|
await self._handle_new_event(origin, event) |
|
|
|
logger.debug( |
|
"on_send_leave_request: After _handle_new_event: %s, sigs: %s", |
|
event.event_id, |
|
event.signatures, |
|
) |
|
|
|
return None |
|
|
|
async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]: |
|
"""Returns the state at the event. i.e. not including said event. |
|
""" |
|
|
|
event = await self.store.get_event(event_id, check_room_id=room_id) |
|
|
|
state_groups = await self.state_store.get_state_groups(room_id, [event_id]) |
|
|
|
if state_groups: |
|
_, state = list(state_groups.items()).pop() |
|
results = {(e.type, e.state_key): e for e in state} |
|
|
|
if event.is_state(): |
|
|
|
if "replaces_state" in event.unsigned: |
|
prev_id = event.unsigned["replaces_state"] |
|
if prev_id != event.event_id: |
|
prev_event = await self.store.get_event(prev_id) |
|
results[(event.type, event.state_key)] = prev_event |
|
else: |
|
del results[(event.type, event.state_key)] |
|
|
|
res = list(results.values()) |
|
return res |
|
else: |
|
return [] |
|
|
|
async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: |
|
"""Returns the state at the event. i.e. not including said event. |
|
""" |
|
event = await self.store.get_event(event_id, check_room_id=room_id) |
|
|
|
state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id]) |
|
|
|
if state_groups: |
|
_, state = list(state_groups.items()).pop() |
|
results = state |
|
|
|
if event.is_state(): |
|
|
|
if "replaces_state" in event.unsigned: |
|
prev_id = event.unsigned["replaces_state"] |
|
if prev_id != event.event_id: |
|
results[(event.type, event.state_key)] = prev_id |
|
else: |
|
results.pop((event.type, event.state_key), None) |
|
|
|
return list(results.values()) |
|
else: |
|
return [] |
|
|
|
@log_function |
|
async def on_backfill_request( |
|
self, origin: str, room_id: str, pdu_list: List[str], limit: int |
|
) -> List[EventBase]: |
|
in_room = await self.auth.check_host_in_room(room_id, origin) |
|
if not in_room: |
|
raise AuthError(403, "Host not in room.") |
|
|
|
|
|
limit = min(limit, 100) |
|
|
|
events = await self.store.get_backfill_events(room_id, pdu_list, limit) |
|
|
|
events = await filter_events_for_server(self.storage, origin, events) |
|
|
|
return events |
|
|
|
@log_function |
|
async def get_persisted_pdu( |
|
self, origin: str, event_id: str |
|
) -> Optional[EventBase]: |
|
"""Get an event from the database for the given server. |
|
|
|
Args: |
|
origin: hostname of server which is requesting the event; we |
|
will check that the server is allowed to see it. |
|
event_id: id of the event being requested |
|
|
|
Returns: |
|
None if we know nothing about the event; otherwise the (possibly-redacted) event. |
|
|
|
Raises: |
|
AuthError if the server is not currently in the room |
|
""" |
|
event = await self.store.get_event( |
|
event_id, allow_none=True, allow_rejected=True |
|
) |
|
|
|
if event: |
|
in_room = await self.auth.check_host_in_room(event.room_id, origin) |
|
if not in_room: |
|
raise AuthError(403, "Host not in room.") |
|
|
|
events = await filter_events_for_server(self.storage, origin, [event]) |
|
event = events[0] |
|
return event |
|
else: |
|
return None |
|
|
|
async def get_min_depth_for_context(self, context): |
|
return await self.store.get_min_depth(context) |
|
|
|
    async def _handle_new_event(
        self, origin, event, state=None, auth_events=None, backfilled=False
    ):
        """Prepare (auth-check and build a context for) and persist a single
        received event.

        Args:
            origin (str): server we received the event from.
            event (EventBase): the event to process.
            state (Optional[Iterable[EventBase]]): the state at the event, if
                we don't have all its prev_events.
            auth_events (Optional[MutableStateMap[EventBase]]): claimed auth
                events, used e.g. for outliers.
            backfilled (bool): whether the event came via backfill.

        Returns:
            EventContext: the context computed for the event.
        """
        context = await self._prep_event(
            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
        )

        try:
            # Push actions are only generated for live, accepted events.
            if (
                not event.internal_metadata.is_outlier()
                and not backfilled
                and not context.rejected
            ):
                await self.action_generator.handle_push_actions_for_event(
                    event, context
                )

            await self.persist_events_and_notify(
                event.room_id, [(event, context)], backfilled=backfilled
            )
        except Exception:
            # Persisting failed: clean up any staged push actions in the
            # background, then let the error propagate.
            run_in_background(
                self.store.remove_push_actions_from_staging, event.event_id
            )
            raise

        return context
|
|
|
async def _handle_new_events( |
|
self, |
|
origin: str, |
|
room_id: str, |
|
event_infos: Iterable[_NewEventInfo], |
|
backfilled: bool = False, |
|
) -> None: |
|
"""Creates the appropriate contexts and persists events. The events |
|
should not depend on one another, e.g. this should be used to persist |
|
a bunch of outliers, but not a chunk of individual events that depend |
|
on each other for state calculations. |
|
|
|
Notifies about the events where appropriate. |
|
""" |
|
|
|
async def prep(ev_info: _NewEventInfo): |
|
event = ev_info.event |
|
with nested_logging_context(suffix=event.event_id): |
|
res = await self._prep_event( |
|
origin, |
|
event, |
|
state=ev_info.state, |
|
auth_events=ev_info.auth_events, |
|
backfilled=backfilled, |
|
) |
|
return res |
|
|
|
contexts = await make_deferred_yieldable( |
|
defer.gatherResults( |
|
[run_in_background(prep, ev_info) for ev_info in event_infos], |
|
consumeErrors=True, |
|
) |
|
) |
|
|
|
await self.persist_events_and_notify( |
|
room_id, |
|
[ |
|
(ev_info.event, context) |
|
for ev_info, context in zip(event_infos, contexts) |
|
], |
|
backfilled=backfilled, |
|
) |
|
|
|
    async def _persist_auth_tree(
        self,
        origin: str,
        room_id: str,
        auth_events: List[EventBase],
        state: List[EventBase],
        event: EventBase,
        room_version: RoomVersion,
    ) -> int:
        """Checks the auth chain is valid (and passes auth checks) for the
        state and event. Then persists the auth chain and state atomically.
        Persists the event separately. Notifies about the persisted events
        where appropriate.

        Will attempt to fetch missing auth events.

        Args:
            origin: Where the events came from
            room_id,
            auth_events
            state
            event
            room_version: The room version we expect this room to have, and
                will raise if it doesn't match the version in the create event.

        Returns:
            The stream ordering position of the persisted `event`.
        """
        # Compute an (outlier) context for every auth/state event up front.
        events_to_context = {}
        for e in itertools.chain(auth_events, state):
            e.internal_metadata.outlier = True
            ctx = await self.state_handler.compute_event_context(e)
            events_to_context[e.event_id] = ctx

        event_map = {
            e.event_id: e for e in itertools.chain(auth_events, state, [event])
        }

        # Locate the m.room.create event — everything hinges on it.
        create_event = None
        for e in auth_events:
            if (e.type, e.state_key) == (EventTypes.Create, ""):
                create_event = e
                break

        if create_event is None:
            raise SynapseError(400, "No create event in state")

        # Absence of "room_version" in the create event implies version 1.
        room_version_id = create_event.content.get(
            "room_version", RoomVersions.V1.identifier
        )

        if room_version.identifier != room_version_id:
            raise SynapseError(400, "Room version mismatch")

        # Collect auth-event ids referenced by any event but not supplied.
        missing_auth_events = set()
        for e in itertools.chain(auth_events, state, [event]):
            for e_id in e.auth_event_ids():
                if e_id not in event_map:
                    missing_auth_events.add(e_id)

        # Best-effort fetch of the missing auth events from the origin.
        for e_id in missing_auth_events:
            m_ev = await self.federation_client.get_pdu(
                [origin], e_id, room_version=room_version, outlier=True, timeout=10000,
            )
            if m_ev and m_ev.event_id == e_id:
                event_map[e_id] = m_ev
            else:
                logger.info("Failed to find auth event %r", e_id)

        # Auth-check every event against the auth events we now hold.
        for e in itertools.chain(auth_events, state, [event]):
            auth_for_e = {
                (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
                for e_id in e.auth_event_ids()
                if e_id in event_map
            }
            # Always include the create event; every event depends on it.
            if create_event:
                auth_for_e[(EventTypes.Create, "")] = create_event

            try:
                event_auth.check(room_version, e, auth_events=auth_for_e)
            except SynapseError as err:
                logger.warning("Rejecting %s because %s", e.event_id, err.msg)

                # If the event itself fails auth the whole operation fails;
                # a failing auth/state event is merely marked rejected.
                if e == event:
                    raise
                events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR

        # Persist the auth chain and state atomically...
        await self.persist_events_and_notify(
            room_id,
            [
                (e, events_to_context[e.event_id])
                for e in itertools.chain(auth_events, state)
            ],
        )

        # ...then the event itself, with a context computed from the given
        # state.
        new_event_context = await self.state_handler.compute_event_context(
            event, old_state=state
        )

        return await self.persist_events_and_notify(
            room_id, [(event, new_event_context)]
        )
|
|
|
    async def _prep_event(
        self,
        origin: str,
        event: EventBase,
        state: Optional[Iterable[EventBase]],
        auth_events: Optional[MutableStateMap[EventBase]],
        backfilled: bool,
    ) -> EventContext:
        """Build a context for a received event and run auth/soft-fail checks.

        Args:
            origin: server the event came from.
            event: the event to prepare.
            state: the state at the event, if we don't have all its
                prev_events.
            auth_events: claimed auth events, if any; computed from the state
                before the event otherwise.
            backfilled: whether the event came via backfill.

        Returns:
            The (possibly rejected) context for the event.
        """
        context = await self.state_handler.compute_event_context(event, old_state=state)

        # No claimed auth events: derive them from the state before the event.
        if not auth_events:
            prev_state_ids = await context.get_prev_state_ids()
            auth_events_ids = self.auth.compute_auth_events(
                event, prev_state_ids, for_verification=True
            )
            auth_events_x = await self.store.get_events(auth_events_ids)
            auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}

        # Special case: a membership event with no auth events near the start
        # of the room — include the create event if it directly precedes it.
        if event.type == EventTypes.Member and not event.auth_event_ids():
            if len(event.prev_event_ids()) == 1 and event.depth < 5:
                c = await self.store.get_event(
                    event.prev_event_ids()[0], allow_none=True
                )
                if c and c.type == EventTypes.Create:
                    auth_events[(c.type, c.state_key)] = c

        context = await self.do_auth(origin, event, context, auth_events=auth_events)

        # Accepted events may still be soft-failed (hidden from clients).
        if not context.rejected:
            await self._check_for_soft_fail(event, state, backfilled)

        if event.type == EventTypes.GuestAccess and not context.rejected:
            await self.maybe_kick_guest_users(event)

        return context
|
|
|
    async def _check_for_soft_fail(
        self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool
    ) -> None:
        """Checks if we should soft fail the event; if so, marks the event as
        such.

        Soft-failing re-checks the event's auth against the *current* state of
        the room (rather than the state at the event), so that events sent on
        stale forks (e.g. by users who have since been banned) are hidden.

        Args:
            event
            state: The state at the event if we don't have all the event's prev events
            backfilled: Whether the event is from backfill
        """
        # Backfilled events and outliers are never soft-failed.
        if backfilled or event.internal_metadata.is_outlier():
            return

        extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
        extrem_ids = set(extrem_ids_list)
        prev_event_ids = set(event.prev_event_ids())

        # The event extends all current forward extremities: it is at the
        # head of the room, so current-state auth would be a no-op.
        if extrem_ids == prev_event_ids:
            return

        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

        if state is not None:
            # We were handed the state at the event; resolve it together with
            # the state at each current extremity to approximate the current
            # state including this event's fork.
            state_sets_d = await self.state_store.get_state_groups(
                event.room_id, extrem_ids
            )
            state_sets = list(state_sets_d.values())
            state_sets.append(state)
            current_states = await self.state_handler.resolve_events(
                room_version, state_sets, event
            )
            current_state_ids = {
                k: e.event_id for k, e in current_states.items()
            }
        else:
            current_state_ids = await self.state_handler.get_current_state_ids(
                event.room_id, latest_event_ids=extrem_ids
            )

        logger.debug(
            "Doing soft-fail check for %s: state %s", event.event_id, current_state_ids,
        )

        # Only the state entries relevant to auth-checking this event type.
        auth_types = auth_types_for_event(event)
        current_state_ids_list = [
            e for k, e in current_state_ids.items() if k in auth_types
        ]

        auth_events_map = await self.store.get_events(current_state_ids_list)
        current_auth_events = {
            (e.type, e.state_key): e for e in auth_events_map.values()
        }

        # Re-run auth against current state; failure soft-fails (does not
        # reject) the event.
        try:
            event_auth.check(room_version_obj, event, auth_events=current_auth_events)
        except AuthError as e:
            logger.warning("Soft-failing %r because %s", event, e)
            event.internal_metadata.soft_failed = True
|
|
|
async def on_query_auth( |
|
self, origin, event_id, room_id, remote_auth_chain, rejects, missing |
|
): |
|
in_room = await self.auth.check_host_in_room(room_id, origin) |
|
if not in_room: |
|
raise AuthError(403, "Host not in room.") |
|
|
|
event = await self.store.get_event(event_id, check_room_id=room_id) |
|
|
|
|
|
|
|
for e in remote_auth_chain: |
|
try: |
|
await self._handle_new_event(origin, e) |
|
except AuthError: |
|
pass |
|
|
|
|
|
local_auth_chain = await self.store.get_auth_chain( |
|
list(event.auth_event_ids()), include_given=True |
|
) |
|
|
|
|
|
|
|
|
|
ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain) |
|
|
|
logger.debug("on_query_auth returning: %s", ret) |
|
|
|
return ret |
|
|
|
async def on_get_missing_events( |
|
self, origin, room_id, earliest_events, latest_events, limit |
|
): |
|
in_room = await self.auth.check_host_in_room(room_id, origin) |
|
if not in_room: |
|
raise AuthError(403, "Host not in room.") |
|
|
|
|
|
limit = min(limit, 20) |
|
|
|
missing_events = await self.store.get_missing_events( |
|
room_id=room_id, |
|
earliest_events=earliest_events, |
|
latest_events=latest_events, |
|
limit=limit, |
|
) |
|
|
|
missing_events = await filter_events_for_server( |
|
self.storage, origin, missing_events |
|
) |
|
|
|
return missing_events |
|
|
|
    async def do_auth(
        self,
        origin: str,
        event: EventBase,
        context: EventContext,
        auth_events: MutableStateMap[EventBase],
    ) -> EventContext:
        """Auth-check a received event, reconciling auth events with the
        sending server if needed; mark the context rejected on failure.

        Args:
            origin:
            event:
            context:
            auth_events:
                Map from (event_type, state_key) to event

                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.

                Also NB that this function adds entries to it.
        Returns:
            updated context object
        """
        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

        try:
            context = await self._update_auth_events_and_context_for_auth(
                origin, event, context, auth_events
            )
        except Exception:
            # Reconciling auth with the remote is best-effort: log and fall
            # through to checking against what we have.
            logger.exception(
                "Failed to double check auth events for %s with remote. "
                "Ignoring failure and continuing processing of event.",
                event.event_id,
            )

        # Final auth check; failure rejects (but still persists) the event.
        try:
            event_auth.check(room_version_obj, event, auth_events=auth_events)
        except AuthError as e:
            logger.warning("Failed auth resolution for %r because %s", event, e)
            context.rejected = RejectedReason.AUTH_ERROR

        return context
|
|
|
    async def _update_auth_events_and_context_for_auth(
        self,
        origin: str,
        event: EventBase,
        context: EventContext,
        auth_events: MutableStateMap[EventBase],
    ) -> EventContext:
        """Helper for do_auth. See there for docs.

        Checks whether a given event has the expected auth events. If it
        doesn't then we talk to the remote server to compare state to see if
        we can come to a consensus (e.g. if one server missed some valid
        state).

        This attempts to resolve any potential divergence of state between
        servers, but is not essential and so failures should not block further
        processing of the event.

        Args:
            origin:
            event:
            context:

            auth_events:
                Map from (event_type, state_key) to event

                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.

                Also NB that this function adds entries to it.

        Returns:
            updated context
        """
        event_auth_events = set(event.auth_event_ids())

        # Auth event ids the event claims which our computed map lacks.
        missing_auth = event_auth_events.difference(
            e.event_id for e in auth_events.values()
        )

        # Narrow to events we have never seen at all (in any room).
        if missing_auth:
            have_events = await self.store.have_seen_events(missing_auth)
            logger.debug("Events %s are in the store", have_events)
            missing_auth.difference_update(have_events)

        if missing_auth:
            # Fetch the full auth chain from the sender and absorb anything
            # new; this whole section is best-effort.
            logger.info("auth_events contains unknown events: %s", missing_auth)
            try:
                try:
                    remote_auth_chain = await self.federation_client.get_event_auth(
                        origin, event.room_id, event.event_id
                    )
                except RequestSendFailed as e1:
                    # The remote is unreachable; give up on reconciliation.
                    logger.info("Failed to get event auth from remote: %s", e1)
                    return context

                seen_remotes = await self.store.have_seen_events(
                    [e.event_id for e in remote_auth_chain]
                )

                for e in remote_auth_chain:
                    if e.event_id in seen_remotes:
                        continue

                    if e.event_id == event.event_id:
                        continue

                    try:
                        # NOTE(review): the comprehension below shadows the
                        # loop variable `e` — `auth_ids` is captured first so
                        # this works, but tread carefully when editing.
                        auth_ids = e.auth_event_ids()
                        auth = {
                            (e.type, e.state_key): e
                            for e in remote_auth_chain
                            if e.event_id in auth_ids or e.type == EventTypes.Create
                        }
                        e.internal_metadata.outlier = True

                        logger.debug(
                            "do_auth %s missing_auth: %s", event.event_id, e.event_id
                        )
                        await self._handle_new_event(origin, e, auth_events=auth)

                        # If the event claims this as an auth event, record it.
                        if e.event_id in event_auth_events:
                            auth_events[(e.type, e.state_key)] = e
                    except AuthError:
                        pass

            except Exception:
                logger.exception("Failed to get auth chain")

        if event.internal_metadata.is_outlier():
            # Outliers are checked against their claimed auth events only;
            # no further reconciliation is useful.
            logger.info("Skipping auth_event fetch for outlier")
            return context

        # Auth events the event claims that still disagree with our
        # calculated auth chain.
        different_auth = event_auth_events.difference(
            e.event_id for e in auth_events.values()
        )

        if not different_auth:
            return context

        logger.info(
            "auth_events refers to events which are not in our calculated auth "
            "chain: %s",
            different_auth,
        )

        different_events = await self.store.get_events_as_list(different_auth)

        for d in different_events:
            if d.room_id != event.room_id:
                # Cross-room auth references are bogus; don't try to resolve.
                logger.warning(
                    "Event %s refers to auth_event %s which is in a different room",
                    event.event_id,
                    d.event_id,
                )

                return context

        # State-resolve our auth events against the remote's claimed set and
        # adopt the result.
        local_state = auth_events.values()
        remote_auth_events = dict(auth_events)
        remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
        remote_state = remote_auth_events.values()

        room_version = await self.store.get_room_version_id(event.room_id)
        new_state = await self.state_handler.resolve_events(
            room_version, (local_state, remote_state), event
        )

        logger.info(
            "After state res: updating auth_events with new state %s",
            {
                (d.type, d.state_key): d.event_id
                for d in new_state.values()
                if auth_events.get((d.type, d.state_key)) != d
            },
        )

        auth_events.update(new_state)

        # The context must reflect the resolved auth state.
        context = await self._update_context_for_auth_events(
            event, context, auth_events
        )

        return context
|
|
|
    async def _update_context_for_auth_events(
        self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
    ) -> EventContext:
        """Update the state_ids in an event context after auth event resolution,
        storing the changes as a new state group.

        Args:
            event: The event we're handling the context for

            context: initial event context

            auth_events: Events to update in the event context.

        Returns:
            new event context
        """
        # Exclude the event's own (type, state_key) from the *current*-state
        # updates — current state should reflect the event itself.
        if event.is_state():
            event_key = (event.type, event.state_key)
        else:
            event_key = None
        state_updates = {
            k: a.event_id for k, a in auth_events.items() if k != event_key
        }

        current_state_ids = await context.get_current_state_ids()
        current_state_ids = dict(current_state_ids)

        current_state_ids.update(state_updates)

        prev_state_ids = await context.get_prev_state_ids()
        prev_state_ids = dict(prev_state_ids)

        # Prev state gets every resolved auth entry (including the event's
        # own key, since prev state is the state *before* the event).
        prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})

        # Persist the updated state as a new group deltaed off the old one.
        prev_group = context.state_group
        state_group = await self.state_store.store_state_group(
            event.event_id,
            event.room_id,
            prev_group=prev_group,
            delta_ids=state_updates,
            current_state_ids=current_state_ids,
        )

        return EventContext.with_state(
            state_group=state_group,
            state_group_before_event=context.state_group_before_event,
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
            prev_group=prev_group,
            delta_ids=state_updates,
        )
|
|
|
async def construct_auth_difference( |
|
self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase] |
|
) -> Dict: |
|
""" Given a local and remote auth chain, find the differences. This |
|
assumes that we have already processed all events in remote_auth |
|
|
|
Params: |
|
local_auth (list) |
|
remote_auth (list) |
|
|
|
Returns: |
|
dict |
|
""" |
|
|
|
logger.debug("construct_auth_difference Start!") |
|
|
|
|
|
|
|
|
|
def sort_fun(ev): |
|
return ev.depth, ev.event_id |
|
|
|
logger.debug("construct_auth_difference after sort_fun!") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
remote_list = list(remote_auth) |
|
remote_list.sort(key=sort_fun) |
|
|
|
local_list = list(local_auth) |
|
local_list.sort(key=sort_fun) |
|
|
|
local_iter = iter(local_list) |
|
remote_iter = iter(remote_list) |
|
|
|
logger.debug("construct_auth_difference before get_next!") |
|
|
|
def get_next(it, opt=None): |
|
try: |
|
return next(it) |
|
except Exception: |
|
return opt |
|
|
|
current_local = get_next(local_iter) |
|
current_remote = get_next(remote_iter) |
|
|
|
logger.debug("construct_auth_difference before while") |
|
|
|
missing_remotes = [] |
|
missing_locals = [] |
|
while current_local or current_remote: |
|
if current_remote is None: |
|
missing_locals.append(current_local) |
|
current_local = get_next(local_iter) |
|
continue |
|
|
|
if current_local is None: |
|
missing_remotes.append(current_remote) |
|
current_remote = get_next(remote_iter) |
|
continue |
|
|
|
if current_local.event_id == current_remote.event_id: |
|
current_local = get_next(local_iter) |
|
current_remote = get_next(remote_iter) |
|
continue |
|
|
|
if current_local.depth < current_remote.depth: |
|
missing_locals.append(current_local) |
|
current_local = get_next(local_iter) |
|
continue |
|
|
|
if current_local.depth > current_remote.depth: |
|
missing_remotes.append(current_remote) |
|
current_remote = get_next(remote_iter) |
|
continue |
|
|
|
|
|
if current_local.event_id < current_remote.event_id: |
|
missing_locals.append(current_local) |
|
current_local = get_next(local_iter) |
|
|
|
if current_local.event_id > current_remote.event_id: |
|
missing_remotes.append(current_remote) |
|
current_remote = get_next(remote_iter) |
|
continue |
|
|
|
logger.debug("construct_auth_difference after while") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
missing_remote_ids = [e.event_id for e in missing_remotes] |
|
base_remote_rejected = list(missing_remotes) |
|
for e in missing_remotes: |
|
for e_id in e.auth_event_ids(): |
|
if e_id in missing_remote_ids: |
|
try: |
|
base_remote_rejected.remove(e) |
|
except ValueError: |
|
pass |
|
|
|
reason_map = {} |
|
|
|
for e in base_remote_rejected: |
|
reason = await self.store.get_rejection_reason(e.event_id) |
|
if reason is None: |
|
|
|
|
|
continue |
|
|
|
reason_map[e.event_id] = reason |
|
|
|
logger.debug("construct_auth_difference returning") |
|
|
|
return { |
|
"auth_chain": local_auth, |
|
"rejects": { |
|
e.event_id: {"reason": reason_map[e.event_id], "proof": None} |
|
for e in base_remote_rejected |
|
}, |
|
"missing": [e.event_id for e in missing_locals], |
|
} |
|
|
|
    @log_function
    async def exchange_third_party_invite(
        self, sender_user_id, target_user_id, room_id, signed
    ):
        """Turn a 3pid invite into an m.room.member invite for target_user_id.

        If this server is in the room, the membership event is built,
        validated, auth- and signature-checked, then sent locally. Otherwise
        the event dict is forwarded over federation to the servers derived
        from the sender's and the room's domains.

        Args:
            sender_user_id (str): user id issuing the invite.
            target_user_id (str): user id being invited; becomes the
                member event's state_key.
            room_id (str): the room the invite is for.
            signed (dict): opaque signed block placed under
                content.third_party_invite.signed on the member event.

        Raises:
            AuthError: if the resulting event fails auth checks.
        """
        third_party_invite = {"signed": signed}

        event_dict = {
            "type": EventTypes.Member,
            "content": {
                "membership": Membership.INVITE,
                "third_party_invite": third_party_invite,
            },
            "room_id": room_id,
            "sender": sender_user_id,
            "state_key": target_user_id,
        }

        if await self.auth.check_host_in_room(room_id, self.hs.hostname):
            room_version = await self.store.get_room_version_id(room_id)
            builder = self.event_builder_factory.new(room_version, event_dict)

            EventValidator().validate_builder(builder)
            event, context = await self.event_creation_handler.create_new_client_event(
                builder=builder
            )

            # Copy the display name from the original third-party invite into
            # this event, which rebuilds both event and context.
            event, context = await self.add_display_name_to_third_party_invite(
                room_version, event_dict, event, context
            )

            EventValidator().validate_new(event, self.config)

            # Mark the event as sent on behalf of this server: the sender is
            # not necessarily a local user.
            event.internal_metadata.send_on_behalf_of = self.hs.hostname

            try:
                await self.auth.check_from_context(room_version, event, context)
            except AuthError as e:
                logger.warning("Denying new third party invite %r because %s", event, e)
                raise e

            # Verify the identity-server signature against the public keys in
            # the original invite event.
            await self._check_signature(event, context)

            # NOTE(review): fetched lazily here rather than stored at
            # construction — presumably to avoid a handler dependency cycle;
            # confirm before changing.
            member_handler = self.hs.get_room_member_handler()
            await member_handler.send_membership_event(None, event, context)
        else:
            # We are not in the room: hand the exchange off to servers that
            # may be (the sender's domain and the room's domain).
            destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}
            await self.federation_client.forward_third_party_invite(
                destinations, room_id, event_dict
            )
|
|
|
    async def on_exchange_third_party_invite_request(
        self, event_dict: JsonDict
    ) -> None:
        """Handle an exchange_third_party_invite request from a remote server

        The remote server will call this when it wants to turn a 3pid invite
        into a normal m.room.member invite.

        Args:
            event_dict: Dictionary containing the event body.

        Raises:
            AuthError: if the resulting event fails auth checks or its
                third-party-invite signature cannot be verified.
        """
        assert_params_in_dict(event_dict, ["room_id"])
        room_version = await self.store.get_room_version_id(event_dict["room_id"])

        builder = self.event_builder_factory.new(room_version, event_dict)

        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )
        # Copy the invitee's display name from the original third-party
        # invite; this rebuilds both event and context.
        event, context = await self.add_display_name_to_third_party_invite(
            room_version, event_dict, event, context
        )

        try:
            await self.auth.check_from_context(room_version, event, context)
        except AuthError as e:
            logger.warning("Denying third party invite %r because %s", event, e)
            raise e
        await self._check_signature(event, context)

        # This event is sent on behalf of the remote sender's server, not us.
        event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)

        member_handler = self.hs.get_room_member_handler()
        await member_handler.send_membership_event(None, event, context)
|
|
|
async def add_display_name_to_third_party_invite( |
|
self, room_version, event_dict, event, context |
|
): |
|
key = ( |
|
EventTypes.ThirdPartyInvite, |
|
event.content["third_party_invite"]["signed"]["token"], |
|
) |
|
original_invite = None |
|
prev_state_ids = await context.get_prev_state_ids() |
|
original_invite_id = prev_state_ids.get(key) |
|
if original_invite_id: |
|
original_invite = await self.store.get_event( |
|
original_invite_id, allow_none=True |
|
) |
|
if original_invite: |
|
|
|
|
|
|
|
|
|
|
|
display_name = original_invite.content.get("display_name") |
|
event_dict["content"]["third_party_invite"]["display_name"] = display_name |
|
else: |
|
logger.info( |
|
"Could not find invite event for third_party_invite: %r", event_dict |
|
) |
|
|
|
|
|
|
|
|
|
builder = self.event_builder_factory.new(room_version, event_dict) |
|
EventValidator().validate_builder(builder) |
|
event, context = await self.event_creation_handler.create_new_client_event( |
|
builder=builder |
|
) |
|
EventValidator().validate_new(event, self.config) |
|
return (event, context) |
|
|
|
    async def _check_signature(self, event, context):
        """
        Checks that the signature in the event is consistent with its invite.

        Args:
            event (Event): The m.room.member event to check
            context (EventContext):

        Raises:
            AuthError: if signature didn't match any keys, or key has been
                revoked,
            SynapseError: if a transient error meant a key couldn't be checked
                for revocation.
        """
        signed = event.content["third_party_invite"]["signed"]
        token = signed["token"]

        # Locate the original m.room.third_party_invite event this member
        # event claims to correspond to, keyed by its token.
        prev_state_ids = await context.get_prev_state_ids()
        invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))

        invite_event = None
        if invite_event_id:
            invite_event = await self.store.get_event(invite_event_id, allow_none=True)

        if not invite_event:
            raise AuthError(403, "Could not find invite")

        logger.debug("Checking auth on event %r", event.content)

        last_exception = None

        # Try each public key advertised by the invite event in turn. The
        # first key for which both signature verification and the (optional)
        # revocation check succeed wins; failures are remembered and the next
        # key is tried.
        for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
            try:
                # Each signing server may provide several keys; only ed25519
                # keys are considered. Any verification failure raises out of
                # this loop to the per-key handler below.
                for server, signature_block in signed["signatures"].items():
                    for key_name, encoded_signature in signature_block.items():
                        if not key_name.startswith("ed25519:"):
                            continue

                        logger.debug(
                            "Attempting to verify sig with key %s from %r "
                            "against pubkey %r",
                            key_name,
                            server,
                            public_key_object,
                        )

                        try:
                            public_key = public_key_object["public_key"]
                            verify_key = decode_verify_key_bytes(
                                key_name, decode_base64(public_key)
                            )
                            verify_signed_json(signed, server, verify_key)
                            logger.debug(
                                "Successfully verified sig with key %s from %r "
                                "against pubkey %r",
                                key_name,
                                server,
                                public_key_object,
                            )
                        except Exception:
                            logger.info(
                                "Failed to verify sig with key %s from %r "
                                "against pubkey %r",
                                key_name,
                                server,
                                public_key_object,
                            )
                            raise
                # NOTE(review): if no ed25519 signature was present above,
                # `public_key` is unbound here; the resulting UnboundLocalError
                # is caught by the outer handler and treated as a key failure
                # — confirm this is intended before restructuring.
                try:
                    if "key_validity_url" in public_key_object:
                        await self._check_key_revocation(
                            public_key, public_key_object["key_validity_url"]
                        )
                except Exception:
                    logger.info(
                        "Failed to query key_validity_url %s",
                        public_key_object["key_validity_url"],
                    )
                    raise
                # Signature verified and key not revoked: the invite is good.
                return
            except Exception as e:
                # Remember this key's failure and move on to the next one.
                last_exception = e

        if last_exception is None:
            # The invite event advertised no public keys at all.
            raise RuntimeError("no public key in invite event")

        raise last_exception
|
|
|
async def _check_key_revocation(self, public_key, url): |
|
""" |
|
Checks whether public_key has been revoked. |
|
|
|
Args: |
|
public_key (str): base-64 encoded public key. |
|
url (str): Key revocation URL. |
|
|
|
Raises: |
|
AuthError: if they key has been revoked. |
|
SynapseError: if a transient error meant a key couldn't be checked |
|
for revocation. |
|
""" |
|
try: |
|
response = await self.http_client.get_json(url, {"public_key": public_key}) |
|
except Exception: |
|
raise SynapseError(502, "Third party certificate could not be checked") |
|
if "valid" not in response or not response["valid"]: |
|
raise AuthError(403, "Third party certificate was invalid") |
|
|
|
    async def persist_events_and_notify(
        self,
        room_id: str,
        event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
        backfilled: bool = False,
    ) -> int:
        """Persists events and tells the notifier/pushers about them, if
        necessary.

        Args:
            room_id: The room ID of events being persisted.
            event_and_contexts: Sequence of events with their associated
                context that should be persisted. All events must belong to
                the same room.
            backfilled: Whether these events are a result of
                backfilling or not

        Returns:
            The max stream id of the persisted events.
        """
        # Event persistence is sharded by room: if a different worker owns
        # this room, proxy the whole batch to it over replication.
        instance = self.config.worker.events_shard_config.get_instance(room_id)
        if instance != self._instance_name:
            result = await self._send_events(
                instance_name=instance,
                store=self.store,
                room_id=room_id,
                event_and_contexts=event_and_contexts,
                backfilled=backfilled,
            )
            return result["max_stream_id"]
        else:
            # We own this room's persistence on this worker.
            assert self.storage.persistence

            events, max_stream_token = await self.storage.persistence.persist_events(
                event_and_contexts, backfilled=backfilled
            )

            if self._ephemeral_messages_enabled:
                for event in events:
                    # Schedule expiry of the event if needed (the message
                    # handler decides based on the event itself).
                    self._message_handler.maybe_schedule_expiry(event)

            # Backfilled (historical) events are never pushed to clients.
            if not backfilled:
                for event in events:
                    await self._notify_persisted_event(event, max_stream_token)

            return max_stream_token.stream
|
|
|
async def _notify_persisted_event( |
|
self, event: EventBase, max_stream_token: RoomStreamToken |
|
) -> None: |
|
"""Checks to see if notifier/pushers should be notified about the |
|
event or not. |
|
|
|
Args: |
|
event: |
|
max_stream_id: The max_stream_id returned by persist_events |
|
""" |
|
|
|
extra_users = [] |
|
if event.type == EventTypes.Member: |
|
target_user_id = event.state_key |
|
|
|
|
|
|
|
if event.internal_metadata.is_outlier(): |
|
if event.membership != Membership.INVITE: |
|
if not self.is_mine_id(target_user_id): |
|
return |
|
|
|
target_user = UserID.from_string(target_user_id) |
|
extra_users.append(target_user) |
|
elif event.internal_metadata.is_outlier(): |
|
return |
|
|
|
|
|
assert event.internal_metadata.stream_ordering |
|
|
|
event_pos = PersistedEventPosition( |
|
self._instance_name, event.internal_metadata.stream_ordering |
|
) |
|
self.notifier.on_new_room_event( |
|
event, event_pos, max_stream_token, extra_users=extra_users |
|
) |
|
|
|
async def _clean_room_for_join(self, room_id: str) -> None: |
|
"""Called to clean up any data in DB for a given room, ready for the |
|
server to join the room. |
|
|
|
Args: |
|
room_id |
|
""" |
|
if self.config.worker_app: |
|
await self._clean_room_for_join_client(room_id) |
|
else: |
|
await self.store.clean_room_for_join(room_id) |
|
|
|
async def get_room_complexity( |
|
self, remote_room_hosts: List[str], room_id: str |
|
) -> Optional[dict]: |
|
""" |
|
Fetch the complexity of a remote room over federation. |
|
|
|
Args: |
|
remote_room_hosts (list[str]): The remote servers to ask. |
|
room_id (str): The room ID to ask about. |
|
|
|
Returns: |
|
Dict contains the complexity |
|
metric versions, while None means we could not fetch the complexity. |
|
""" |
|
|
|
for host in remote_room_hosts: |
|
res = await self.federation_client.get_room_complexity(host, room_id) |
|
|
|
|
|
if res: |
|
return res |
|
|
|
|
|
|
|
return None |
|
|