-
Notifications
You must be signed in to change notification settings - Fork 503
Return some room data in Sliding Sync /sync
#17320
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 6 commits
079194c
3e0f759
5e2fd4e
8ce06f1
aa5f54a
5c175d5
9089bfe
9427991
19b2297
81d36f3
9791209
70ecd4d
71eabe5
39b4f10
9883b0f
1c06153
57ba033
c81f300
d801db0
6942b64
884b448
0eb0294
b1b4231
87fac19
0e71a2f
21ca02c
3568311
7aea406
e3e431f
303d834
4c22131
83d6f76
fbd92e1
6c791a8
27d74b0
fb8fbd4
d91aa00
daa7e36
cccbd15
62c6a4e
39259f6
5c21315
c60aca7
11db1be
7395e10
2bf3923
ec2d8dc
0b9a903
48d0acf
2a944ff
8df39d1
b7914e7
7eb1806
935b98c
f163fcf
956f20e
830e09d
15fcead
81c06be
eb159c1
ba56350
f774032
325856e
63c7b50
1158058
32b8b68
6045e11
9e53336
a4263bf
10d78d6
0061561
b8687e7
7c9513c
8b73185
126ce1e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. | ||
MadLittleMods marked this conversation as resolved.
Show resolved
Hide resolved
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. CI keeps failing on building debs. It's unclear what's even going wrong, or what change could have caused this, and I'm unable to merge with the failing status. It was building fine before: https://github.com/element-hq/synapse/actions/runs/9754628604/job/26922590117 If I run it locally: Local build output
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I've just rerun this on develop and it looks like it's broken there too.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks for fixing up the deb build issues @erikjohnston! For the first one, the fact that it was building a wheel at all is the hint as it should be downloading one normally. For the second one, it makes more sense but if we expect a clean checkout for the Docker builds, it would be nice to remove things according to the |
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -18,22 +18,27 @@ | |
| # | ||
| # | ||
| import logging | ||
| from typing import TYPE_CHECKING, Dict, List, Optional, Tuple | ||
| from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple | ||
|
|
||
| import attr | ||
| from immutabledict import immutabledict | ||
|
|
||
| from synapse.api.constants import AccountDataTypes, EventTypes, Membership | ||
| from synapse.api.constants import AccountDataTypes, Direction, EventTypes, Membership | ||
| from synapse.events import EventBase | ||
| from synapse.events.utils import strip_event | ||
| from synapse.storage.roommember import RoomsForUser | ||
| from synapse.types import ( | ||
| JsonDict, | ||
| PersistedEventPosition, | ||
| Requester, | ||
| RoomStreamToken, | ||
| StreamKeyType, | ||
| StreamToken, | ||
| UserID, | ||
| ) | ||
| from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult | ||
| from synapse.types.state import StateFilter | ||
| from synapse.types.state import StateFilter, StateKey | ||
| from synapse.visibility import filter_events_for_client | ||
|
|
||
| if TYPE_CHECKING: | ||
| from synapse.server import HomeServer | ||
|
|
@@ -82,6 +87,24 @@ def filter_membership_for_sync(*, membership: str, user_id: str, sender: str) -> | |
| return membership != Membership.LEAVE or sender != user_id | ||
|
|
||
|
|
||
| # We can't freeze this class because we want to update it in place with the | ||
| # de-duplicated data. | ||
| @attr.s(slots=True, auto_attribs=True) | ||
| class RoomSyncConfig: | ||
| """ | ||
| Holds the config for what data we should fetch for a room in the sync response. | ||
|
|
||
| Attributes: | ||
| timeline_limit: The maximum number of events to return in the timeline. | ||
| required_state: The minimum set of state events requested for the room. The | ||
| values are close to `StateKey` but actually use a syntax where you can provide | ||
| `*` and `$LAZY` as the state key part of the tuple (type, state_key). | ||
| """ | ||
|
|
||
| timeline_limit: int | ||
| required_state: Set[Tuple[str, str]] | ||
|
|
||
|
|
||
| class SlidingSyncHandler: | ||
| def __init__(self, hs: "HomeServer"): | ||
| self.clock = hs.get_clock() | ||
|
|
@@ -201,6 +224,7 @@ async def current_sync_for_user( | |
|
|
||
| # Assemble sliding window lists | ||
| lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} | ||
| relevant_room_map: Dict[str, RoomSyncConfig] = {} | ||
| if sync_config.lists: | ||
| # Get all of the room IDs that the user should be able to see in the sync | ||
| # response | ||
|
|
@@ -225,29 +249,66 @@ async def current_sync_for_user( | |
| ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] | ||
| if list_config.ranges: | ||
| for range in list_config.ranges: | ||
| sliced_room_ids = [ | ||
| room_id | ||
| for room_id, _ in sorted_room_info[range[0] : range[1]] | ||
| ] | ||
|
|
||
| ops.append( | ||
| SlidingSyncResult.SlidingWindowList.Operation( | ||
| op=OperationType.SYNC, | ||
| range=range, | ||
| room_ids=[ | ||
| room_id | ||
| for room_id, _ in sorted_room_info[ | ||
| range[0] : range[1] | ||
| ] | ||
| ], | ||
| room_ids=sliced_room_ids, | ||
| ) | ||
| ) | ||
|
|
||
| # Update the relevant room map | ||
| for room_id in sliced_room_ids: | ||
| if relevant_room_map.get(room_id) is not None: | ||
| # Take the highest timeline limit | ||
| if ( | ||
| relevant_room_map[room_id].timeline_limit | ||
| < list_config.timeline_limit | ||
| ): | ||
| relevant_room_map[room_id].timeline_limit = ( | ||
| list_config.timeline_limit | ||
| ) | ||
|
|
||
| # Union the required state | ||
| relevant_room_map[room_id].required_state.update( | ||
| list_config.required_state | ||
| ) | ||
| else: | ||
| relevant_room_map[room_id] = RoomSyncConfig( | ||
| timeline_limit=list_config.timeline_limit, | ||
| required_state=set(list_config.required_state), | ||
| ) | ||
|
|
||
| lists[list_key] = SlidingSyncResult.SlidingWindowList( | ||
| count=len(sorted_room_info), | ||
| ops=ops, | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Handling state resetsIt's not clear to me what feedback we should give to the client in a state reset scenario where the user is removed from the room. We can't send a leave event down to them as there is no event to send. Do we just give a HTTP 400 "connection expired" so clients start over? Do we send the room down because we consider it
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think you're right, but let's just do whatever is easier and add a TODO/issue to follow up.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Added a TODO ⏩ |
||
| ) | ||
|
|
||
| # TODO: if (sync_config.room_subscriptions): | ||
|
|
||
| # Fetch room data | ||
| rooms: Dict[str, SlidingSyncResult.RoomResult] = {} | ||
| for room_id, room_sync_config in relevant_room_map.items(): | ||
| room_sync_result = await self.get_room_sync_data( | ||
| user=sync_config.user, | ||
| room_id=room_id, | ||
| room_sync_config=room_sync_config, | ||
| rooms_for_user_membership_at_to_token=sync_room_map[room_id], | ||
| from_token=from_token, | ||
| to_token=to_token, | ||
| ) | ||
|
|
||
| rooms[room_id] = room_sync_result | ||
|
|
||
| return SlidingSyncResult( | ||
| next_pos=to_token, | ||
| lists=lists, | ||
| # TODO: Gather room data for rooms in lists and `sync_config.room_subscriptions` | ||
| rooms={}, | ||
| rooms=rooms, | ||
| extensions={}, | ||
| ) | ||
|
|
||
|
|
@@ -665,3 +726,154 @@ async def sort_rooms( | |
| # We want descending order | ||
| reverse=True, | ||
| ) | ||
|
|
||
| async def get_room_sync_data( | ||
| self, | ||
| user: UserID, | ||
| room_id: str, | ||
| room_sync_config: RoomSyncConfig, | ||
| rooms_for_user_membership_at_to_token: RoomsForUser, | ||
| from_token: Optional[StreamToken], | ||
| to_token: StreamToken, | ||
| ) -> SlidingSyncResult.RoomResult: | ||
| """ | ||
| Fetch room data for a room. | ||
|
|
||
| We fetch data according to the token range (> `from_token` and <= `to_token`). | ||
|
|
||
| Args: | ||
| user: User to fetch data for | ||
| room_id: The room ID to fetch data for | ||
| room_sync_config: Config for what data we should fetch for a room in the | ||
| sync response. | ||
| rooms_for_user_membership_at_to_token: Membership information for the user | ||
| in the room at the time of `to_token`. | ||
| from_token: The point in the stream to sync from. | ||
| to_token: The point in the stream to sync up to. | ||
| """ | ||
|
|
||
| timeline_events: List[EventBase] = [] | ||
| limited = False | ||
| # We want to use `to_token` (vs `from_token`) because we look backwards from the | ||
| # `to_token` up to the `timeline_limit` and we might not reach `from_token` | ||
| # before we hit the limit. We will update the room stream position once we've | ||
| # fetched the events. | ||
| prev_batch_token = to_token | ||
| if room_sync_config.timeline_limit > 0: | ||
| timeline_events, new_room_key = await self.store.paginate_room_events( | ||
| room_id=room_id, | ||
| # We're going to paginate backwards from the `to_token` | ||
| from_key=to_token.room_key, | ||
| # We should always return historical messages (outside token range) in | ||
| # these cases because clients want to be able to show a basic screen of | ||
| # information: | ||
| # - Initial sync (because no `from_token`) | ||
| # - When users newly_join | ||
| # - TODO: For incremental sync where we haven't sent it down this | ||
| # connection before | ||
| to_key=from_token.room_key if from_token is not None else None, | ||
| direction=Direction.BACKWARDS, | ||
| # We add one so we can determine if there are enough events to saturate | ||
| # the limit or not (see `limited`) | ||
| limit=room_sync_config.timeline_limit + 1, | ||
| event_filter=None, | ||
| ) | ||
|
|
||
| # We want to return the events in ascending order (the last event is the | ||
| # most recent). | ||
| timeline_events.reverse() | ||
|
|
||
| timeline_events = await filter_events_for_client( | ||
| self.storage_controllers, | ||
| user.to_string(), | ||
| timeline_events, | ||
| is_peeking=rooms_for_user_membership_at_to_token.membership | ||
| != Membership.JOIN, | ||
| filter_send_to_client=True, | ||
| ) | ||
|
|
||
| # Determine our `limited` status | ||
| if len(timeline_events) > room_sync_config.timeline_limit: | ||
| limited = True | ||
| # Get rid of that extra "+ 1" event because we only used it to determine | ||
| # if we hit the limit or not | ||
| timeline_events = timeline_events[-room_sync_config.timeline_limit :] | ||
| assert timeline_events[0].internal_metadata.stream_ordering | ||
| new_room_key = RoomStreamToken( | ||
| stream=timeline_events[0].internal_metadata.stream_ordering - 1 | ||
| ) | ||
|
|
||
| prev_batch_token = prev_batch_token.copy_and_replace( | ||
| StreamKeyType.ROOM, new_room_key | ||
| ) | ||
|
Comment on lines
+949
to
+953
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Reviewer, please double-check the It's unclear how these fields should be set in cases where the user's room membership is Where should the If we change the scenario to a room with For To compare, Sync v2 doesn't have to deal with this Perhaps I'm making a leap too far in the Sliding Sync response and
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yeah, ermh, I'd ask what the proxy does. My hunch is that we don't return any timeline events (bar maybe the invite?), and just set prev_batch to just before the invite. I'd guess clients don't actually use it, as the API will send down a new room entry for when we joined the room?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I asked what the Sliding Sync proxy does for Because the proxy is just a Matrix client, it can basically only pass along events from Sync v2 so it's not able to pull out extra events and have some filtering logic or history visibility checks on them. I also asked whether a full server implementation of Sliding Sync should do something more/better in the That sentiment makes some sense. Especially, if we were to return strictly live traffic only but the sync endpoint is already loading history. I assume the history is useful for room previews and being able to show something immediately. Ultimately, we're leaving that decision is also up to the client depending on the Deciding on how we treat ⏩ Since nothing was decided and there wasn't traction either way, I'll just copy the proxy for now and only have |
||
|
|
||
| # Figure out any stripped state events for invite/knocks | ||
| stripped_state: List[JsonDict] = [] | ||
| if rooms_for_user_membership_at_to_token.membership in { | ||
| Membership.INVITE, | ||
| Membership.KNOCK, | ||
| }: | ||
| invite_or_knock_event = await self.store.get_event( | ||
| rooms_for_user_membership_at_to_token.event_id | ||
| ) | ||
|
|
||
| stripped_state = [] | ||
| if invite_or_knock_event.membership == Membership.INVITE: | ||
| stripped_state.extend( | ||
| invite_or_knock_event.unsigned.get("invite_room_state", []) | ||
| ) | ||
| elif invite_or_knock_event.membership == Membership.KNOCK: | ||
| stripped_state.extend( | ||
| invite_or_knock_event.unsigned.get("knock_room_state", []) | ||
| ) | ||
|
|
||
| stripped_state.append(strip_event(invite_or_knock_event)) | ||
|
|
||
| required_state = [] | ||
| if len(room_sync_config.required_state) > 0: | ||
| await self.storage_controllers.state.get_current_state( | ||
| room_id, | ||
| state_filter=StateFilter.from_types(TODO), | ||
| await_full_state=False, | ||
| ) | ||
|
|
||
| # TODO: rewind | ||
|
|
||
| # required_state = await self.storage_controllers.state.get_state_at( | ||
| # room_id, | ||
| # to_token, | ||
| # state_filter=StateFilter.from_types(TODO), | ||
| # ) | ||
|
|
||
| return SlidingSyncResult.RoomResult( | ||
| # TODO: Dummy value | ||
| # TODO: Make this optional because a computed name doesn't make sense for translated cases | ||
| name="TODO", | ||
| # TODO: Dummy value | ||
| avatar=None, | ||
| # TODO: Dummy value | ||
| heroes=None, | ||
| # Since we can't determine whether we've already sent a room down this | ||
| # Sliding Sync connection before (we plan to add this optimization in the | ||
| # future), we're always returning the requested room state instead of | ||
| # updates. | ||
| initial=True, | ||
| # TODO: Dummy value | ||
| required_state=[], | ||
| timeline=timeline_events, | ||
| # TODO: Dummy value | ||
| is_dm=False, | ||
| stripped_state=stripped_state, | ||
| prev_batch=prev_batch_token, | ||
| limited=limited, | ||
| # TODO: Dummy values | ||
| joined_count=0, | ||
| invited_count=0, | ||
| # TODO: These are just dummy values. We could potentially just remove these | ||
| # since notifications can only really be done correctly on the client anyway | ||
| # (encrypted rooms). | ||
| notification_count=0, | ||
| highlight_count=0, | ||
| # TODO: Dummy value | ||
| num_live=0, | ||
| ) | ||
Uh oh!
There was an error while loading. Please reload this page.