
Merge branch 'release-v0.11.1' of github.com:matrix-org/synapse

Erik Johnston 2015-11-20 17:38:58 +00:00
commit 2ca01ed747
22 changed files with 225 additions and 114 deletions


@@ -1,3 +1,18 @@
Changes in synapse v0.11.1 (2015-11-20)
=======================================
* Add extra options to search API (PR #394)
* Fix bug where we did not correctly cap federation retry timers. This meant it
could take several hours for servers to start talking to resurrected servers,
even when they were receiving traffic from them (PR #393)
* Don't advertise login token flow unless CAS is enabled. This caused issues
where some clients would always use the fallback API if they did not
recognize all login flows (PR #391)
* Change /v2 sync API to rename ``private_user_data`` to ``account_data``
(PR #386)
* Change /v2 sync API to remove the ``event_map`` and rename keys in ``rooms``
object (PR #389)
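The two /v2 sync renames above change the response shape clients see. A rough
sketch of the new layout, with invented field values, assuming a single joined
room:

    sync_response = {
        "next_batch": "s72595_233_5_20_7",
        "rooms": {
            # previously "joined" / "invited" / "archived"
            "join": {
                "!room:example.org": {
                    "timeline": {
                        # serialized events inline; no top-level "event_map"
                        "events": [{"type": "m.room.message"}],
                        "prev_batch": "t34-23535_0_0",
                        "limited": False,
                    },
                    "state": {"events": []},
                    # previously "private_user_data"
                    "account_data": {"events": []},
                },
            },
            "invite": {},
            "leave": {},
        },
    }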
Changes in synapse v0.11.0-r2 (2015-11-19)
==========================================


@@ -30,6 +30,19 @@ running:
python synapse/python_dependencies.py | xargs -n1 pip install
Upgrading to v0.11.0
====================
This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.
Upgrading to v0.9.0
===================


@@ -17,14 +17,20 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w
tox
: ${GIT_BRANCH:="$(git rev-parse --abbrev-ref HEAD)"}
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
set +u
. .tox/py27/bin/activate
set -u
if [[ ! -e .sytest-base ]]; then
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
(cd .sytest-base; git fetch)
fi
rm -rf sytest
git clone https://github.com/matrix-org/sytest.git sytest
git clone .sytest-base sytest --shared
cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)


@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.11.0-r2"
__version__ = "0.11.1"


@@ -587,10 +587,7 @@ class Auth(object):
def _get_user_from_macaroon(self, macaroon_str):
try:
macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
self.validate_macaroon(
macaroon, "access",
[lambda c: c.startswith("time < ")]
)
self.validate_macaroon(macaroon, "access", False)
user_prefix = "user_id = "
user = None
@@ -638,22 +635,34 @@
errcode=Codes.UNKNOWN_TOKEN
)
def validate_macaroon(self, macaroon, type_string, additional_validation_functions):
def validate_macaroon(self, macaroon, type_string, verify_expiry):
"""
validate that a Macaroon is understood by and was signed by this server.
Args:
macaroon(pymacaroons.Macaroon): The macaroon to validate
type_string(str): The kind of token this is (e.g. "access", "refresh")
verify_expiry(bool): Whether to verify whether the macaroon has expired.
This should really always be True, but no clients currently implement
token refresh, so we can't enforce expiry yet.
"""
v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = " + type_string)
v.satisfy_general(lambda c: c.startswith("user_id = "))
v.satisfy_exact("guest = true")
if verify_expiry:
v.satisfy_general(self._verify_expiry)
else:
v.satisfy_general(lambda c: c.startswith("time < "))
for validation_function in additional_validation_functions:
v.satisfy_general(validation_function)
v.verify(macaroon, self.hs.config.macaroon_secret_key)
v = pymacaroons.Verifier()
v.satisfy_general(self._verify_recognizes_caveats)
v.verify(macaroon, self.hs.config.macaroon_secret_key)
def verify_expiry(self, caveat):
def _verify_expiry(self, caveat):
prefix = "time < "
if not caveat.startswith(prefix):
return False
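The body of _verify_expiry, truncated above, parses the millisecond timestamp
out of the caveat and compares it to the current time. A minimal standalone
sketch of the same check (hypothetical function name; the real method reads
the homeserver clock rather than time.time()):

    import time

    def verify_expiry_caveat(caveat):
        # Expiry caveats have the form "time < 1448037538000" (ms since epoch).
        prefix = "time < "
        if not caveat.startswith(prefix):
            return False
        expiry_ms = int(caveat[len(prefix):])
        now_ms = int(time.time() * 1000)
        return now_ms < expiry_ms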


@@ -54,7 +54,7 @@ class Filtering(object):
]
room_level_definitions = [
"state", "timeline", "ephemeral", "private_user_data"
"state", "timeline", "ephemeral", "account_data"
]
for key in top_level_definitions:
@@ -131,8 +131,8 @@ class FilterCollection(object):
self.filter_json.get("room", {}).get("ephemeral", {})
)
self.room_private_user_data = Filter(
self.filter_json.get("room", {}).get("private_user_data", {})
self.room_account_data = Filter(
self.filter_json.get("room", {}).get("account_data", {})
)
self.presence_filter = Filter(
@@ -160,8 +160,8 @@ class FilterCollection(object):
def filter_room_ephemeral(self, events):
return self.room_ephemeral_filter.filter(events)
def filter_room_private_user_data(self, events):
return self.room_private_user_data.filter(events)
def filter_room_account_data(self, events):
return self.room_account_data.filter(events)
class Filter(object):
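
Client-supplied filter documents use the same renamed key. A sketch of a
filter that keeps only m.tag account data (illustrative values; "types" and
"limit" are standard filter fields):

    filter_json = {
        "room": {
            "timeline": {"limit": 10},
            # previously "private_user_data"
            "account_data": {"types": ["m.tag"]},
        },
    }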


@@ -381,28 +381,24 @@ class Keyring(object):
def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
perspective_name,
perspective_keys):
limiter = yield get_retry_limiter(
perspective_name, self.clock, self.store
)
with limiter:
# TODO(mark): Set the minimum_valid_until_ts to that needed by
# the events being validated or the current time if validating
# an incoming request.
query_response = yield self.client.post_json(
destination=perspective_name,
path=b"/_matrix/key/v2/query",
data={
u"server_keys": {
server_name: {
key_id: {
u"minimum_valid_until_ts": 0
} for key_id in key_ids
}
for server_name, key_ids in server_names_and_key_ids
# TODO(mark): Set the minimum_valid_until_ts to that needed by
# the events being validated or the current time if validating
# an incoming request.
query_response = yield self.client.post_json(
destination=perspective_name,
path=b"/_matrix/key/v2/query",
data={
u"server_keys": {
server_name: {
key_id: {
u"minimum_valid_until_ts": 0
} for key_id in key_ids
}
},
)
for server_name, key_ids in server_names_and_key_ids
}
},
long_retries=True,
)
keys = {}


@@ -129,10 +129,9 @@ def format_event_for_client_v2(d):
return d
def format_event_for_client_v2_without_event_id(d):
def format_event_for_client_v2_without_room_id(d):
d = format_event_for_client_v2(d)
d.pop("room_id", None)
d.pop("event_id", None)
return d
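
The new name matches the behaviour: the formatter now strips the redundant
room_id (the room is implied by where the event appears in the response) but
keeps the event_id that the old _without_event_id variant removed. With a
made-up event:

    d = {
        "event_id": "$143273582443PhrSn:example.org",
        "room_id": "!room:example.org",
        "type": "m.room.message",
    }
    d = format_event_for_client_v2_without_room_id(d)
    # d == {"event_id": "$143273582443PhrSn:example.org",
    #       "type": "m.room.message"}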


@@ -16,19 +16,19 @@
from twisted.internet import defer
class PrivateUserDataEventSource(object):
class AccountDataEventSource(object):
def __init__(self, hs):
self.store = hs.get_datastore()
def get_current_key(self, direction='f'):
return self.store.get_max_private_user_data_stream_id()
return self.store.get_max_account_data_stream_id()
@defer.inlineCallbacks
def get_new_events(self, user, from_key, **kwargs):
user_id = user.to_string()
last_stream_id = from_key
current_stream_id = yield self.store.get_max_private_user_data_stream_id()
current_stream_id = yield self.store.get_max_account_data_stream_id()
tags = yield self.store.get_updated_tags(user_id, last_stream_id)
results = []


@@ -407,7 +407,7 @@ class AuthHandler(BaseHandler):
try:
macaroon = pymacaroons.Macaroon.deserialize(login_token)
auth_api = self.hs.get_auth()
auth_api.validate_macaroon(macaroon, "login", [auth_api.verify_expiry])
auth_api.validate_macaroon(macaroon, "login", True)
return self._get_user_from_macaroon(macaroon)
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)


@@ -436,14 +436,14 @@ class MessageHandler(BaseHandler):
for c in current_state.values()
]
private_user_data = []
account_data = []
tags = tags_by_room.get(event.room_id)
if tags:
private_user_data.append({
account_data.append({
"type": "m.tag",
"content": {"tags": tags},
})
d["private_user_data"] = private_user_data
d["account_data"] = account_data
except:
logger.exception("Failed to get snapshot")
@@ -498,14 +498,14 @@ class MessageHandler(BaseHandler):
user_id, room_id, pagin_config, membership, member_event_id, is_guest
)
private_user_data = []
account_data = []
tags = yield self.store.get_tags_for_room(user_id, room_id)
if tags:
private_user_data.append({
account_data.append({
"type": "m.tag",
"content": {"tags": tags},
})
result["private_user_data"] = private_user_data
result["account_data"] = account_data
defer.returnValue(result)
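
The account_data list built here wraps the user's room tags in an m.tag
event. For a user who has tagged the room as a favourite, it would look
roughly like this (tag name and order invented):

    account_data = [
        {
            "type": "m.tag",
            "content": {"tags": {"u.favourite": {"order": 0.1}}},
        },
    ]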


@@ -17,13 +17,14 @@ from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.constants import Membership
from synapse.api.constants import Membership, EventTypes
from synapse.api.filtering import Filter
from synapse.api.errors import SynapseError
from synapse.events.utils import serialize_event
from unpaddedbase64 import decode_base64, encode_base64
import itertools
import logging
@@ -79,6 +80,9 @@ class SearchHandler(BaseHandler):
# What to order results by (impacts whether pagination can be done)
order_by = room_cat.get("order_by", "rank")
# Return the current state of the rooms?
include_state = room_cat.get("include_state", False)
# Include context around each event?
event_context = room_cat.get(
"event_context", None
@@ -96,6 +100,10 @@ class SearchHandler(BaseHandler):
after_limit = int(event_context.get(
"after_limit", 5
))
# Return the historic display name and avatar for the senders
# of the events?
include_profile = bool(event_context.get("include_profile", False))
except KeyError:
raise SynapseError(400, "Invalid search query")
@@ -269,6 +277,33 @@ class SearchHandler(BaseHandler):
"room_key", res["end"]
).to_string()
if include_profile:
senders = set(
ev.sender
for ev in itertools.chain(
res["events_before"], [event], res["events_after"]
)
)
if res["events_after"]:
last_event_id = res["events_after"][-1].event_id
else:
last_event_id = event.event_id
state = yield self.store.get_state_for_event(
last_event_id,
types=[(EventTypes.Member, sender) for sender in senders]
)
res["profile_info"] = {
s.state_key: {
"displayname": s.content.get("displayname", None),
"avatar_url": s.content.get("avatar_url", None),
}
for s in state.values()
if s.type == EventTypes.Member and s.state_key in senders
}
contexts[event.event_id] = res
else:
contexts = {}
@@ -287,6 +322,18 @@ class SearchHandler(BaseHandler):
for e in context["events_after"]
]
state_results = {}
if include_state:
rooms = set(e.room_id for e in allowed_events)
for room_id in rooms:
state = yield self.state_handler.get_current_state(room_id)
state_results[room_id] = state.values()
# We're now about to serialize the events. We should not make any
# blocking calls after this. Otherwise the 'age' will be wrong
results = {
e.event_id: {
"rank": rank_map[e.event_id],
@@ -303,6 +350,12 @@ class SearchHandler(BaseHandler):
"count": len(results)
}
if state_results:
rooms_cat_res["state"] = {
room_id: [serialize_event(e, time_now) for e in state]
for room_id, state in state_results.items()
}
if room_groups and "room_id" in group_keys:
rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups


@@ -51,7 +51,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
"timeline", # TimelineBatch
"state", # dict[(str, str), FrozenEvent]
"ephemeral",
"private_user_data",
"account_data",
])):
__slots__ = []
@@ -63,7 +63,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
self.timeline
or self.state
or self.ephemeral
or self.private_user_data
or self.account_data
)
@@ -71,7 +71,7 @@ class ArchivedSyncResult(collections.namedtuple("JoinedSyncResult", [
"room_id", # str
"timeline", # TimelineBatch
"state", # dict[(str, str), FrozenEvent]
"private_user_data",
"account_data",
])):
__slots__ = []
@@ -82,7 +82,7 @@ class ArchivedSyncResult(collections.namedtuple("JoinedSyncResult", [
return bool(
self.timeline
or self.state
or self.private_user_data
or self.account_data
)
@@ -261,20 +261,20 @@ class SyncHandler(BaseHandler):
timeline=batch,
state=current_state,
ephemeral=ephemeral_by_room.get(room_id, []),
private_user_data=self.private_user_data_for_room(
account_data=self.account_data_for_room(
room_id, tags_by_room
),
))
def private_user_data_for_room(self, room_id, tags_by_room):
private_user_data = []
def account_data_for_room(self, room_id, tags_by_room):
account_data = []
tags = tags_by_room.get(room_id)
if tags is not None:
private_user_data.append({
account_data.append({
"type": "m.tag",
"content": {"tags": tags},
})
return private_user_data
return account_data
@defer.inlineCallbacks
def ephemeral_by_room(self, sync_config, now_token, since_token=None):
@@ -357,7 +357,7 @@ class SyncHandler(BaseHandler):
room_id=room_id,
timeline=batch,
state=leave_state,
private_user_data=self.private_user_data_for_room(
account_data=self.account_data_for_room(
room_id, tags_by_room
),
))
@@ -412,7 +412,7 @@ class SyncHandler(BaseHandler):
tags_by_room = yield self.store.get_updated_tags(
sync_config.user.to_string(),
since_token.private_user_data_key,
since_token.account_data_key,
)
joined = []
@@ -468,7 +468,7 @@ class SyncHandler(BaseHandler):
),
state=state,
ephemeral=ephemeral_by_room.get(room_id, []),
private_user_data=self.private_user_data_for_room(
account_data=self.account_data_for_room(
room_id, tags_by_room
),
)
@@ -605,7 +605,7 @@ class SyncHandler(BaseHandler):
timeline=batch,
state=state,
ephemeral=ephemeral_by_room.get(room_id, []),
private_user_data=self.private_user_data_for_room(
account_data=self.account_data_for_room(
room_id, tags_by_room
),
)
@@ -653,7 +653,7 @@ class SyncHandler(BaseHandler):
room_id=leave_event.room_id,
timeline=batch,
state=state_events_delta,
private_user_data=self.private_user_data_for_room(
account_data=self.account_data_for_room(
leave_event.room_id, tags_by_room
),
)


@@ -190,11 +190,11 @@ class MatrixFederationHttpClient(object):
if retries_left and not timeout:
if long_retries:
delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
delay = max(delay, 60)
delay = min(delay, 60)
delay *= random.uniform(0.8, 1.4)
else:
delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
delay = max(delay, 2)
delay = min(delay, 2)
delay *= random.uniform(0.8, 1.4)
yield sleep(delay)
@@ -302,7 +302,7 @@ class MatrixFederationHttpClient(object):
defer.returnValue(json.loads(body))
@defer.inlineCallbacks
def post_json(self, destination, path, data={}):
def post_json(self, destination, path, data={}, long_retries=True):
""" Sends the specifed json data using POST
Args:
@@ -311,6 +311,8 @@
path (str): The HTTP path.
data (dict): A dict containing the data that will be used as
the request body. This will be encoded as JSON.
long_retries (bool): A boolean that indicates whether we should
retry for a short or long time.
Returns:
Deferred: Succeeds when we get a 2xx HTTP response. The result
@@ -330,6 +332,7 @@
path.encode("ascii"),
body_callback=body_callback,
headers_dict={"Content-Type": ["application/json"]},
long_retries=True,
)
if 200 <= response.code < 300:
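
The max() to min() change above is the retry-timer fix from the changelog:
max(delay, 60) acted as a 60 second floor while the 4**n term kept growing
unbounded, so a resurrected server could wait hours before being retried. A
standalone sketch of the corrected schedule (the MAX_* values are assumptions
for illustration; the real constants are defined elsewhere in this module):

    import random

    MAX_LONG_RETRIES = 10  # assumed value
    MAX_SHORT_RETRIES = 3  # assumed value

    def retry_delay(retries_left, long_retries):
        if long_retries:
            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
            delay = min(delay, 60)  # cap, not floor: wait at most ~60s
        else:
            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
            delay = min(delay, 2)
        return delay * random.uniform(0.8, 1.4)  # jitter between retriers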


@@ -58,9 +58,18 @@ class LoginRestServlet(ClientV1RestServlet):
flows.append({"type": LoginRestServlet.SAML2_TYPE})
if self.cas_enabled:
flows.append({"type": LoginRestServlet.CAS_TYPE})
# While it's valid for us to advertise this login type generally,
# synapse currently only gives out these tokens as part of the
# CAS login flow.
# Generally we don't want to advertise login flows that clients
# don't know how to implement, since they (currently) will always
# fall back to the fallback API if they don't understand one of the
# login flow types returned.
flows.append({"type": LoginRestServlet.TOKEN_TYPE})
if self.password_enabled:
flows.append({"type": LoginRestServlet.PASS_TYPE})
flows.append({"type": LoginRestServlet.TOKEN_TYPE})
return (200, {"flows": flows})
def on_OPTIONS(self, request):
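
With that change, a homeserver with only password login advertises a single
flow, and the token flow appears only alongside CAS, the one place synapse
actually hands out login tokens. Assuming the usual m.login.* constant
values, the handler would return roughly:

    # password login only:
    response = (200, {"flows": [{"type": "m.login.password"}]})

    # CAS enabled as well:
    response = (200, {"flows": [
        {"type": "m.login.cas"},
        {"type": "m.login.token"},
        {"type": "m.login.password"},
    ]})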


@@ -22,7 +22,7 @@ from synapse.handlers.sync import SyncConfig
from synapse.types import StreamToken
from synapse.events import FrozenEvent
from synapse.events.utils import (
serialize_event, format_event_for_client_v2_without_event_id,
serialize_event, format_event_for_client_v2_without_room_id,
)
from synapse.api.filtering import FilterCollection
from ._base import client_v2_pattern
@@ -148,9 +148,9 @@ class SyncRestServlet(RestServlet):
sync_result.presence, filter, time_now
),
"rooms": {
"joined": joined,
"invited": invited,
"archived": archived,
"join": joined,
"invite": invited,
"leave": archived,
},
"next_batch": sync_result.next_batch.to_string(),
}
@@ -207,7 +207,7 @@ class SyncRestServlet(RestServlet):
for room in rooms:
invite = serialize_event(
room.invite, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_event_id,
event_format=format_event_for_client_v2_without_room_id,
)
invited_state = invite.get("unsigned", {}).pop("invite_room_state", [])
invited_state.append(invite)
@@ -256,7 +256,13 @@
:return: the room, encoded in our response format
:rtype: dict[str, object]
"""
event_map = {}
def serialize(event):
# TODO(mjark): Respect formatting requirements in the filter.
return serialize_event(
event, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_room_id,
)
state_dict = room.state
timeline_events = filter.filter_room_timeline(room.timeline.events)
@@ -264,37 +270,22 @@
state_dict, timeline_events)
state_events = filter.filter_room_state(state_dict.values())
state_event_ids = []
for event in state_events:
# TODO(mjark): Respect formatting requirements in the filter.
event_map[event.event_id] = serialize_event(
event, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_event_id,
)
state_event_ids.append(event.event_id)
timeline_event_ids = []
for event in timeline_events:
# TODO(mjark): Respect formatting requirements in the filter.
event_map[event.event_id] = serialize_event(
event, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_event_id,
)
timeline_event_ids.append(event.event_id)
serialized_state = [serialize(e) for e in state_events]
serialized_timeline = [serialize(e) for e in timeline_events]
private_user_data = filter.filter_room_private_user_data(
room.private_user_data
account_data = filter.filter_room_account_data(
room.account_data
)
result = {
"event_map": event_map,
"timeline": {
"events": timeline_event_ids,
"events": serialized_timeline,
"prev_batch": room.timeline.prev_batch.to_string(),
"limited": room.timeline.limited,
},
"state": {"events": state_event_ids},
"private_user_data": {"events": private_user_data},
"state": {"events": serialized_state},
"account_data": {"events": account_data},
}
if joined:
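
The net effect on the wire (PR #389 in the changelog): state and timeline
previously carried event-id lists that pointed into a shared event_map; they
now carry the serialized events directly. Schematically, with a made-up
event:

    # before (v0.11.0)
    old_room = {
        "event_map": {"$e1:hs": {"type": "m.room.message"}},
        "timeline": {"events": ["$e1:hs"], "prev_batch": "t34-23_0", "limited": False},
        "state": {"events": []},
        "private_user_data": {"events": []},
    }

    # after (v0.11.1)
    new_room = {
        "timeline": {
            "events": [{"type": "m.room.message"}],
            "prev_batch": "t34-23_0",
            "limited": False,
        },
        "state": {"events": []},
        "account_data": {"events": []},
    }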

View file

@@ -81,7 +81,7 @@ class TagServlet(RestServlet):
max_id = yield self.store.add_tag_to_room(user_id, room_id, tag, body)
yield self.notifier.on_new_event(
"private_user_data_key", max_id, users=[user_id]
"account_data_key", max_id, users=[user_id]
)
defer.returnValue((200, {}))
@@ -95,7 +95,7 @@
max_id = yield self.store.remove_tag_from_room(user_id, room_id, tag)
yield self.notifier.on_new_event(
"private_user_data_key", max_id, users=[user_id]
"account_data_key", max_id, users=[user_id]
)
defer.returnValue((200, {}))


@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
SCHEMA_VERSION = 25
SCHEMA_VERSION = 26
dir_path = os.path.abspath(os.path.dirname(__file__))


@@ -0,0 +1,17 @@
/* Copyright 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
ALTER TABLE private_user_data_max_stream_id RENAME TO account_data_max_stream_id;


@@ -28,17 +28,17 @@ class TagsStore(SQLBaseStore):
def __init__(self, hs):
super(TagsStore, self).__init__(hs)
self._private_user_data_id_gen = StreamIdGenerator(
"private_user_data_max_stream_id", "stream_id"
self._account_data_id_gen = StreamIdGenerator(
"account_data_max_stream_id", "stream_id"
)
def get_max_private_user_data_stream_id(self):
def get_max_account_data_stream_id(self):
"""Get the current max stream id for the private user data stream
Returns:
A deferred int.
"""
return self._private_user_data_id_gen.get_max_token(self)
return self._account_data_id_gen.get_max_token(self)
@cached()
def get_tags_for_user(self, user_id):
@@ -144,12 +144,12 @@ class TagsStore(SQLBaseStore):
)
self._update_revision_txn(txn, user_id, room_id, next_id)
with (yield self._private_user_data_id_gen.get_next(self)) as next_id:
with (yield self._account_data_id_gen.get_next(self)) as next_id:
yield self.runInteraction("add_tag", add_tag_txn, next_id)
self.get_tags_for_user.invalidate((user_id,))
result = yield self._private_user_data_id_gen.get_max_token(self)
result = yield self._account_data_id_gen.get_max_token(self)
defer.returnValue(result)
@defer.inlineCallbacks
@@ -166,12 +166,12 @@ class TagsStore(SQLBaseStore):
txn.execute(sql, (user_id, room_id, tag))
self._update_revision_txn(txn, user_id, room_id, next_id)
with (yield self._private_user_data_id_gen.get_next(self)) as next_id:
with (yield self._account_data_id_gen.get_next(self)) as next_id:
yield self.runInteraction("remove_tag", remove_tag_txn, next_id)
self.get_tags_for_user.invalidate((user_id,))
result = yield self._private_user_data_id_gen.get_max_token(self)
result = yield self._account_data_id_gen.get_max_token(self)
defer.returnValue(result)
def _update_revision_txn(self, txn, user_id, room_id, next_id):
@@ -185,7 +185,7 @@ class TagsStore(SQLBaseStore):
"""
update_max_id_sql = (
"UPDATE private_user_data_max_stream_id"
"UPDATE account_data_max_stream_id"
" SET stream_id = ?"
" WHERE stream_id < ?"
)
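
Note that the UPDATE above only ever moves the max stream id forwards
(WHERE stream_id < ?), which is what lets get_max_account_data_stream_id
hand out a token safely. A much-simplified, single-writer sketch of the
StreamIdGenerator pattern used here (the real class coordinates concurrent
writers and Deferreds):

    from contextlib import contextmanager

    class SimpleStreamIdGenerator(object):
        # Hypothetical simplified stand-in for synapse's StreamIdGenerator.
        def __init__(self, current=0):
            self._current = current

        @contextmanager
        def get_next(self):
            self._current += 1
            # the caller persists its rows under this stream id
            yield self._current

        def get_max_token(self):
            # safe to expose once the write transaction has completed
            return self._current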


@@ -21,7 +21,7 @@ from synapse.handlers.presence import PresenceEventSource
from synapse.handlers.room import RoomEventSource
from synapse.handlers.typing import TypingNotificationEventSource
from synapse.handlers.receipts import ReceiptEventSource
from synapse.handlers.private_user_data import PrivateUserDataEventSource
from synapse.handlers.account_data import AccountDataEventSource
class EventSources(object):
@@ -30,7 +30,7 @@ class EventSources(object):
"presence": PresenceEventSource,
"typing": TypingNotificationEventSource,
"receipt": ReceiptEventSource,
"private_user_data": PrivateUserDataEventSource,
"account_data": AccountDataEventSource,
}
def __init__(self, hs):
@@ -54,8 +54,8 @@ class EventSources(object):
receipt_key=(
yield self.sources["receipt"].get_current_key()
),
private_user_data_key=(
yield self.sources["private_user_data"].get_current_key()
account_data_key=(
yield self.sources["account_data"].get_current_key()
),
)
defer.returnValue(token)


@@ -103,7 +103,7 @@ class StreamToken(
"presence_key",
"typing_key",
"receipt_key",
"private_user_data_key",
"account_data_key",
))
):
_SEPARATOR = "_"
@@ -138,7 +138,7 @@
or (int(other.presence_key) < int(self.presence_key))
or (int(other.typing_key) < int(self.typing_key))
or (int(other.receipt_key) < int(self.receipt_key))
or (int(other.private_user_data_key) < int(self.private_user_data_key))
or (int(other.account_data_key) < int(self.account_data_key))
)
def copy_and_advance(self, key, new_value):
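
Since StreamToken serializes its fields joined by _SEPARATOR, the rename only
changes what the final component counts; token strings keep the same shape. A
rough illustration with invented values:

    fields = ("s72594", "233", "5", "20", "7")
    # room_key, presence_key, typing_key, receipt_key, account_data_key
    token_string = "_".join(fields)  # "s72594_233_5_20_7"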