From f08c33e83482d6cd92cfad6c6e69355a62d6e1e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:29:00 +0100 Subject: [PATCH 01/21] Fix port_from_sqlite_to_postgres after changes to storage layer. --- scripts/port_from_sqlite_to_postgres.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/scripts/port_from_sqlite_to_postgres.py b/scripts/port_from_sqlite_to_postgres.py index f98342db5..da2511309 100755 --- a/scripts/port_from_sqlite_to_postgres.py +++ b/scripts/port_from_sqlite_to_postgres.py @@ -106,7 +106,7 @@ class Store(object): try: txn = conn.cursor() return func( - LoggingTransaction(txn, desc, self.database_engine), + LoggingTransaction(txn, desc, self.database_engine, []), *args, **kwargs ) except self.database_engine.module.DatabaseError as e: @@ -378,9 +378,7 @@ class Porter(object): for i, row in enumerate(rows): rows[i] = tuple( - self.postgres_store.database_engine.encode_parameter( - conv(j, col) - ) + conv(j, col) for j, col in enumerate(row) if j > 0 ) From 126d562576ae41945ae3607238c88ecbcb527b9d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:29:37 +0100 Subject: [PATCH 02/21] Bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index c89f444f4..18b8ff775 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.9.0" +__version__ = "0.9.0-r1" From 9a9386226a8d522f105c8c3965e8e17304e03f60 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:37:54 +0100 Subject: [PATCH 03/21] Mention Ivan Shapovalov contrib/systemd --- AUTHORS.rst | 3 +++ CHANGES.rst | 2 ++ 2 files changed, 5 insertions(+) diff --git a/AUTHORS.rst b/AUTHORS.rst index 8396e535e..3a457cd9f 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -35,3 +35,6 @@ Turned to Dust Brabo * Installation instruction fixes + +Ivan Shapovalov + * contrib/systemd: a sample systemd unit file and a logger configuration diff --git a/CHANGES.rst b/CHANGES.rst index f0bb973dc..65970a89c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -10,6 +10,8 @@ General: * Fix race in caches that occasionally caused some presence updates to be dropped - SYN-369. * Check server name has not changed on restart. +* Add a sample systemd unit file and a logger configuration in + contrib/systemd. Contributed Ivan Shapovalov. Federation: From 326121aec4b48d04f894a2abc567fffc1014a732 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:38:29 +0100 Subject: [PATCH 04/21] UPGRADES: s/v0.x.x/v0.9.0 --- UPGRADE.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index ab327a813..d98460f64 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -1,4 +1,4 @@ -Upgrading to v0.x.x +Upgrading to v0.9.0 =================== Application services have had a breaking API change in this version. 
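A note on patch 01: the port script broke because ``LoggingTransaction`` in the
storage layer gained a fourth constructor argument, a list used to collect
callbacks that should run once the transaction has committed (the
``txn.call_after(...)`` context line in patch 21's events.py hunk is the
consumer side of this). The port script passes ``[]`` because it has no use for
such callbacks. A minimal sketch of a wrapper with that shape, assuming the
real class in ``synapse/storage/_base.py`` looks roughly like this::

    class LoggingTransaction(object):
        """Wraps a database cursor and records callbacks to run after
        the surrounding transaction has committed."""

        def __init__(self, txn, name, database_engine, after_callbacks):
            self.txn = txn
            self.name = name
            self.database_engine = database_engine
            self.after_callbacks = after_callbacks

        def call_after(self, callback, *args):
            # Run e.g. cache invalidations only after commit, so a
            # rolled-back write never leaves stale side effects behind.
            self.after_callbacks.append((callback, args))

        def execute(self, sql, *args):
            return self.txn.execute(sql, *args)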
From 4fa0f53521546c5be48a3a94815203aff84c2639 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:45:58 +0100 Subject: [PATCH 05/21] Support reading directly from a config --- scripts/port_from_sqlite_to_postgres.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/port_from_sqlite_to_postgres.py b/scripts/port_from_sqlite_to_postgres.py index da2511309..e7ed4c309 100755 --- a/scripts/port_from_sqlite_to_postgres.py +++ b/scripts/port_from_sqlite_to_postgres.py @@ -723,6 +723,9 @@ if __name__ == "__main__": postgres_config = yaml.safe_load(args.postgres_config) + if "database" in postgres_config: + postgres_config = postgres_config["database"] + if "name" not in postgres_config: sys.stderr.write("Malformed database config: no 'name'") sys.exit(2) From 3a42f32134c305c1ba7947c53c23839f5dc3985f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:47:48 +0100 Subject: [PATCH 06/21] Reword port script usage --- docs/postgres.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/postgres.rst b/docs/postgres.rst index 2dcc3caf9..76a76cbb5 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -82,8 +82,8 @@ complete, restart synapse. For instance:: cp homeserver.db homeserver.db.snapshot ./synctl start -Assuming your database config file (as described in the section *Synapse -config*) is named ``database_config.yaml`` and the SQLite snapshot is at +Assuming your new config file (as described in the section *Synapse config*) +is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at ``homeserver.db.snapshot`` then simply run:: python scripts/port_from_sqlite_to_postgres.py \ From 22a7ba8b22082503f2521c135168e1fb644efa60 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 13:50:03 +0100 Subject: [PATCH 07/21] Actually rename all isntances --- docs/postgres.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/postgres.rst b/docs/postgres.rst index 76a76cbb5..19d839111 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -88,7 +88,7 @@ is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at python scripts/port_from_sqlite_to_postgres.py \ --sqlite-database homeserver.db.snapshot \ - --postgres-config database_config.yaml + --postgres-config homeserver-postgres.yaml The flag ``--curses`` displays a coloured curses progress UI. From 6538d445e885a8a163f5fcc792b11a9d9b890de3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 May 2015 15:55:11 +0100 Subject: [PATCH 08/21] Make the timestamps in server_keys_json bigints --- .../delta/18/server_keys_bigger_ints.sql | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 synapse/storage/schema/delta/18/server_keys_bigger_ints.sql diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql new file mode 100644 index 000000000..ae866475a --- /dev/null +++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql @@ -0,0 +1,32 @@ +/* Copyright 2015 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +CREATE TABLE IF NOT EXISTS new_server_keys_json ( + server_name TEXT NOT NULL, -- Server name. + key_id TEXT NOT NULL, -- Requested key id. + from_server TEXT NOT NULL, -- Which server the keys were fetched from. + ts_added_ms BIGINT UNSIGNED NOT NULL, -- When the keys were fetched + ts_valid_until_ms BIGINT UNSIGNED NOT NULL, -- When this version of the keys exipires. + key_json bytea NOT NULL, -- JSON certificate for the remote server. + CONSTRAINT uniqueness UNIQUE (server_name, key_id, from_server) +); + +INSERT INTO new_server_keys_json + SELECT server_name, key_id, from_server,ts_added_ms, ts_valid_until_ms, key_json FROM server_keys_json ; + +DROP TABLE new_server_keys_json; + +ALTER TABLE new_server_keys_json RENAME TO server_keys_json; From 84e1cacea4403d9ffde1c9dcd36eb625a37113af Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 May 2015 15:58:06 +0100 Subject: [PATCH 09/21] Bump schema version --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 0cc14fb69..7cb91a0be 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -51,7 +51,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 17 +SCHEMA_VERSION = 18 dir_path = os.path.abspath(os.path.dirname(__file__)) From b0f71db3ffe3c65cd77248771ffb67774ecfadd5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 May 2015 15:59:51 +0100 Subject: [PATCH 10/21] Remove unsigned --- synapse/storage/schema/delta/18/server_keys_bigger_ints.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql index ae866475a..d2c70d0aa 100644 --- a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql +++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql @@ -18,8 +18,8 @@ CREATE TABLE IF NOT EXISTS new_server_keys_json ( server_name TEXT NOT NULL, -- Server name. key_id TEXT NOT NULL, -- Requested key id. from_server TEXT NOT NULL, -- Which server the keys were fetched from. - ts_added_ms BIGINT UNSIGNED NOT NULL, -- When the keys were fetched - ts_valid_until_ms BIGINT UNSIGNED NOT NULL, -- When this version of the keys exipires. + ts_added_ms BIGINT NOT NULL, -- When the keys were fetched + ts_valid_until_ms BIGINT NOT NULL, -- When this version of the keys exipires. key_json bytea NOT NULL, -- JSON certificate for the remote server. 
CONSTRAINT uniqueness UNIQUE (server_name, key_id, from_server) ); From 9d36eb4eab036d9793132892b56c7fad4e585984 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 May 2015 16:01:55 +0100 Subject: [PATCH 11/21] Rename unique constraint --- synapse/storage/schema/delta/18/server_keys_bigger_ints.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql index d2c70d0aa..566470ac6 100644 --- a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql +++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql @@ -21,7 +21,7 @@ CREATE TABLE IF NOT EXISTS new_server_keys_json ( ts_added_ms BIGINT NOT NULL, -- When the keys were fetched ts_valid_until_ms BIGINT NOT NULL, -- When this version of the keys exipires. key_json bytea NOT NULL, -- JSON certificate for the remote server. - CONSTRAINT uniqueness UNIQUE (server_name, key_id, from_server) + CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) ); INSERT INTO new_server_keys_json From 1c7912751ea53e115722bb81383734f06d2aa69d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 May 2015 16:04:32 +0100 Subject: [PATCH 12/21] Drop the old table not the new table --- synapse/storage/schema/delta/18/server_keys_bigger_ints.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql index 566470ac6..c0b0fdfb6 100644 --- a/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql +++ b/synapse/storage/schema/delta/18/server_keys_bigger_ints.sql @@ -27,6 +27,6 @@ CREATE TABLE IF NOT EXISTS new_server_keys_json ( INSERT INTO new_server_keys_json SELECT server_name, key_id, from_server,ts_added_ms, ts_valid_until_ms, key_json FROM server_keys_json ; -DROP TABLE new_server_keys_json; +DROP TABLE server_keys_json; ALTER TABLE new_server_keys_json RENAME TO server_keys_json; From 0cd1401f8ddaaef56301940b4d81c2b626380389 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 May 2015 16:11:51 +0100 Subject: [PATCH 13/21] Bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 18b8ff775..bde6f7777 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.9.0-r1" +__version__ = "0.9.0-r2" From d79ffa1898ab78f0d9be77865d5402697171bd2a Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 9 May 2015 14:45:37 +0100 Subject: [PATCH 14/21] typo --- docs/application_services.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/application_services.rst b/docs/application_services.rst index a57bae619..7e87ac9ad 100644 --- a/docs/application_services.rst +++ b/docs/application_services.rst @@ -20,7 +20,7 @@ The format of the AS configuration file is as follows: url: as_token: - hs_token: + hs_token: sender_localpart: namespaces: users: # List of users we're interested in From 22d2f498fa6004e36cc15a56919a751ad14dbfaf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 10 May 2015 10:50:51 +0100 Subject: [PATCH 15/21] Fix push rule bug: can't insert bool into small int column --- synapse/storage/push_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index ee7718d5e..da23c1a11 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -211,7 +211,7 @@ class PushRuleStore(SQLBaseStore): yield self._simple_upsert( PushRuleEnableTable.table_name, {'user_name': user_name, 'rule_id': rule_id}, - {'enabled': enabled}, + {'enabled': 1 if enabled else 0}, desc="set_push_rule_enabled", ) From de875418621c1d604507fbd095cf666ee5cac742 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 10 May 2015 10:51:08 +0100 Subject: [PATCH 16/21] Bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index bde6f7777..705ea581d 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.9.0-r2" +__version__ = "0.9.0-r3" From 3c224f4d0ef653685cc8d20fd48d9ad62d7050e3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 11 May 2015 11:00:06 +0100 Subject: [PATCH 17/21] SYN-376: Add script for converting server keys from v1 to v2 --- scripts-dev/convert_server_keys.py | 113 +++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 scripts-dev/convert_server_keys.py diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py new file mode 100644 index 000000000..024ddcdbd --- /dev/null +++ b/scripts-dev/convert_server_keys.py @@ -0,0 +1,113 @@ +import psycopg2 +import yaml +import sys +import json +import time +import hashlib +from syutil.base64util import encode_base64 +from syutil.crypto.signing_key import read_signing_keys +from syutil.crypto.jsonsign import sign_json +from syutil.jsonutil import encode_canonical_json + + +def select_v1_keys(connection): + cursor = connection.cursor() + cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys") + rows = cursor.fetchall() + cursor.close() + results = {} + for server_name, key_id, verify_key in rows: + results.setdefault(server_name, {})[key_id] = encode_base64(verify_key) + return results + + +def select_v1_certs(connection): + cursor = connection.cursor() + cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates") + rows = cursor.fetchall() + cursor.close() + results = {} + for server_name, tls_certificate in rows: + results[server_name] = tls_certificate + return results + + +def select_v2_json(connection): + cursor = connection.cursor() + cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json") + rows = cursor.fetchall() + cursor.close() + results = {} + for server_name, key_id, key_json in rows: + results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8")) + return results + + +def convert_v1_to_v2(server_name, valid_until, keys, certificate): + return { + "old_verify_keys": {}, + "server_name": server_name, + "verify_keys": keys, + "valid_until_ts": valid_until, + "tls_fingerprints": [fingerprint(certificate)], + } + + +def fingerprint(certificate): + finger = hashlib.sha256(certificate) + return {"sha256": encode_base64(finger.digest())} + + +def rows_v2(server, json): + valid_until = json["valid_until_ts"] + key_json = encode_canonical_json(json) + for key_id in json["verify_keys"]: + yield (server, key_id, "-", valid_until, valid_until, buffer(key_json)) + + +def main(): + config = yaml.load(open(sys.argv[1])) + valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24 + + server_name = config["server_name"] + signing_key = read_signing_keys(open(config["signing_key_path"]))[0] + + database = config["database"] + assert database["name"] == "psycopg2", "Can only convert for postgresql" + args = database["args"] + args.pop("cp_max") + args.pop("cp_min") + connection = psycopg2.connect(**args) + keys = select_v1_keys(connection) + certificates = select_v1_certs(connection) + json = select_v2_json(connection) + + result = {} + for server in keys: + if not server in json: + v2_json = convert_v1_to_v2( + server, valid_until, keys[server], certificates[server] + ) + v2_json = sign_json(v2_json, server_name, signing_key) + result[server] = v2_json + + yaml.safe_dump(result, sys.stdout, default_flow_style=False) + + rows = list( + row for server, json in result.items() + for row in rows_v2(server, json) + ) + + cursor = connection.cursor() + 
cursor.executemany( + "INSERT INTO server_keys_json (" + " server_name, key_id, from_server," + " ts_added_ms, ts_valid_until_ms, key_json" + ") VALUES (%s, %s, %s, %s, %s, %s)", + rows + ) + connection.commit() + + +if __name__ == '__main__': + main() From cd525c0f5a8a973626c81ccd158bf0f9586dc320 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 May 2015 10:58:36 +0100 Subject: [PATCH 18/21] push_rules table expects an 'id' field --- synapse/storage/_base.py | 1 + synapse/storage/push_rule.py | 25 ++++++++++++------------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ee5587c72..9e348590b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -308,6 +308,7 @@ class SQLBaseStore(object): self._state_groups_id_gen = IdGenerator("state_groups", "id", self) self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self) self._pushers_id_gen = IdGenerator("pushers", "id", self) + self._push_rule_id_gen = IdGenerator("push_rules", "id", self) def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index da23c1a11..6811566e8 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -120,6 +120,7 @@ class PushRuleStore(SQLBaseStore): del new_rule['after'] new_rule['priority_class'] = priority_class new_rule['user_name'] = user_name + new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) # check if the priority before/after is free new_rule_priority = base_rule_priority @@ -153,12 +154,11 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, (user_name, priority_class, new_rule_priority)) - # now insert the new rule - sql = "INSERT INTO "+PushRuleTable.table_name+" (" - sql += ",".join(new_rule.keys())+") VALUES (" - sql += ", ".join(["?" for _ in new_rule.keys()])+")" - - txn.execute(sql, new_rule.values()) + self._simple_insert_txn( + txn, + table=PushRuleTable.table_name, + values=new_rule, + ) def _add_push_rule_highest_priority_txn(self, txn, user_name, priority_class, **kwargs): @@ -177,17 +177,16 @@ class PushRuleStore(SQLBaseStore): # and insert the new rule new_rule = copy.copy(kwargs) - if 'id' in new_rule: - del new_rule['id'] + new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) new_rule['user_name'] = user_name new_rule['priority_class'] = priority_class new_rule['priority'] = new_prio - sql = "INSERT INTO "+PushRuleTable.table_name+" (" - sql += ",".join(new_rule.keys())+") VALUES (" - sql += ", ".join(["?" for _ in new_rule.keys()])+")" - - txn.execute(sql, new_rule.values()) + self._simple_insert_txn( + txn, + table=PushRuleTable.table_name, + values=new_rule, + ) @defer.inlineCallbacks def delete_push_rule(self, user_name, rule_id): From b036596b75e20611d4122d70f5692af06a246318 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 May 2015 11:22:54 +0100 Subject: [PATCH 19/21] Prefer to use _simple_*. 
--- synapse/storage/push_rule.py | 83 +++++++++++++++++------------------- 1 file changed, 40 insertions(+), 43 deletions(-) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 6811566e8..34805e276 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -19,7 +19,6 @@ from ._base import SQLBaseStore, Table from twisted.internet import defer import logging -import copy import simplejson as json logger = logging.getLogger(__name__) @@ -28,46 +27,45 @@ logger = logging.getLogger(__name__) class PushRuleStore(SQLBaseStore): @defer.inlineCallbacks def get_push_rules_for_user(self, user_name): - sql = ( - "SELECT "+",".join(PushRuleTable.fields)+" " - "FROM "+PushRuleTable.table_name+" " - "WHERE user_name = ? " - "ORDER BY priority_class DESC, priority DESC" + rows = yield self._simple_select_list( + table=PushRuleTable.table_name, + keyvalues={ + "user_name": user_name, + }, + retcols=PushRuleTable.fields, ) - rows = yield self._execute("get_push_rules_for_user", None, sql, user_name) - dicts = [] - for r in rows: - d = {} - for i, f in enumerate(PushRuleTable.fields): - d[f] = r[i] - dicts.append(d) + rows.sort( + key=lambda row: (-int(row["priority_class"]), -int(row["priority"])) + ) - defer.returnValue(dicts) + defer.returnValue(rows) @defer.inlineCallbacks def get_push_rules_enabled_for_user(self, user_name): results = yield self._simple_select_list( - PushRuleEnableTable.table_name, - {'user_name': user_name}, - PushRuleEnableTable.fields, + table=PushRuleEnableTable.table_name, + keyvalues={ + 'user_name': user_name + }, + retcols=PushRuleEnableTable.fields, desc="get_push_rules_enabled_for_user", ) - defer.returnValue( - {r['rule_id']: False if r['enabled'] == 0 else True for r in results} - ) + defer.returnValue({ + r['rule_id']: False if r['enabled'] == 0 else True for r in results + }) @defer.inlineCallbacks def add_push_rule(self, before, after, **kwargs): - vals = copy.copy(kwargs) + vals = kwargs if 'conditions' in vals: vals['conditions'] = json.dumps(vals['conditions']) if 'actions' in vals: vals['actions'] = json.dumps(vals['actions']) + # we could check the rest of the keys are valid column names # but sqlite will do that anyway so I think it's just pointless. - if 'id' in vals: - del vals['id'] + vals.pop("id", None) if before or after: ret = yield self.runInteraction( @@ -87,37 +85,36 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(ret) def _add_push_rule_relative_txn(self, txn, user_name, **kwargs): - after = None - relative_to_rule = None - if 'after' in kwargs and kwargs['after']: - after = kwargs['after'] - relative_to_rule = after - if 'before' in kwargs and kwargs['before']: - relative_to_rule = kwargs['before'] + after = kwargs.pop("after", None) + relative_to_rule = kwargs.pop("before", after) - # get the priority of the rule we're inserting after/before - sql = ( - "SELECT priority_class, priority FROM ? " - "WHERE user_name = ? and rule_id = ?" 
% (PushRuleTable.table_name,) + res = self._simple_select_one_txn( + txn, + table=PushRuleTable.table_name, + keyvalues={ + "user_name": user_name, + "rule_id": relative_to_rule, + }, + retcols=["priority_class", "priority"], + allow_none=True, ) - txn.execute(sql, (user_name, relative_to_rule)) - res = txn.fetchall() + if not res: raise RuleNotFoundException( "before/after rule not found: %s" % (relative_to_rule,) ) - priority_class, base_rule_priority = res[0] + + priority_class = res["priority_class"] + base_rule_priority = res["priority"] if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class: raise InconsistentRuleException( "Given priority class does not match class of relative rule" ) - new_rule = copy.copy(kwargs) - if 'before' in new_rule: - del new_rule['before'] - if 'after' in new_rule: - del new_rule['after'] + new_rule = kwargs + new_rule.pop("before", None) + new_rule.pop("after", None) new_rule['priority_class'] = priority_class new_rule['user_name'] = user_name new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) @@ -176,7 +173,7 @@ class PushRuleStore(SQLBaseStore): new_prio = highest_prio + 1 # and insert the new rule - new_rule = copy.copy(kwargs) + new_rule = kwargs new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) new_rule['user_name'] = user_name new_rule['priority_class'] = priority_class From 4ef556f65027b060327b4907104cbe45d32ccac1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 May 2015 11:31:04 +0100 Subject: [PATCH 20/21] Bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 705ea581d..041e2151b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.9.0-r3" +__version__ = "0.9.0-r4" From 5002056b16549ba2a9175ab62897014ee58e73ff Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 12 May 2015 11:20:40 +0100 Subject: [PATCH 21/21] SYN-377: Make sure that the StreamIdGenerator.get_next.__exit__ is called from the main thread after the transaction completes, not from database thread before the transaction completes. 
--- synapse/storage/events.py | 38 +++++++++++++-------------- synapse/storage/util/id_generators.py | 12 ++++++--- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 38395c66a..626a5eaf6 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -23,6 +23,7 @@ from synapse.crypto.event_signing import compute_event_reference_hash from syutil.base64util import decode_base64 from syutil.jsonutil import encode_canonical_json +from contextlib import contextmanager import logging @@ -41,17 +42,25 @@ class EventsStore(SQLBaseStore): self.min_token -= 1 stream_ordering = self.min_token + if stream_ordering is None: + stream_ordering_manager = yield self._stream_id_gen.get_next(self) + else: + @contextmanager + def stream_ordering_manager(): + yield stream_ordering + try: - yield self.runInteraction( - "persist_event", - self._persist_event_txn, - event=event, - context=context, - backfilled=backfilled, - stream_ordering=stream_ordering, - is_new_state=is_new_state, - current_state=current_state, - ) + with stream_ordering_manager as stream_ordering: + yield self.runInteraction( + "persist_event", + self._persist_event_txn, + event=event, + context=context, + backfilled=backfilled, + stream_ordering=stream_ordering, + is_new_state=is_new_state, + current_state=current_state, + ) except _RollbackButIsFineException: pass @@ -95,15 +104,6 @@ class EventsStore(SQLBaseStore): # Remove the any existing cache entries for the event_id txn.call_after(self._invalidate_get_event_cache, event.event_id) - if stream_ordering is None: - with self._stream_id_gen.get_next_txn(txn) as stream_ordering: - return self._persist_event_txn( - txn, event, context, backfilled, - stream_ordering=stream_ordering, - is_new_state=is_new_state, - current_state=current_state, - ) - # We purposefully do this first since if we include a `current_state` # key, we *want* to update the `current_state_events` table if current_state: diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index e40eb8a8c..89d1643f1 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -78,14 +78,18 @@ class StreamIdGenerator(object): self._current_max = None self._unfinished_ids = deque() - def get_next_txn(self, txn): + @defer.inlineCallbacks + def get_next(self, store): """ Usage: - with stream_id_gen.get_next_txn(txn) as stream_id: + with yield stream_id_gen.get_next as stream_id: # ... persist event ... """ if not self._current_max: - self._get_or_compute_current_max(txn) + yield store.runInteraction( + "_compute_current_max", + self._get_or_compute_current_max, + ) with self._lock: self._current_max += 1 @@ -101,7 +105,7 @@ class StreamIdGenerator(object): with self._lock: self._unfinished_ids.remove(next_id) - return manager() + defer.returnValue(manager()) @defer.inlineCallbacks def get_max_token(self, store):
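One subtlety in patch 17's conversion script: the ``valid_until`` expression
``int(time.time() / (3600 * 24)) * 1000 * 3600 * 24`` rounds the current time
down to a UTC day boundary and then scales to milliseconds, so every key
converted on the same day gets an identical ``valid_until_ts``, presumably so
that re-running the script within a day produces identical signed JSON. A
worked check of the arithmetic, using an illustrative timestamp::

    day = 3600 * 24            # seconds per day
    now = 1431340000           # an example timestamp during 2015-05-11 UTC
    valid_until = int(now / day) * 1000 * day
    # int(1431340000 / 86400) -> 16566 whole days since the epoch, so
    # valid_until = 16566 * 86400 * 1000 = 1431302400000 milliseconds,
    # i.e. midnight UTC at the start of 2015-05-11.
    assert valid_until == 1431302400000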
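Patches 18 and 19 replace hand-assembled INSERT statements with the storage
layer's ``_simple_insert_txn`` helper, which builds the SQL from a dict of
column values; that is why ``new_rule`` no longer needs its keys joined by
hand. A sketch of such a helper, with the signature assumed from how the
patches call it (the real implementation also handles paramstyle conversion
for PostgreSQL)::

    def _simple_insert_txn(self, txn, table, values):
        # Build "INSERT INTO table (col, ...) VALUES (?, ...)" from a
        # dict, keeping columns and parameters in a matching order.
        cols = list(values.keys())
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
            table,
            ", ".join(cols),
            ", ".join("?" for _ in cols),
        )
        txn.execute(sql, [values[c] for c in cols])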
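Patch 21's ``yield``/``with`` combination is easy to misread, so to spell it
out: ``get_next()`` now returns a Deferred that fires with a context manager;
entering the manager allocates the next stream id, and leaving it, which
happens on the main thread only after ``runInteraction`` has finished, marks
the id as complete so ``get_max_token`` can advance past it. That ordering is
exactly the SYN-377 fix. A usage sketch modelled on the new ``persist_event``
code above (method and argument names are illustrative)::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def persist_example(self, event):
        # Resolve the context manager on the main thread, outside of
        # any database thread.
        stream_ordering_manager = yield self._stream_id_gen.get_next(self)

        # __exit__ fires here on the main thread once the interaction
        # has completed, never while the transaction might still be
        # rolled back on the database thread.
        with stream_ordering_manager as stream_ordering:
            yield self.runInteraction(
                "persist_example",
                self._persist_event_txn,
                event=event,
                stream_ordering=stream_ordering,
            )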