forked from MirrorHub/synapse

commit 9236136f3a
parent 0e8f5095c7

Make work in both Maria and SQLite. Fix tests

36 changed files with 296 additions and 160 deletions
@@ -20,6 +20,7 @@ sys.dont_write_bytecode = True
 from synapse.storage import (
     prepare_database, prepare_sqlite3_database, UpgradeDatabaseException,
 )
+from synapse.storage.engines import create_engine

 from synapse.server import HomeServer

@@ -376,7 +377,7 @@ def setup(config_options):
     if name in ["MySQLdb", "mysql.connector"]:
         db_config.setdefault("args", {}).update({
             "sql_mode": "TRADITIONAL",
-            "charset": "utf8",
+            "charset": "utf8mb4",
             "use_unicode": True,
         })
     elif name == "sqlite3":
@@ -388,6 +389,8 @@ def setup(config_options):
     else:
         raise RuntimeError("Unsupported database type '%s'" % (name,))

+    database_engine = create_engine(name)
+
     hs = SynapseHomeServer(
         config.server_name,
         domain_with_port=domain_with_port,
@@ -398,6 +401,7 @@ def setup(config_options):
         config=config,
         content_addr=config.content_addr,
         version_string=version_string,
+        database_engine=database_engine,
     )

     hs.create_resource_tree(
@@ -409,12 +413,14 @@ def setup(config_options):
     logger.info("Preparing database: %s...", db_name)

     try:
-        # with sqlite3.connect(db_name) as db_conn:
-        #     prepare_sqlite3_database(db_conn)
-        #     prepare_database(db_conn)
-        import mysql.connector
-        db_conn = mysql.connector.connect(**db_config.get("args", {}))
-        prepare_database(db_conn)
+        db_conn = database_engine.module.connect(**db_config.get("args", {}))
+
+        if name == "sqlite3":
+            prepare_sqlite3_database(db_conn)
+
+        prepare_database(db_conn, database_engine)
+
+        db_conn.commit()
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
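Note on the pattern above: create_engine(name) (introduced under synapse/storage/engines/ later in this diff) returns a thin wrapper whose .module attribute is the raw DB-API 2.0 driver module, so setup() can open a connection without caring which driver is in play. A minimal sketch of the idea; importlib here is an illustrative stand-in for the __import__ call the real module uses:

import importlib


class DatabaseEngine(object):
    # Wraps a DB-API 2.0 module so callers never import a driver directly.
    def __init__(self, database_module):
        self.module = database_module


def create_engine(name):
    # name is "sqlite3" or "mysql.connector"; anything else should raise,
    # mirroring the RuntimeError in setup() above.
    return DatabaseEngine(importlib.import_module(name))


engine = create_engine("sqlite3")
db_conn = engine.module.connect(":memory:")  # same call shape for both drivers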
@@ -77,9 +77,6 @@ class DataStore(RoomMemberStore, RoomStore,
         self.min_token_deferred = self._get_min_token()
         self.min_token = None

-        self._next_stream_id_lock = threading.Lock()
-        self._next_stream_id = int(hs.get_clock().time_msec()) * 1000
-
     def insert_client_ip(self, user, access_token, device_id, ip, user_agent):
         return self._simple_upsert(
             "user_ips",
@@ -127,19 +124,21 @@ class UpgradeDatabaseException(PrepareDatabaseException):
     pass


-def prepare_database(db_conn):
+def prepare_database(db_conn, database_engine):
     """Prepares a database for usage. Will either create all necessary tables
     or upgrade from an older schema version.
     """
     try:
         cur = db_conn.cursor()
-        version_info = _get_or_create_schema_state(cur)
+        version_info = _get_or_create_schema_state(cur, database_engine)

         if version_info:
             user_version, delta_files, upgraded = version_info
-            _upgrade_existing_database(cur, user_version, delta_files, upgraded)
+            _upgrade_existing_database(
+                cur, user_version, delta_files, upgraded, database_engine
+            )
         else:
-            _setup_new_database(cur)
+            _setup_new_database(cur, database_engine)

         # cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,))

@@ -150,7 +149,7 @@ def prepare_database(db_conn):
         raise


-def _setup_new_database(cur):
+def _setup_new_database(cur, database_engine):
     """Sets up the database by finding a base set of "full schemas" and then
     applying any necessary deltas.

@@ -210,7 +209,7 @@ def _setup_new_database(cur):
         executescript(cur, sql_loc)

     cur.execute(
-        _convert_param_style(
+        database_engine.convert_param_style(
             "REPLACE INTO schema_version (version, upgraded)"
             " VALUES (?,?)"
         ),
@@ -221,12 +220,13 @@ def _setup_new_database(cur):
         cur,
         current_version=max_current_ver,
         applied_delta_files=[],
-        upgraded=False
+        upgraded=False,
+        database_engine=database_engine,
     )


 def _upgrade_existing_database(cur, current_version, applied_delta_files,
-                               upgraded):
+                               upgraded, database_engine):
     """Upgrades an existing database.

     Delta files can either be SQL stored in *.sql files, or python modules
@@ -335,26 +335,22 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files,

         # Mark as done.
         cur.execute(
-            _convert_param_style(
+            database_engine.convert_param_style(
                 "INSERT INTO applied_schema_deltas (version, file)"
-                " VALUES (?,?)"
+                " VALUES (?,?)",
             ),
             (v, relative_path)
         )

         cur.execute(
-            _convert_param_style(
+            database_engine.convert_param_style(
                 "REPLACE INTO schema_version (version, upgraded)"
-                " VALUES (?,?)"
+                " VALUES (?,?)",
             ),
             (v, True)
         )


-def _convert_param_style(sql):
-    return sql.replace("?", "%s")
-
-
 def get_statements(f):
     statement_buffer = ""
     in_comment = False  # If we're in a /* ... */ style comment
@@ -409,7 +405,7 @@ def executescript(txn, schema_path):
         txn.execute(statement)


-def _get_or_create_schema_state(txn):
+def _get_or_create_schema_state(txn, database_engine):
     try:
         # Bluntly try creating the schema_version tables.
         schema_path = os.path.join(
@@ -426,7 +422,7 @@ def _get_or_create_schema_state(txn):

     if current_version:
         txn.execute(
-            _convert_param_style(
+            database_engine.convert_param_style(
                 "SELECT file FROM applied_schema_deltas WHERE version >= ?"
             ),
             (current_version,)
@@ -446,6 +442,8 @@ def prepare_sqlite3_database(db_conn):
     new. This only affects sqlite databases since they were the only ones
     supported at the time.
     """
+    import sqlite3
+
     with db_conn:
         schema_path = os.path.join(
             dir_path, "schema", "schema_version.sql",
@@ -466,7 +464,8 @@ def prepare_sqlite3_database(db_conn):
         db_conn.execute(
             _convert_param_style(
                 "REPLACE INTO schema_version (version, upgraded)"
-                " VALUES (?,?)"
+                " VALUES (?,?)",
+                sqlite3
             ),
             (row[0], False)
         )
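The convert_param_style calls threaded through prepare_database above exist because Python DB-API drivers disagree on placeholder syntax: sqlite3 uses qmark style ("?") while MySQL Connector/Python uses format style ("%s"). Synapse writes every query with "?" and lets the engine rewrite it. A small sketch of the Maria-side conversion; note it is a blunt textual replace, so a literal "?" inside SQL text would also be rewritten:

def convert_param_style(sql):
    # qmark -> format placeholders, as MariaEngine does below.
    return sql.replace("?", "%s")


assert convert_param_style(
    "SELECT file FROM applied_schema_deltas WHERE version >= ?"
) == "SELECT file FROM applied_schema_deltas WHERE version >= %s"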
@@ -29,6 +29,7 @@ import functools
 import simplejson as json
 import sys
 import time
+import threading


 logger = logging.getLogger(__name__)
@@ -118,19 +119,16 @@ def cached(max_entries=1000, num_args=1):
     return wrap


-def _convert_param_style(sql):
-    return sql.replace("?", "%s")
-
-
 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
     passed to the constructor. Adds logging and metrics to the .execute()
     method."""
-    __slots__ = ["txn", "name"]
+    __slots__ = ["txn", "name", "database_engine"]

-    def __init__(self, txn, name):
+    def __init__(self, txn, name, database_engine):
         object.__setattr__(self, "txn", txn)
         object.__setattr__(self, "name", name)
+        object.__setattr__(self, "database_engine", database_engine)

     def __getattr__(self, name):
         return getattr(self.txn, name)
@@ -142,7 +140,7 @@ class LoggingTransaction(object):
         # TODO(paul): Maybe use 'info' and 'debug' for values?
         sql_logger.debug("[SQL] {%s} %s", self.name, sql)

-        sql = _convert_param_style(sql)
+        sql = self.database_engine.convert_param_style(sql)

         try:
             if args and args[0]:
@@ -227,9 +225,14 @@ class SQLBaseStore(object):

         self._get_event_cache = LruCache(hs.config.event_cache_size)

+        self.database_engine = hs.database_engine
+
         # Pretend the getEventCache is just another named cache
         caches_by_name["*getEvent*"] = self._get_event_cache

+        self._next_stream_id_lock = threading.Lock()
+        self._next_stream_id = int(hs.get_clock().time_msec()) * 1000
+
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()

@@ -281,7 +284,10 @@ class SQLBaseStore(object):
             sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)
             transaction_logger.debug("[TXN START] {%s}", name)
             try:
-                return func(LoggingTransaction(txn, name), *args, **kwargs)
+                return func(
+                    LoggingTransaction(txn, name, self.database_engine),
+                    *args, **kwargs
+                )
             except:
                 logger.exception("[TXN FAIL] {%s}", name)
                 raise
@@ -588,7 +594,7 @@ class SQLBaseStore(object):
         select_sql = "SELECT %s FROM %s WHERE %s" % (
             ", ".join(retcols),
             table,
-            " AND ".join("%s = ?" % (k) for k in keyvalues)
+            " AND ".join("%s = ?" % (k,) for k in keyvalues)
         )

         txn.execute(select_sql, keyvalues.values())
@@ -836,6 +842,12 @@ class SQLBaseStore(object):
         result = txn.fetchone()
         return result[0] if result else None

+    def get_next_stream_id(self):
+        with self._next_stream_id_lock:
+            i = self._next_stream_id
+            self._next_stream_id += 1
+            return i
+

 class _RollbackButIsFineException(Exception):
     """ This exception is used to rollback a transaction without implying
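The get_next_stream_id() helper added to SQLBaseStore above replaces the copies removed from DataStore and StreamStore elsewhere in this diff. Seeding the counter with time_msec() * 1000 keeps ids increasing across restarts (assuming fewer than a thousand ids are handed out per millisecond of wall-clock time), and the threading.Lock makes it safe to call from the database thread pool. A self-contained sketch of the same scheme:

import threading
import time


class StreamIdGenerator(object):
    """Monotonic integer ids, safe across threads and (roughly) restarts."""

    def __init__(self):
        # Seed from wall-clock millis * 1000, as SQLBaseStore does above.
        self._next_stream_id = int(time.time() * 1000) * 1000
        self._lock = threading.Lock()

    def get_next_stream_id(self):
        with self._lock:
            i = self._next_stream_id
            self._next_stream_id += 1
            return i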
synapse/storage/engines/__init__.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .maria import MariaEngine
+from .sqlite3 import Sqlite3Engine
+
+
+SUPPORTED_MODULE = {
+    "sqlite3": Sqlite3Engine,
+    "mysql.connector": MariaEngine,
+}
+
+
+def create_engine(name):
+    engine_class = SUPPORTED_MODULE.get(name, None)
+
+    if engine_class:
+        module = __import__(name)
+        return engine_class(module)
+
+    raise RuntimeError(
+        "Unsupported database engine '%s'" % (name,)
+    )
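A usage sketch for the new engines package (the sqlite3 path is what the updated tests at the bottom of this diff exercise; the mysql.connector path requires that driver to be installed):

from synapse.storage.engines import create_engine

engine = create_engine("sqlite3")
conn = engine.module.connect(":memory:")   # engine.module is the DB-API module
sql = engine.convert_param_style(
    "SELECT ?"                             # unchanged for sqlite3, ? -> %s for Maria
)
print(conn.execute(sql, (1,)).fetchone())  # (1,)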
synapse/storage/engines/maria.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import types
+
+
+class MariaEngine(object):
+    def __init__(self, database_module):
+        self.module = database_module
+
+    def convert_param_style(self, sql):
+        return sql.replace("?", "%s")
+
+    def encode_parameter(self, param):
+        if isinstance(param, types.BufferType):
+            return str(param)
+        return param
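encode_parameter is the inverse concern of convert_param_style: it adapts bound values rather than SQL text. Elsewhere in this diff binary values are wrapped in buffer(...) for sqlite3's benefit, but MySQL Connector/Python cannot bind Python 2 buffer objects, so MariaEngine flattens them back to byte strings (types.BufferType is the Python 2 built-in buffer type; this code predates Python 3 support). Sketch with Python 2 semantics:

import types


def encode_parameter(param):
    # buffer() -> str so MySQL Connector/Python can bind it; everything
    # else passes through untouched (Sqlite3Engine is a pure pass-through).
    if isinstance(param, types.BufferType):
        return str(param)
    return param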
synapse/storage/engines/sqlite3.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class Sqlite3Engine(object):
+    def __init__(self, database_module):
+        self.module = database_module
+
+    def convert_param_style(self, sql):
+        return sql
+
+    def encode_parameter(self, param):
+        return param
@@ -64,7 +64,7 @@ class KeyStore(SQLBaseStore):
                 "fingerprint": fingerprint,
                 "from_server": from_server,
                 "ts_added_ms": time_now_ms,
-                "tls_certificate": tls_certificate_bytes,
+                "tls_certificate": buffer(tls_certificate_bytes),
             },
         )

@@ -113,6 +113,6 @@ class KeyStore(SQLBaseStore):
                 "key_id": "%s:%s" % (verify_key.alg, verify_key.version),
                 "from_server": from_server,
                 "ts_added_ms": time_now_ms,
-                "verify_key": verify_key.encode(),
+                "verify_key": buffer(verify_key.encode()),
             },
         )
@@ -42,6 +42,7 @@ class RegistrationStore(SQLBaseStore):
         yield self._simple_insert(
             "access_tokens",
             {
+                "id": self.get_next_stream_id(),
                 "user_id": user_id,
                 "token": token
             },
@@ -78,8 +79,11 @@ class RegistrationStore(SQLBaseStore):

         # it's possible for this to get a conflict, but only for a single user
         # since tokens are namespaced based on their user ID
-        txn.execute("INSERT INTO access_tokens(user_id, token) " +
-                    "VALUES (?,?)", [user_id, token])
+        txn.execute(
+            "INSERT INTO access_tokens(id, user_id, token)"
+            " VALUES (?,?,?)",
+            (self.get_next_stream_id(), user_id, token,)
+        )

     @defer.inlineCallbacks
     def get_user_by_id(self, user_id):
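Both RegistrationStore changes above supply the access_tokens id from the application side, because users.sql (further down) drops AUTO_INCREMENT from the column: MySQL spells it AUTO_INCREMENT and SQLite AUTOINCREMENT, so neither form is portable. A runnable sketch of the resulting insert, with itertools.count standing in for the get_next_stream_id() helper added to SQLBaseStore earlier:

import itertools
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE access_tokens("
    " id BIGINT PRIMARY KEY, user_id VARCHAR(255) NOT NULL,"
    " device_id VARCHAR(255), token VARCHAR(255) NOT NULL, UNIQUE(token))"
)

next_id = itertools.count(1)          # stands in for get_next_stream_id()
conn.execute(
    "INSERT INTO access_tokens(id, user_id, token) VALUES (?,?,?)",
    (next(next_id), "@user:example.com", "token1"),
)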
@@ -18,7 +18,7 @@ CREATE TABLE IF NOT EXISTS rejections(
     reason VARCHAR(255) NOT NULL,
     last_check VARCHAR(255) NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 -- Push notification endpoints that users have configured
 CREATE TABLE IF NOT EXISTS pushers (
@@ -37,7 +37,7 @@ CREATE TABLE IF NOT EXISTS pushers (
     last_success BIGINT,
     failing_since BIGINT,
     UNIQUE (app_id, pushkey)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS push_rules (
     id BIGINT PRIMARY KEY,
@@ -48,7 +48,7 @@ CREATE TABLE IF NOT EXISTS push_rules (
     conditions VARCHAR(255) NOT NULL,
     actions VARCHAR(255) NOT NULL,
     UNIQUE(user_name, rule_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name);

@@ -56,7 +56,7 @@ CREATE TABLE IF NOT EXISTS user_filters(
     user_id VARCHAR(255),
     filter_id BIGINT,
     filter_json BLOB
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters(
     user_id, filter_id

@@ -20,7 +20,7 @@ CREATE TABLE IF NOT EXISTS application_services(
     hs_token VARCHAR(255),
     sender VARCHAR(255),
     UNIQUE(token)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS application_services_regex(
     id BIGINT PRIMARY KEY,
@@ -28,4 +28,4 @@ CREATE TABLE IF NOT EXISTS application_services_regex(
     namespace INTEGER, /* enum[room_id|room_alias|user_id] */
     regex VARCHAR(255),
     FOREIGN KEY(as_id) REFERENCES application_services(id)
-) ENGINE = INNODB;
+) ;

@@ -4,6 +4,6 @@ CREATE TABLE IF NOT EXISTS push_rules_enable (
     rule_id VARCHAR(255) NOT NULL,
     enabled TINYINT,
     UNIQUE(user_name, rule_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name);
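The pattern repeated through the rest of these schema files, `) ENGINE = INNODB;` becoming `) ;`, is what makes one set of .sql files loadable by both databases: SQLite rejects MySQL's ENGINE table option as a syntax error, while MySQL parses a bare `) ;` fine and has defaulted to InnoDB since 5.5. A quick sqlite3 check of both forms:

import sqlite3

ddl = (
    "CREATE TABLE IF NOT EXISTS rejections("
    " event_id VARCHAR(255) NOT NULL,"
    " reason VARCHAR(255) NOT NULL,"
    " last_check VARCHAR(255) NOT NULL,"
    " UNIQUE (event_id)"
    ") ;"
)

conn = sqlite3.connect(":memory:")
conn.execute(ddl)  # accepted without the ENGINE clause
try:
    conn.execute(ddl.replace(") ;", ") ENGINE = INNODB;"))
except sqlite3.OperationalError as e:
    print("rejected as expected:", e)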
@@ -17,7 +17,7 @@ CREATE TABLE IF NOT EXISTS event_forward_extremities(
     event_id VARCHAR(255) NOT NULL,
     room_id VARCHAR(255) NOT NULL,
     UNIQUE (event_id, room_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS ev_extrem_room ON event_forward_extremities(room_id);
 CREATE INDEX IF NOT EXISTS ev_extrem_id ON event_forward_extremities(event_id);
@@ -27,7 +27,7 @@ CREATE TABLE IF NOT EXISTS event_backward_extremities(
     event_id VARCHAR(255) NOT NULL,
     room_id VARCHAR(255) NOT NULL,
     UNIQUE (event_id, room_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS ev_b_extrem_room ON event_backward_extremities(room_id);
 CREATE INDEX IF NOT EXISTS ev_b_extrem_id ON event_backward_extremities(event_id);
@@ -39,7 +39,7 @@ CREATE TABLE IF NOT EXISTS event_edges(
     room_id VARCHAR(255) NOT NULL,
     is_state BOOL NOT NULL,
     UNIQUE (event_id, prev_event_id, room_id, is_state)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS ev_edges_id ON event_edges(event_id);
 CREATE INDEX IF NOT EXISTS ev_edges_prev_id ON event_edges(prev_event_id);
@@ -49,7 +49,7 @@ CREATE TABLE IF NOT EXISTS room_depth(
     room_id VARCHAR(255) NOT NULL,
     min_depth INTEGER NOT NULL,
     UNIQUE (room_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS room_depth_room ON room_depth(room_id);

@@ -59,7 +59,7 @@ create TABLE IF NOT EXISTS event_destinations(
     destination VARCHAR(255) NOT NULL,
     delivered_ts BIGINT DEFAULT 0, -- or 0 if not delivered
     UNIQUE (event_id, destination)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS event_destinations_id ON event_destinations(event_id);

@@ -70,7 +70,7 @@ CREATE TABLE IF NOT EXISTS state_forward_extremities(
     type VARCHAR(255) NOT NULL,
     state_key VARCHAR(255) NOT NULL,
     UNIQUE (event_id, room_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS st_extrem_keys ON state_forward_extremities(
     room_id, type, state_key
@@ -83,7 +83,7 @@ CREATE TABLE IF NOT EXISTS event_auth(
     auth_id VARCHAR(255) NOT NULL,
     room_id VARCHAR(255) NOT NULL,
     UNIQUE (event_id, auth_id, room_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS evauth_edges_id ON event_auth(event_id);
 CREATE INDEX IF NOT EXISTS evauth_edges_auth_id ON event_auth(auth_id);

@@ -18,7 +18,7 @@ CREATE TABLE IF NOT EXISTS event_content_hashes (
     algorithm VARCHAR(255),
     hash BLOB,
     UNIQUE (event_id, algorithm)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS event_content_hashes_id ON event_content_hashes(event_id);

@@ -28,7 +28,7 @@ CREATE TABLE IF NOT EXISTS event_reference_hashes (
     algorithm VARCHAR(255),
     hash BLOB,
     UNIQUE (event_id, algorithm)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS event_reference_hashes_id ON event_reference_hashes(event_id);

@@ -39,7 +39,7 @@ CREATE TABLE IF NOT EXISTS event_signatures (
     key_id VARCHAR(255),
     signature BLOB,
     UNIQUE (event_id, signature_name, key_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures(event_id);

@@ -50,6 +50,6 @@ CREATE TABLE IF NOT EXISTS event_edge_hashes(
     algorithm VARCHAR(255),
     hash BLOB,
     UNIQUE (event_id, prev_event_id, algorithm)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS event_edge_hashes_id ON event_edge_hashes(event_id);
@@ -25,7 +25,7 @@ CREATE TABLE IF NOT EXISTS events(
     outlier BOOL NOT NULL,
     depth BIGINT DEFAULT 0 NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering);
 CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering);
@@ -38,7 +38,7 @@ CREATE TABLE IF NOT EXISTS event_json(
     internal_metadata BLOB NOT NULL,
     json BLOB NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);

@@ -50,7 +50,7 @@ CREATE TABLE IF NOT EXISTS state_events(
     state_key VARCHAR(255) NOT NULL,
     prev_state VARCHAR(255),
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id);
 CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type);
@@ -64,7 +64,7 @@ CREATE TABLE IF NOT EXISTS current_state_events(
     state_key VARCHAR(255) NOT NULL,
     UNIQUE (event_id),
     UNIQUE (room_id, type, state_key)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id);
 CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type);
@@ -77,7 +77,7 @@ CREATE TABLE IF NOT EXISTS room_memberships(
     room_id VARCHAR(255) NOT NULL,
     membership VARCHAR(255) NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id);
 CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id);
@@ -89,14 +89,14 @@ CREATE TABLE IF NOT EXISTS feedback(
     sender VARCHAR(255),
     room_id VARCHAR(255),
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS topics(
     event_id VARCHAR(255) NOT NULL,
     room_id VARCHAR(255) NOT NULL,
     topic VARCHAR(255) NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS topics_room_id ON topics(room_id);

@@ -113,12 +113,12 @@ CREATE TABLE IF NOT EXISTS rooms(
     room_id VARCHAR(255) PRIMARY KEY NOT NULL,
     is_public BOOL,
     creator VARCHAR(255)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS room_hosts(
     room_id VARCHAR(255) NOT NULL,
     host VARCHAR(255) NOT NULL,
     UNIQUE (room_id, host)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id);
@@ -19,7 +19,7 @@ CREATE TABLE IF NOT EXISTS server_tls_certificates(
     ts_added_ms BIGINT, -- When the certifcate was added.
     tls_certificate BLOB, -- DER encoded x509 certificate.
     UNIQUE (server_name, fingerprint)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS server_signature_keys(
     server_name VARCHAR(255), -- Server name.
@@ -28,4 +28,4 @@ CREATE TABLE IF NOT EXISTS server_signature_keys(
     ts_added_ms BIGINT, -- When the key was added.
     verify_key BLOB, -- NACL verification key.
     UNIQUE (server_name, key_id)
-) ENGINE = INNODB;
+) ;

@@ -21,7 +21,7 @@ CREATE TABLE IF NOT EXISTS local_media_repository (
     upload_name VARCHAR(255), -- The name the media was uploaded with.
     user_id VARCHAR(255), -- The user who uploaded the file.
     UNIQUE (media_id)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
     media_id VARCHAR(255), -- The id used to refer to the media.
@@ -33,7 +33,7 @@ CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
     UNIQUE (
         media_id, thumbnail_width, thumbnail_height, thumbnail_type
     )
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id
     ON local_media_repository_thumbnails (media_id);
@@ -47,7 +47,7 @@ CREATE TABLE IF NOT EXISTS remote_media_cache (
     media_length INTEGER, -- Length of the media in bytes.
     filesystem_id VARCHAR(255), -- The name used to store the media on disk.
     UNIQUE (media_origin, media_id)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
     media_origin VARCHAR(255), -- The remote HS the media came from.
@@ -62,7 +62,7 @@ CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
         media_origin, media_id, thumbnail_width, thumbnail_height,
         thumbnail_type
     )
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
     ON remote_media_cache_thumbnails (media_id);

@@ -18,7 +18,7 @@ CREATE TABLE IF NOT EXISTS presence(
     status_msg VARCHAR(255),
     mtime BIGINT, -- miliseconds since last state change
     UNIQUE(user_id)
-) ENGINE = INNODB;
+) ;

 -- For each of /my/ users which possibly-remote users are allowed to see their
 -- presence state
@@ -26,7 +26,7 @@ CREATE TABLE IF NOT EXISTS presence_allow_inbound(
     observed_user_id VARCHAR(255) NOT NULL,
     observer_user_id VARCHAR(255), -- a UserID,
     UNIQUE(observed_user_id)
-) ENGINE = INNODB;
+) ;

 -- For each of /my/ users (watcher), which possibly-remote users are they
 -- watching?
@@ -35,4 +35,4 @@ CREATE TABLE IF NOT EXISTS presence_list(
     observed_user_id VARCHAR(255), -- a UserID,
     accepted BOOLEAN,
     UNIQUE(user_id)
-) ENGINE = INNODB;
+) ;
@@ -14,7 +14,7 @@
 */
 CREATE TABLE IF NOT EXISTS profiles(
     user_id VARCHAR(255) NOT NULL,
-    displayname VARBINARY(255),
+    displayname VARCHAR(255),
     avatar_url VARCHAR(255),
     UNIQUE(user_id)
-) ENGINE = INNODB;
+) ;

@@ -16,7 +16,7 @@ CREATE TABLE IF NOT EXISTS redactions (
     event_id VARCHAR(255) NOT NULL,
     redacts VARCHAR(255) NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id);
 CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts);

@@ -14,12 +14,12 @@
 */

 CREATE TABLE IF NOT EXISTS room_aliases(
-    room_alias VARCHAR(255) NOT NULL,
+    room_alias VARBINARY(255) NOT NULL,
     room_id VARCHAR(255) NOT NULL,
     UNIQUE (room_alias)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS room_alias_servers(
-    room_alias VARCHAR(255) NOT NULL,
+    room_alias VARBINARY(255) NOT NULL,
     server VARCHAR(255) NOT NULL
-) ENGINE = INNODB;
+) ;

@@ -17,7 +17,7 @@ CREATE TABLE IF NOT EXISTS state_groups(
     id VARCHAR(20) PRIMARY KEY,
     room_id VARCHAR(255) NOT NULL,
     event_id VARCHAR(255) NOT NULL
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS state_groups_state(
     state_group VARCHAR(20) NOT NULL,
@@ -25,13 +25,13 @@ CREATE TABLE IF NOT EXISTS state_groups_state(
     type VARCHAR(255) NOT NULL,
     state_key VARCHAR(255) NOT NULL,
     event_id VARCHAR(255) NOT NULL
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS event_to_state_groups(
     event_id VARCHAR(255) NOT NULL,
     state_group VARCHAR(255) NOT NULL,
     UNIQUE (event_id)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS state_groups_id ON state_groups(id);
@@ -21,7 +21,7 @@ CREATE TABLE IF NOT EXISTS received_transactions(
     response_json BLOB,
     has_been_referenced BOOL default 0, -- Whether thishas been referenced by a prev_tx
     UNIQUE (transaction_id, origin)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0;

@@ -35,7 +35,7 @@ CREATE TABLE IF NOT EXISTS sent_transactions(
     response_code INTEGER DEFAULT 0,
     response_json BLOB,
     ts BIGINT
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS sent_transaction_dest ON sent_transactions(destination);
 CREATE INDEX IF NOT EXISTS sent_transaction_txn_id ON sent_transactions(transaction_id);
@@ -51,7 +51,7 @@ CREATE TABLE IF NOT EXISTS transaction_id_to_pdu(
     pdu_id VARCHAR(255),
     pdu_origin VARCHAR(255),
     UNIQUE (transaction_id, destination)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);

@@ -60,4 +60,4 @@ CREATE TABLE IF NOT EXISTS destinations(
     destination VARCHAR(255) PRIMARY KEY,
     retry_last_ts BIGINT,
     retry_interval INTEGER
-) ENGINE = INNODB;
+) ;

@@ -14,20 +14,20 @@
 */
 CREATE TABLE IF NOT EXISTS users(
     name VARCHAR(255),
-    password_hash VARBINARY(255),
+    password_hash VARCHAR(255),
     creation_ts BIGINT,
     admin BOOL DEFAULT 0 NOT NULL,
     UNIQUE(name)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS access_tokens(
-    id INTEGER PRIMARY KEY AUTO_INCREMENT,
+    id BIGINT PRIMARY KEY,
     user_id VARCHAR(255) NOT NULL,
     device_id VARCHAR(255),
     token VARCHAR(255) NOT NULL,
     last_used BIGINT,
     UNIQUE(token)
-) ENGINE = INNODB;
+) ;

 CREATE TABLE IF NOT EXISTS user_ips (
     user VARCHAR(255) NOT NULL,
@@ -37,6 +37,6 @@ CREATE TABLE IF NOT EXISTS user_ips (
     user_agent VARCHAR(255) NOT NULL,
     last_seen BIGINT NOT NULL,
     UNIQUE (user, access_token, ip, user_agent)
-) ENGINE = INNODB;
+) ;

 CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user);
@@ -54,7 +54,7 @@ class SignatureStore(SQLBaseStore):
             {
                 "event_id": event_id,
                 "algorithm": algorithm,
-                "hash": hash_bytes,
+                "hash": buffer(hash_bytes),
             },
         )

@@ -116,7 +116,7 @@ class SignatureStore(SQLBaseStore):
             {
                 "event_id": event_id,
                 "algorithm": algorithm,
-                "hash": hash_bytes,
+                "hash": buffer(hash_bytes),
             },
         )

@@ -160,7 +160,7 @@ class SignatureStore(SQLBaseStore):
                 "event_id": event_id,
                 "signature_name": signature_name,
                 "key_id": key_id,
-                "signature": signature_bytes,
+                "signature": buffer(signature_bytes),
             },
         )

@@ -193,6 +193,6 @@ class SignatureStore(SQLBaseStore):
                 "event_id": event_id,
                 "prev_event_id": prev_event_id,
                 "algorithm": algorithm,
-                "hash": hash_bytes,
+                "hash": buffer(hash_bytes),
             },
         )
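The buffer(...) wrapping added here (and in the KeyStore hunks earlier) tags raw byte strings as binary for the driver: under Python 2, sqlite3 stores a plain str as TEXT but a buffer as a BLOB, and these hashes and signatures are not valid text in any encoding. MariaEngine.encode_parameter unwraps the buffer again for the MySQL driver. Python 2 sketch (buffer was removed in Python 3 in favour of memoryview):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE hashes (event_id VARCHAR(255), hash BLOB)")
hash_bytes = "\x00\xff\x10"  # raw bytes, not valid UTF-8 text
conn.execute(
    "INSERT INTO hashes VALUES (?, ?)",
    ("$event:example.com", buffer(hash_bytes)),  # buffer() is Python 2 only
)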
@@ -433,12 +433,6 @@ class StreamStore(SQLBaseStore):

         defer.returnValue(self.min_token)

-    def get_next_stream_id(self):
-        with self._next_stream_id_lock:
-            i = self._next_stream_id
-            self._next_stream_id += 1
-            return i
-
     def _get_room_events_max_id_txn(self, txn):
         txn.execute(
             "SELECT MAX(stream_ordering) as m FROM events"
@@ -60,7 +60,7 @@ def get_retry_limiter(destination, clock, store, **kwargs):

     if retry_timings:
         retry_last_ts, retry_interval = (
-            retry_timings.retry_last_ts, retry_timings.retry_interval
+            retry_timings["retry_last_ts"], retry_timings["retry_interval"]
         )

         now = int(clock.time_msec())
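get_destination_retry_timings used to hand back a DestinationsTable.EntryType row object; with the table classes going away it now returns a plain dict, hence the key lookups above and the retry_timings_res dicts stubbed into every federation-facing test below:

# Shape of the value the store now returns (and the tests now stub):
retry_timings = {
    "destination": "example.com",
    "retry_last_ts": 0,
    "retry_interval": 0,
}
retry_last_ts, retry_interval = (
    retry_timings["retry_last_ts"], retry_timings["retry_interval"]
)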
@@ -24,8 +24,6 @@ from ..utils import MockHttpResource, MockClock, setup_test_homeserver
 from synapse.federation import initialize_http_replication
 from synapse.events import FrozenEvent

-from synapse.storage.transactions import DestinationsTable
-

 def make_pdu(prev_pdus=[], **kwargs):
     """Provide some default fields for making a PduTuple."""
@@ -57,8 +55,14 @@ class FederationTestCase(unittest.TestCase):
         self.mock_persistence.get_received_txn_response.return_value = (
             defer.succeed(None)
         )
+
+        retry_timings_res = {
+            "destination": "",
+            "retry_last_ts": 0,
+            "retry_interval": 0,
+        }
         self.mock_persistence.get_destination_retry_timings.return_value = (
-            defer.succeed(DestinationsTable.EntryType("", 0, 0))
+            defer.succeed(retry_timings_res)
         )
         self.mock_persistence.get_auth_chain.return_value = []
         self.clock = MockClock()
@@ -87,6 +87,15 @@ class FederationTestCase(unittest.TestCase):
         self.datastore.get_room.return_value = defer.succeed(True)
         self.auth.check_host_in_room.return_value = defer.succeed(True)

+        retry_timings_res = {
+            "destination": "",
+            "retry_last_ts": 0,
+            "retry_interval": 0,
+        }
+        self.datastore.get_destination_retry_timings.return_value = (
+            defer.succeed(retry_timings_res)
+        )
+
         def have_events(event_ids):
             return defer.succeed({})
         self.datastore.have_events.side_effect = have_events
@@ -194,8 +194,13 @@ class MockedDatastorePresenceTestCase(PresenceTestCase):
         return datastore

     def setUp_datastore_federation_mocks(self, datastore):
+        retry_timings_res = {
+            "destination": "",
+            "retry_last_ts": 0,
+            "retry_interval": 0,
+        }
         datastore.get_destination_retry_timings.return_value = (
-            defer.succeed(DestinationsTable.EntryType("", 0, 0))
+            defer.succeed(retry_timings_res)
         )

         def get_received_txn_response(*args):
@@ -96,8 +96,13 @@ class TypingNotificationsTestCase(unittest.TestCase):
         self.event_source = hs.get_event_sources().sources["typing"]

         self.datastore = hs.get_datastore()
+        retry_timings_res = {
+            "destination": "",
+            "retry_last_ts": 0,
+            "retry_interval": 0,
+        }
         self.datastore.get_destination_retry_timings.return_value = (
-            defer.succeed(DestinationsTable.EntryType("", 0, 0))
+            defer.succeed(retry_timings_res)
         )

         def get_received_txn_response(*args):
@@ -115,12 +115,6 @@ class EventStreamPermissionsTestCase(RestTestCase):
         hs = yield setup_test_homeserver(
             http_client=None,
             replication_layer=Mock(),
-            clock=Mock(spec=[
-                "call_later",
-                "cancel_call_later",
-                "time_msec",
-                "time"
-            ]),
             ratelimiter=NonCallableMock(spec_set=[
                 "send_message",
             ]),
@@ -132,9 +126,6 @@ class EventStreamPermissionsTestCase(RestTestCase):

         hs.get_handlers().federation_handler = Mock()

-        hs.get_clock().time_msec.return_value = 1000000
-        hs.get_clock().time.return_value = 1000
-
         synapse.rest.client.v1.register.register_servlets(hs, self.mock_resource)
         synapse.rest.client.v1.events.register_servlets(hs, self.mock_resource)
         synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource)
@@ -16,22 +16,18 @@ from tests import unittest
 from twisted.internet import defer

 from synapse.appservice import ApplicationService
-from synapse.server import HomeServer
 from synapse.storage.appservice import ApplicationServiceStore

-from mock import Mock
-from tests.utils import SQLiteMemoryDbPool, MockClock
+from tests.utils import setup_test_homeserver


 class ApplicationServiceStoreTestCase(unittest.TestCase):

     @defer.inlineCallbacks
     def setUp(self):
-        db_pool = SQLiteMemoryDbPool()
-        yield db_pool.prepare()
-        hs = HomeServer(
-            "test", db_pool=db_pool, clock=MockClock(), config=Mock()
-        )
+        hs = yield setup_test_homeserver()
+        db_pool = hs.get_db_pool()
         self.as_token = "token1"
         db_pool.runQuery(
             "INSERT INTO application_services(token) VALUES(?)",
@ -24,6 +24,7 @@ from collections import OrderedDict
|
||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
|
|
||||||
from synapse.storage._base import SQLBaseStore
|
from synapse.storage._base import SQLBaseStore
|
||||||
|
from synapse.storage.engines import create_engine
|
||||||
|
|
||||||
|
|
||||||
class SQLBaseStoreTestCase(unittest.TestCase):
|
class SQLBaseStoreTestCase(unittest.TestCase):
|
||||||
|
@ -40,7 +41,12 @@ class SQLBaseStoreTestCase(unittest.TestCase):
|
||||||
|
|
||||||
config = Mock()
|
config = Mock()
|
||||||
config.event_cache_size = 1
|
config.event_cache_size = 1
|
||||||
hs = HomeServer("test", db_pool=self.db_pool, config=config)
|
hs = HomeServer(
|
||||||
|
"test",
|
||||||
|
db_pool=self.db_pool,
|
||||||
|
config=config,
|
||||||
|
database_engine=create_engine("sqlite3"),
|
||||||
|
)
|
||||||
|
|
||||||
self.datastore = SQLBaseStore(hs)
|
self.datastore = SQLBaseStore(hs)
|
||||||
|
|
||||||
|
@ -86,8 +92,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
|
||||||
|
|
||||||
self.assertEquals("Value", value)
|
self.assertEquals("Value", value)
|
||||||
self.mock_txn.execute.assert_called_with(
|
self.mock_txn.execute.assert_called_with(
|
||||||
"SELECT retcol FROM tablename WHERE keycol = ? "
|
"SELECT retcol FROM tablename WHERE keycol = ?",
|
||||||
"ORDER BY rowid asc",
|
|
||||||
["TheKey"]
|
["TheKey"]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -104,8 +109,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
 
         self.assertEquals({"colA": 1, "colB": 2, "colC": 3}, ret)
         self.mock_txn.execute.assert_called_with(
-            "SELECT colA, colB, colC FROM tablename WHERE keycol = ? "
-            "ORDER BY rowid asc",
+            "SELECT colA, colB, colC FROM tablename WHERE keycol = ?",
             ["TheKey"]
         )
 
@@ -139,8 +143,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
 
         self.assertEquals([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret)
         self.mock_txn.execute.assert_called_with(
-            "SELECT colA FROM tablename WHERE keycol = ? "
-            "ORDER BY rowid asc",
+            "SELECT colA FROM tablename WHERE keycol = ?",
             ["A set"]
         )
 
@@ -189,8 +192,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
 
         self.assertEquals({"columname": "Old Value"}, ret)
         self.mock_txn.execute.assert_has_calls([
-            call('SELECT columname FROM tablename WHERE keycol = ? '
-                 'ORDER BY rowid asc',
+            call('SELECT columname FROM tablename WHERE keycol = ?',
                  ['TheKey']),
            call("UPDATE tablename SET columname = ? WHERE keycol = ?",
                 ["New Value", "TheKey"])
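All four assertion changes in this file have the same cause: "ORDER BY rowid asc" relies on SQLite's implicit rowid column, which MariaDB tables do not have, so the base store presumably stopped appending it when building these simple selects. A sketch of the portable construction the tests now expect (helper name is illustrative, not Synapse's API):

def build_select(table, keyvalues, retcols):
    # Parameterised SELECT with no engine-specific ordering clause.
    sql = "SELECT %s FROM %s WHERE %s" % (
        ", ".join(retcols),
        table,
        " AND ".join("%s = ?" % k for k in keyvalues),
    )
    return sql, list(keyvalues.values())

sql, args = build_select("tablename", {"keycol": "TheKey"}, ["retcol"])
assert sql == "SELECT retcol FROM tablename WHERE keycol = ?"
assert args == ["TheKey"]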
@@ -42,28 +42,38 @@ class RegistrationStoreTestCase(unittest.TestCase):
         self.assertEquals(
             # TODO(paul): Surely this field should be 'user_id', not 'name'
             #  Additionally surely it shouldn't come in a 1-element list
-            [{"name": self.user_id, "password_hash": self.pwhash}],
+            {"name": self.user_id, "password_hash": self.pwhash},
             (yield self.store.get_user_by_id(self.user_id))
         )
 
-        self.assertEquals(
-            {"admin": 0,
-             "device_id": None,
-             "name": self.user_id,
-             "token_id": 1},
-            (yield self.store.get_user_by_token(self.tokens[0]))
+        result = yield self.store.get_user_by_token(self.tokens[0])
+
+        self.assertDictContainsSubset(
+            {
+                "admin": 0,
+                "device_id": None,
+                "name": self.user_id,
+            },
+            result
         )
 
+        self.assertTrue("token_id" in result)
+
     @defer.inlineCallbacks
     def test_add_tokens(self):
         yield self.store.register(self.user_id, self.tokens[0], self.pwhash)
         yield self.store.add_access_token_to_user(self.user_id, self.tokens[1])
 
-        self.assertEquals(
-            {"admin": 0,
-             "device_id": None,
-             "name": self.user_id,
-             "token_id": 2},
-            (yield self.store.get_user_by_token(self.tokens[1]))
+        result = yield self.store.get_user_by_token(self.tokens[1])
+
+        self.assertDictContainsSubset(
+            {
+                "admin": 0,
+                "device_id": None,
+                "name": self.user_id,
+            },
+            result
         )
 
+        self.assertTrue("token_id" in result)
+
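Because the row id behind token_id is now allocated by the database, its value differs between engines and runs, so the test pins only the stable fields and then checks that the key exists at all. assertDictContainsSubset is a stock unittest assertion; a tiny self-contained illustration:

import unittest

class SubsetExample(unittest.TestCase):
    def test_subset(self):
        row = {"admin": 0, "device_id": None, "name": "@user:test", "token_id": 7}
        # Passes as long as every expected pair is present; extra keys such
        # as the engine-assigned token_id are ignored.
        self.assertDictContainsSubset({"admin": 0, "name": "@user:test"}, row)
        self.assertTrue("token_id" in row)

if __name__ == "__main__":
    unittest.main()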
@@ -119,7 +119,7 @@ class RoomMemberStoreTestCase(unittest.TestCase):
         yield self.inject_room_member(self.room, self.u_alice, Membership.JOIN)
 
         self.assertEquals(
-            ["test"],
+            {"test"},
             (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
         )
 
@@ -127,7 +127,7 @@ class RoomMemberStoreTestCase(unittest.TestCase):
         yield self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
 
         self.assertEquals(
-            ["test"],
+            {"test"},
             (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
         )
 
@@ -136,9 +136,9 @@ class RoomMemberStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             {"test", "elsewhere"},
-            set((yield
+            (yield
                 self.store.get_joined_hosts_for_room(self.room.to_string())
-            ))
+            )
         )
 
         # Should still have both hosts
@@ -146,15 +146,15 @@ class RoomMemberStoreTestCase(unittest.TestCase):
 
         self.assertEquals(
             {"test", "elsewhere"},
-            set((yield
+            (yield
                 self.store.get_joined_hosts_for_room(self.room.to_string())
-            ))
+            )
         )
 
         # Should have only one host after other leaves
         yield self.inject_room_member(self.room, self.u_charlie, Membership.LEAVE)
 
         self.assertEquals(
-            ["test"],
+            {"test"},
             (yield self.store.get_joined_hosts_for_room(self.room.to_string()))
         )
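Switching the expected value from a list to a set removes the tests' dependence on row order, which SQLite and MariaDB need not agree on without an explicit ORDER BY. The point in miniature:

# Sets compare by membership, not position, so either engine's row order
# yields an equal value; lists would not.
assert {"test", "elsewhere"} == set(["elsewhere", "test"])
assert ["test", "elsewhere"] != ["elsewhere", "test"]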
@@ -17,6 +17,7 @@ from synapse.http.server import HttpServer
 from synapse.api.errors import cs_error, CodeMessageException, StoreError
 from synapse.api.constants import EventTypes
 from synapse.storage import prepare_database
+from synapse.storage.engines import create_engine
 from synapse.server import HomeServer
 
 from synapse.util.logcontext import LoggingContext
@@ -44,18 +45,23 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
         config.event_cache_size = 1
         config.disable_registration = False
 
+    if "clock" not in kargs:
+        kargs["clock"] = MockClock()
+
     if datastore is None:
         db_pool = SQLiteMemoryDbPool()
         yield db_pool.prepare()
         hs = HomeServer(
             name, db_pool=db_pool, config=config,
             version_string="Synapse/tests",
+            database_engine=create_engine("sqlite3"),
             **kargs
         )
     else:
         hs = HomeServer(
             name, db_pool=None, datastore=datastore, config=config,
             version_string="Synapse/tests",
+            database_engine=create_engine("sqlite3"),
             **kargs
         )
 
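Both construction paths now inject an explicit sqlite3 engine, and the helper only supplies MockClock() when the caller passed no clock of its own. A hedged usage sketch of that override point (the real Clock's import path is assumed, not shown in this diff):

from twisted.internet import defer

from synapse.util import Clock  # assumed location of the real clock

@defer.inlineCallbacks
def make_hs_with_real_clock():
    # The "clock" kwarg suppresses the MockClock() default added above.
    hs = yield setup_test_homeserver(clock=Clock())
    defer.returnValue(hs)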
@@ -227,7 +233,10 @@ class SQLiteMemoryDbPool(ConnectionPool, object):
         )
 
     def prepare(self):
-        return self.runWithConnection(prepare_database)
+        engine = create_engine("sqlite3")
+        return self.runWithConnection(
+            lambda conn: prepare_database(conn, engine)
+        )
 
 
 class MemoryDataStore(object):
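runWithConnection invokes its callable with the raw connection as the only argument, so the extra engine parameter that prepare_database now requires is carried in via the lambda's closure. An equivalent named-function form, if the lambda reads poorly:

def prepare(self):
    engine = create_engine("sqlite3")

    def _prepare(conn):
        # The connection comes from runWithConnection; the engine rides
        # along through the closure.
        prepare_database(conn, engine)

    return self.runWithConnection(_prepare)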