
Merge branch 'develop' into pushrules2

Conflicts:
	synapse/storage/schema/pusher.sql
David Baker 2015-03-04 15:24:21 +00:00
commit ed72fc3a50
32 changed files with 820 additions and 663 deletions

contrib/vertobot/bridge.pl Executable file

@@ -0,0 +1,489 @@
#!/usr/bin/env perl
use strict;
use warnings;
use 5.010; # //
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
use IO::Async::Loop;
use Net::Async::WebSocket::Client;
use Net::Async::HTTP;
use Net::Async::HTTP::Server;
use JSON;
use YAML;
use Data::UUID;
use Getopt::Long;
use Data::Dumper;
use URI::Encode qw(uri_encode uri_decode);
binmode STDOUT, ":encoding(UTF-8)";
binmode STDERR, ":encoding(UTF-8)";
my $msisdn_to_matrix = {
'447417892400' => '@matthew:matrix.org',
};
my $matrix_to_msisdn = {};
foreach (keys %$msisdn_to_matrix) {
$matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
}
my $loop = IO::Async::Loop->new;
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
# https://rt.cpan.org/Ticket/Display.html?id=93107
# ref $loop eq "IO::Async::Loop::Poll" and
# warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
GetOptions(
'C|config=s' => \my $CONFIG,
'eval-from=s' => \my $EVAL_FROM,
) or exit 1;
if( defined $EVAL_FROM ) {
# An emergency 'eval() this file' hack
$SIG{HUP} = sub {
my $code = do {
open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
local $/; <$fh>
};
eval $code or warn "Cannot eval() - $@";
};
}
defined $CONFIG or die "Must supply --config\n";
my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
# No harm in always applying this
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
my $bridgestate = {};
my $roomid_by_callid = {};
my $sessid = lc new Data::UUID->create_str();
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};
my $http = Net::Async::HTTP->new();
$loop->add( $http );
sub create_virtual_user
{
my ($localpart) = @_;
my ( $response ) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/register?".
"access_token=$as_token&user_id=$localpart"
),
content_type => "application/json",
content => <<EOT
{
"type": "m.login.application_service",
"user": "$localpart"
}
EOT
)->get;
warn $response->as_string if ($response->code != 200);
}
my $http_server = Net::Async::HTTP::Server->new(
on_request => sub {
my $self = shift;
my ( $req ) = @_;
my $response;
my $path = uri_decode($req->path);
warn("request: $path");
if ($path =~ m#/users/\@(\+.*)#) {
# when queried about virtual users, auto-create them in the HS
my $localpart = $1;
create_virtual_user($localpart);
$response = HTTP::Response->new( 200 );
$response->add_content('{}');
$response->content_type( "application/json" );
}
elsif ($path =~ m#/transactions/(.*)#) {
my $event = JSON->new->decode($req->body);
print Dumper($event);
my $room_id = $event->{room_id};
my %dp = %{$CONFIG{'verto-dialog-params'}};
$dp{callID} = $bridgestate->{$room_id}->{callid};
if ($event->{type} eq 'm.room.membership') {
my $membership = $event->{content}->{membership};
my $state_key = $event->{state_key};
my $room_id = $event->{state_id};
if ($membership eq 'invite') {
# autojoin invites
my ( $response ) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/rooms/$room_id/join?".
"access_token=$as_token&user_id=$state_key"
),
content_type => "application/json",
content => "{}",
)->get;
warn $response->as_string if ($response->code != 200);
}
}
elsif ($event->{type} eq 'm.call.invite') {
my $room_id = $event->{room_id};
$bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
$bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{sessid} = $sessid;
# $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
my $offer = $event->{content}->{offer}->{sdp};
# $bridgestate->{$room_id}->{gathered_candidates} = 0;
$roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
# no trickle ICE in verto apparently
my $f = send_verto_json_request("verto.invite", {
"sdp" => $offer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
# elsif ($event->{type} eq 'm.call.candidates') {
# # XXX: this could fire for both matrix->verto and verto->matrix calls
# # and races as it collects candidates. much better to just turn off
# # candidate gathering in the webclient entirely for now
#
# my $room_id = $event->{room_id};
# # XXX: compare call IDs
# if (!$bridgestate->{$room_id}->{gathered_candidates}) {
# $bridgestate->{$room_id}->{gathered_candidates} = 1;
# my $offer = $bridgestate->{$room_id}->{offer};
# my $candidate_block = "";
# foreach (@{$event->{content}->{candidates}}) {
# $candidate_block .= "a=" . $_->{candidate} . "\r\n";
# }
# # XXX: collate using the right m= line - for now assume audio call
# $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
#
# my $f = send_verto_json_request("verto.invite", {
# "sdp" => $offer,
# "dialogParams" => \%dp,
# "sessid" => $bridgestate->{$room_id}->{sessid},
# });
# $self->adopt_future($f);
# }
# else {
# # ignore them, as no trickle ICE, although we might as well
# # batch them up
# # foreach (@{$event->{content}->{candidates}}) {
# # push @{$bridgestate->{$room_id}->{candidates}}, $_;
# # }
# }
# }
elsif ($event->{type} eq 'm.call.answer') {
# grab the answer and relay it to verto as a verto.answer
my $room_id = $event->{room_id};
my $answer = $event->{content}->{answer}->{sdp};
my $f = send_verto_json_request("verto.answer", {
"sdp" => $answer,
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
elsif ($event->{type} eq 'm.call.hangup') {
my $room_id = $event->{room_id};
if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
my $f = send_verto_json_request("verto.bye", {
"dialogParams" => \%dp,
"sessid" => $bridgestate->{$room_id}->{sessid},
});
$self->adopt_future($f);
}
else {
warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
}
}
else {
warn "Unhandled event: $event->{type}";
}
$response = HTTP::Response->new( 200 );
$response->add_content('{}');
$response->content_type( "application/json" );
}
else {
warn "Unhandled path: $path";
$response = HTTP::Response->new( 404 );
}
$req->respond( $response );
},
);
$loop->add( $http_server );
$http_server->listen(
addr => { family => "inet", socktype => "stream", port => 8009 },
on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
);
my $bot_verto = Net::Async::WebSocket::Client->new(
on_frame => sub {
my ( $self, $frame ) = @_;
warn "[Verto] receiving $frame";
on_verto_json($frame);
},
);
$loop->add( $bot_verto );
my $verto_connecting = $loop->new_future;
$bot_verto->connect(
%{ $CONFIG{"verto-bot"} },
on_connected => sub {
warn("[Verto] connected to websocket");
if (not $verto_connecting->is_done) {
$verto_connecting->done($bot_verto);
send_verto_json_request("login", {
'login' => $CONFIG{'verto-dialog-params'}{'login'},
'passwd' => $CONFIG{'verto-config'}{'passwd'},
'sessid' => $sessid,
});
}
},
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
);
# die Dumper($verto_connecting);
my $as_url = $CONFIG{"matrix-bot"}->{as_url};
Future->needs_all(
$http->do_request(
method => "POST",
uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
content_type => "application/json",
content => <<EOT
{
"as_token": "$as_token",
"url": "$as_url",
"namespaces": { "users": ["\@\\\\+.*"] }
}
EOT
),
$verto_connecting,
)->get;
$loop->attach_signal(
PIPE => sub { warn "pipe\n" }
);
$loop->attach_signal(
INT => sub { $loop->stop },
);
$loop->attach_signal(
TERM => sub { $loop->stop },
);
eval {
$loop->run;
} or my $e = $@;
die $e if $e;
exit 0;
{
my $json_id;
my $requests;
sub send_verto_json_request
{
$json_id ||= 1;
my ($method, $params) = @_;
my $json = {
jsonrpc => "2.0",
method => $method,
params => $params,
id => $json_id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
my $request = $loop->new_future;
$requests->{$json_id} = $request;
$json_id++;
return $request;
}
sub send_verto_json_response
{
my ($result, $id) = @_;
my $json = {
jsonrpc => "2.0",
result => $result,
id => $id,
};
my $text = JSON->new->encode( $json );
warn "[Verto] sending $text";
$bot_verto->send_frame ( $text );
}
sub on_verto_json
{
my $json = JSON->new->decode( $_[0] );
if ($json->{method}) {
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
$json->{method} eq 'verto.media') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
if ($json->{params}->{sdp}) {
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.answer?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "answer",
},
}),
)->then( sub {
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
}
elsif ($json->{method} eq 'verto.invite') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
my $room_id;
# create a virtual user for the caller if needed.
create_virtual_user($caller);
# create a room of form #peer-peer and invite the callee
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/createRoom?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
room_alias_name => $alias,
invite => [ $callee_user ],
}),
)->then( sub {
my ( $response ) = @_;
my $resp = JSON->new->decode($response->content);
$room_id = $resp->{room_id};
$roomid_by_callid->{$json->{params}->{callID}} = $room_id;
})->get;
# join it
my ($response) = $http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/join/$room_id?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => '{}',
)->get;
$bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
$bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
$bridgestate->{$room_id}->{sessid} = $sessid;
# put the m.call.invite in there
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.invite?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
answer => {
sdp => $json->{params}->{sdp},
type => "offer",
},
}),
)->then( sub {
# acknowledge the verto
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
elsif ($json->{method} eq 'verto.bye') {
my $caller = $json->{dialogParams}->{caller_id_number};
my $callee = $json->{dialogParams}->{destination_number};
my $caller_user = '@+' . $caller . ':' . $hs_domain;
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee";
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
# put the m.call.hangup into the room
$http->do_request(
method => "POST",
uri => URI->new(
$CONFIG{"matrix"}->{server}.
"/_matrix/client/api/v1/send/m.call.hangup?".
"access_token=$as_token&user_id=$caller_user"
),
content_type => "application/json",
content => JSON->new->encode({
call_id => $bridgestate->{$room_id}->{matrix_callid},
version => 0,
}),
)->then( sub {
# acknowledge the verto
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
})->get;
}
else {
warn ("[Verto] unhandled method: " . $json->{method});
send_verto_json_response( {
method => $json->{method},
}, $json->{id});
}
}
elsif ($json->{result}) {
$requests->{$json->{id}}->done($json->{result});
}
elsif ($json->{error}) {
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
}
}
}
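
For reference, the verto leg of the bridge above speaks plain JSON-RPC 2.0 over a websocket: send_verto_json_request stamps each outgoing request with an incrementing id and parks a future keyed on that id, and on_verto_json resolves the matching future when a "result" frame comes back. Below is a minimal Python sketch of the same framing, not part of the commit; the method, params and sessid values are made up for illustration.

import json
import itertools

_next_id = itertools.count(1)
_pending = {}  # id -> callback fired when the matching "result" frame arrives


def encode_request(method, params):
    # Mirrors send_verto_json_request: a JSON-RPC 2.0 envelope with a fresh id.
    req_id = next(_next_id)
    _pending[req_id] = lambda result: print("request %d done: %r" % (req_id, result))
    return json.dumps({"jsonrpc": "2.0", "method": method, "params": params, "id": req_id})


def handle_frame(frame):
    # Mirrors on_verto_json: a frame carrying "result" resolves the request with that id.
    msg = json.loads(frame)
    if "result" in msg:
        _pending.pop(msg["id"])(msg["result"])


frame = encode_request("verto.invite", {"sdp": "v=0 ...", "sessid": "hypothetical-sessid"})
handle_frame('{"jsonrpc": "2.0", "id": 1, "result": {"message": "CALL CREATED"}}')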


@@ -1,54 +0,0 @@
from synapse.storage import read_schema

import argparse
import json
import sqlite3


def do_other_deltas(cursor):
    cursor.execute("PRAGMA user_version")
    row = cursor.fetchone()
    if row and row[0]:
        user_version = row[0]
        # Run every version since after the current version.
        for v in range(user_version + 1, 10):
            print "Running delta: %d" % (v,)
            sql_script = read_schema("delta/v%d" % (v,))
            cursor.executescript(sql_script)


def update_app_service_table(cur):
    cur.execute("SELECT id, regex FROM application_services_regex")
    for row in cur.fetchall():
        try:
            print "checking %s..." % row[0]
            json.loads(row[1])
        except ValueError:
            # row isn't in json, make it so.
            string_regex = row[1]
            new_regex = json.dumps({
                "regex": string_regex,
                "exclusive": True
            })
            cur.execute(
                "UPDATE application_services_regex SET regex=? WHERE id=?",
                (new_regex, row[0])
            )


def main(dbname):
    con = sqlite3.connect(dbname)
    cur = con.cursor()
    do_other_deltas(cur)
    update_app_service_table(cur)
    cur.execute("PRAGMA user_version = 14")
    cur.close()
    con.commit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("database")
    args = parser.parse_args()
    main(args.database)
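
The deleted script above (and the delta module later in this diff) rewrites each application_services_regex row from a bare regex string into a JSON object carrying an explicit exclusive flag. A quick standalone illustration of that transformation, using a hypothetical pre-upgrade regex value:

import json

string_regex = "@irc_.*"  # hypothetical value of the old plain-string regex column
new_regex = json.dumps({"regex": string_regex, "exclusive": True})
print(new_regex)  # {"regex": "@irc_.*", "exclusive": true}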


@@ -17,7 +17,9 @@
import sys
sys.dont_write_bytecode = True
from synapse.storage import prepare_database, UpgradeDatabaseException
from synapse.storage import (
    prepare_database, prepare_sqlite3_database, UpgradeDatabaseException,
)
from synapse.server import HomeServer
@@ -335,6 +337,7 @@ def setup():
    try:
        with sqlite3.connect(db_name) as db_conn:
            prepare_sqlite3_database(db_conn)
            prepare_database(db_conn)
    except UpgradeDatabaseException:
        sys.stderr.write(
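
As the homeserver.py hunk above shows, startup now calls prepare_sqlite3_database before prepare_database on the same connection. A minimal standalone sketch of that call order follows; it is not part of the commit, the homeserver.db path is assumed, and error handling is trimmed.

import sqlite3

from synapse.storage import prepare_database, prepare_sqlite3_database

db_name = "homeserver.db"  # assumed database path
with sqlite3.connect(db_name) as db_conn:
    # Migrate any old PRAGMA user_version bookkeeping into schema_version first...
    prepare_sqlite3_database(db_conn)
    # ...then create or upgrade the schema proper.
    prepare_database(db_conn)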


@@ -45,35 +45,18 @@ from syutil.jsonutil import encode_canonical_json
from synapse.crypto.event_signing import compute_event_reference_hash
import fnmatch
import imp
import logging
import os
import re
logger = logging.getLogger(__name__)
SCHEMAS = [
"transactions",
"users",
"profiles",
"presence",
"im",
"room_aliases",
"keys",
"redactions",
"state",
"event_edges",
"event_signatures",
"pusher",
"media_repository",
"application_services",
"filtering",
"rejections",
]
# Remember to update this number every time an incompatible change is made to
# database schema files, so the users will be informed on server restarts.
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
SCHEMA_VERSION = 14
dir_path = os.path.abspath(os.path.dirname(__file__))
@@ -576,28 +559,15 @@ class DataStore(RoomMemberStore, RoomStore,
)
def schema_path(schema):
""" Get a filesystem path for the named database schema
Args:
schema: Name of the database schema.
Returns:
A filesystem path pointing at a ".sql" file.
"""
schemaPath = os.path.join(dir_path, "schema", schema + ".sql")
return schemaPath
def read_schema(schema):
def read_schema(path):
""" Read the named database schema.
Args:
schema: Name of the database schema.
path: Path of the database schema.
Returns:
A string containing the database schema.
"""
with open(schema_path(schema)) as schema_file:
with open(path) as schema_file:
return schema_file.read()
@@ -610,49 +580,275 @@ class UpgradeDatabaseException(PrepareDatabaseException):
def prepare_database(db_conn):
""" Set up all the dbs. Since all the *.sql have IF NOT EXISTS, so we
don't have to worry about overwriting existing content.
"""Prepares a database for usage. Will either create all necessary tables
or upgrade from an older schema version.
"""
c = db_conn.cursor()
c.execute("PRAGMA user_version")
row = c.fetchone()
try:
cur = db_conn.cursor()
version_info = _get_or_create_schema_state(cur)
if row and row[0]:
user_version = row[0]
if user_version > SCHEMA_VERSION:
raise ValueError(
"Cannot use this database as it is too " +
"new for the server to understand"
)
elif user_version < SCHEMA_VERSION:
logger.info(
"Upgrading database from version %d",
user_version
)
# Run every version since after the current version.
for v in range(user_version + 1, SCHEMA_VERSION + 1):
if v in (10, 14,):
raise UpgradeDatabaseException(
"No delta for version 10"
)
sql_script = read_schema("delta/v%d" % (v,))
c.executescript(sql_script)
db_conn.commit()
if version_info:
user_version, delta_files, upgraded = version_info
_upgrade_existing_database(cur, user_version, delta_files, upgraded)
else:
logger.info("Database is at version %r", user_version)
_setup_new_database(cur)
else:
sql_script = "BEGIN TRANSACTION;\n"
for sql_loc in SCHEMAS:
logger.debug("Applying schema %r", sql_loc)
sql_script += read_schema(sql_loc)
sql_script += "\n"
sql_script += "COMMIT TRANSACTION;"
c.executescript(sql_script)
cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,))
cur.close()
db_conn.commit()
c.execute("PRAGMA user_version = %d" % SCHEMA_VERSION)
except:
db_conn.rollback()
raise
c.close()
def _setup_new_database(cur):
    """Sets up the database by finding a base set of "full schemas" and then
    applying any necessary deltas.

    The "full_schemas" directory has subdirectories named after versions. This
    function searches for the highest version less than or equal to
    `SCHEMA_VERSION` and executes all .sql files in that directory.

    The function will then apply all deltas for all versions after the base
    version.

    Example directory structure:

        schema/
            delta/
                ...
            full_schemas/
                3/
                    test.sql
                    ...
                11/
                    foo.sql
                    bar.sql
                    ...

    In the example foo.sql and bar.sql would be run, and then any delta files
    for versions strictly greater than 11.
    """
    current_dir = os.path.join(dir_path, "schema", "full_schemas")
    directory_entries = os.listdir(current_dir)

    valid_dirs = []
    pattern = re.compile(r"^\d+(\.sql)?$")
    for filename in directory_entries:
        match = pattern.match(filename)
        abs_path = os.path.join(current_dir, filename)
        if match and os.path.isdir(abs_path):
            ver = int(match.group(0))
            if ver <= SCHEMA_VERSION:
                valid_dirs.append((ver, abs_path))
        else:
            logger.warn("Unexpected entry in 'full_schemas': %s", filename)

    if not valid_dirs:
        raise PrepareDatabaseException(
            "Could not find a suitable base set of full schemas"
        )

    max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0])

    logger.debug("Initialising schema v%d", max_current_ver)

    directory_entries = os.listdir(sql_dir)

    sql_script = "BEGIN TRANSACTION;\n"
    for filename in fnmatch.filter(directory_entries, "*.sql"):
        sql_loc = os.path.join(sql_dir, filename)
        logger.debug("Applying schema %s", sql_loc)
        sql_script += read_schema(sql_loc)
        sql_script += "\n"
    sql_script += "COMMIT TRANSACTION;"
    cur.executescript(sql_script)

    cur.execute(
        "INSERT OR REPLACE INTO schema_version (version, upgraded)"
        " VALUES (?,?)",
        (max_current_ver, False)
    )

    _upgrade_existing_database(
        cur,
        current_version=max_current_ver,
        applied_delta_files=[],
        upgraded=False
    )
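
To make the docstring's example concrete: the base-version selection only considers numerically named subdirectories of full_schemas that do not exceed SCHEMA_VERSION, and picks the largest. The toy sketch below (not part of the commit, with a hypothetical directory listing, and without the isdir check the real function performs) shows that selection in isolation.

import re

SCHEMA_VERSION = 14
directory_entries = ["3", "11", "16", "README"]  # hypothetical contents of full_schemas/

pattern = re.compile(r"^\d+$")
valid = [int(name) for name in directory_entries
         if pattern.match(name) and int(name) <= SCHEMA_VERSION]
base_version = max(valid)  # -> 11; deltas for versions 12..14 are then applied on top
print(base_version)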
def _upgrade_existing_database(cur, current_version, applied_delta_files,
                               upgraded):
    """Upgrades an existing database.

    Delta files can either be SQL stored in *.sql files, or python modules
    in *.py.

    There can be multiple delta files per version. Synapse will keep track of
    which delta files have been applied, and will apply any that haven't been
    even if there has been no version bump. This is useful for development
    where orthogonal schema changes may happen on separate branches.

    Different delta files for the same version *must* be orthogonal and give
    the same result when applied in any order. No guarantees are made on the
    order of execution of these scripts.

    This is a no-op if current_version == SCHEMA_VERSION.

    Example directory structure:

        schema/
            delta/
                11/
                    foo.sql
                    ...
                12/
                    foo.sql
                    bar.py
                ...
            full_schemas/
                ...

    In the example, if current_version is 11, then foo.sql will be run if and
    only if `upgraded` is True. Then `foo.sql` and `bar.py` would be run in
    some arbitrary order.

    Args:
        cur (Cursor)
        current_version (int): The current version of the schema.
        applied_delta_files (list): A list of deltas that have already been
            applied.
        upgraded (bool): Whether the current version was generated by having
            applied deltas or from full schema file. If `True` the function
            will never apply delta files for the given `current_version`, since
            the current_version wasn't generated by applying those delta files.
    """

    if current_version > SCHEMA_VERSION:
        raise ValueError(
            "Cannot use this database as it is too " +
            "new for the server to understand"
        )

    start_ver = current_version
    if not upgraded:
        start_ver += 1

    for v in range(start_ver, SCHEMA_VERSION + 1):
        logger.debug("Upgrading schema to v%d", v)

        delta_dir = os.path.join(dir_path, "schema", "delta", str(v))

        try:
            directory_entries = os.listdir(delta_dir)
        except OSError:
            logger.exception("Could not open delta dir for version %d", v)
            raise UpgradeDatabaseException(
                "Could not open delta dir for version %d" % (v,)
            )

        directory_entries.sort()
        for file_name in directory_entries:
            relative_path = os.path.join(str(v), file_name)
            if relative_path in applied_delta_files:
                continue

            absolute_path = os.path.join(
                dir_path, "schema", "delta", relative_path,
            )
            root_name, ext = os.path.splitext(file_name)
            if ext == ".py":
                # This is a python upgrade module. We need to import into some
                # package and then execute its `run_upgrade` function.
                module_name = "synapse.storage.v%d_%s" % (
                    v, root_name
                )
                with open(absolute_path) as python_file:
                    module = imp.load_source(
                        module_name, absolute_path, python_file
                    )
                logger.debug("Running script %s", relative_path)
                module.run_upgrade(cur)
            elif ext == ".sql":
                # A plain old .sql file, just read and execute it
                delta_schema = read_schema(absolute_path)
                logger.debug("Applying schema %s", relative_path)
                cur.executescript(delta_schema)
            else:
                # Not a valid delta file.
                logger.warn(
                    "Found directory entry that did not end in .py or"
                    " .sql: %s",
                    relative_path,
                )
                continue

            # Mark as done.
            cur.execute(
                "INSERT INTO applied_schema_deltas (version, file)"
                " VALUES (?,?)",
                (v, relative_path)
            )

        cur.execute(
            "INSERT OR REPLACE INTO schema_version (version, upgraded)"
            " VALUES (?,?)",
            (v, True)
        )
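
Python deltas are ordinary modules loaded with imp.load_source and expected to expose a run_upgrade(cur) entry point taking an open database cursor; the upgrade_appservice_db delta later in this diff is the first real one. As a sketch only, the skeleton below shows that shape; the table and column names are purely illustrative and not part of the commit.

import logging

logger = logging.getLogger(__name__)


def run_upgrade(cur):
    # Called by _upgrade_existing_database with an open cursor; any schema or
    # data migration for this delta goes here.
    logger.info("Running example delta")
    cur.execute("ALTER TABLE example_table ADD COLUMN example_column TEXT")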
def _get_or_create_schema_state(txn):
    schema_path = os.path.join(
        dir_path, "schema", "schema_version.sql",
    )
    create_schema = read_schema(schema_path)
    txn.executescript(create_schema)

    txn.execute("SELECT version, upgraded FROM schema_version")
    row = txn.fetchone()
    current_version = int(row[0]) if row else None
    upgraded = bool(row[1]) if row else None

    if current_version:
        txn.execute(
            "SELECT file FROM applied_schema_deltas WHERE version >= ?",
            (current_version,)
        )
        return current_version, txn.fetchall(), upgraded

    return None
def prepare_sqlite3_database(db_conn):
    """This function should be called before `prepare_database` on sqlite3
    databases.

    Since we changed the way we store the current schema version and handle
    updates to schemas, we need a way to upgrade from the old method to the
    new. This only affects sqlite databases since they were the only ones
    supported at the time.
    """
    with db_conn:
        schema_path = os.path.join(
            dir_path, "schema", "schema_version.sql",
        )
        create_schema = read_schema(schema_path)
        db_conn.executescript(create_schema)

        c = db_conn.execute("SELECT * FROM schema_version")
        rows = c.fetchall()
        c.close()

        if not rows:
            c = db_conn.execute("PRAGMA user_version")
            row = c.fetchone()
            c.close()

            if row and row[0]:
                db_conn.execute(
                    "INSERT OR REPLACE INTO schema_version (version, upgraded)"
                    " VALUES (?,?)",
                    (row[0], False)
                )


@@ -1,34 +0,0 @@
/* Copyright 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS application_services(
id INTEGER PRIMARY KEY AUTOINCREMENT,
url TEXT,
token TEXT,
hs_token TEXT,
sender TEXT,
UNIQUE(token) ON CONFLICT ROLLBACK
);
CREATE TABLE IF NOT EXISTS application_services_regex(
id INTEGER PRIMARY KEY AUTOINCREMENT,
as_id INTEGER NOT NULL,
namespace INTEGER, /* enum[room_id|room_alias|user_id] */
regex TEXT,
FOREIGN KEY(as_id) REFERENCES application_services(id)
);


@@ -0,0 +1,23 @@
import json
import logging

logger = logging.getLogger(__name__)


def run_upgrade(cur):
    cur.execute("SELECT id, regex FROM application_services_regex")
    for row in cur.fetchall():
        try:
            logger.debug("Checking %s..." % row[0])
            json.loads(row[1])
        except ValueError:
            # row isn't in json, make it so.
            string_regex = row[1]
            new_regex = json.dumps({
                "regex": string_regex,
                "exclusive": True
            })
            cur.execute(
                "UPDATE application_services_regex SET regex=? WHERE id=?",
                (new_regex, row[0])
            )


@@ -1,168 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS events(
stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT,
topological_ordering INTEGER NOT NULL,
event_id TEXT NOT NULL,
type TEXT NOT NULL,
room_id TEXT NOT NULL,
content TEXT NOT NULL,
unrecognized_keys TEXT,
processed BOOL NOT NULL,
outlier BOOL NOT NULL,
CONSTRAINT ev_uniq UNIQUE (event_id)
);
CREATE INDEX IF NOT EXISTS events_event_id ON events (event_id);
CREATE INDEX IF NOT EXISTS events_stream_ordering ON events (stream_ordering);
CREATE INDEX IF NOT EXISTS events_topological_ordering ON events (topological_ordering);
CREATE INDEX IF NOT EXISTS events_room_id ON events (room_id);
CREATE TABLE IF NOT EXISTS state_events(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
type TEXT NOT NULL,
state_key TEXT NOT NULL,
prev_state TEXT
);
CREATE UNIQUE INDEX IF NOT EXISTS state_events_event_id ON state_events (event_id);
CREATE INDEX IF NOT EXISTS state_events_room_id ON state_events (room_id);
CREATE INDEX IF NOT EXISTS state_events_type ON state_events (type);
CREATE INDEX IF NOT EXISTS state_events_state_key ON state_events (state_key);
CREATE TABLE IF NOT EXISTS current_state_events(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
type TEXT NOT NULL,
state_key TEXT NOT NULL,
CONSTRAINT curr_uniq UNIQUE (room_id, type, state_key) ON CONFLICT REPLACE
);
CREATE INDEX IF NOT EXISTS curr_events_event_id ON current_state_events (event_id);
CREATE INDEX IF NOT EXISTS current_state_events_room_id ON current_state_events (room_id);
CREATE INDEX IF NOT EXISTS current_state_events_type ON current_state_events (type);
CREATE INDEX IF NOT EXISTS current_state_events_state_key ON current_state_events (state_key);
CREATE TABLE IF NOT EXISTS room_memberships(
event_id TEXT NOT NULL,
user_id TEXT NOT NULL,
sender TEXT NOT NULL,
room_id TEXT NOT NULL,
membership TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS room_memberships_event_id ON room_memberships (event_id);
CREATE INDEX IF NOT EXISTS room_memberships_room_id ON room_memberships (room_id);
CREATE INDEX IF NOT EXISTS room_memberships_user_id ON room_memberships (user_id);
CREATE TABLE IF NOT EXISTS feedback(
event_id TEXT NOT NULL,
feedback_type TEXT,
target_event_id TEXT,
sender TEXT,
room_id TEXT
);
CREATE TABLE IF NOT EXISTS topics(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
topic TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS room_names(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
name TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS rooms(
room_id TEXT PRIMARY KEY NOT NULL,
is_public INTEGER,
creator TEXT
);
CREATE TABLE IF NOT EXISTS room_join_rules(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
join_rule TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS room_join_rules_event_id ON room_join_rules(event_id);
CREATE INDEX IF NOT EXISTS room_join_rules_room_id ON room_join_rules(room_id);
CREATE TABLE IF NOT EXISTS room_power_levels(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
user_id TEXT NOT NULL,
level INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS room_power_levels_event_id ON room_power_levels(event_id);
CREATE INDEX IF NOT EXISTS room_power_levels_room_id ON room_power_levels(room_id);
CREATE INDEX IF NOT EXISTS room_power_levels_room_user ON room_power_levels(room_id, user_id);
CREATE TABLE IF NOT EXISTS room_default_levels(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
level INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS room_default_levels_event_id ON room_default_levels(event_id);
CREATE INDEX IF NOT EXISTS room_default_levels_room_id ON room_default_levels(room_id);
CREATE TABLE IF NOT EXISTS room_add_state_levels(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
level INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS room_add_state_levels_event_id ON room_add_state_levels(event_id);
CREATE INDEX IF NOT EXISTS room_add_state_levels_room_id ON room_add_state_levels(room_id);
CREATE TABLE IF NOT EXISTS room_send_event_levels(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
level INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS room_send_event_levels_event_id ON room_send_event_levels(event_id);
CREATE INDEX IF NOT EXISTS room_send_event_levels_room_id ON room_send_event_levels(room_id);
CREATE TABLE IF NOT EXISTS room_ops_levels(
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
ban_level INTEGER,
kick_level INTEGER
);
CREATE INDEX IF NOT EXISTS room_ops_levels_event_id ON room_ops_levels(event_id);
CREATE INDEX IF NOT EXISTS room_ops_levels_room_id ON room_ops_levels(room_id);
CREATE TABLE IF NOT EXISTS room_hosts(
room_id TEXT NOT NULL,
host TEXT NOT NULL,
CONSTRAINT room_hosts_uniq UNIQUE (room_id, host) ON CONFLICT IGNORE
);
CREATE INDEX IF NOT EXISTS room_hosts_room_id ON room_hosts (room_id);
PRAGMA user_version = 2;


@@ -1,27 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE INDEX IF NOT EXISTS room_aliases_alias ON room_aliases(room_alias);
CREATE INDEX IF NOT EXISTS room_aliases_id ON room_aliases(room_id);
CREATE INDEX IF NOT EXISTS room_alias_servers_alias ON room_alias_servers(room_alias);
DELETE FROM room_aliases WHERE rowid NOT IN (SELECT max(rowid) FROM room_aliases GROUP BY room_alias, room_id);
CREATE UNIQUE INDEX IF NOT EXISTS room_aliases_uniq ON room_aliases(room_alias, room_id);
PRAGMA user_version = 3;


@@ -1,26 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS redactions (
event_id TEXT NOT NULL,
redacts TEXT NOT NULL,
CONSTRAINT ev_uniq UNIQUE (event_id)
);
CREATE INDEX IF NOT EXISTS redactions_event_id ON redactions (event_id);
CREATE INDEX IF NOT EXISTS redactions_redacts ON redactions (redacts);
ALTER TABLE room_ops_levels ADD COLUMN redact_level INTEGER;
PRAGMA user_version = 4;


@@ -1,30 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS user_ips (
user TEXT NOT NULL,
access_token TEXT NOT NULL,
device_id TEXT,
ip TEXT NOT NULL,
user_agent TEXT NOT NULL,
last_seen INTEGER NOT NULL,
CONSTRAINT user_ip UNIQUE (user, access_token, ip, user_agent) ON CONFLICT REPLACE
);
CREATE INDEX IF NOT EXISTS user_ips_user ON user_ips(user);
ALTER TABLE users ADD COLUMN admin BOOL DEFAULT 0 NOT NULL;
PRAGMA user_version = 5;


@@ -1,31 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS server_tls_certificates(
server_name TEXT, -- Server name.
fingerprint TEXT, -- Certificate fingerprint.
from_server TEXT, -- Which key server the certificate was fetched from.
ts_added_ms INTEGER, -- When the certificate was added.
tls_certificate BLOB, -- DER encoded x509 certificate.
CONSTRAINT uniqueness UNIQUE (server_name, fingerprint)
);
CREATE TABLE IF NOT EXISTS server_signature_keys(
server_name TEXT, -- Server name.
key_id TEXT, -- Key version.
from_server TEXT, -- Which key server the key was fetched from.
ts_added_ms INTEGER, -- When the key was added.
verify_key BLOB, -- NACL verification key.
CONSTRAINT uniqueness UNIQUE (server_name, key_id)
);


@@ -1,34 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS event_signatures_2 (
event_id TEXT,
signature_name TEXT,
key_id TEXT,
signature BLOB,
CONSTRAINT uniqueness UNIQUE (event_id, signature_name, key_id)
);
INSERT INTO event_signatures_2 (event_id, signature_name, key_id, signature)
SELECT event_id, signature_name, key_id, signature FROM event_signatures;
DROP TABLE event_signatures;
ALTER TABLE event_signatures_2 RENAME TO event_signatures;
CREATE INDEX IF NOT EXISTS event_signatures_id ON event_signatures (
event_id
);
PRAGMA user_version = 8;


@@ -1,79 +0,0 @@
/* Copyright 2014, 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- To track destination health
CREATE TABLE IF NOT EXISTS destinations(
destination TEXT PRIMARY KEY,
retry_last_ts INTEGER,
retry_interval INTEGER
);
CREATE TABLE IF NOT EXISTS local_media_repository (
media_id TEXT, -- The id used to refer to the media.
media_type TEXT, -- The MIME-type of the media.
media_length INTEGER, -- Length of the media in bytes.
created_ts INTEGER, -- When the content was uploaded in ms.
upload_name TEXT, -- The name the media was uploaded with.
user_id TEXT, -- The user who uploaded the file.
CONSTRAINT uniqueness UNIQUE (media_id)
);
CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
media_id TEXT, -- The id used to refer to the media.
thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
thumbnail_type TEXT, -- The MIME-type of the thumbnail.
thumbnail_method TEXT, -- The method used to make the thumbnail.
thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
CONSTRAINT uniqueness UNIQUE (
media_id, thumbnail_width, thumbnail_height, thumbnail_type
)
);
CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id
ON local_media_repository_thumbnails (media_id);
CREATE TABLE IF NOT EXISTS remote_media_cache (
media_origin TEXT, -- The remote HS the media came from.
media_id TEXT, -- The id used to refer to the media on that server.
media_type TEXT, -- The MIME-type of the media.
created_ts INTEGER, -- When the content was uploaded in ms.
upload_name TEXT, -- The name the media was uploaded with.
media_length INTEGER, -- Length of the media in bytes.
filesystem_id TEXT, -- The name used to store the media on disk.
CONSTRAINT uniqueness UNIQUE (media_origin, media_id)
);
CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
media_origin TEXT, -- The remote HS the media came from.
media_id TEXT, -- The id used to refer to the media.
thumbnail_width INTEGER, -- The width of the thumbnail in pixels.
thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
thumbnail_method TEXT, -- The method used to make the thumbnail
thumbnail_type TEXT, -- The MIME-type of the thumbnail.
thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
filesystem_id TEXT, -- The name used to store the media on disk.
CONSTRAINT uniqueness UNIQUE (
media_origin, media_id, thumbnail_width, thumbnail_height,
thumbnail_type, thumbnail_type
)
);
CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
ON local_media_repository_thumbnails (media_id);
PRAGMA user_version = 9;


@@ -1,24 +0,0 @@
/* Copyright 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS user_filters(
user_id TEXT,
filter_id INTEGER,
filter_json TEXT,
FOREIGN KEY(user_id) REFERENCES users(id)
);
CREATE INDEX IF NOT EXISTS user_filters_by_user_id_filter_id ON user_filters(
user_id, filter_id
);


@@ -1,56 +0,0 @@
/* Copyright 2014 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- Push notification endpoints that users have configured
CREATE TABLE IF NOT EXISTS pushers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_name TEXT NOT NULL,
profile_tag varchar(32) NOT NULL,
kind varchar(8) NOT NULL,
app_id varchar(64) NOT NULL,
app_display_name varchar(64) NOT NULL,
device_display_name varchar(128) NOT NULL,
pushkey blob NOT NULL,
ts BIGINT NOT NULL,
lang varchar(8),
data blob,
last_token TEXT,
last_success BIGINT,
failing_since BIGINT,
FOREIGN KEY(user_name) REFERENCES users(name),
UNIQUE (app_id, pushkey)
);
CREATE TABLE IF NOT EXISTS push_rules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_name TEXT NOT NULL,
rule_id TEXT NOT NULL,
priority_class TINYINT NOT NULL,
priority INTEGER NOT NULL DEFAULT 0,
conditions TEXT NOT NULL,
actions TEXT NOT NULL,
UNIQUE(user_name, rule_id)
);
CREATE INDEX IF NOT EXISTS push_rules_user_name on push_rules (user_name);
CREATE TABLE IF NOT EXISTS push_rules_enable (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_name TEXT NOT NULL,
rule_id TEXT NOT NULL,
enabled TINYINT,
UNIQUE(user_name, rule_id)
);
CREATE INDEX IF NOT EXISTS push_rules_enable_user_name on push_rules_enable (user_name);


@@ -1,21 +0,0 @@
/* Copyright 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS rejections(
event_id TEXT NOT NULL,
reason TEXT NOT NULL,
last_check TEXT NOT NULL,
CONSTRAINT ev_id UNIQUE (event_id) ON CONFLICT REPLACE
);


@@ -0,0 +1,30 @@
/* Copyright 2015 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CREATE TABLE IF NOT EXISTS schema_version(
Lock char(1) NOT NULL DEFAULT 'X', -- Makes sure this table only has one row.
version INTEGER NOT NULL,
upgraded BOOL NOT NULL, -- Whether we reached this version from an upgrade or an initial schema.
CONSTRAINT schema_version_lock_x CHECK (Lock='X')
CONSTRAINT schema_version_lock_uniq UNIQUE (Lock)
);
CREATE TABLE IF NOT EXISTS applied_schema_deltas(
version INTEGER NOT NULL,
file TEXT NOT NULL,
CONSTRAINT schema_deltas_ver_file UNIQUE (version, file) ON CONFLICT IGNORE
);
CREATE INDEX IF NOT EXISTS schema_deltas_ver ON applied_schema_deltas(version);
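
With the two tables above in place, the current schema state of a database can be read back directly; this is essentially what _get_or_create_schema_state does. A small sqlite3 sketch follows, not part of the commit; the homeserver.db path is assumed and the query expects a populated schema_version row.

import sqlite3

conn = sqlite3.connect("homeserver.db")  # assumed database path
version, upgraded = conn.execute(
    "SELECT version, upgraded FROM schema_version"
).fetchone()
deltas = conn.execute(
    "SELECT file FROM applied_schema_deltas WHERE version >= ?", (version,)
).fetchall()
print("schema version %d (upgraded=%s), deltas applied: %d"
      % (version, bool(upgraded), len(deltas)))
conn.close()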