// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
	"context"
	"database/sql"
	"fmt"
	"sort"
	"strings"

	"github.com/matrix-org/dendrite/common"
	"github.com/matrix-org/dendrite/roomserver/types"
	"github.com/matrix-org/util"
)

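// stateDataSchema describes the roomserver_state_block table. Each row is a
// single state entry (event type NID, event state key NID, event NID), and
// the rows sharing a state_block_nid together form one block of room state.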
const stateDataSchema = `
CREATE TABLE IF NOT EXISTS roomserver_state_block (
    state_block_nid INTEGER NOT NULL,
    event_type_nid INTEGER NOT NULL,
    event_state_key_nid INTEGER NOT NULL,
    event_nid INTEGER NOT NULL,
    UNIQUE (state_block_nid, event_type_nid, event_state_key_nid)
);
`

const insertStateDataSQL = "" +
	"INSERT INTO roomserver_state_block (state_block_nid, event_type_nid, event_state_key_nid, event_nid)" +
	" VALUES ($1, $2, $3, $4)"

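// Find the next free state block NID by taking the highest NID currently in
// the table and adding one. IFNULL handles the empty-table case, where the
// first block gets NID 1.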
const selectNextStateBlockNIDSQL = `
SELECT IFNULL(MAX(state_block_nid), 0) + 1 FROM roomserver_state_block
`

// Bulk state lookup by numeric state block ID.
// Sort by the state_block_nid, event_type_nid, event_state_key_nid.
// This means that all the entries for a given state_block_nid will appear
// together in the list and those entries will be sorted by event_type_nid
// and event_state_key_nid. This property makes it easier to merge two
// state data blocks together.
const bulkSelectStateBlockEntriesSQL = "" +
	"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
	" FROM roomserver_state_block WHERE state_block_nid IN ($1)" +
	" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"

// Bulk state lookup by numeric state block ID.
// Filters the rows in each block to the requested types and state keys.
// We would like to restrict the query to particular (event type, state key)
// pairs, but the query language only lets us select the cross product of a
// list of types and a list of state keys. So we have to filter the result in
// the application to restrict it to the (type, state key) pairs we actually
// wanted.
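// For example, asking for ("m.room.member", "@alice:example.org") and
// ("m.room.name", "") will also match any rows for ("m.room.member", "") and
// ("m.room.name", "@alice:example.org"), which the caller never requested.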
const bulkSelectFilteredStateBlockEntriesSQL = "" +
	"SELECT state_block_nid, event_type_nid, event_state_key_nid, event_nid" +
	" FROM roomserver_state_block WHERE state_block_nid IN ($1)" +
	" AND event_type_nid IN ($2) AND event_state_key_nid IN ($3)" +
	" ORDER BY state_block_nid, event_type_nid, event_state_key_nid"

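// stateBlockStatements holds the database handle and the prepared statements
// used to read and write state blocks.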
type stateBlockStatements struct {
	db                                      *sql.DB
	insertStateDataStmt                     *sql.Stmt
	selectNextStateBlockNIDStmt             *sql.Stmt
	bulkSelectStateBlockEntriesStmt         *sql.Stmt
	bulkSelectFilteredStateBlockEntriesStmt *sql.Stmt
}

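// prepare creates the roomserver_state_block table if it does not already
// exist and prepares the statements used by the other methods.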
func (s *stateBlockStatements) prepare(db *sql.DB) (err error) {
	s.db = db
	_, err = db.Exec(stateDataSchema)
	if err != nil {
		return
	}

	return statementList{
		{&s.insertStateDataStmt, insertStateDataSQL},
		{&s.selectNextStateBlockNIDStmt, selectNextStateBlockNIDSQL},
		{&s.bulkSelectStateBlockEntriesStmt, bulkSelectStateBlockEntriesSQL},
		{&s.bulkSelectFilteredStateBlockEntriesStmt, bulkSelectFilteredStateBlockEntriesSQL},
	}.prepare(db)
}

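// bulkInsertStateData allocates the next state block NID and inserts one row
// for each of the given state entries under that NID, returning the new NID.
// If there are no entries to insert it returns 0 without touching the
// database.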
func (s *stateBlockStatements) bulkInsertStateData(
	ctx context.Context, txn *sql.Tx,
	entries []types.StateEntry,
) (types.StateBlockNID, error) {
	if len(entries) == 0 {
		return 0, nil
	}
	var stateBlockNID types.StateBlockNID
	err := txn.Stmt(s.selectNextStateBlockNIDStmt).QueryRowContext(ctx).Scan(&stateBlockNID)
	if err != nil {
		return 0, err
	}

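	// Insert one row per state entry; every row in this batch shares the
	// state block NID we just allocated.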
	for _, entry := range entries {
		_, err := txn.Stmt(s.insertStateDataStmt).ExecContext(
			ctx,
			int64(stateBlockNID),
			int64(entry.EventTypeNID),
			int64(entry.EventStateKeyNID),
			int64(entry.EventNID),
		)
		if err != nil {
			return 0, err
		}
	}
	return stateBlockNID, nil
}

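// bulkSelectStateBlockEntries returns the state entries for each of the given
// state block NIDs, grouped by block and sorted within each block. It returns
// an error if any of the requested blocks is missing from the database.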
func (s *stateBlockStatements) bulkSelectStateBlockEntries(
	ctx context.Context, txn *sql.Tx, stateBlockNIDs []types.StateBlockNID,
) ([]types.StateEntryList, error) {
	nids := make([]interface{}, len(stateBlockNIDs))
	for k, v := range stateBlockNIDs {
		nids[k] = v
	}
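	// The query contains a single "($1)" placeholder for the IN clause.
	// A list cannot be bound to one placeholder, so expand it into one
	// placeholder per state block NID before preparing the statement.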
	selectOrig := strings.Replace(bulkSelectStateBlockEntriesSQL, "($1)", common.QueryVariadic(len(nids)), 1)
	selectPrep, err := s.db.Prepare(selectOrig)
	if err != nil {
		return nil, err
	}
	selectStmt := common.TxStmt(txn, selectPrep)
	rows, err := selectStmt.QueryContext(ctx, nids...)
	if err != nil {
		return nil, err
	}
	defer common.CloseAndLogIfError(ctx, rows, "bulkSelectStateBlockEntries: rows.close() failed")

	results := make([]types.StateEntryList, len(stateBlockNIDs))
	// current is a pointer to the StateEntryList to append the state entries to.
	var current *types.StateEntryList
	i := 0
	for rows.Next() {
		var (
			stateBlockNID    int64
			eventTypeNID     int64
			eventStateKeyNID int64
			eventNID         int64
			entry            types.StateEntry
		)
		if err := rows.Scan(
			&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
		); err != nil {
			return nil, err
		}
		entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
		entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
		entry.EventNID = types.EventNID(eventNID)
		if current == nil || types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
			// The state entry row is for a different state data block to the current one.
			// So we start appending to the next entry in the list.
			current = &results[i]
			current.StateBlockNID = types.StateBlockNID(stateBlockNID)
			i++
		}
		current.StateEntries = append(current.StateEntries, entry)
	}
	if i != len(nids) {
		return nil, fmt.Errorf("storage: state data NIDs missing from the database (%d != %d)", i, len(nids))
	}
	return results, nil
}

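// bulkSelectFilteredStateBlockEntries returns the state entries for the given
// state block NIDs, keeping only the entries that match one of the given
// (event type, state key) tuples. Blocks with no matching entries are omitted
// from the result.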
func (s *stateBlockStatements) bulkSelectFilteredStateBlockEntries(
	ctx context.Context, txn *sql.Tx, // nolint: unparam
	stateBlockNIDs []types.StateBlockNID,
	stateKeyTuples []types.StateKeyTuple,
) ([]types.StateEntryList, error) {
	tuples := stateKeyTupleSorter(stateKeyTuples)
	// Sort the tuples so that we can run binary search against them as we filter the rows returned by the db.
	sort.Sort(tuples)

	eventTypeNIDArray, eventStateKeyNIDArray := tuples.typesAndStateKeysAsArrays()
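	// Expand the three "IN" placeholders so that every state block NID, event
	// type NID and event state key NID gets its own bound parameter. The
	// offsets keep the parameter numbering contiguous across the three lists,
	// matching the order in which the values are appended to params below.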
	sqlStatement := strings.Replace(bulkSelectFilteredStateBlockEntriesSQL, "($1)", common.QueryVariadic(len(stateBlockNIDs)), 1)
	sqlStatement = strings.Replace(sqlStatement, "($2)", common.QueryVariadicOffset(len(eventTypeNIDArray), len(stateBlockNIDs)), 1)
	sqlStatement = strings.Replace(sqlStatement, "($3)", common.QueryVariadicOffset(len(eventStateKeyNIDArray), len(stateBlockNIDs)+len(eventTypeNIDArray)), 1)

	var params []interface{}
	for _, val := range stateBlockNIDs {
		params = append(params, int64(val))
	}
	for _, val := range eventTypeNIDArray {
		params = append(params, val)
	}
	for _, val := range eventStateKeyNIDArray {
		params = append(params, val)
	}

	rows, err := s.db.QueryContext(
		ctx,
		sqlStatement,
		params...,
	)
	if err != nil {
		return nil, err
	}
	defer common.CloseAndLogIfError(ctx, rows, "bulkSelectFilteredStateBlockEntries: rows.close() failed")

	var results []types.StateEntryList
	var current types.StateEntryList
	for rows.Next() {
		var (
			stateBlockNID    int64
			eventTypeNID     int64
			eventStateKeyNID int64
			eventNID         int64
			entry            types.StateEntry
		)
		if err := rows.Scan(
			&stateBlockNID, &eventTypeNID, &eventStateKeyNID, &eventNID,
		); err != nil {
			return nil, err
		}
		entry.EventTypeNID = types.EventTypeNID(eventTypeNID)
		entry.EventStateKeyNID = types.EventStateKeyNID(eventStateKeyNID)
		entry.EventNID = types.EventNID(eventNID)

		// We can use binary search here because we sorted the tuples earlier.
		if !tuples.contains(entry.StateKeyTuple) {
			// The select returns the cross product of types and state keys,
			// so check that this entry's (type, state key) tuple was one of
			// the tuples that was actually requested.
			continue
		}

		if types.StateBlockNID(stateBlockNID) != current.StateBlockNID {
			// The state entry row is for a different state data block to the current one.
			// So we append the current entry to the results and start adding to a new one.
			// The first time through the loop current will be empty.
			if current.StateEntries != nil {
				results = append(results, current)
			}
			current = types.StateEntryList{StateBlockNID: types.StateBlockNID(stateBlockNID)}
		}
		current.StateEntries = append(current.StateEntries, entry)
	}
	// Add the last entry to the list if it is not empty.
	if current.StateEntries != nil {
		results = append(results, current)
	}
	return results, nil
}

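// stateKeyTupleSorter implements sort.Interface so that a list of state key
// tuples can be sorted and then searched with sort.Search.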
type stateKeyTupleSorter []types.StateKeyTuple

func (s stateKeyTupleSorter) Len() int           { return len(s) }
func (s stateKeyTupleSorter) Less(i, j int) bool { return s[i].LessThan(s[j]) }
func (s stateKeyTupleSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// Check whether a tuple is in the list. Assumes that the list is sorted.
func (s stateKeyTupleSorter) contains(value types.StateKeyTuple) bool {
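	// sort.Search finds the first tuple that is not less than value, so the
	// value is present exactly when that index is in range and the tuple
	// there compares equal.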
	i := sort.Search(len(s), func(i int) bool { return !s[i].LessThan(value) })
	return i < len(s) && s[i] == value
}

// List the unique eventTypeNIDs and eventStateKeyNIDs.
// Assumes that the list is sorted.
func (s stateKeyTupleSorter) typesAndStateKeysAsArrays() (eventTypeNIDs []int64, eventStateKeyNIDs []int64) {
	eventTypeNIDs = make([]int64, len(s))
	eventStateKeyNIDs = make([]int64, len(s))
	for i := range s {
		eventTypeNIDs[i] = int64(s[i].EventTypeNID)
		eventStateKeyNIDs[i] = int64(s[i].EventStateKeyNID)
	}
	eventTypeNIDs = eventTypeNIDs[:util.SortAndUnique(int64Sorter(eventTypeNIDs))]
	eventStateKeyNIDs = eventStateKeyNIDs[:util.SortAndUnique(int64Sorter(eventStateKeyNIDs))]
	return
}

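// int64Sorter implements sort.Interface for a slice of int64 so that
// util.SortAndUnique can sort and deduplicate the NID arrays above.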
type int64Sorter []int64

func (s int64Sorter) Len() int           { return len(s) }
func (s int64Sorter) Less(i, j int) bool { return s[i] < s[j] }
func (s int64Sorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }