2017-04-21 00:40:52 +02:00
|
|
|
// Copyright 2017 Vector Creations Ltd
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2017-03-30 16:29:23 +02:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
2017-09-18 17:52:22 +02:00
|
|
|
"context"
|
2017-03-30 16:29:23 +02:00
|
|
|
"database/sql"
|
2019-08-07 12:12:09 +02:00
|
|
|
"encoding/json"
|
2018-11-07 20:12:23 +01:00
|
|
|
"sort"
|
2017-03-30 16:29:23 +02:00
|
|
|
|
2017-12-06 10:37:18 +01:00
|
|
|
"github.com/matrix-org/dendrite/roomserver/api"
|
|
|
|
|
2017-03-30 16:29:23 +02:00
|
|
|
"github.com/lib/pq"
|
2017-08-21 18:20:23 +02:00
|
|
|
"github.com/matrix-org/dendrite/common"
|
2017-04-05 11:30:13 +02:00
|
|
|
"github.com/matrix-org/gomatrixserverlib"
|
2017-11-16 11:12:02 +01:00
|
|
|
log "github.com/sirupsen/logrus"
|
2017-03-30 16:29:23 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// outputRoomEventsSchema creates the shared stream-ID sequence and the table
// storing every event received from the roomserver output log, together with
// the optional state delta (add/remove state event IDs) and the sending
// client's session/transaction metadata.
const outputRoomEventsSchema = `
-- This sequence is shared between all the tables generated from kafka logs.
CREATE SEQUENCE IF NOT EXISTS syncapi_stream_id;

-- Stores output room events received from the roomserver.
CREATE TABLE IF NOT EXISTS syncapi_output_room_events (
    -- An incrementing ID which denotes the position in the log that this event resides at.
    -- NB: 'serial' makes no guarantees to increment by 1 every time, only that it increments.
    --     This isn't a problem for us since we just want to order by this field.
    id BIGINT PRIMARY KEY DEFAULT nextval('syncapi_stream_id'),
    -- The event ID for the event
    event_id TEXT NOT NULL,
    -- The 'room_id' key for the event.
    room_id TEXT NOT NULL,
    -- The JSON for the event. Stored as TEXT because this should be valid UTF-8.
    event_json TEXT NOT NULL,
    -- The event type e.g 'm.room.member'.
    type TEXT NOT NULL,
    -- The 'sender' property of the event.
    sender TEXT NOT NULL,
    -- true if the event content contains a url key.
    contains_url BOOL NOT NULL,
    -- A list of event IDs which represent a delta of added/removed room state. This can be NULL
    -- if there is no delta.
    add_state_ids TEXT[],
    remove_state_ids TEXT[],
    session_id BIGINT, -- The client session that sent the event, if any
    transaction_id TEXT -- The transaction id used to send the event, if any
);
-- for event selection
CREATE UNIQUE INDEX IF NOT EXISTS syncapi_event_id_idx ON syncapi_output_room_events(event_id);
`
|
|
|
|
|
|
|
|
// insertEventSQL writes one output room event, including its optional state
// delta and sending-client metadata, returning the newly allocated stream id.
const insertEventSQL = "" +
	"INSERT INTO syncapi_output_room_events (" +
	"room_id, event_id, event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id" +
	") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id"
|
2017-04-05 11:30:13 +02:00
|
|
|
|
|
|
|
// selectEventsSQL fetches events by event ID; $1 is a text[] of event IDs.
const selectEventsSQL = "" +
	"SELECT id, event_json, session_id, transaction_id FROM syncapi_output_room_events WHERE event_id = ANY($1)"
|
2017-04-10 16:12:18 +02:00
|
|
|
|
2017-04-13 17:56:46 +02:00
|
|
|
// selectRecentEventsSQL returns up to $4 events in room $1 whose stream id is
// in the half-open range ($2, $3], newest (highest id) first.
const selectRecentEventsSQL = "" +
	"SELECT id, event_json, session_id, transaction_id FROM syncapi_output_room_events" +
	" WHERE room_id = $1 AND id > $2 AND id <= $3" +
	" ORDER BY id DESC LIMIT $4"
|
2017-04-13 17:56:46 +02:00
|
|
|
|
2017-09-19 18:15:46 +02:00
|
|
|
// selectMaxEventIDSQL returns the highest stream id in the table, or SQL NULL
// if the table is empty (hence the sql.NullInt64 scan in selectMaxEventID).
const selectMaxEventIDSQL = "" +
	"SELECT MAX(id) FROM syncapi_output_room_events"
|
2017-04-10 16:12:18 +02:00
|
|
|
|
2017-04-19 17:04:01 +02:00
|
|
|
// In order for us to apply the state updates correctly, rows need to be ordered in the order they were received (id).
// Parameters: ($1, $2] is the stream-id range; $3/$4 are sender allow/deny
// lists, $5/$6 are type LIKE-pattern allow/deny lists (NULL array disables
// that filter), $7 optionally filters on contains_url, $8 caps the row count.
const selectStateInRangeSQL = "" +
	"SELECT id, event_json, add_state_ids, remove_state_ids" +
	" FROM syncapi_output_room_events" +
	" WHERE (id > $1 AND id <= $2) AND (add_state_ids IS NOT NULL OR remove_state_ids IS NOT NULL)" +
	" AND ( $3::text[] IS NULL OR sender = ANY($3) )" +
	" AND ( $4::text[] IS NULL OR NOT(sender = ANY($4)) )" +
	" AND ( $5::text[] IS NULL OR type LIKE ANY($5) )" +
	" AND ( $6::text[] IS NULL OR NOT(type LIKE ANY($6)) )" +
	" AND ( $7::bool IS NULL OR contains_url = $7 )" +
	" ORDER BY id ASC" +
	" LIMIT $8"
|
2017-04-19 17:04:01 +02:00
|
|
|
|
2017-03-30 16:29:23 +02:00
|
|
|
// outputRoomEventsStatements holds the prepared statements for operating on
// the syncapi_output_room_events table. Populate via prepare() before use.
type outputRoomEventsStatements struct {
	insertEventStmt *sql.Stmt
	selectEventsStmt *sql.Stmt
	selectMaxEventIDStmt *sql.Stmt
	selectRecentEventsStmt *sql.Stmt
	selectStateInRangeStmt *sql.Stmt
}
|
|
|
|
|
|
|
|
func (s *outputRoomEventsStatements) prepare(db *sql.DB) (err error) {
|
|
|
|
_, err = db.Exec(outputRoomEventsSchema)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if s.insertEventStmt, err = db.Prepare(insertEventSQL); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-05 11:30:13 +02:00
|
|
|
if s.selectEventsStmt, err = db.Prepare(selectEventsSQL); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-09-19 18:15:46 +02:00
|
|
|
if s.selectMaxEventIDStmt, err = db.Prepare(selectMaxEventIDSQL); err != nil {
|
2017-04-10 16:12:18 +02:00
|
|
|
return
|
|
|
|
}
|
2017-04-13 17:56:46 +02:00
|
|
|
if s.selectRecentEventsStmt, err = db.Prepare(selectRecentEventsSQL); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-19 17:04:01 +02:00
|
|
|
if s.selectStateInRangeStmt, err = db.Prepare(selectStateInRangeSQL); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-10 16:12:18 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-07-12 16:59:53 +02:00
|
|
|
// selectStateInRange returns the state events between the two given PDU stream positions, exclusive of oldPos, inclusive of newPos.
|
2017-04-19 17:04:01 +02:00
|
|
|
// Results are bucketed based on the room ID. If the same state is overwritten multiple times between the
|
|
|
|
// two positions, only the most recent state is returned.
|
2017-06-05 11:37:04 +02:00
|
|
|
func (s *outputRoomEventsStatements) selectStateInRange(
|
2019-07-12 16:59:53 +02:00
|
|
|
ctx context.Context, txn *sql.Tx, oldPos, newPos int64,
|
2019-08-07 12:12:09 +02:00
|
|
|
stateFilterPart *gomatrixserverlib.FilterPart,
|
2017-06-05 11:37:04 +02:00
|
|
|
) (map[string]map[string]bool, map[string]streamEvent, error) {
|
2017-09-18 17:52:22 +02:00
|
|
|
stmt := common.TxStmt(txn, s.selectStateInRangeStmt)
|
|
|
|
|
2019-08-07 12:12:09 +02:00
|
|
|
rows, err := stmt.QueryContext(
|
|
|
|
ctx, oldPos, newPos,
|
|
|
|
pq.StringArray(stateFilterPart.Senders),
|
|
|
|
pq.StringArray(stateFilterPart.NotSenders),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(stateFilterPart.Types)),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(stateFilterPart.NotTypes)),
|
|
|
|
stateFilterPart.ContainsURL,
|
|
|
|
stateFilterPart.Limit,
|
|
|
|
)
|
2017-04-19 17:04:01 +02:00
|
|
|
if err != nil {
|
2017-06-05 11:37:04 +02:00
|
|
|
return nil, nil, err
|
2017-04-19 17:04:01 +02:00
|
|
|
}
|
|
|
|
// Fetch all the state change events for all rooms between the two positions then loop each event and:
|
|
|
|
// - Keep a cache of the event by ID (99% of state change events are for the event itself)
|
|
|
|
// - For each room ID, build up an array of event IDs which represents cumulative adds/removes
|
|
|
|
// For each room, map cumulative event IDs to events and return. This may need to a batch SELECT based on event ID
|
|
|
|
// if they aren't in the event ID cache. We don't handle state deletion yet.
|
2017-05-17 17:21:27 +02:00
|
|
|
eventIDToEvent := make(map[string]streamEvent)
|
2017-04-19 17:04:01 +02:00
|
|
|
|
|
|
|
// RoomID => A set (map[string]bool) of state event IDs which are between the two positions
|
|
|
|
stateNeeded := make(map[string]map[string]bool)
|
|
|
|
|
|
|
|
for rows.Next() {
|
|
|
|
var (
|
2017-05-17 17:21:27 +02:00
|
|
|
streamPos int64
|
2017-04-19 17:04:01 +02:00
|
|
|
eventBytes []byte
|
|
|
|
addIDs pq.StringArray
|
|
|
|
delIDs pq.StringArray
|
|
|
|
)
|
2017-05-17 17:21:27 +02:00
|
|
|
if err := rows.Scan(&streamPos, &eventBytes, &addIDs, &delIDs); err != nil {
|
2017-06-05 11:37:04 +02:00
|
|
|
return nil, nil, err
|
2017-04-19 17:04:01 +02:00
|
|
|
}
|
|
|
|
// Sanity check for deleted state and whine if we see it. We don't need to do anything
|
|
|
|
// since it'll just mark the event as not being needed.
|
|
|
|
if len(addIDs) < len(delIDs) {
|
|
|
|
log.WithFields(log.Fields{
|
|
|
|
"since": oldPos,
|
|
|
|
"current": newPos,
|
|
|
|
"adds": addIDs,
|
|
|
|
"dels": delIDs,
|
|
|
|
}).Warn("StateBetween: ignoring deleted state")
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: Handle redacted events
|
|
|
|
ev, err := gomatrixserverlib.NewEventFromTrustedJSON(eventBytes, false)
|
|
|
|
if err != nil {
|
2017-06-05 11:37:04 +02:00
|
|
|
return nil, nil, err
|
2017-04-19 17:04:01 +02:00
|
|
|
}
|
|
|
|
needSet := stateNeeded[ev.RoomID()]
|
|
|
|
if needSet == nil { // make set if required
|
|
|
|
needSet = make(map[string]bool)
|
|
|
|
}
|
|
|
|
for _, id := range delIDs {
|
|
|
|
needSet[id] = false
|
|
|
|
}
|
|
|
|
for _, id := range addIDs {
|
|
|
|
needSet[id] = true
|
|
|
|
}
|
|
|
|
stateNeeded[ev.RoomID()] = needSet
|
|
|
|
|
2017-12-06 10:37:18 +01:00
|
|
|
eventIDToEvent[ev.EventID()] = streamEvent{
|
|
|
|
Event: ev,
|
2019-07-12 16:59:53 +02:00
|
|
|
streamPosition: streamPos,
|
2017-12-06 10:37:18 +01:00
|
|
|
}
|
2017-04-19 17:04:01 +02:00
|
|
|
}
|
|
|
|
|
2017-06-05 11:37:04 +02:00
|
|
|
return stateNeeded, eventIDToEvent, nil
|
2017-04-19 17:04:01 +02:00
|
|
|
}
|
|
|
|
|
2017-04-13 17:56:46 +02:00
|
|
|
// MaxID returns the ID of the last inserted event in this table. 'txn' is optional. If it is not supplied,
|
|
|
|
// then this function should only ever be used at startup, as it will race with inserting events if it is
|
|
|
|
// done afterwards. If there are no inserted events, 0 is returned.
|
2017-09-19 18:15:46 +02:00
|
|
|
func (s *outputRoomEventsStatements) selectMaxEventID(
|
2017-09-18 17:52:22 +02:00
|
|
|
ctx context.Context, txn *sql.Tx,
|
|
|
|
) (id int64, err error) {
|
2017-04-10 16:12:18 +02:00
|
|
|
var nullableID sql.NullInt64
|
2017-09-19 18:15:46 +02:00
|
|
|
stmt := common.TxStmt(txn, s.selectMaxEventIDStmt)
|
2017-09-18 17:52:22 +02:00
|
|
|
err = stmt.QueryRowContext(ctx).Scan(&nullableID)
|
2017-04-10 16:12:18 +02:00
|
|
|
if nullableID.Valid {
|
|
|
|
id = nullableID.Int64
|
|
|
|
}
|
2017-03-30 16:29:23 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-10 16:12:18 +02:00
|
|
|
// InsertEvent into the output_room_events table. addState and removeState are an optional list of state event IDs. Returns the position
|
|
|
|
// of the inserted event.
|
2017-09-18 17:52:22 +02:00
|
|
|
func (s *outputRoomEventsStatements) insertEvent(
|
|
|
|
ctx context.Context, txn *sql.Tx,
|
|
|
|
event *gomatrixserverlib.Event, addState, removeState []string,
|
2017-12-06 10:37:18 +01:00
|
|
|
transactionID *api.TransactionID,
|
2017-09-18 17:52:22 +02:00
|
|
|
) (streamPos int64, err error) {
|
2019-08-23 18:55:40 +02:00
|
|
|
var txnID *string
|
|
|
|
var sessionID *int64
|
2017-12-06 10:37:18 +01:00
|
|
|
if transactionID != nil {
|
2019-08-23 18:55:40 +02:00
|
|
|
sessionID = &transactionID.SessionID
|
2017-12-06 10:37:18 +01:00
|
|
|
txnID = &transactionID.TransactionID
|
|
|
|
}
|
|
|
|
|
2019-08-07 12:12:09 +02:00
|
|
|
// Parse content as JSON and search for an "url" key
|
|
|
|
containsURL := false
|
|
|
|
var content map[string]interface{}
|
|
|
|
if json.Unmarshal(event.Content(), &content) != nil {
|
|
|
|
// Set containsURL to true if url is present
|
|
|
|
_, containsURL = content["url"]
|
|
|
|
}
|
|
|
|
|
2017-09-18 17:52:22 +02:00
|
|
|
stmt := common.TxStmt(txn, s.insertEventStmt)
|
|
|
|
err = stmt.QueryRowContext(
|
|
|
|
ctx,
|
|
|
|
event.RoomID(),
|
|
|
|
event.EventID(),
|
|
|
|
event.JSON(),
|
2019-08-07 12:12:09 +02:00
|
|
|
event.Type(),
|
|
|
|
event.Sender(),
|
|
|
|
containsURL,
|
2017-09-18 17:52:22 +02:00
|
|
|
pq.StringArray(addState),
|
|
|
|
pq.StringArray(removeState),
|
2019-08-23 18:55:40 +02:00
|
|
|
sessionID,
|
2017-12-06 10:37:18 +01:00
|
|
|
txnID,
|
2017-04-10 16:12:18 +02:00
|
|
|
).Scan(&streamPos)
|
|
|
|
return
|
2017-03-30 16:29:23 +02:00
|
|
|
}
|
2017-04-05 11:30:13 +02:00
|
|
|
|
2017-04-13 17:56:46 +02:00
|
|
|
// RecentEventsInRoom returns the most recent events in the given room, up to a maximum of 'limit'.
|
2017-06-05 11:37:04 +02:00
|
|
|
func (s *outputRoomEventsStatements) selectRecentEvents(
|
2017-09-18 17:52:22 +02:00
|
|
|
ctx context.Context, txn *sql.Tx,
|
2019-07-12 16:59:53 +02:00
|
|
|
roomID string, fromPos, toPos int64, limit int,
|
2017-06-05 11:37:04 +02:00
|
|
|
) ([]streamEvent, error) {
|
2017-09-18 17:52:22 +02:00
|
|
|
stmt := common.TxStmt(txn, s.selectRecentEventsStmt)
|
|
|
|
rows, err := stmt.QueryContext(ctx, roomID, fromPos, toPos, limit)
|
2017-04-13 17:56:46 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-09-20 11:59:19 +02:00
|
|
|
defer rows.Close() // nolint: errcheck
|
2017-06-07 17:35:41 +02:00
|
|
|
events, err := rowsToStreamEvents(rows)
|
2017-04-20 12:18:26 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-11-07 20:12:23 +01:00
|
|
|
// The events need to be returned from oldest to latest, which isn't
|
2019-07-12 17:43:01 +02:00
|
|
|
// necessarily the way the SQL query returns them, so a sort is necessary to
|
2018-11-07 20:12:23 +01:00
|
|
|
// ensure the events are in the right order in the slice.
|
|
|
|
sort.SliceStable(events, func(i int, j int) bool {
|
|
|
|
return events[i].streamPosition < events[j].streamPosition
|
|
|
|
})
|
2018-01-02 11:33:25 +01:00
|
|
|
return events, nil
|
2017-04-13 17:56:46 +02:00
|
|
|
}
|
|
|
|
|
2017-04-05 11:30:13 +02:00
|
|
|
// Events returns the events for the given event IDs. Returns an error if any one of the event IDs given are missing
|
|
|
|
// from the database.
|
2017-09-18 17:52:22 +02:00
|
|
|
func (s *outputRoomEventsStatements) selectEvents(
|
|
|
|
ctx context.Context, txn *sql.Tx, eventIDs []string,
|
|
|
|
) ([]streamEvent, error) {
|
|
|
|
stmt := common.TxStmt(txn, s.selectEventsStmt)
|
|
|
|
rows, err := stmt.QueryContext(ctx, pq.StringArray(eventIDs))
|
2017-04-13 17:56:46 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-09-20 11:59:19 +02:00
|
|
|
defer rows.Close() // nolint: errcheck
|
2017-06-07 17:35:41 +02:00
|
|
|
return rowsToStreamEvents(rows)
|
2017-04-13 17:56:46 +02:00
|
|
|
}
|
|
|
|
|
2017-06-07 17:35:41 +02:00
|
|
|
func rowsToStreamEvents(rows *sql.Rows) ([]streamEvent, error) {
|
2017-05-17 17:21:27 +02:00
|
|
|
var result []streamEvent
|
2017-04-13 17:56:46 +02:00
|
|
|
for rows.Next() {
|
2017-05-17 17:21:27 +02:00
|
|
|
var (
|
2017-12-06 10:37:18 +01:00
|
|
|
streamPos int64
|
|
|
|
eventBytes []byte
|
2019-08-23 18:55:40 +02:00
|
|
|
sessionID *int64
|
2017-12-06 10:37:18 +01:00
|
|
|
txnID *string
|
|
|
|
transactionID *api.TransactionID
|
2017-05-17 17:21:27 +02:00
|
|
|
)
|
2019-08-23 18:55:40 +02:00
|
|
|
if err := rows.Scan(&streamPos, &eventBytes, &sessionID, &txnID); err != nil {
|
2017-04-05 11:30:13 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
2017-04-13 17:56:46 +02:00
|
|
|
// TODO: Handle redacted events
|
2017-04-05 11:30:13 +02:00
|
|
|
ev, err := gomatrixserverlib.NewEventFromTrustedJSON(eventBytes, false)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-12-06 10:37:18 +01:00
|
|
|
|
2019-08-23 18:55:40 +02:00
|
|
|
if sessionID != nil && txnID != nil {
|
2017-12-06 10:37:18 +01:00
|
|
|
transactionID = &api.TransactionID{
|
2019-08-23 18:55:40 +02:00
|
|
|
SessionID: *sessionID,
|
2017-12-06 10:37:18 +01:00
|
|
|
TransactionID: *txnID,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
result = append(result, streamEvent{
|
|
|
|
Event: ev,
|
2019-07-12 16:59:53 +02:00
|
|
|
streamPosition: streamPos,
|
2017-12-06 10:37:18 +01:00
|
|
|
transactionID: transactionID,
|
|
|
|
})
|
2017-04-05 11:30:13 +02:00
|
|
|
}
|
|
|
|
return result, nil
|
|
|
|
}
|