// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package input contains the code that processes new room events.
package input

import (
	"context"
	"encoding/json"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"github.com/matrix-org/dendrite/internal/hooks"
	"github.com/matrix-org/dendrite/roomserver/acls"
	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/roomserver/storage"
	"github.com/matrix-org/gomatrixserverlib"
	log "github.com/sirupsen/logrus"
	"go.uber.org/atomic"
)
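
// Inputer processes incoming room events and writes the resulting output
// events to the Kafka output topic.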
type Inputer struct {
	DB                   storage.Database
	Producer             sarama.SyncProducer
	ServerName           gomatrixserverlib.ServerName
	ACLs                 *acls.ServerACLs
	OutputRoomEventTopic string

	workers sync.Map // room ID -> *inputWorker
}
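
// inputTask is a single room event waiting to be processed, together with
// the wait group used to signal the caller when processing has finished.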
type inputTask struct {
	ctx   context.Context
	event *api.InputRoomEvent
	wg    *sync.WaitGroup
	err   error // written back by worker, only safe to read when all tasks are done
}
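
// inputWorker processes the tasks for a single room (or for all rooms when
// the database doesn't support concurrent room inputs) one at a time.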
type inputWorker struct {
	r       *Inputer
	running atomic.Bool
	input   chan *inputTask
}
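
// start runs the worker loop, handling tasks from the input channel one at
// a time. The compare-and-swap on the running flag ensures that only one
// copy of the loop runs at once; the loop exits after five seconds of
// inactivity and is restarted on demand by InputRoomEvents.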
func (w *inputWorker) start() {
	if !w.running.CAS(false, true) {
		return
	}
	defer w.running.Store(false)
	for {
		select {
		case task := <-w.input:
			hooks.Run(hooks.KindNewEventReceived, &task.event.Event)
			_, task.err = w.r.processRoomEvent(task.ctx, task.event)
			if task.err == nil {
				hooks.Run(hooks.KindNewEventPersisted, &task.event.Event)
			}
			task.wg.Done()
		case <-time.After(time.Second * 5):
			// Stop the worker if it has been idle for a while. It will be
			// started again when the next task arrives.
			return
		}
	}
}

// WriteOutputEvents implements OutputRoomEventWriter
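// It marshals each update to JSON and produces it to the output Kafka topic,
// keyed by room ID.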
func (r *Inputer) WriteOutputEvents(roomID string, updates []api.OutputEvent) error {
	messages := make([]*sarama.ProducerMessage, len(updates))
	for i := range updates {
		value, err := json.Marshal(updates[i])
		if err != nil {
			return err
		}
		logger := log.WithFields(log.Fields{
			"room_id": roomID,
			"type":    updates[i].Type,
		})
		if updates[i].NewRoomEvent != nil {
			logger = logger.WithFields(log.Fields{
				"event_type":     updates[i].NewRoomEvent.Event.Type(),
				"event_id":       updates[i].NewRoomEvent.Event.EventID(),
				"adds_state":     len(updates[i].NewRoomEvent.AddsStateEventIDs),
				"removes_state":  len(updates[i].NewRoomEvent.RemovesStateEventIDs),
				"send_as_server": updates[i].NewRoomEvent.SendAsServer,
				"sender":         updates[i].NewRoomEvent.Event.Sender(),
			})
			// If this event updates the server ACLs, notify the ACL tracker
			// once the output events have been written.
			if updates[i].NewRoomEvent.Event.Type() == "m.room.server_acl" && updates[i].NewRoomEvent.Event.StateKeyEquals("") {
				ev := updates[i].NewRoomEvent.Event.Unwrap()
				defer r.ACLs.OnServerACLUpdate(ev)
			}
		}
		logger.Infof("Producing to topic '%s'", r.OutputRoomEventTopic)
		// The message is keyed by room ID so that updates for the same room
		// map to the same partition and keep their ordering.
		messages[i] = &sarama.ProducerMessage{
			Topic: r.OutputRoomEventTopic,
			Key:   sarama.StringEncoder(roomID),
			Value: sarama.ByteEncoder(value),
		}
	}
	errs := r.Producer.SendMessages(messages)
	if errs != nil {
		for _, err := range errs.(sarama.ProducerErrors) {
			log.WithError(err).WithField("message_bytes", err.Msg.Value.Length()).Error("Write to kafka failed")
		}
	}
	return errs
}

// InputRoomEvents implements api.RoomserverInternalAPI
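// Events are dispatched to per-room workers (or to a single global worker if
// the database doesn't support concurrent room inputs), so that events within
// a room are processed in order.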
func (r *Inputer) InputRoomEvents(
	ctx context.Context,
	request *api.InputRoomEventsRequest,
	response *api.InputRoomEventsResponse,
) {
	// Create a wait group. Each task that we dispatch will call Done on
	// this wait group so that we know when all of our events have been
	// processed.
	wg := &sync.WaitGroup{}
	wg.Add(len(request.InputRoomEvents))
	tasks := make([]*inputTask, len(request.InputRoomEvents))

	for i, e := range request.InputRoomEvents {
		// Work out if we are running per-room workers or if we're just doing
		// it on a global basis (e.g. SQLite).
		roomID := "global"
		if r.DB.SupportsConcurrentRoomInputs() {
			roomID = e.Event.RoomID()
		}

		// Look up the worker, or create it if it doesn't exist. This channel
		// is buffered to reduce the chance that we'll be blocked by another
		// room - the channel will be quite small as it's just pointer types.
		w, _ := r.workers.LoadOrStore(roomID, &inputWorker{
			r:     r,
			input: make(chan *inputTask, 10),
		})
		worker := w.(*inputWorker)

		// Create a task. This contains the input event and a reference to
		// the wait group, so that the worker can notify us when this specific
		// task has been finished.
		tasks[i] = &inputTask{
			ctx:   ctx,
			event: &request.InputRoomEvents[i],
			wg:    wg,
		}

		// Start the worker if it isn't already running, then send the task
		// to it.
		go worker.start()
		worker.input <- tasks[i]
	}

	// Wait for all of the workers to return results about our tasks.
	wg.Wait()

	// If any of the tasks returned an error, we should probably report
	// that back to the caller.
	for _, task := range tasks {
		if task.err != nil {
			response.ErrMsg = task.err.Error()
			_, rejected := task.err.(*gomatrixserverlib.NotAllowed)
			response.NotAllowed = rejected
			return
		}
	}
}