// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sync

import (
	"net/http"
	"time"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
	"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
	"github.com/matrix-org/dendrite/clientapi/httputil"
	"github.com/matrix-org/dendrite/clientapi/jsonerror"
	"github.com/matrix-org/dendrite/syncapi/storage"
	"github.com/matrix-org/dendrite/syncapi/types"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/matrix-org/util"
	log "github.com/sirupsen/logrus"
)
// RequestPool manages HTTP long-poll connections for /sync
type RequestPool struct {
	db        *storage.SyncServerDatabase
	accountDB *accounts.Database
	notifier  *Notifier
}

// NewRequestPool makes a new RequestPool
func NewRequestPool(db *storage.SyncServerDatabase, n *Notifier, adb *accounts.Database) *RequestPool {
	return &RequestPool{db, adb, n}
}
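// A minimal wiring sketch, for illustration only: the variable names below
// (syncDB, notifier, accountDB, device) are hypothetical, and the real HTTP
// routing for /sync lives elsewhere in the codebase. From another package:
//
//	pool := sync.NewRequestPool(syncDB, notifier, accountDB)
//	// In an authenticated /sync handler, with `device` resolved by the auth
//	// layer, the util.JSONResponse returned here is written back to the client.
//	res := pool.OnIncomingSyncRequest(req, device)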
// OnIncomingSyncRequest is called when a client makes a /sync request. This function MUST be
// called in a dedicated goroutine for this request. This function will block the goroutine
// until a response is ready, or it times out.
func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *authtypes.Device) util.JSONResponse {
	// Extract values from request
	logger := util.GetLogger(req.Context())
	userID := device.UserID
	syncReq, err := newSyncRequest(req, *device)
	if err != nil {
		return util.JSONResponse{
			Code: 400,
			JSON: jsonerror.Unknown(err.Error()),
		}
	}
	logger.WithFields(log.Fields{
		"userID":  userID,
		"since":   syncReq.since,
		"timeout": syncReq.timeout,
	}).Info("Incoming /sync request")

	currPos := rp.notifier.CurrentPosition()

	// If this is an initial sync or timeout=0 we return immediately
	if syncReq.since == nil || syncReq.timeout == 0 {
		syncData, err := rp.currentSyncForUser(*syncReq, currPos)
		if err != nil {
			return httputil.LogThenError(req, err)
		}
		return util.JSONResponse{
			Code: 200,
			JSON: syncData,
		}
	}

	// Otherwise, we wait for the notifier to tell us if something *may* have
	// happened. We loop in case it turns out that nothing did happen.

	timer := time.NewTimer(syncReq.timeout) // case of timeout=0 is handled above
	defer timer.Stop()

	userStreamListener := rp.notifier.GetListener(*syncReq)
	defer userStreamListener.Close()

	for {
		select {
		// Wait for notifier to wake us up
		case <-userStreamListener.GetNotifyChannel(currPos):
			currPos = userStreamListener.GetStreamPosition()
		// Or for timeout to expire
		case <-timer.C:
			return util.JSONResponse{
				Code: 200,
				JSON: types.NewResponse(currPos),
			}
		// Or for the request to be cancelled
		case <-req.Context().Done():
			return httputil.LogThenError(req, req.Context().Err())
		}

		// Note that we don't time out during calculation of the sync
		// response. This ensures that we don't waste the hard work
		// of calculating the sync only to get timed out before we
		// can respond.

		syncData, err := rp.currentSyncForUser(*syncReq, currPos)
		if err != nil {
			return httputil.LogThenError(req, err)
		}
		if !syncData.IsEmpty() {
			return util.JSONResponse{
				Code: 200,
				JSON: syncData,
			}
		}
	}
}
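// currentSyncForUser builds the sync response for the given request: a
// complete sync when no `since` token is supplied, otherwise an incremental
// sync up to currentPos, with any relevant account data appended.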
func (rp *RequestPool) currentSyncForUser(req syncRequest, currentPos types.StreamPosition) (res *types.Response, err error) {
	// TODO: handle ignored users
	if req.since == nil {
		res, err = rp.db.CompleteSync(req.ctx, req.device.UserID, req.limit)
	} else {
		res, err = rp.db.IncrementalSync(req.ctx, req.device, *req.since, currentPos, req.limit)
	}

	if err != nil {
		return
	}

	res, err = rp.appendAccountData(res, req.device.UserID, req, currentPos)
	return
}
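// appendAccountData adds account data to the given response: the full set of
// global and per-room account data on an initial sync, or only the data types
// updated between the `since` token and currentPos on an incremental sync.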
func (rp *RequestPool) appendAccountData(
	data *types.Response, userID string, req syncRequest, currentPos types.StreamPosition,
) (*types.Response, error) {
	// TODO: Account data doesn't have a sync position of its own, meaning that
	// account data might be sent multiple times to the client if multiple account
	// data keys were set between two messages. This isn't a huge issue since the
	// duplicate data doesn't represent a huge quantity of data, but an optimisation
	// here would be to make sure each piece of data is sent only once to the client.
	localpart, _, err := gomatrixserverlib.SplitID('@', userID)
	if err != nil {
		return nil, err
	}

	if req.since == nil {
		// If this is the initial sync, we don't need to check whether any data has
		// already been sent. Instead, we send the whole batch.
		var global []gomatrixserverlib.ClientEvent
		var rooms map[string][]gomatrixserverlib.ClientEvent
		global, rooms, err = rp.accountDB.GetAccountData(req.ctx, localpart)
		if err != nil {
			return nil, err
		}
		data.AccountData.Events = global

		for r, j := range data.Rooms.Join {
			if len(rooms[r]) > 0 {
				j.AccountData.Events = rooms[r]
				data.Rooms.Join[r] = j
			}
		}

		return data, nil
	}

	// The sync is not initial; get all account data updated since the latest sync
	dataTypes, err := rp.db.GetAccountDataInRange(req.ctx, userID, *req.since, currentPos)
	if err != nil {
		return nil, err
	}

	if len(dataTypes) == 0 {
		return data, nil
	}

	// Iterate over the rooms
	for roomID, dataTypes := range dataTypes {
		events := []gomatrixserverlib.ClientEvent{}
		// Request the missing data from the database
		for _, dataType := range dataTypes {
			evs, err := rp.accountDB.GetAccountDataByType(
				req.ctx, localpart, roomID, dataType,
			)
			if err != nil {
				return nil, err
			}
			events = append(events, evs...)
		}

		// Append the data to the response
		if len(roomID) > 0 {
			jr := data.Rooms.Join[roomID]
			jr.AccountData.Events = events
			data.Rooms.Join[roomID] = jr
		} else {
			data.AccountData.Events = events
		}
	}

	return data, nil
}