
Merge branch 'master' into add-nats-support

Neil Alexander 2021-07-08 10:12:28 +01:00
commit fc5f8e1743
GPG key ID: A02A2019A2BB0944
54 changed files with 2761 additions and 318 deletions


@@ -1,7 +1,7 @@
# Dendrite [![Build Status](https://badge.buildkite.com/4be40938ab19f2bbc4a6c6724517353ee3ec1422e279faf374.svg?branch=master)](https://buildkite.com/matrix-dot-org/dendrite) [![Dendrite](https://img.shields.io/matrix/dendrite:matrix.org.svg?label=%23dendrite%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite:matrix.org) [![Dendrite Dev](https://img.shields.io/matrix/dendrite-dev:matrix.org.svg?label=%23dendrite-dev%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite-dev:matrix.org)
Dendrite is a second-generation Matrix homeserver written in Go.
It intends to provide an **efficient**, **reliable** and **scalable** alternative to Synapse:
It intends to provide an **efficient**, **reliable** and **scalable** alternative to [Synapse](https://github.com/matrix-org/synapse):
- Efficient: A small memory footprint with better baseline performance than an out-of-the-box Synapse.
- Reliable: Implements the Matrix specification as written, using the
[same test suite](https://github.com/matrix-org/sytest) as Synapse as well as


@@ -32,14 +32,18 @@ INSERT OR IGNORE INTO appservice_counters (name, last_id) VALUES('txn_id', 1);
`
const selectTxnIDSQL = `
SELECT last_id FROM appservice_counters WHERE name='txn_id';
UPDATE appservice_counters SET last_id=last_id+1 WHERE name='txn_id';
SELECT last_id FROM appservice_counters WHERE name='txn_id'
`
const updateTxnIDSQL = `
UPDATE appservice_counters SET last_id=last_id+1 WHERE name='txn_id'
`
type txnStatements struct {
db *sql.DB
writer sqlutil.Writer
selectTxnIDStmt *sql.Stmt
updateTxnIDStmt *sql.Stmt
}
func (s *txnStatements) prepare(db *sql.DB, writer sqlutil.Writer) (err error) {
@@ -54,6 +58,10 @@ func (s *txnStatements) prepare(db *sql.DB, writer sqlutil.Writer) (err error) {
return
}
if s.updateTxnIDStmt, err = db.Prepare(updateTxnIDSQL); err != nil {
return
}
return
}
@@ -63,6 +71,11 @@ func (s *txnStatements) selectTxnID(
) (txnID int, err error) {
err = s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
err := s.selectTxnIDStmt.QueryRowContext(ctx).Scan(&txnID)
if err != nil {
return err
}
_, err = s.updateTxnIDStmt.ExecContext(ctx)
return err
})
return
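The split matters because `database/sql` prepares one statement per `*sql.Stmt`: with the old multi-statement `selectTxnIDSQL`, the trailing `UPDATE` was not reliably executed by the driver. Running both prepared statements inside a single `writer.Do` transaction, as above, keeps the read-and-increment atomic.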


@@ -37,19 +37,21 @@ runtime config should come from. The mounted folder must contain:
To generate keys:
```
go run github.com/matrix-org/dendrite/cmd/generate-keys \
--private-key=matrix_key.pem \
--tls-cert=server.crt \
--tls-key=server.key
docker run --rm --entrypoint="" \
-v $(pwd):/mnt \
matrixdotorg/dendrite-monolith:latest \
/usr/bin/generate-keys \
-private-key /mnt/matrix_key.pem \
-tls-cert /mnt/server.crt \
-tls-key /mnt/server.key
```
The key files will now exist in your current working directory, and can be mounted into place.
## Starting Dendrite as a monolith deployment
Create your config based on the `dendrite.yaml` configuration file in the `docker/config`
folder in the [Dendrite repository](https://github.com/matrix-org/dendrite). Additionally,
make the following changes to the configuration:
- Enable Naffka: `use_naffka: true`
folder in the [Dendrite repository](https://github.com/matrix-org/dendrite).
Once in place, start the PostgreSQL dependency:


@@ -7,7 +7,7 @@ do
case "$option"
in
a) gomobile bind -v -target android -trimpath -ldflags="-s -w" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
i) gomobile bind -v -target ios -trimpath -ldflags="-s -w" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
i) gomobile bind -v -target ios -trimpath -ldflags="" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
*) echo "No target specified, specify -a or -i"; exit 1 ;;
esac
done


@@ -10,7 +10,6 @@ import (
"io"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"os"
@@ -37,15 +36,16 @@ import (
userapiAPI "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
"go.uber.org/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
"github.com/matrix-org/pinecone/router"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
"github.com/matrix-org/pinecone/types"
pineconeTypes "github.com/matrix-org/pinecone/types"
_ "golang.org/x/mobile/bind"
)
const (
@@ -63,7 +63,7 @@ type DendriteMonolith struct {
CacheDirectory string
staticPeerURI string
staticPeerMutex sync.RWMutex
staticPeerAttempts atomic.Uint32
staticPeerAttempt chan struct{}
listener net.Listener
httpServer *http.Server
processContext *process.ProcessContext
@@ -97,7 +97,9 @@ func (m *DendriteMonolith) SetStaticPeer(uri string) {
m.staticPeerMutex.Unlock()
m.DisconnectType(pineconeRouter.PeerTypeRemote)
if uri != "" {
m.staticPeerConnect()
go func() {
m.staticPeerAttempt <- struct{}{}
}()
}
}
@@ -193,6 +195,8 @@ func (m *DendriteMonolith) RegisterDevice(localpart, deviceID string) (string, e
}
func (m *DendriteMonolith) staticPeerConnect() {
attempt := func() {
if m.PineconeRouter.PeerCount(router.PeerTypeRemote) == 0 {
m.staticPeerMutex.RLock()
uri := m.staticPeerURI
m.staticPeerMutex.RUnlock()
@@ -200,10 +204,18 @@ func (m *DendriteMonolith) staticPeerConnect() {
return
}
if err := conn.ConnectToPeer(m.PineconeRouter, uri); err != nil {
exp := time.Second * time.Duration(math.Exp2(float64(m.staticPeerAttempts.Inc())))
time.AfterFunc(exp, m.staticPeerConnect)
} else {
m.staticPeerAttempts.Store(0)
logrus.WithError(err).Error("Failed to connect to static peer")
}
}
}
for {
select {
case <-m.processContext.Context().Done():
case <-m.staticPeerAttempt:
attempt()
case <-time.After(time.Second * 5):
attempt()
}
}
}
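The buffered `staticPeerAttempt` channel is a non-blocking kick: `SetStaticPeer` drops a token into it to request an immediate reconnect attempt, and the loop otherwise retries every five seconds. A minimal, self-contained sketch of the pattern (names and the early return on context cancellation are illustrative, not taken from the commit):
```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// reconnector retries connect when kicked via the buffered channel or, failing
// that, every five seconds. connect stands in for conn.ConnectToPeer.
func reconnector(ctx context.Context, kick <-chan struct{}, connect func() error) {
	attempt := func() {
		if err := connect(); err != nil {
			fmt.Println("connect failed:", err)
		}
	}
	for {
		select {
		case <-ctx.Done():
			return
		case <-kick: // explicit request, e.g. from SetStaticPeer
			attempt()
		case <-time.After(time.Second * 5): // periodic retry
			attempt()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)
	defer cancel()
	kick := make(chan struct{}, 1) // capacity 1: redundant kicks are dropped
	go reconnector(ctx, kick, func() error { return errors.New("unreachable") })
	// Non-blocking send, as in SetStaticPeer: if a kick is already pending,
	// this one can safely be discarded.
	select {
	case kick <- struct{}{}:
	default:
	}
	<-ctx.Done()
}
```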
@@ -246,13 +258,6 @@ func (m *DendriteMonolith) Start() {
m.PineconeQUIC = pineconeSessions.NewSessions(logger, m.PineconeRouter)
m.PineconeMulticast = pineconeMulticast.NewMulticast(logger, m.PineconeRouter)
m.PineconeRouter.SetDisconnectedCallback(func(port pineconeTypes.SwitchPortID, public pineconeTypes.PublicKey, peertype int, err error) {
if peertype == pineconeRouter.PeerTypeRemote {
m.staticPeerAttempts.Store(0)
time.AfterFunc(time.Second, m.staticPeerConnect)
}
})
prefix := hex.EncodeToString(pk)
cfg := &config.Dendrite{}
cfg.Defaults()
@@ -272,6 +277,7 @@ func (m *DendriteMonolith) Start() {
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-appservice.db", m.StorageDirectory, prefix))
cfg.MediaAPI.BasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
cfg.MediaAPI.AbsBasePath = config.Path(fmt.Sprintf("%s/media", m.CacheDirectory))
cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
if err := cfg.Derive(); err != nil {
panic(err)
}
@@ -290,7 +296,7 @@ func (m *DendriteMonolith) Start() {
)
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
base, federation, rsAPI, keyRing, true,
)
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, fsAPI)
@@ -356,8 +362,12 @@ func (m *DendriteMonolith) Start() {
},
Handler: h2c.NewHandler(pMux, h2s),
}
m.processContext = base.ProcessContext
m.staticPeerAttempt = make(chan struct{}, 1)
go m.staticPeerConnect()
go func() {
m.logger.Info("Listening on ", cfg.Global.ServerName)
m.logger.Fatal(m.httpServer.Serve(m.PineconeQUIC))


@@ -25,6 +25,8 @@ import (
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
_ "golang.org/x/mobile/bind"
)
type DendriteMonolith struct {
@@ -118,7 +120,7 @@ func (m *DendriteMonolith) Start() {
)
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
base, federation, rsAPI, keyRing, true,
)
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, federation)


@@ -69,7 +69,7 @@ func GetAccountData(
return util.JSONResponse{
Code: http.StatusNotFound,
JSON: jsonerror.Forbidden("data not found"),
JSON: jsonerror.NotFound("data not found"),
}
}
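Returning `jsonerror.NotFound` makes the Matrix error code in the response body (`M_NOT_FOUND`) consistent with the HTTP 404 status, where `jsonerror.Forbidden` produced a misleading `M_FORBIDDEN`.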


@@ -18,13 +18,18 @@ import (
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"syscall"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi/storage/accounts"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/bcrypt"
"golang.org/x/term"
)
const usage = `Usage: %s
@@ -33,7 +38,15 @@ Creates a new user account on the homeserver.
Example:
./create-account --config dendrite.yaml --username alice --password foobarbaz
# provide password by parameter
%s --config dendrite.yaml -username alice -password foobarbaz
# use password from file
%s --config dendrite.yaml -username alice -passwordfile my.pass
# ask user to provide password
%s --config dendrite.yaml -username alice -ask-pass
# read password from stdin
%s --config dendrite.yaml -username alice -passwordstdin < my.pass
cat my.pass | %s --config dendrite.yaml -username alice -passwordstdin
Arguments:
@@ -42,11 +55,15 @@ Arguments:
var (
username = flag.String("username", "", "The username of the account to register (specify the localpart only, e.g. 'alice' for '@alice:domain.com')")
password = flag.String("password", "", "The password to associate with the account (optional, account will be password-less if not specified)")
pwdFile = flag.String("passwordfile", "", "The file to use for the password (e.g. for automated account creation)")
pwdStdin = flag.Bool("passwordstdin", false, "Reads the password from stdin")
askPass = flag.Bool("ask-pass", false, "Ask for the password to use")
)
func main() {
name := os.Args[0]
flag.Usage = func() {
fmt.Fprintf(os.Stderr, usage, os.Args[0])
_, _ = fmt.Fprintf(os.Stderr, usage, name, name, name, name, name, name)
flag.PrintDefaults()
}
cfg := setup.ParseFlags(true)
@@ -56,6 +73,8 @@ func main() {
os.Exit(1)
}
pass := getPassword(password, pwdFile, pwdStdin, askPass, os.Stdin)
accountDB, err := accounts.NewDatabase(&config.DatabaseOptions{
ConnectionString: cfg.UserAPI.AccountDatabase.ConnectionString,
}, cfg.Global.ServerName, bcrypt.DefaultCost, cfg.UserAPI.OpenIDTokenLifetimeMS)
@@ -63,10 +82,61 @@ func main() {
logrus.Fatalln("Failed to connect to the database:", err.Error())
}
_, err = accountDB.CreateAccount(context.Background(), *username, *password, "")
_, err = accountDB.CreateAccount(context.Background(), *username, pass, "")
if err != nil {
logrus.Fatalln("Failed to create the account:", err.Error())
}
logrus.Infoln("Created account", *username)
}
func getPassword(password, pwdFile *string, pwdStdin, askPass *bool, r io.Reader) string {
// no password option set, use empty password
if password == nil && pwdFile == nil && pwdStdin == nil && askPass == nil {
return ""
}
// password defined as parameter
if password != nil && *password != "" {
return *password
}
// read password from file
if pwdFile != nil && *pwdFile != "" {
pw, err := ioutil.ReadFile(*pwdFile)
if err != nil {
logrus.Fatalln("Unable to read password from file:", err)
}
return strings.TrimSpace(string(pw))
}
// read password from stdin
if pwdStdin != nil && *pwdStdin {
data, err := ioutil.ReadAll(r)
if err != nil {
logrus.Fatalln("Unable to read password from stdin:", err)
}
return strings.TrimSpace(string(data))
}
// ask the user to provide the password
if *askPass {
fmt.Print("Enter Password: ")
bytePassword, err := term.ReadPassword(syscall.Stdin)
if err != nil {
logrus.Fatalln("Unable to read password:", err)
}
fmt.Println()
fmt.Print("Confirm Password: ")
bytePassword2, err := term.ReadPassword(syscall.Stdin)
if err != nil {
logrus.Fatalln("Unable to read password:", err)
}
fmt.Println()
if strings.TrimSpace(string(bytePassword)) != strings.TrimSpace(string(bytePassword2)) {
logrus.Fatalln("Entered passwords don't match")
}
return strings.TrimSpace(string(bytePassword))
}
return ""
}


@@ -0,0 +1,62 @@
package main
import (
"bytes"
"io"
"testing"
)
func Test_getPassword(t *testing.T) {
type args struct {
password *string
pwdFile *string
pwdStdin *bool
askPass *bool
reader io.Reader
}
pass := "mySecretPass"
passwordFile := "testdata/my.pass"
passwordStdin := true
reader := &bytes.Buffer{}
_, err := reader.WriteString(pass)
if err != nil {
t.Errorf("unable to write to buffer: %+v", err)
}
tests := []struct {
name string
args args
want string
}{
{
name: "no password defined",
args: args{},
want: "",
},
{
name: "password defined",
args: args{password: &pass},
want: pass,
},
{
name: "pwdFile defined",
args: args{pwdFile: &passwordFile},
want: pass,
},
{
name: "read pass from stdin defined",
args: args{
pwdStdin: &passwordStdin,
reader: reader,
},
want: pass,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getPassword(tt.args.password, tt.args.pwdFile, tt.args.pwdStdin, tt.args.askPass, tt.args.reader); got != tt.want {
t.Errorf("getPassword() = '%v', want '%v'", got, tt.want)
}
})
}
}

cmd/create-account/testdata/my.pass (vendored, new file)

@@ -0,0 +1 @@
mySecretPass


@@ -166,7 +166,7 @@ func main() {
asAPI := appservice.NewInternalAPI(&base.Base, userAPI, rsAPI)
rsAPI.SetAppserviceAPI(asAPI)
fsAPI := federationsender.NewInternalAPI(
&base.Base, federation, rsAPI, keyRing,
&base.Base, federation, rsAPI, keyRing, true,
)
rsAPI.SetFederationSenderAPI(fsAPI)
provider := newPublicRoomsProvider(base.LibP2PPubsub, rsAPI)


@@ -1,14 +1,15 @@
package conn
import (
"context"
"fmt"
"net"
"net/http"
"strings"
"github.com/gorilla/websocket"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/gomatrixserverlib"
"nhooyr.io/websocket"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
@@ -17,11 +18,12 @@ import (
func ConnectToPeer(pRouter *pineconeRouter.Router, peer string) error {
var parent net.Conn
if strings.HasPrefix(peer, "ws://") || strings.HasPrefix(peer, "wss://") {
c, _, err := websocket.DefaultDialer.Dial(peer, nil)
ctx := context.Background()
c, _, err := websocket.Dial(ctx, peer, nil)
if err != nil {
return fmt.Errorf("websocket.DefaultDialer.Dial: %w", err)
}
parent = WrapWebSocketConn(c)
parent = websocket.NetConn(ctx, c, websocket.MessageBinary)
} else {
var err error
parent, err = net.Dial("tcp", peer)
@@ -46,7 +48,13 @@ func (y *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
}
func createTransport(s *pineconeSessions.Sessions) *http.Transport {
tr := &http.Transport{}
tr := &http.Transport{
DisableKeepAlives: false,
Dial: s.Dial,
DialContext: s.DialContext,
DialTLS: s.DialTLS,
DialTLSContext: s.DialTLSContext,
}
tr.RegisterProtocol(
"matrix", &RoundTripper{
inner: &http.Transport{


@@ -23,7 +23,6 @@ import (
"fmt"
"io/ioutil"
"log"
"math"
"net"
"net/http"
"os"
@@ -48,12 +47,11 @@ import (
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib"
"go.uber.org/atomic"
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
"github.com/matrix-org/pinecone/router"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
pineconeTypes "github.com/matrix-org/pinecone/types"
"github.com/sirupsen/logrus"
)
@@ -123,27 +121,23 @@ func main() {
pMulticast := pineconeMulticast.NewMulticast(logger, pRouter)
pMulticast.Start()
var staticPeerAttempts atomic.Uint32
var connectToStaticPeer func()
connectToStaticPeer = func() {
connectToStaticPeer := func() {
attempt := func() {
if pRouter.PeerCount(router.PeerTypeRemote) == 0 {
uri := *instancePeer
if uri == "" {
return
}
if err := conn.ConnectToPeer(pRouter, uri); err != nil {
exp := time.Second * time.Duration(math.Exp2(float64(staticPeerAttempts.Inc())))
time.AfterFunc(exp, connectToStaticPeer)
} else {
staticPeerAttempts.Store(0)
logrus.WithError(err).Error("Failed to connect to static peer")
}
}
pRouter.SetDisconnectedCallback(func(port pineconeTypes.SwitchPortID, public pineconeTypes.PublicKey, peertype int, err error) {
if peertype == pineconeRouter.PeerTypeRemote && err != nil {
staticPeerAttempts.Store(0)
time.AfterFunc(time.Second, connectToStaticPeer)
}
})
go connectToStaticPeer()
for {
attempt()
time.Sleep(time.Second * 5)
}
}
cfg := &config.Dendrite{}
cfg.Defaults()
@@ -161,6 +155,7 @@ func main() {
cfg.FederationSender.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationsender.db", *instanceName))
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
if err := cfg.Derive(); err != nil {
panic(err)
}
@@ -179,7 +174,7 @@ func main() {
)
rsAPI := rsComponent
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
base, federation, rsAPI, keyRing, true,
)
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, fsAPI)
@@ -217,7 +212,11 @@ func main() {
base.PublicMediaAPIMux,
)
wsUpgrader := websocket.Upgrader{}
wsUpgrader := websocket.Upgrader{
CheckOrigin: func(_ *http.Request) bool {
return true
},
}
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
httpRouter.PathPrefix(httputil.InternalPathPrefix).Handler(base.InternalAPIMux)
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(base.PublicClientAPIMux)
@@ -256,6 +255,7 @@ func main() {
Handler: pMux,
}
go connectToStaticPeer()
go func() {
pubkey := pRouter.PublicKey()
logrus.Info("Listening on ", hex.EncodeToString(pubkey[:]))


@@ -114,7 +114,7 @@ func main() {
asAPI := appservice.NewInternalAPI(base, userAPI, rsAPI)
rsAPI.SetAppserviceAPI(asAPI)
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
base, federation, rsAPI, keyRing, true,
)
ygg.SetSessionFunc(func(address string) {


@@ -99,7 +99,7 @@ func main() {
}
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
base, federation, rsAPI, keyRing, false,
)
if base.UseHTTPAPIs {
federationsender.AddInternalRoutes(base.InternalAPIMux, fsAPI)


@@ -33,7 +33,7 @@ func FederationAPI(base *setup.BaseDendrite, cfg *config.Dendrite) {
base.PublicFederationAPIMux, base.PublicKeyAPIMux,
&base.Cfg.FederationAPI, userAPI, federation, keyRing,
rsAPI, fsAPI, base.EDUServerClient(), keyAPI,
&base.Cfg.MSCs,
&base.Cfg.MSCs, nil,
)
base.SetupAndServeHTTP(


@@ -28,7 +28,7 @@ func FederationSender(base *setup.BaseDendrite, cfg *config.Dendrite) {
rsAPI := base.RoomserverHTTPClient()
fsAPI := federationsender.NewInternalAPI(
base, federation, rsAPI, keyRing,
base, federation, rsAPI, keyRing, false,
)
federationsender.AddInternalRoutes(base.InternalAPIMux, fsAPI)


@@ -0,0 +1,477 @@
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/Masterminds/semver/v3"
"github.com/codeclysm/extract"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
)
var (
flagTempDir = flag.String("tmp", "tmp", "Path to temporary directory to dump tarballs to")
flagFrom = flag.String("from", "HEAD-1", "The version to start from e.g '0.3.1'. If 'HEAD-N' then starts N versions behind HEAD.")
flagTo = flag.String("to", "HEAD", "The version to end on e.g '0.3.3'.")
flagBuildConcurrency = flag.Int("build-concurrency", runtime.NumCPU(), "The amount of build concurrency when building images")
alphaNumerics = regexp.MustCompile("[^a-zA-Z0-9]+")
)
// Embed the Dockerfile to use when building dendrite versions.
// Sadly we cannot use the Dockerfile associated with each version of the repo due to changes in
// Docker versions. Specifically, earlier Dendrite versions are incompatible with newer Docker clients
// due to the error:
// When using COPY with more than one source file, the destination must be a directory and end with a /
// We need to run a postgres anyway, so use the dockerfile associated with Complement instead.
const Dockerfile = `FROM golang:1.13-stretch as build
RUN apt-get update && apt-get install -y postgresql
WORKDIR /build
# Copy the build context to the repo as this is the right dendrite code. This is different to the
# Complement Dockerfile which wgets a branch.
COPY . .
RUN go build ./cmd/dendrite-monolith-server
RUN go build ./cmd/generate-keys
RUN go build ./cmd/generate-config
RUN ./generate-config --ci > dendrite.yaml
RUN ./generate-keys --private-key matrix_key.pem --tls-cert server.crt --tls-key server.key
# Replace the connection string with a single postgres DB, using user/db = 'postgres' and no password
RUN sed -i "s%connection_string:.*$%connection_string: postgresql://postgres@localhost/postgres?sslmode=disable%g" dendrite.yaml
# No password when connecting over localhost
RUN sed -i "s%127.0.0.1/32 md5%127.0.0.1/32 trust%g" /etc/postgresql/9.6/main/pg_hba.conf
# Bump up max conns for moar concurrency
RUN sed -i 's/max_connections = 100/max_connections = 2000/g' /etc/postgresql/9.6/main/postgresql.conf
RUN sed -i 's/max_open_conns:.*$/max_open_conns: 100/g' dendrite.yaml
# This entry script starts postgres, waits for it to be up then starts dendrite
RUN echo '\
#!/bin/bash -eu \n\
pg_lsclusters \n\
pg_ctlcluster 9.6 main start \n\
\n\
until pg_isready \n\
do \n\
echo "Waiting for postgres"; \n\
sleep 1; \n\
done \n\
\n\
sed -i "s/server_name: localhost/server_name: ${SERVER_NAME}/g" dendrite.yaml \n\
./dendrite-monolith-server --tls-cert server.crt --tls-key server.key --config dendrite.yaml \n\
' > run_dendrite.sh && chmod +x run_dendrite.sh
ENV SERVER_NAME=localhost
EXPOSE 8008 8448
CMD /build/run_dendrite.sh `
const dendriteUpgradeTestLabel = "dendrite_upgrade_test"
// downloadArchive downloads an arbitrary github archive of the form:
// https://github.com/matrix-org/dendrite/archive/v0.3.11.tar.gz
// and re-tarballs it without the top-level directory which contains branch information. It inserts
// the contents of `dockerfile` as a root file `Dockerfile` in the re-tarballed directory such that
// you can directly feed the retarballed archive to `ImageBuild` to have it run said dockerfile.
// Returns the tarball buffer on success.
func downloadArchive(cli *http.Client, tmpDir, archiveURL string, dockerfile []byte) (*bytes.Buffer, error) {
resp, err := cli.Get(archiveURL)
if err != nil {
return nil, err
}
// nolint:errcheck
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("got HTTP %d", resp.StatusCode)
}
if err = os.Mkdir(tmpDir, os.ModePerm); err != nil {
return nil, fmt.Errorf("failed to make temporary directory: %s", err)
}
// nolint:errcheck
defer os.RemoveAll(tmpDir)
// dump the tarball temporarily, stripping the top-level directory
err = extract.Archive(context.Background(), resp.Body, tmpDir, func(inPath string) string {
// remove top level
segments := strings.Split(inPath, "/")
return strings.Join(segments[1:], "/")
})
if err != nil {
return nil, err
}
// add top level Dockerfile
err = ioutil.WriteFile(path.Join(tmpDir, "Dockerfile"), dockerfile, os.ModePerm)
if err != nil {
return nil, fmt.Errorf("failed to inject /Dockerfile: %w", err)
}
// now re-tarball it :/
var tarball bytes.Buffer
err = compress(tmpDir, &tarball)
if err != nil {
return nil, err
}
return &tarball, nil
}
// buildDendrite builds Dendrite on the branchOrTagName given. Returns the image ID or an error
func buildDendrite(httpClient *http.Client, dockerClient *client.Client, tmpDir, branchOrTagName string) (string, error) {
log.Printf("%s: Downloading version %s to %s\n", branchOrTagName, branchOrTagName, tmpDir)
// pull an archive, this contains a top-level directory which screws with the build context
// which we need to fix up post download
u := fmt.Sprintf("https://github.com/matrix-org/dendrite/archive/%s.tar.gz", branchOrTagName)
tarball, err := downloadArchive(httpClient, tmpDir, u, []byte(Dockerfile))
if err != nil {
return "", fmt.Errorf("failed to download archive %s: %w", u, err)
}
log.Printf("%s: %s => %d bytes\n", branchOrTagName, u, tarball.Len())
log.Printf("%s: Building version %s\n", branchOrTagName, branchOrTagName)
res, err := dockerClient.ImageBuild(context.Background(), tarball, types.ImageBuildOptions{
Tags: []string{"dendrite-upgrade"},
})
if err != nil {
return "", fmt.Errorf("failed to start building image: %s", err)
}
// nolint:errcheck
defer res.Body.Close()
decoder := json.NewDecoder(res.Body)
// {"aux":{"ID":"sha256:247082c717963bc2639fc2daed08838d67811ea12356cd4fda43e1ffef94f2eb"}}
var imageID string
for decoder.More() {
var dl struct {
Stream string `json:"stream"`
Aux map[string]interface{} `json:"aux"`
}
if err := decoder.Decode(&dl); err != nil {
return "", fmt.Errorf("failed to decode build image output line: %w", err)
}
log.Printf("%s: %s", branchOrTagName, dl.Stream)
if dl.Aux != nil {
imgID, ok := dl.Aux["ID"]
if ok {
imageID = imgID.(string)
}
}
}
return imageID, nil
}
func getAndSortVersionsFromGithub(httpClient *http.Client) (semVers []*semver.Version, err error) {
u := "https://api.github.com/repos/matrix-org/dendrite/tags"
res, err := httpClient.Get(u)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, fmt.Errorf("%s returned HTTP %d", u, res.StatusCode)
}
resp := []struct {
Name string `json:"name"`
}{}
if err = json.NewDecoder(res.Body).Decode(&resp); err != nil {
return nil, err
}
for _, r := range resp {
v, err := semver.NewVersion(r.Name)
if err != nil {
continue // not a semver, that's ok and isn't an error, we allow tags that aren't semvers
}
semVers = append(semVers, v)
}
sort.Sort(semver.Collection(semVers))
return semVers, nil
}
func calculateVersions(cli *http.Client, from, to string) []string {
semvers, err := getAndSortVersionsFromGithub(cli)
if err != nil {
log.Fatalf("failed to collect semvers from github: %s", err)
}
// snip the lower bound depending on --from
if from != "" {
if strings.HasPrefix(from, "HEAD-") {
var headN int
headN, err = strconv.Atoi(strings.TrimPrefix(from, "HEAD-"))
if err != nil {
log.Fatalf("invalid --from, try 'HEAD-1'")
}
if headN >= len(semvers) {
log.Fatalf("only have %d versions, but asked to go to HEAD-%d", len(semvers), headN)
}
if headN > 0 {
semvers = semvers[len(semvers)-headN:]
}
} else {
fromVer, err := semver.NewVersion(from)
if err != nil {
log.Fatalf("invalid --from: %s", err)
}
i := 0
for i = 0; i < len(semvers); i++ {
if semvers[i].LessThan(fromVer) {
continue
}
break
}
semvers = semvers[i:]
}
}
if to != "" && to != "HEAD" {
toVer, err := semver.NewVersion(to)
if err != nil {
log.Fatalf("invalid --to: %s", err)
}
var i int
for i = len(semvers) - 1; i >= 0; i-- {
if semvers[i].GreaterThan(toVer) {
continue
}
break
}
semvers = semvers[:i+1]
}
var versions []string
for _, sv := range semvers {
versions = append(versions, sv.Original())
}
if to == "HEAD" {
versions = append(versions, "HEAD")
}
return versions
}
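To make the `--from`/`--to` semantics concrete, here is a hypothetical, self-contained walkthrough of the trimming logic above; the tag list is made up, whereas `calculateVersions` fetches the real one from the GitHub API:
```
package main

import (
	"fmt"
	"sort"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// Made-up tag names standing in for the GitHub tags endpoint.
	raw := []string{"v0.3.9", "v0.3.11", "v0.3.10", "not-a-semver"}
	var semvers []*semver.Version
	for _, r := range raw {
		if v, err := semver.NewVersion(r); err == nil {
			semvers = append(semvers, v) // non-semver tags are skipped
		}
	}
	sort.Sort(semver.Collection(semvers))
	// --from HEAD-1: keep only the last released version.
	headN := 1
	semvers = semvers[len(semvers)-headN:]
	var versions []string
	for _, sv := range semvers {
		versions = append(versions, sv.Original())
	}
	// --to HEAD: append the development head as a final upgrade target.
	versions = append(versions, "HEAD")
	fmt.Println(versions) // [v0.3.11 HEAD]
}
```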
func buildDendriteImages(httpClient *http.Client, dockerClient *client.Client, baseTempDir string, concurrency int, branchOrTagNames []string) map[string]string {
// Concurrently build all versions; this can be done in any order. The mutex protects the map.
branchToImageID := make(map[string]string)
var mu sync.Mutex
var wg sync.WaitGroup
wg.Add(concurrency)
ch := make(chan string, len(branchOrTagNames))
for _, branchName := range branchOrTagNames {
ch <- branchName
}
close(ch)
for i := 0; i < concurrency; i++ {
go func() {
defer wg.Done()
for branchName := range ch {
tmpDir := baseTempDir + alphaNumerics.ReplaceAllString(branchName, "")
imgID, err := buildDendrite(httpClient, dockerClient, tmpDir, branchName)
if err != nil {
log.Fatalf("%s: failed to build dendrite image: %s", branchName, err)
}
mu.Lock()
branchToImageID[branchName] = imgID
mu.Unlock()
}
}()
}
wg.Wait()
return branchToImageID
}
func runImage(dockerClient *client.Client, volumeName, version, imageID string) (csAPIURL, containerID string, err error) {
log.Printf("%s: running image %s\n", version, imageID)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
body, err := dockerClient.ContainerCreate(ctx, &container.Config{
Image: imageID,
Env: []string{"SERVER_NAME=hs1"},
Labels: map[string]string{
dendriteUpgradeTestLabel: "yes",
},
}, &container.HostConfig{
PublishAllPorts: true,
Mounts: []mount.Mount{
{
Type: mount.TypeVolume,
Source: volumeName,
Target: "/var/lib/postgresql/9.6/main",
},
},
}, nil, nil, "dendrite_upgrade_test_"+version)
if err != nil {
return "", "", fmt.Errorf("failed to ContainerCreate: %s", err)
}
containerID = body.ID
err = dockerClient.ContainerStart(ctx, containerID, types.ContainerStartOptions{})
if err != nil {
return "", "", fmt.Errorf("failed to ContainerStart: %s", err)
}
inspect, err := dockerClient.ContainerInspect(ctx, containerID)
if err != nil {
return "", "", err
}
csapiPortInfo, ok := inspect.NetworkSettings.Ports[nat.Port("8008/tcp")]
if !ok {
return "", "", fmt.Errorf("port 8008 not exposed - exposed ports: %v", inspect.NetworkSettings.Ports)
}
baseURL := fmt.Sprintf("http://localhost:%s", csapiPortInfo[0].HostPort)
versionsURL := fmt.Sprintf("%s/_matrix/client/versions", baseURL)
// hit /versions to check it is up
var lastErr error
for i := 0; i < 500; i++ {
res, err := http.Get(versionsURL)
if err != nil {
lastErr = fmt.Errorf("GET %s => error: %s", versionsURL, err)
time.Sleep(50 * time.Millisecond)
continue
}
if res.StatusCode != 200 {
lastErr = fmt.Errorf("GET %s => HTTP %s", versionsURL, res.Status)
time.Sleep(50 * time.Millisecond)
continue
}
lastErr = nil
break
}
if lastErr != nil {
logs, err := dockerClient.ContainerLogs(context.Background(), containerID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
})
// ignore errors when we cannot get logs, they're just for debugging anyway
if err == nil {
logbody, err := ioutil.ReadAll(logs)
if err == nil {
log.Printf("Container logs:\n\n%s\n\n", string(logbody))
}
}
}
return baseURL, containerID, lastErr
}
func destroyContainer(dockerClient *client.Client, containerID string) {
err := dockerClient.ContainerRemove(context.TODO(), containerID, types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
log.Printf("failed to remove container %s : %s", containerID, err)
}
}
func loadAndRunTests(dockerClient *client.Client, volumeName, v string, branchToImageID map[string]string) error {
csAPIURL, containerID, err := runImage(dockerClient, volumeName, v, branchToImageID[v])
if err != nil {
return fmt.Errorf("failed to run container for branch %v: %v", v, err)
}
defer destroyContainer(dockerClient, containerID)
log.Printf("URL %s -> %s \n", csAPIURL, containerID)
if err = runTests(csAPIURL, v); err != nil {
return fmt.Errorf("failed to run tests on version %s: %s", v, err)
}
return nil
}
func verifyTests(dockerClient *client.Client, volumeName string, versions []string, branchToImageID map[string]string) error {
lastVer := versions[len(versions)-1]
csAPIURL, containerID, err := runImage(dockerClient, volumeName, lastVer, branchToImageID[lastVer])
if err != nil {
return fmt.Errorf("failed to run container for branch %v: %v", lastVer, err)
}
defer destroyContainer(dockerClient, containerID)
return verifyTestsRan(csAPIURL, versions)
}
// cleanup old containers/volumes from a previous run
func cleanup(dockerClient *client.Client) {
// ignore all errors, we are just cleaning up and don't want to fail just because we failed to clean up
containers, _ := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{
Filters: label(dendriteUpgradeTestLabel),
})
for _, c := range containers {
s := time.Second
_ = dockerClient.ContainerStop(context.Background(), c.ID, &s)
_ = dockerClient.ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{
Force: true,
})
}
_ = dockerClient.VolumeRemove(context.Background(), "dendrite_upgrade_test", true)
}
func label(in string) filters.Args {
f := filters.NewArgs()
f.Add("label", in)
return f
}
func main() {
flag.Parse()
httpClient := &http.Client{
Timeout: 60 * time.Second,
}
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Fatalf("failed to make docker client: %s", err)
}
if *flagFrom == "" {
flag.Usage()
os.Exit(1)
}
cleanup(dockerClient)
versions := calculateVersions(httpClient, *flagFrom, *flagTo)
log.Printf("Testing dendrite versions: %v\n", versions)
branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagBuildConcurrency, versions)
// make a shared postgres volume
volume, err := dockerClient.VolumeCreate(context.Background(), volume.VolumeCreateBody{
Name: "dendrite_upgrade_test",
Labels: map[string]string{
dendriteUpgradeTestLabel: "yes",
},
})
if err != nil {
log.Fatalf("failed to make docker volume: %s", err)
}
failed := false
defer func() {
perr := recover()
log.Println("removing postgres volume")
verr := dockerClient.VolumeRemove(context.Background(), volume.Name, true)
if perr == nil {
perr = verr
}
if perr != nil {
panic(perr)
}
if failed {
os.Exit(1)
}
}()
// run through images sequentially
for _, v := range versions {
if err = loadAndRunTests(dockerClient, volume.Name, v, branchToImageID); err != nil {
log.Printf("failed to run tests for %v: %s\n", v, err)
failed = true
break
}
}
if err := verifyTests(dockerClient, volume.Name, versions, branchToImageID); err != nil {
log.Printf("failed to verify test results: %s", err)
failed = true
}
}


@@ -0,0 +1,57 @@
package main
import (
"archive/tar"
"compress/gzip"
"io"
"os"
"path/filepath"
"strings"
)
// From https://gist.github.com/mimoo/25fc9716e0f1353791f5908f94d6e726
// Modified to strip off top-level when compressing
func compress(src string, buf io.Writer) error {
// tar > gzip > buf
zr := gzip.NewWriter(buf)
tw := tar.NewWriter(zr)
// walk through every file in the folder
_ = filepath.Walk(src, func(file string, fi os.FileInfo, e error) error {
// generate tar header
header, err := tar.FileInfoHeader(fi, file)
if err != nil {
return err
}
// must provide real name
// (see https://golang.org/src/archive/tar/common.go?#L626)
header.Name = strings.TrimPrefix(filepath.ToSlash(file), src+"/")
// write header
if err := tw.WriteHeader(header); err != nil {
return err
}
// if not a dir, write file content
if !fi.IsDir() {
data, err := os.Open(file)
if err != nil {
return err
}
if _, err := io.Copy(tw, data); err != nil {
return err
}
}
return nil
})
// produce tar
if err := tw.Close(); err != nil {
return err
}
// produce gzip
if err := zr.Close(); err != nil {
return err
}
//
return nil
}


@@ -0,0 +1,192 @@
package main
import (
"fmt"
"log"
"strings"
"github.com/matrix-org/gomatrix"
"github.com/matrix-org/gomatrixserverlib"
)
const userPassword = "this_is_a_long_password"
type user struct {
userID string
localpart string
client *gomatrix.Client
}
// runTests performs the following operations:
// - register alice and bob with branch name muxed into the localpart
// - create a DM room for the 2 users and exchange messages
// - create/join a public #global room and exchange messages
func runTests(baseURL, branchName string) error {
// register 2 users
users := []user{
{
localpart: "alice" + branchName,
},
{
localpart: "bob" + branchName,
},
}
for i, u := range users {
client, err := gomatrix.NewClient(baseURL, "", "")
if err != nil {
return err
}
resp, err := client.RegisterDummy(&gomatrix.ReqRegister{
Username: strings.ToLower(u.localpart),
Password: userPassword,
})
if err != nil {
return fmt.Errorf("failed to register %s: %s", u.localpart, err)
}
client, err = gomatrix.NewClient(baseURL, resp.UserID, resp.AccessToken)
if err != nil {
return err
}
users[i].client = client
users[i].userID = resp.UserID
}
// create DM room, join it and exchange messages
createRoomResp, err := users[0].client.CreateRoom(&gomatrix.ReqCreateRoom{
Preset: "trusted_private_chat",
Invite: []string{users[1].userID},
IsDirect: true,
})
if err != nil {
return fmt.Errorf("failed to create DM room: %s", err)
}
dmRoomID := createRoomResp.RoomID
if _, err = users[1].client.JoinRoom(dmRoomID, "", nil); err != nil {
return fmt.Errorf("failed to join DM room: %s", err)
}
msgs := []struct {
client *gomatrix.Client
text string
}{
{
client: users[0].client, text: "1: " + branchName,
},
{
client: users[1].client, text: "2: " + branchName,
},
{
client: users[0].client, text: "3: " + branchName,
},
{
client: users[1].client, text: "4: " + branchName,
},
}
for _, msg := range msgs {
_, err = msg.client.SendText(dmRoomID, msg.text)
if err != nil {
return fmt.Errorf("failed to send text in dm room: %s", err)
}
}
// attempt to create/join the shared public room
publicRoomID := ""
createRoomResp, err = users[0].client.CreateRoom(&gomatrix.ReqCreateRoom{
RoomAliasName: "global",
Preset: "public_chat",
})
if err != nil { // this is okay and expected if the room already exists and the aliases clash
// try to join it
_, domain, err2 := gomatrixserverlib.SplitID('@', users[0].userID)
if err2 != nil {
return fmt.Errorf("failed to split user ID: %s, %s", users[0].userID, err2)
}
joinRoomResp, err2 := users[0].client.JoinRoom(fmt.Sprintf("#global:%s", domain), "", nil)
if err2 != nil {
return fmt.Errorf("alice failed to join public room: %s", err2)
}
publicRoomID = joinRoomResp.RoomID
} else {
publicRoomID = createRoomResp.RoomID
}
if _, err = users[1].client.JoinRoom(publicRoomID, "", nil); err != nil {
return fmt.Errorf("bob failed to join public room: %s", err)
}
// send messages
for _, msg := range msgs {
_, err = msg.client.SendText(publicRoomID, "public "+msg.text)
if err != nil {
return fmt.Errorf("failed to send text in public room: %s", err)
}
}
log.Printf("OK! rooms(public=%s, dm=%s) users(%s, %s)\n", publicRoomID, dmRoomID, users[0].userID, users[1].userID)
return nil
}
// verifyTestsRan checks that the HS has the right rooms/messages
func verifyTestsRan(baseURL string, branchNames []string) error {
log.Println("Verifying tests....")
// check we can login as all users
var resp *gomatrix.RespLogin
for _, branchName := range branchNames {
client, err := gomatrix.NewClient(baseURL, "", "")
if err != nil {
return err
}
userLocalparts := []string{
"alice" + branchName,
"bob" + branchName,
}
for _, userLocalpart := range userLocalparts {
resp, err = client.Login(&gomatrix.ReqLogin{
Type: "m.login.password",
User: strings.ToLower(userLocalpart),
Password: userPassword,
})
if err != nil {
return fmt.Errorf("failed to login as %s: %s", userLocalpart, err)
}
if resp.AccessToken == "" {
return fmt.Errorf("failed to login, bad response: %+v", resp)
}
}
}
log.Println(" accounts exist: OK")
client, err := gomatrix.NewClient(baseURL, resp.UserID, resp.AccessToken)
if err != nil {
return err
}
_, domain, err := gomatrixserverlib.SplitID('@', client.UserID)
if err != nil {
return err
}
u := client.BuildURL("directory", "room", fmt.Sprintf("#global:%s", domain))
r := struct {
RoomID string `json:"room_id"`
}{}
err = client.MakeRequest("GET", u, nil, &r)
if err != nil {
return fmt.Errorf("failed to /directory: %s", err)
}
if r.RoomID == "" {
return fmt.Errorf("/directory lookup returned no room ID")
}
log.Println(" public room exists: OK")
history, err := client.Messages(r.RoomID, client.Store.LoadNextBatch(client.UserID), "", 'b', 100)
if err != nil {
return fmt.Errorf("failed to get /messages: %s", err)
}
// we expect 4 messages per version
msgCount := 0
for _, ev := range history.Chunk {
if ev.Type == "m.room.message" {
msgCount += 1
}
}
wantMsgCount := len(branchNames) * 4
if msgCount != wantMsgCount {
return fmt.Errorf("got %d messages in global room, want %d", msgCount, wantMsgCount)
}
log.Println(" messages exist: OK")
return nil
}


@@ -0,0 +1,100 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build wasm
package main
import (
"bufio"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"syscall/js"
)
// JSServer exposes an HTTP-like server interface which allows JS to 'send' requests to it.
type JSServer struct {
// The router which will service requests
Mux http.Handler
}
// OnRequestFromJS is the function that JS will invoke when there is a new request.
// The JS function signature is:
// function(reqString: string): Promise<{result: string, error: string}>
// Usage is like:
// const res = await global._go_js_server.fetch(reqString);
// if (res.error) {
// // handle error: this is a 'network' error, not a non-2xx error.
// }
// const rawHttpResponse = res.result;
func (h *JSServer) OnRequestFromJS(this js.Value, args []js.Value) interface{} {
// we HAVE to spawn a new goroutine and return immediately or else Go will deadlock
// if this request blocks at all e.g for /sync calls
httpStr := args[0].String()
promise := js.Global().Get("Promise").New(js.FuncOf(func(pthis js.Value, pargs []js.Value) interface{} {
// The initial callback code for new Promise() is also called on the critical path, which is why
// we need to put this in an immediately invoked goroutine.
go func() {
resolve := pargs[0]
resStr, err := h.handle(httpStr)
errStr := ""
if err != nil {
errStr = err.Error()
}
resolve.Invoke(map[string]interface{}{
"result": resStr,
"error": errStr,
})
}()
return nil
}))
return promise
}
// handle invokes the http.ServeMux for this request and returns the raw HTTP response.
func (h *JSServer) handle(httpStr string) (resStr string, err error) {
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(httpStr)))
if err != nil {
return
}
w := httptest.NewRecorder()
h.Mux.ServeHTTP(w, req)
res := w.Result()
var resBuffer strings.Builder
err = res.Write(&resBuffer)
return resBuffer.String(), err
}
// ListenAndServe registers a variable in JS-land with the given namespace. This variable is
// a function which JS-land can call to 'send' HTTP requests. The function is attached to
// a global object called "_go_js_server". See OnRequestFromJS for more info.
func (h *JSServer) ListenAndServe(namespace string) {
globalName := "_go_js_server"
// register a hook in JS-land for it to invoke stuff
server := js.Global().Get(globalName)
if !server.Truthy() {
server = js.Global().Get("Object").New()
js.Global().Set(globalName, server)
}
server.Set(namespace, js.FuncOf(h.OnRequestFromJS))
fmt.Printf("Listening for requests from JS on function %s.%s\n", globalName, namespace)
// Block forever to mimic http.ListenAndServe
select {}
}


@@ -0,0 +1,256 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build wasm
package main
import (
"crypto/ed25519"
"encoding/hex"
"fmt"
"log"
"os"
"syscall/js"
"time"
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/appservice"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conn"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/rooms"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
"github.com/matrix-org/dendrite/eduserver"
"github.com/matrix-org/dendrite/eduserver/cache"
"github.com/matrix-org/dendrite/federationsender"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/keyserver"
"github.com/matrix-org/dendrite/roomserver"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
_ "github.com/matrix-org/go-sqlite3-js"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
)
var GitCommit string
func init() {
fmt.Printf("[%s] dendrite.js starting...\n", GitCommit)
}
const publicPeer = "wss://pinecone.matrix.org/public"
const keyNameEd25519 = "_go_ed25519_key"
func readKeyFromLocalStorage() (key ed25519.PrivateKey, err error) {
localforage := js.Global().Get("localforage")
if !localforage.Truthy() {
err = fmt.Errorf("readKeyFromLocalStorage: no localforage")
return
}
// https://localforage.github.io/localForage/
item, ok := await(localforage.Call("getItem", keyNameEd25519))
if !ok || !item.Truthy() {
err = fmt.Errorf("readKeyFromLocalStorage: no key in localforage")
return
}
fmt.Println("Found key in localforage")
// extract []byte and make an ed25519 key
seed := make([]byte, 32, 32)
js.CopyBytesToGo(seed, item)
return ed25519.NewKeyFromSeed(seed), nil
}
func writeKeyToLocalStorage(key ed25519.PrivateKey) error {
localforage := js.Global().Get("localforage")
if !localforage.Truthy() {
return fmt.Errorf("writeKeyToLocalStorage: no localforage")
}
// make a Uint8Array from the key's seed
seed := key.Seed()
jsSeed := js.Global().Get("Uint8Array").New(len(seed))
js.CopyBytesToJS(jsSeed, seed)
// write it
localforage.Call("setItem", keyNameEd25519, jsSeed)
return nil
}
// taken from https://go-review.googlesource.com/c/go/+/150917
// await waits until the promise v has been resolved or rejected and returns the promise's result value.
// The boolean value ok is true if the promise has been resolved, false if it has been rejected.
// If v is not a promise, v itself is returned as the value and ok is true.
func await(v js.Value) (result js.Value, ok bool) {
if v.Type() != js.TypeObject || v.Get("then").Type() != js.TypeFunction {
return v, true
}
done := make(chan struct{})
onResolve := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
result = args[0]
ok = true
close(done)
return nil
})
defer onResolve.Release()
onReject := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
result = args[0]
ok = false
close(done)
return nil
})
defer onReject.Release()
v.Call("then", onResolve, onReject)
<-done
return
}
func generateKey() ed25519.PrivateKey {
// attempt to look for a seed in JS-land and if it exists use it.
priv, err := readKeyFromLocalStorage()
if err == nil {
fmt.Println("Read key from localStorage")
return priv
}
// generate a new key
fmt.Println(err, " : Generating new ed25519 key")
_, priv, err = ed25519.GenerateKey(nil)
if err != nil {
logrus.Fatalf("Failed to generate ed25519 key: %s", err)
}
if err := writeKeyToLocalStorage(priv); err != nil {
fmt.Println("failed to write key to localStorage: ", err)
// non-fatal, we'll just have amnesia for a while
}
return priv
}
func main() {
sk := generateKey()
pk := sk.Public().(ed25519.PublicKey)
logger := log.New(os.Stdout, "", 0)
pRouter := pineconeRouter.NewRouter(logger, "dendrite", sk, pk, nil)
pSessions := pineconeSessions.NewSessions(logger, pRouter)
cfg := &config.Dendrite{}
cfg.Defaults()
cfg.UserAPI.AccountDatabase.ConnectionString = "file:/idb/dendritejs_account.db"
cfg.AppServiceAPI.Database.ConnectionString = "file:/idb/dendritejs_appservice.db"
cfg.UserAPI.DeviceDatabase.ConnectionString = "file:/idb/dendritejs_device.db"
cfg.FederationSender.Database.ConnectionString = "file:/idb/dendritejs_fedsender.db"
cfg.MediaAPI.Database.ConnectionString = "file:/idb/dendritejs_mediaapi.db"
cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db"
cfg.SigningKeyServer.Database.ConnectionString = "file:/idb/dendritejs_signingkeyserver.db"
cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db"
cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db"
cfg.Global.Kafka.UseNaffka = true
cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db"
cfg.Global.TrustedIDServers = []string{}
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.PrivateKey = sk
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
if err := cfg.Derive(); err != nil {
logrus.Fatalf("Failed to derive values from config: %s", err)
}
base := setup.NewBaseDendrite(cfg, "Monolith", false)
defer base.Close() // nolint: errcheck
accountDB := base.CreateAccountsDB()
federation := conn.CreateFederationClient(base, pSessions)
keyAPI := keyserver.NewInternalAPI(&base.Cfg.KeyServer, federation)
userAPI := userapi.NewInternalAPI(accountDB, &cfg.UserAPI, nil, keyAPI)
keyAPI.SetUserAPI(userAPI)
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()
rsAPI := roomserver.NewInternalAPI(base, keyRing)
eduInputAPI := eduserver.NewInternalAPI(base, cache.New(), userAPI)
asQuery := appservice.NewInternalAPI(
base, userAPI, rsAPI,
)
rsAPI.SetAppserviceAPI(asQuery)
fedSenderAPI := federationsender.NewInternalAPI(base, federation, rsAPI, keyRing, true)
rsAPI.SetFederationSenderAPI(fedSenderAPI)
monolith := setup.Monolith{
Config: base.Cfg,
AccountDB: accountDB,
Client: conn.CreateClient(base, pSessions),
FedClient: federation,
KeyRing: keyRing,
AppserviceAPI: asQuery,
EDUInternalAPI: eduInputAPI,
FederationSenderAPI: fedSenderAPI,
RoomserverAPI: rsAPI,
UserAPI: userAPI,
KeyAPI: keyAPI,
//ServerKeyAPI: serverKeyAPI,
ExtPublicRoomsProvider: rooms.NewPineconeRoomProvider(pRouter, pSessions, fedSenderAPI, federation),
}
monolith.AddAllPublicRoutes(
base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,
base.PublicMediaAPIMux,
)
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
httpRouter.PathPrefix(httputil.InternalPathPrefix).Handler(base.InternalAPIMux)
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(base.PublicClientAPIMux)
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(base.PublicMediaAPIMux)
p2pRouter := pSessions.HTTP().Mux()
p2pRouter.Handle(httputil.PublicFederationPathPrefix, base.PublicFederationAPIMux)
p2pRouter.Handle(httputil.PublicMediaPathPrefix, base.PublicMediaAPIMux)
// Expose the matrix APIs via fetch - for local traffic
go func() {
logrus.Info("Listening for service-worker fetch traffic")
s := JSServer{
Mux: httpRouter,
}
s.ListenAndServe("fetch")
}()
// Connect to the static peer
go func() {
for {
if pRouter.PeerCount(pineconeRouter.PeerTypeRemote) == 0 {
if err := conn.ConnectToPeer(pRouter, publicPeer); err != nil {
logrus.WithError(err).Error("Failed to connect to static peer")
}
}
select {
case <-base.ProcessContext.Context().Done():
return
case <-time.After(time.Second * 5):
}
}
}()
// We want to block forever to let the fetch and libp2p handler serve the APIs
select {}
}


@@ -0,0 +1,23 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !wasm
package main
import "fmt"
func main() {
fmt.Println("dendritejs: no-op when not compiling for WebAssembly")
}


@@ -210,7 +210,7 @@ func main() {
base, userAPI, rsAPI,
)
rsAPI.SetAppserviceAPI(asQuery)
fedSenderAPI := federationsender.NewInternalAPI(base, federation, rsAPI, &keyRing)
fedSenderAPI := federationsender.NewInternalAPI(base, federation, rsAPI, &keyRing, true)
rsAPI.SetFederationSenderAPI(fedSenderAPI)
p2pPublicRoomProvider := NewLibP2PPublicRoomsProvider(node, fedSenderAPI, federation)


@@ -1,6 +1,6 @@
server {
listen 443 ssl; # IPv4
listen [::]:443; # IPv6
listen [::]:443 ssl; # IPv6
server_name my.hostname.com;
ssl_certificate /path/to/fullchain.pem;
@@ -16,6 +16,9 @@ server {
}
location /.well-known/matrix/client {
# If your server_name here doesn't match your matrix homeserver URL
# (e.g. hostname.com as server_name and matrix.hostname.com as homeserver URL),
# uncomment the following line:
# add_header Access-Control-Allow-Origin '*';
return 200 '{ "m.homeserver": { "base_url": "https://my.hostname.com" } }';
}


@@ -1,6 +1,6 @@
server {
listen 443 ssl; # IPv4
listen [::]:443; # IPv6
listen [::]:443 ssl; # IPv6
server_name my.hostname.com;
ssl_certificate /path/to/fullchain.pem;
@@ -16,6 +16,9 @@ server {
}
location /.well-known/matrix/client {
# If your server_name here doesn't match your matrix homeserver URL
# (e.g. hostname.com as server_name and matrix.hostname.com as homeserver URL),
# uncomment the following line:
# add_header Access-Control-Allow-Origin '*';
return 200 '{ "m.homeserver": { "base_url": "https://my.hostname.com" } }';
}


@@ -0,0 +1,11 @@
package api
import (
"context"
"github.com/matrix-org/gomatrixserverlib"
)
type ServersInRoomProvider interface {
GetServersForRoom(ctx context.Context, roomID string, event *gomatrixserverlib.Event) []gomatrixserverlib.ServerName
}
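This interface is threaded through to the federation `/send` path further below; a minimal hypothetical implementation, purely to illustrate the contract (not part of the commit):
```
package api

import (
	"context"

	"github.com/matrix-org/gomatrixserverlib"
)

// staticServersInRoom is a hypothetical ServersInRoomProvider that always
// returns a fixed server list, e.g. for tests or static topologies.
type staticServersInRoom struct {
	servers []gomatrixserverlib.ServerName
}

func (s *staticServersInRoom) GetServersForRoom(
	ctx context.Context, roomID string, event *gomatrixserverlib.Event,
) []gomatrixserverlib.ServerName {
	return s.servers
}
```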


@@ -17,6 +17,7 @@ package federationapi
import (
"github.com/gorilla/mux"
eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
federationAPI "github.com/matrix-org/dendrite/federationapi/api"
federationSenderAPI "github.com/matrix-org/dendrite/federationsender/api"
keyserverAPI "github.com/matrix-org/dendrite/keyserver/api"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
@@ -39,10 +40,12 @@ func AddPublicRoutes(
eduAPI eduserverAPI.EDUServerInputAPI,
keyAPI keyserverAPI.KeyInternalAPI,
mscCfg *config.MSCs,
servers federationAPI.ServersInRoomProvider,
) {
routing.Setup(
fedRouter, keyRouter, cfg, rsAPI,
eduAPI, federationSenderAPI, keyRing,
federation, userAPI, keyAPI, mscCfg,
servers,
)
}


@@ -31,7 +31,7 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) {
fsAPI := base.FederationSenderHTTPClient()
// TODO: This is pretty fragile, as if anything calls anything on these nils this test will break.
// Unfortunately, it makes little sense to instantiate these dependencies when we just want to test routing.
federationapi.AddPublicRoutes(base.PublicFederationAPIMux, base.PublicKeyAPIMux, &cfg.FederationAPI, nil, nil, keyRing, nil, fsAPI, nil, nil, &cfg.MSCs)
federationapi.AddPublicRoutes(base.PublicFederationAPIMux, base.PublicKeyAPIMux, &cfg.FederationAPI, nil, nil, keyRing, nil, fsAPI, nil, nil, &cfg.MSCs, nil)
baseURL, cancel := test.ListenAndServe(t, base.PublicFederationAPIMux, true)
defer cancel()
serverName := gomatrixserverlib.ServerName(strings.TrimPrefix(baseURL, "https://"))


@@ -20,6 +20,7 @@ import (
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
federationAPI "github.com/matrix-org/dendrite/federationapi/api"
federationSenderAPI "github.com/matrix-org/dendrite/federationsender/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/httputil"
@@ -50,6 +51,7 @@ func Setup(
userAPI userapi.UserInternalAPI,
keyAPI keyserverAPI.KeyInternalAPI,
mscCfg *config.MSCs,
servers federationAPI.ServersInRoomProvider,
) {
v2keysmux := keyMux.PathPrefix("/v2").Subrouter()
v1fedmux := fedMux.PathPrefix("/v1").Subrouter()
@@ -99,7 +101,7 @@
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
return Send(
httpReq, request, gomatrixserverlib.TransactionID(vars["txnID"]),
cfg, rsAPI, eduAPI, keyAPI, keys, federation, mu,
cfg, rsAPI, eduAPI, keyAPI, keys, federation, mu, servers,
)
},
)).Methods(http.MethodPut, http.MethodOptions)


@@ -16,16 +16,16 @@ package routing
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
federationAPI "github.com/matrix-org/dendrite/federationapi/api"
"github.com/matrix-org/dendrite/internal"
keyapi "github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/roomserver/api"
@ -34,6 +34,7 @@ import (
"github.com/matrix-org/util"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"go.uber.org/atomic"
)
const (
@ -88,6 +89,67 @@ func init() {
)
}
type sendFIFOQueue struct {
tasks []*inputTask
count int
mutex sync.Mutex
notifs chan struct{}
}
func newSendFIFOQueue() *sendFIFOQueue {
q := &sendFIFOQueue{
notifs: make(chan struct{}, 1),
}
return q
}
func (q *sendFIFOQueue) push(frame *inputTask) {
q.mutex.Lock()
defer q.mutex.Unlock()
q.tasks = append(q.tasks, frame)
q.count++
select {
case q.notifs <- struct{}{}:
default:
}
}
// pop returns the first item of the queue, if there is one.
// The second return value will indicate if a task was returned.
func (q *sendFIFOQueue) pop() (*inputTask, bool) {
q.mutex.Lock()
defer q.mutex.Unlock()
if q.count == 0 {
return nil, false
}
frame := q.tasks[0]
q.tasks[0] = nil
q.tasks = q.tasks[1:]
q.count--
if q.count == 0 {
// Force a GC of the underlying array, since it might have
// grown significantly if the queue was hammered for some reason
q.tasks = nil
}
return frame, true
}
type inputTask struct {
ctx context.Context
t *txnReq
event *gomatrixserverlib.Event
wg *sync.WaitGroup
err error // written back by worker, only safe to read when all tasks are done
duration time.Duration // written back by worker, only safe to read when all tasks are done
}
type inputWorker struct {
running atomic.Bool
input *sendFIFOQueue
}
var inputWorkers sync.Map // room ID -> *inputWorker
// Send implements /_matrix/federation/v1/send/{txnID}
func Send(
httpReq *http.Request,
@ -100,14 +162,16 @@ func Send(
keys gomatrixserverlib.JSONVerifier,
federation *gomatrixserverlib.FederationClient,
mu *internal.MutexByRoom,
servers federationAPI.ServersInRoomProvider,
) util.JSONResponse {
t := txnReq{
rsAPI: rsAPI,
eduAPI: eduAPI,
keys: keys,
federation: federation,
hadEvents: make(map[string]bool),
haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent),
newEvents: make(map[string]bool),
servers: servers,
keyAPI: keyAPI,
roomsMu: mu,
}
@ -141,7 +205,7 @@ func Send(
util.GetLogger(httpReq.Context()).Infof("Received transaction %q from %q containing %d PDUs, %d EDUs", txnID, request.Origin(), len(t.PDUs), len(t.EDUs))
resp, jsonErr := t.processTransaction(context.Background())
resp, jsonErr := t.processTransaction(httpReq.Context())
if jsonErr != nil {
util.GetLogger(httpReq.Context()).WithField("jsonErr", jsonErr).Error("t.processTransaction failed")
return *jsonErr
@ -164,18 +228,25 @@ type txnReq struct {
keyAPI keyapi.KeyInternalAPI
keys gomatrixserverlib.JSONVerifier
federation txnFederationClient
servers []gomatrixserverlib.ServerName
serversMutex sync.RWMutex
roomsMu *internal.MutexByRoom
// something that can tell us about which servers are in a room right now
servers federationAPI.ServersInRoomProvider
// a list of events from the auth and prev events which we already had
hadEvents map[string]bool
hadEventsMutex sync.Mutex
// local cache of events for auth checks, etc - this may include events
// which the roomserver is unaware of.
haveEvents map[string]*gomatrixserverlib.HeaderedEvent
// new events which the roomserver does not know about
newEvents map[string]bool
newEventsMutex sync.RWMutex
haveEventsMutex sync.Mutex
work string // metrics
}
func (t *txnReq) hadEvent(eventID string, had bool) {
t.hadEventsMutex.Lock()
defer t.hadEventsMutex.Unlock()
t.hadEvents[eventID] = had
}
// A subset of FederationClient functionality that txn requires. Useful for testing.
type txnFederationClient interface {
LookupState(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
@ -189,8 +260,9 @@ type txnFederationClient interface {
func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.RespSend, *util.JSONResponse) {
results := make(map[string]gomatrixserverlib.PDUResult)
var wg sync.WaitGroup
var tasks []*inputTask
pdus := []*gomatrixserverlib.HeaderedEvent{}
for _, pdu := range t.PDUs {
pduCountTotal.WithLabelValues("total").Inc()
var header struct {
@ -241,83 +313,94 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res
}
continue
}
pdus = append(pdus, event.Headered(verRes.RoomVersion))
v, _ := inputWorkers.LoadOrStore(event.RoomID(), &inputWorker{
input: newSendFIFOQueue(),
})
worker := v.(*inputWorker)
wg.Add(1)
task := &inputTask{
ctx: ctx,
t: t,
event: event,
wg: &wg,
}
// Process the events.
for _, e := range pdus {
evStart := time.Now()
if err := t.processEvent(ctx, e.Unwrap()); err != nil {
// If the error is due to the event itself being bad then we skip
// it and move onto the next event. We report an error so that the
// sender knows that we have skipped processing it.
//
// However if the event is due to a temporary failure in our server
// such as a database being unavailable then we should bail, and
// hope that the sender will retry when we are feeling better.
//
// It is uncertain what we should do if an event fails because
// we failed to fetch more information from the sending server.
// For example if a request to /state fails.
// If we skip the event then we risk missing the event until we
// receive another event referencing it.
// If we bail and stop processing then we risk wedging incoming
// transactions from that server forever.
if isProcessingErrorFatal(err) {
sentry.CaptureException(err)
// Any other error should be the result of a temporary error in
// our server so we should bail processing the transaction entirely.
util.GetLogger(ctx).Warnf("Processing %s failed fatally: %s", e.EventID(), err)
jsonErr := util.ErrorResponse(err)
processEventSummary.WithLabelValues(t.work, MetricsOutcomeFatal).Observe(
float64(time.Since(evStart).Nanoseconds()) / 1000.,
)
return nil, &jsonErr
} else {
// Auth errors mean the event is 'rejected' which have to be silent to appease sytest
errMsg := ""
outcome := MetricsOutcomeRejected
_, rejected := err.(*gomatrixserverlib.NotAllowed)
if !rejected {
errMsg = err.Error()
outcome = MetricsOutcomeFail
}
util.GetLogger(ctx).WithError(err).WithField("event_id", e.EventID()).WithField("rejected", rejected).Warn(
"Failed to process incoming federation event, skipping",
)
processEventSummary.WithLabelValues(t.work, outcome).Observe(
float64(time.Since(evStart).Nanoseconds()) / 1000.,
)
results[e.EventID()] = gomatrixserverlib.PDUResult{
Error: errMsg,
}
}
} else {
results[e.EventID()] = gomatrixserverlib.PDUResult{}
pduCountTotal.WithLabelValues("success").Inc()
processEventSummary.WithLabelValues(t.work, MetricsOutcomeOK).Observe(
float64(time.Since(evStart).Nanoseconds()) / 1000.,
)
tasks = append(tasks, task)
worker.input.push(task)
if worker.running.CAS(false, true) {
go worker.run()
}
}
t.processEDUs(ctx)
wg.Wait()
for _, task := range tasks {
if task.err != nil {
results[task.event.EventID()] = gomatrixserverlib.PDUResult{
Error: task.err.Error(),
}
} else {
results[task.event.EventID()] = gomatrixserverlib.PDUResult{}
}
}
if c := len(results); c > 0 {
util.GetLogger(ctx).Infof("Processed %d PDUs from transaction %q", c, t.TransactionID)
}
return &gomatrixserverlib.RespSend{PDUs: results}, nil
}
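The new processTransaction above fans PDUs out to per-room workers: tasks are queued against a sync.Map keyed by room ID, at most one goroutine drains each room's queue, and the handler waits on a WaitGroup before collecting each task's error. Note also that the transaction now runs under httpReq.Context(), while each event gets its own five-minute background context inside the worker. A condensed sketch of the dispatch step, reusing the types defined above:

```go
// Condensed sketch of the per-room fan-out used above. In the real
// code the caller prepares the task and its WaitGroup first.
var workers sync.Map // room ID -> *inputWorker

func dispatch(roomID string, task *inputTask) {
	v, _ := workers.LoadOrStore(roomID, &inputWorker{input: newSendFIFOQueue()})
	worker := v.(*inputWorker)
	task.wg.Add(1) // matched by wg.Done() inside the worker
	worker.input.push(task)
	// The CAS ensures at most one goroutine drains each room's queue,
	// preserving per-room ordering while distinct rooms run in parallel.
	if worker.running.CAS(false, true) {
		go worker.run()
	}
}
```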
// isProcessingErrorFatal returns true if the error is really bad and
// we should stop processing the transaction, and returns false if it
// is just some less serious error about a specific event.
func isProcessingErrorFatal(err error) bool {
switch err {
case sql.ErrConnDone:
case sql.ErrTxDone:
return true
func (t *inputWorker) run() {
defer t.running.Store(false)
for {
task, ok := t.input.pop()
if !ok {
return
}
if task == nil {
continue
}
func() {
defer task.wg.Done()
select {
case <-task.ctx.Done():
task.err = context.DeadlineExceeded
pduCountTotal.WithLabelValues("expired").Inc()
return
default:
evStart := time.Now()
// TODO: Is 5 minutes too long?
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
task.err = task.t.processEvent(ctx, task.event)
cancel()
task.duration = time.Since(evStart)
if err := task.err; err != nil {
switch err.(type) {
case *gomatrixserverlib.NotAllowed:
processEventSummary.WithLabelValues(task.t.work, MetricsOutcomeRejected).Observe(
float64(time.Since(evStart).Nanoseconds()) / 1000.,
)
util.GetLogger(task.ctx).WithError(err).WithField("event_id", task.event.EventID()).WithField("rejected", true).Warn(
"Failed to process incoming federation event, skipping",
)
task.err = nil // make "rejected" failures silent
default:
processEventSummary.WithLabelValues(task.t.work, MetricsOutcomeFail).Observe(
float64(time.Since(evStart).Nanoseconds()) / 1000.,
)
util.GetLogger(task.ctx).WithError(err).WithField("event_id", task.event.EventID()).WithField("rejected", false).Warn(
"Failed to process incoming federation event, skipping",
)
}
} else {
pduCountTotal.WithLabelValues("success").Inc()
processEventSummary.WithLabelValues(task.t.work, MetricsOutcomeOK).Observe(
float64(time.Since(evStart).Nanoseconds()) / 1000.,
)
}
}
}()
}
return false
}
type roomNotFoundError struct {
@ -340,19 +423,6 @@ func (e missingPrevEventsError) Error() string {
return fmt.Sprintf("unable to get prev_events for event %q: %s", e.eventID, e.err)
}
func (t *txnReq) haveEventIDs() map[string]bool {
t.newEventsMutex.RLock()
defer t.newEventsMutex.RUnlock()
result := make(map[string]bool, len(t.haveEvents))
for eventID := range t.haveEvents {
if t.newEvents[eventID] {
continue
}
result[eventID] = true
}
return result
}
func (t *txnReq) processEDUs(ctx context.Context) {
for _, e := range t.EDUs {
eduCountTotal.Inc()
@ -479,22 +549,24 @@ func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverli
}
}
func (t *txnReq) getServers(ctx context.Context, roomID string) []gomatrixserverlib.ServerName {
t.serversMutex.Lock()
defer t.serversMutex.Unlock()
func (t *txnReq) getServers(ctx context.Context, roomID string, event *gomatrixserverlib.Event) []gomatrixserverlib.ServerName {
// The server that sent us the event should be sufficient to tell us about missing
// prev and auth events.
servers := []gomatrixserverlib.ServerName{t.Origin}
// If the event origin is different to the transaction origin then we can use
// this as a last resort. The origin server that created the event would have
// had to know the auth and prev events.
if event != nil {
if origin := event.Origin(); origin != t.Origin {
servers = append(servers, origin)
}
}
// If a specific room-to-server provider exists then use that. This will primarily
// be used for the P2P demos.
if t.servers != nil {
return t.servers
servers = append(servers, t.servers.GetServersForRoom(ctx, roomID, event)...)
}
t.servers = []gomatrixserverlib.ServerName{t.Origin}
serverReq := &api.QueryServerJoinedToRoomRequest{
RoomID: roomID,
}
serverRes := &api.QueryServerJoinedToRoomResponse{}
if err := t.rsAPI.QueryServerJoinedToRoom(ctx, serverReq, serverRes); err == nil {
t.servers = append(t.servers, serverRes.ServerNames...)
util.GetLogger(ctx).Infof("Found %d server(s) to query for missing events in %q", len(t.servers), roomID)
}
return t.servers
return servers
}
func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) error {
@ -527,6 +599,15 @@ func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) e
return roomNotFoundError{e.RoomID()}
}
// Prepare a map of all the events we already had before this point, so
// that we don't send them to the roomserver again.
for _, eventID := range append(e.AuthEventIDs(), e.PrevEventIDs()...) {
t.hadEvent(eventID, true)
}
for _, eventID := range append(stateResp.MissingAuthEventIDs, stateResp.MissingPrevEventIDs...) {
t.hadEvent(eventID, false)
}
if len(stateResp.MissingAuthEventIDs) > 0 {
t.work = MetricsWorkMissingAuthEvents
logger.Infof("Event refers to %d unknown auth_events", len(stateResp.MissingAuthEventIDs))
@ -570,11 +651,14 @@ func (t *txnReq) retrieveMissingAuthEvents(
withNextEvent:
for missingAuthEventID := range missingAuthEvents {
withNextServer:
for _, server := range t.getServers(ctx, e.RoomID()) {
for _, server := range t.getServers(ctx, e.RoomID(), e) {
logger.Infof("Retrieving missing auth event %q from %q", missingAuthEventID, server)
tx, err := t.federation.GetEvent(ctx, server, missingAuthEventID)
if err != nil {
logger.WithError(err).Warnf("Failed to retrieve auth event %q", missingAuthEventID)
if errors.Is(err, context.DeadlineExceeded) {
return err
}
continue withNextServer
}
ev, err := gomatrixserverlib.NewEventFromUntrustedJSON(tx.PDUs[0], stateResp.RoomVersion)
@ -596,6 +680,8 @@ withNextEvent:
); err != nil {
return fmt.Errorf("api.SendEvents: %w", err)
}
t.hadEvent(ev.EventID(), true) // if the roomserver didn't know about the event before, it does now
t.cacheAndReturn(ev.Headered(stateResp.RoomVersion))
delete(missingAuthEvents, missingAuthEventID)
continue withNextEvent
}
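The errors.Is(err, context.DeadlineExceeded) check above (and the matching ones in getMissingEvents and lookupEvent further down) stops the server loop once our own context has expired, since every remaining server would fail the same way. In generic form, with fetch and servers as stand-ins for the real federation call and server list:

```go
for _, server := range servers {
	if _, err := fetch(ctx, server); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return err // our deadline expired; further servers won't help
		}
		continue // server-specific failure, try the next one
	}
	break // success
}
```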
@ -621,11 +707,6 @@ func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserv
func (t *txnReq) processEventWithMissingState(
ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion,
) error {
// Do this with a fresh context, so that we keep working even if the
// original request times out. With any luck, by the time the remote
// side retries, we'll have fetched the missing state.
gmectx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
defer cancel()
// We are missing the previous events for this event.
// This means that there is a gap in our view of the history of the
// room. There are two ways that we can handle such a gap:
@ -646,7 +727,7 @@ func (t *txnReq) processEventWithMissingState(
// - fill in the gap completely then process event `e` returning no backwards extremity
// - fail to fill in the gap and tell us to terminate the transaction err=not nil
// - fail to fill in the gap and tell us to fetch state at the new backwards extremity, and to not terminate the transaction
newEvents, err := t.getMissingEvents(gmectx, e, roomVersion)
newEvents, err := t.getMissingEvents(ctx, e, roomVersion)
if err != nil {
return err
}
@ -673,7 +754,7 @@ func (t *txnReq) processEventWithMissingState(
// Look up what the state is after the backward extremity. This will either
// come from the roomserver, if we know all the required events, or it will
// come from a remote server via /state_ids if not.
prevState, trustworthy, lerr := t.lookupStateAfterEvent(gmectx, roomVersion, backwardsExtremity.RoomID(), prevEventID)
prevState, trustworthy, lerr := t.lookupStateAfterEvent(ctx, roomVersion, backwardsExtremity.RoomID(), prevEventID)
if lerr != nil {
util.GetLogger(ctx).WithError(lerr).Errorf("Failed to lookup state after prev_event: %s", prevEventID)
return lerr
@ -717,7 +798,7 @@ func (t *txnReq) processEventWithMissingState(
}
// There's more than one previous state - run them all through state res
t.roomsMu.Lock(e.RoomID())
resolvedState, err = t.resolveStatesAndCheck(gmectx, roomVersion, respStates, backwardsExtremity)
resolvedState, err = t.resolveStatesAndCheck(ctx, roomVersion, respStates, backwardsExtremity)
t.roomsMu.Unlock(e.RoomID())
if err != nil {
util.GetLogger(ctx).WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID())
@ -727,14 +808,23 @@ func (t *txnReq) processEventWithMissingState(
// First of all, send the backward extremity into the roomserver with the
// newly resolved state. This marks the "oldest" point in the backfill and
// sets the baseline state for any new events after this.
// sets the baseline state for any new events after this. We'll make a
// copy of the hadEvents map so that it can be taken downstream without
// worrying about concurrent map reads/writes, since t.hadEvents is meant
// to be protected by a mutex.
hadEvents := map[string]bool{}
t.hadEventsMutex.Lock()
for k, v := range t.hadEvents {
hadEvents[k] = v
}
t.hadEventsMutex.Unlock()
err = api.SendEventWithState(
context.Background(),
t.rsAPI,
api.KindOld,
resolvedState,
backwardsExtremity.Headered(roomVersion),
t.haveEventIDs(),
hadEvents,
)
if err != nil {
return fmt.Errorf("api.SendEventWithState: %w", err)
@ -786,7 +876,7 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
default:
return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
}
t.cacheAndReturn(h)
h = t.cacheAndReturn(h)
if h.StateKey() != nil {
addedToState := false
for i := range respState.StateEvents {
@ -806,6 +896,8 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
}
func (t *txnReq) cacheAndReturn(ev *gomatrixserverlib.HeaderedEvent) *gomatrixserverlib.HeaderedEvent {
t.haveEventsMutex.Lock()
defer t.haveEventsMutex.Unlock()
if cached, exists := t.haveEvents[ev.EventID()]; exists {
return cached
}
@ -828,6 +920,7 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
// set the event from the haveEvents cache - this means we will share pointers with other prev_event branches for this
// processEvent request, which is better for memory.
stateEvents[i] = t.cacheAndReturn(ev)
t.hadEvent(ev.EventID(), true)
}
// we should never access res.StateEvents again so we delete it here to make GC faster
res.StateEvents = nil
@ -835,6 +928,7 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
var authEvents []*gomatrixserverlib.Event
missingAuthEvents := map[string]bool{}
for _, ev := range stateEvents {
t.haveEventsMutex.Lock()
for _, ae := range ev.AuthEventIDs() {
if aev, ok := t.haveEvents[ae]; ok {
authEvents = append(authEvents, aev.Unwrap())
@ -842,6 +936,7 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
missingAuthEvents[ae] = true
}
}
t.haveEventsMutex.Unlock()
}
// QueryStateAfterEvents does not return the auth events, so fetch them now. We know the roomserver has them else it wouldn't
// have stored the event.
@ -858,8 +953,9 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
return nil
}
for i := range queryRes.Events {
for i, ev := range queryRes.Events {
authEvents = append(authEvents, t.cacheAndReturn(queryRes.Events[i]).Unwrap())
t.hadEvent(ev.EventID(), true)
}
queryRes.Events = nil
}
@ -934,12 +1030,13 @@ func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Even
return nil, err
}
latestEvents := make([]string, len(res.LatestEvents))
for i := range res.LatestEvents {
for i, ev := range res.LatestEvents {
latestEvents[i] = res.LatestEvents[i].EventID
t.hadEvent(ev.EventID, true)
}
var missingResp *gomatrixserverlib.RespMissingEvents
servers := t.getServers(ctx, e.RoomID())
servers := t.getServers(ctx, e.RoomID(), e)
for _, server := range servers {
var m gomatrixserverlib.RespMissingEvents
if m, err = t.federation.LookupMissingEvents(ctx, server, e.RoomID(), gomatrixserverlib.MissingEvents{
@ -953,6 +1050,9 @@ func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Even
break
} else {
logger.WithError(err).Errorf("%s pushed us an event but %q did not respond to /get_missing_events", t.Origin, server)
if errors.Is(err, context.DeadlineExceeded) {
break
}
}
}
@ -980,6 +1080,12 @@ func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Even
// For now, we do not allow Case B, so reject the event.
logger.Infof("get_missing_events returned %d events", len(missingResp.Events))
// Make sure events from the missingResp are using the cache - missing events
// will be added and duplicates will be removed.
for i, ev := range missingResp.Events {
missingResp.Events[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap()
}
// topologically sort and sanity check that we are making forward progress
newEvents = gomatrixserverlib.ReverseTopologicalOrdering(missingResp.Events, gomatrixserverlib.TopologicalOrderByPrevEvents)
shouldHaveSomeEventIDs := e.PrevEventIDs()
@ -1018,6 +1124,14 @@ func (t *txnReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID
if err := state.Check(ctx, t.keys, nil); err != nil {
return nil, err
}
// Cache the results of this state lookup and deduplicate anything we already
// have in the cache, freeing up memory.
for i, ev := range state.AuthEvents {
state.AuthEvents[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap()
}
for i, ev := range state.StateEvents {
state.StateEvents[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap()
}
return &state, nil
}
@ -1033,6 +1147,7 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...)
missing := make(map[string]bool)
var missingEventList []string
t.haveEventsMutex.Lock()
for _, sid := range wantIDs {
if _, ok := t.haveEvents[sid]; !ok {
if !missing[sid] {
@ -1041,6 +1156,7 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
}
}
}
t.haveEventsMutex.Unlock()
// fetch as many as we can from the roomserver
queryReq := api.QueryEventsByIDRequest{
@ -1050,9 +1166,10 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
return nil, err
}
for i := range queryRes.Events {
for i, ev := range queryRes.Events {
queryRes.Events[i] = t.cacheAndReturn(queryRes.Events[i])
t.hadEvent(ev.EventID(), true)
evID := queryRes.Events[i].EventID()
t.cacheAndReturn(queryRes.Events[i])
if missing[evID] {
delete(missing, evID)
}
@ -1153,6 +1270,9 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
func (t *txnReq) createRespStateFromStateIDs(stateIDs gomatrixserverlib.RespStateIDs) (
*gomatrixserverlib.RespState, error) { // nolint:unparam
t.haveEventsMutex.Lock()
defer t.haveEventsMutex.Unlock()
// create a RespState response using the response to /state_ids as a guide
respState := gomatrixserverlib.RespState{}
@ -1193,11 +1313,14 @@ func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.
}
var event *gomatrixserverlib.Event
found := false
servers := t.getServers(ctx, roomID)
servers := t.getServers(ctx, roomID, nil)
for _, serverName := range servers {
txn, err := t.federation.GetEvent(ctx, serverName, missingEventID)
if err != nil || len(txn.PDUs) == 0 {
util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warn("Failed to get missing /event for event ID")
if errors.Is(err, context.DeadlineExceeded) {
break
}
continue
}
event, err = gomatrixserverlib.NewEventFromUntrustedJSON(txn.PDUs[0], roomVersion)
@ -1216,9 +1339,5 @@ func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.
util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", event.EventID())
return nil, verifySigError{event.EventID(), err}
}
h := event.Headered(roomVersion)
t.newEventsMutex.Lock()
t.newEvents[h.EventID()] = true
t.newEventsMutex.Unlock()
return h, nil
return t.cacheAndReturn(event.Headered(roomVersion)), nil
}

@ -370,7 +370,7 @@ func mustCreateTransaction(rsAPI api.RoomserverInternalAPI, fedClient txnFederat
keys: &test.NopJSONVerifier{},
federation: fedClient,
haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent),
newEvents: make(map[string]bool),
hadEvents: make(map[string]bool),
roomsMu: internal.NewMutexByRoom(),
}
t.PDUs = pdus

@ -43,6 +43,7 @@ func NewInternalAPI(
federation *gomatrixserverlib.FederationClient,
rsAPI roomserverAPI.RoomserverInternalAPI,
keyRing *gomatrixserverlib.KeyRing,
resetBlacklist bool,
) api.FederationSenderInternalAPI {
cfg := &base.Cfg.FederationSender
@ -51,6 +52,10 @@ func NewInternalAPI(
logrus.WithError(err).Panic("failed to connect to federation sender db")
}
if resetBlacklist {
_ = federationSenderDB.RemoveAllServersFromBlacklist()
}
stats := &statistics.Statistics{
DB: federationSenderDB,
FailuresUntilBlacklist: cfg.FederationMaxRetries,

@ -572,6 +572,7 @@ func (r *FederationSenderInternalAPI) PerformServersAlive(
response *api.PerformServersAliveResponse,
) (err error) {
for _, srv := range request.Servers {
_ = r.db.RemoveServerFromBlacklist(srv)
r.queues.RetryServer(srv)
}
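Marking a server alive now clears its blacklist entry before retrying its queues. A hypothetical call site — the request type name is inferred from the response type above, and fsAPI stands in for a FederationSenderInternalAPI handle:

```go
req := &api.PerformServersAliveRequest{
	Servers: []gomatrixserverlib.ServerName{"remote.example.com"},
}
res := &api.PerformServersAliveResponse{}
if err := fsAPI.PerformServersAlive(context.Background(), req, res); err != nil {
	logrus.WithError(err).Warn("PerformServersAlive failed")
}
```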

@ -54,6 +54,7 @@ type Database interface {
// these don't have contexts passed in as we want things to happen regardless of the request context
AddServerToBlacklist(serverName gomatrixserverlib.ServerName) error
RemoveServerFromBlacklist(serverName gomatrixserverlib.ServerName) error
RemoveAllServersFromBlacklist() error
IsServerBlacklisted(serverName gomatrixserverlib.ServerName) (bool, error)
AddOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error

@ -40,11 +40,15 @@ const selectBlacklistSQL = "" +
const deleteBlacklistSQL = "" +
"DELETE FROM federationsender_blacklist WHERE server_name = $1"
const deleteAllBlacklistSQL = "" +
"TRUNCATE federationsender_blacklist"
type blacklistStatements struct {
db *sql.DB
insertBlacklistStmt *sql.Stmt
selectBlacklistStmt *sql.Stmt
deleteBlacklistStmt *sql.Stmt
deleteAllBlacklistStmt *sql.Stmt
}
func NewPostgresBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {
@ -65,11 +69,12 @@ func NewPostgresBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {
if s.deleteBlacklistStmt, err = db.Prepare(deleteBlacklistSQL); err != nil {
return
}
if s.deleteAllBlacklistStmt, err = db.Prepare(deleteAllBlacklistSQL); err != nil {
return
}
return
}
// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *blacklistStatements) InsertBlacklist(
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {
@ -78,9 +83,6 @@ func (s *blacklistStatements) InsertBlacklist(
return err
}
// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *blacklistStatements) SelectBlacklist(
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) (bool, error) {
@ -96,8 +98,6 @@ func (s *blacklistStatements) SelectBlacklist(
return res.Next(), nil
}
// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *blacklistStatements) DeleteBlacklist(
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {
@ -105,3 +105,11 @@ func (s *blacklistStatements) DeleteBlacklist(
_, err := stmt.ExecContext(ctx, serverName)
return err
}
func (s *blacklistStatements) DeleteAllBlacklist(
ctx context.Context, txn *sql.Tx,
) error {
stmt := sqlutil.TxStmt(txn, s.deleteAllBlacklistStmt)
_, err := stmt.ExecContext(ctx)
return err
}

@ -148,6 +148,12 @@ func (d *Database) RemoveServerFromBlacklist(serverName gomatrixserverlib.Server
})
}
func (d *Database) RemoveAllServersFromBlacklist() error {
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
return d.FederationSenderBlacklist.DeleteAllBlacklist(context.TODO(), txn)
})
}
func (d *Database) IsServerBlacklisted(serverName gomatrixserverlib.ServerName) (bool, error) {
return d.FederationSenderBlacklist.SelectBlacklist(context.TODO(), nil, serverName)
}

@ -40,11 +40,15 @@ const selectBlacklistSQL = "" +
const deleteBlacklistSQL = "" +
"DELETE FROM federationsender_blacklist WHERE server_name = $1"
const deleteAllBlacklistSQL = "" +
"DELETE FROM federationsender_blacklist"
type blacklistStatements struct {
db *sql.DB
insertBlacklistStmt *sql.Stmt
selectBlacklistStmt *sql.Stmt
deleteBlacklistStmt *sql.Stmt
deleteAllBlacklistStmt *sql.Stmt
}
func NewSQLiteBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {
@ -65,11 +69,12 @@ func NewSQLiteBlacklistTable(db *sql.DB) (s *blacklistStatements, err error) {
if s.deleteBlacklistStmt, err = db.Prepare(deleteBlacklistSQL); err != nil {
return
}
if s.deleteAllBlacklistStmt, err = db.Prepare(deleteAllBlacklistSQL); err != nil {
return
}
return
}
// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *blacklistStatements) InsertBlacklist(
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {
@ -78,9 +83,6 @@ func (s *blacklistStatements) InsertBlacklist(
return err
}
// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *blacklistStatements) SelectBlacklist(
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) (bool, error) {
@ -96,8 +98,6 @@ func (s *blacklistStatements) SelectBlacklist(
return res.Next(), nil
}
// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *blacklistStatements) DeleteBlacklist(
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName,
) error {
@ -105,3 +105,11 @@ func (s *blacklistStatements) DeleteBlacklist(
_, err := stmt.ExecContext(ctx, serverName)
return err
}
func (s *blacklistStatements) DeleteAllBlacklist(
ctx context.Context, txn *sql.Tx,
) error {
stmt := sqlutil.TxStmt(txn, s.deleteAllBlacklistStmt)
_, err := stmt.ExecContext(ctx)
return err
}

@ -60,6 +60,7 @@ type FederationSenderBlacklist interface {
InsertBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
SelectBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) (bool, error)
DeleteBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
DeleteAllBlacklist(ctx context.Context, txn *sql.Tx) error
}
type FederationSenderOutboundPeeks interface {

go.mod

@ -3,13 +3,20 @@ module github.com/matrix-org/dendrite
require (
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1
github.com/S7evinK/saramajetstream v0.0.0-20210604172822-4f305c9f1537
github.com/Shopify/sarama v1.29.0
github.com/codeclysm/extract v2.2.0+incompatible
github.com/containerd/containerd v1.5.2 // indirect
github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/getsentry/sentry-go v0.10.0
github.com/gologme/log v1.2.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/h2non/filetype v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4
github.com/juju/testing v0.0.0-20210324180055-18c50b0c2098 // indirect
github.com/klauspost/compress v1.13.0 // indirect
github.com/lib/pq v1.9.0
github.com/libp2p/go-libp2p v0.13.0
@ -23,13 +30,14 @@ require (
github.com/lucas-clemente/quic-go v0.19.3
github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4
github.com/matrix-org/go-sqlite3-js v0.0.0-20200522092705-bc8506ccbcf3
github.com/matrix-org/go-sqlite3-js v0.0.0-20210625141222-bd2b7124cee8
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd
github.com/matrix-org/gomatrixserverlib v0.0.0-20210302161955-6142fe3f8c2c
github.com/matrix-org/naffka v0.0.0-20201009174903-d26a3b9cb161
github.com/matrix-org/pinecone v0.0.0-20210510160342-a1dfbcf4bd47
github.com/matrix-org/gomatrixserverlib v0.0.0-20210702152949-0cac5159e7d6
github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0
github.com/matrix-org/pinecone v0.0.0-20210623102758-74f885644c1b
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
github.com/mattn/go-sqlite3 v1.14.7-0.20210414154423-1157a4212dcb
github.com/morikuni/aec v1.0.0 // indirect
github.com/nats-io/nats.go v1.11.0
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6
@ -46,10 +54,12 @@ require (
github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20210218094457-e77ca8019daa
go.uber.org/atomic v1.7.0
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08
golang.org/x/net v0.0.0-20210525063256-abc453219eb5
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
gopkg.in/h2non/bimg.v1 v1.1.5
gopkg.in/yaml.v2 v2.4.0
nhooyr.io/websocket v1.8.7
)
go 1.14

go.sum

File diff suppressed because it is too large.

@ -20,6 +20,7 @@ import (
"encoding/json"
"time"
"github.com/lib/pq"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/api"
@ -47,7 +48,7 @@ const upsertKeysSQL = "" +
" DO UPDATE SET key_json = $6"
const selectKeysSQL = "" +
"SELECT key_id, algorithm, key_json FROM keyserver_one_time_keys WHERE user_id=$1 AND device_id=$2"
"SELECT concat(algorithm, ':', key_id) as algorithmwithid, key_json FROM keyserver_one_time_keys WHERE user_id=$1 AND device_id=$2 AND concat(algorithm, ':', key_id) = ANY($3);"
const selectKeysCountSQL = "" +
"SELECT algorithm, COUNT(key_id) FROM keyserver_one_time_keys WHERE user_id=$1 AND device_id=$2 GROUP BY algorithm"
@ -94,29 +95,22 @@ func NewPostgresOneTimeKeysTable(db *sql.DB) (tables.OneTimeKeys, error) {
}
func (s *oneTimeKeysStatements) SelectOneTimeKeys(ctx context.Context, userID, deviceID string, keyIDsWithAlgorithms []string) (map[string]json.RawMessage, error) {
rows, err := s.selectKeysStmt.QueryContext(ctx, userID, deviceID)
rows, err := s.selectKeysStmt.QueryContext(ctx, userID, deviceID, pq.Array(keyIDsWithAlgorithms))
if err != nil {
return nil, err
}
defer internal.CloseAndLogIfError(ctx, rows, "selectKeysStmt: rows.close() failed")
wantSet := make(map[string]bool, len(keyIDsWithAlgorithms))
for _, ka := range keyIDsWithAlgorithms {
wantSet[ka] = true
}
result := make(map[string]json.RawMessage)
var (
algorithmWithID string
keyJSONStr string
)
for rows.Next() {
var keyID string
var algorithm string
var keyJSONStr string
if err := rows.Scan(&keyID, &algorithm, &keyJSONStr); err != nil {
if err := rows.Scan(&algorithmWithID, &keyJSONStr); err != nil {
return nil, err
}
keyIDWithAlgo := algorithm + ":" + keyID
if wantSet[keyIDWithAlgo] {
result[keyIDWithAlgo] = json.RawMessage(keyJSONStr)
}
result[algorithmWithID] = json.RawMessage(keyJSONStr)
}
return result, rows.Err()
}
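This change pushes the one-time key filter into Postgres: rather than fetching every key for the device and filtering in Go against a want-set, the wanted "algorithm:key_id" strings are bound as a text array via pq.Array and matched with = ANY($3). A fragment-style sketch of the binding, assuming db, ctx, userID and deviceID are in scope:

```go
keyIDsWithAlgorithms := []string{"signed_curve25519:AAAAAA"}
rows, err := db.QueryContext(ctx,
	`SELECT concat(algorithm, ':', key_id), key_json
	   FROM keyserver_one_time_keys
	  WHERE user_id = $1 AND device_id = $2
	    AND concat(algorithm, ':', key_id) = ANY($3)`,
	userID, deviceID, pq.Array(keyIDsWithAlgorithms),
)
if err != nil {
	return nil, err
}
defer rows.Close() // iterate as in SelectOneTimeKeys above
```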

@ -685,7 +685,7 @@ func (r *downloadRequest) GetContentLengthAndReader(contentLengthHeader string,
r.Logger.WithError(parseErr).Warn("Failed to parse content length")
return 0, nil, fmt.Errorf("strconv.ParseInt: %w", parseErr)
}
if parsedLength > int64(maxFileSizeBytes) {
if maxFileSizeBytes > 0 && parsedLength > int64(maxFileSizeBytes) {
return 0, nil, fmt.Errorf(
"remote file size (%d bytes) exceeds locally configured max media size (%d bytes)",
parsedLength, maxFileSizeBytes,

@ -147,7 +147,20 @@ func (r *uploadRequest) doUpload(
// r.storeFileAndMetadata(ctx, tmpDir, ...)
// before you return from doUpload else we will leak a temp file. We could make this nicer with a `WithTransaction` style of
// nested function to guarantee either storage or cleanup.
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, cfg.AbsBasePath)
// should not happen, but prevents any int overflows
if cfg.MaxFileSizeBytes != nil && *cfg.MaxFileSizeBytes+1 <= 0 {
r.Logger.WithFields(log.Fields{
"MaxFileSizeBytes": *cfg.MaxFileSizeBytes + 1,
}).Error("Error while transferring file, configured max_file_size_bytes overflows int64")
return &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.Unknown("Failed to upload"),
}
}
lr := io.LimitReader(reqReader, int64(*cfg.MaxFileSizeBytes)+1)
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, lr, cfg.AbsBasePath)
if err != nil {
r.Logger.WithError(err).WithFields(log.Fields{
"MaxFileSizeBytes": *cfg.MaxFileSizeBytes,

@ -0,0 +1,132 @@
package routing
import (
"context"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/matrix-org/dendrite/mediaapi/fileutils"
"github.com/matrix-org/dendrite/mediaapi/storage"
"github.com/matrix-org/dendrite/mediaapi/types"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/util"
log "github.com/sirupsen/logrus"
)
func Test_uploadRequest_doUpload(t *testing.T) {
type fields struct {
MediaMetadata *types.MediaMetadata
Logger *log.Entry
}
type args struct {
ctx context.Context
reqReader io.Reader
cfg *config.MediaAPI
db storage.Database
activeThumbnailGeneration *types.ActiveThumbnailGeneration
}
wd, err := os.Getwd()
if err != nil {
t.Errorf("failed to get current working directory: %v", err)
}
maxSize := config.FileSizeBytes(8)
logger := log.New().WithField("mediaapi", "test")
testdataPath := filepath.Join(wd, "./testdata")
cfg := &config.MediaAPI{
MaxFileSizeBytes: &maxSize,
BasePath: config.Path(testdataPath),
AbsBasePath: config.Path(testdataPath),
DynamicThumbnails: false,
}
// create testdata folder and remove when done
_ = os.Mkdir(testdataPath, os.ModePerm)
defer fileutils.RemoveDir(types.Path(testdataPath), nil)
db, err := storage.Open(&config.DatabaseOptions{
ConnectionString: "file::memory:?cache=shared",
MaxOpenConnections: 100,
MaxIdleConnections: 2,
ConnMaxLifetimeSeconds: -1,
})
if err != nil {
t.Errorf("error opening mediaapi database: %v", err)
}
tests := []struct {
name string
fields fields
args args
want *util.JSONResponse
}{
{
name: "upload ok",
args: args{
ctx: context.Background(),
reqReader: strings.NewReader("test"),
cfg: cfg,
db: db,
},
fields: fields{
Logger: logger,
MediaMetadata: &types.MediaMetadata{
MediaID: "1337",
UploadName: "test ok",
},
},
want: nil,
},
{
name: "upload ok (exact size)",
args: args{
ctx: context.Background(),
reqReader: strings.NewReader("testtest"),
cfg: cfg,
db: db,
},
fields: fields{
Logger: logger,
MediaMetadata: &types.MediaMetadata{
MediaID: "1338",
UploadName: "test ok (exact size)",
},
},
want: nil,
},
{
name: "upload not ok",
args: args{
ctx: context.Background(),
reqReader: strings.NewReader("test test test"),
cfg: cfg,
db: db,
},
fields: fields{
Logger: logger,
MediaMetadata: &types.MediaMetadata{
MediaID: "1339",
UploadName: "test fail",
},
},
want: requestEntityTooLargeJSONResponse(maxSize),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := &uploadRequest{
MediaMetadata: tt.fields.MediaMetadata,
Logger: tt.fields.Logger,
}
if got := r.doUpload(tt.args.ctx, tt.args.reqReader, tt.args.cfg, tt.args.db, tt.args.activeThumbnailGeneration); !reflect.DeepEqual(got, tt.want) {
t.Errorf("doUpload() = %+v, want %+v", got, tt.want)
}
})
}
}

@ -28,6 +28,7 @@ import (
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/roomserver/storage"
"github.com/matrix-org/gomatrixserverlib"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"go.uber.org/atomic"
)
@ -38,7 +39,6 @@ type Inputer struct {
ServerName gomatrixserverlib.ServerName
ACLs *acls.ServerACLs
OutputRoomEventTopic string
workers sync.Map // room ID -> *inputWorker
}
@ -52,7 +52,7 @@ type inputTask struct {
type inputWorker struct {
r *Inputer
running atomic.Bool
input chan *inputTask
input *fifoQueue
}
// Guarded by a CAS on w.running
@ -60,7 +60,14 @@ func (w *inputWorker) start() {
defer w.running.Store(false)
for {
select {
case task := <-w.input:
case <-w.input.wait():
task, ok := w.input.pop()
if !ok {
continue
}
roomserverInputBackpressure.With(prometheus.Labels{
"room_id": task.event.Event.RoomID(),
}).Dec()
hooks.Run(hooks.KindNewEventReceived, task.event.Event)
_, task.err = w.r.processRoomEvent(task.ctx, task.event)
if task.err == nil {
@ -117,6 +124,20 @@ func (r *Inputer) WriteOutputEvents(roomID string, updates []api.OutputEvent) er
return errs
}
func init() {
prometheus.MustRegister(roomserverInputBackpressure)
}
var roomserverInputBackpressure = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "dendrite",
Subsystem: "roomserver",
Name: "input_backpressure",
Help: "How many events are queued for input for a given room",
},
[]string{"room_id"},
)
// InputRoomEvents implements api.RoomserverInternalAPI
func (r *Inputer) InputRoomEvents(
_ context.Context,
@ -143,7 +164,7 @@ func (r *Inputer) InputRoomEvents(
// room - the channel will be quite small as it's just pointer types.
w, _ := r.workers.LoadOrStore(roomID, &inputWorker{
r: r,
input: make(chan *inputTask, 32),
input: newFIFOQueue(),
})
worker := w.(*inputWorker)
@ -160,7 +181,10 @@ func (r *Inputer) InputRoomEvents(
if worker.running.CAS(false, true) {
go worker.start()
}
worker.input <- tasks[i]
worker.input.push(tasks[i])
roomserverInputBackpressure.With(prometheus.Labels{
"room_id": roomID,
}).Inc()
}
// Wait for all of the workers to return results about our tasks.

@ -0,0 +1,64 @@
package input
import (
"sync"
)
type fifoQueue struct {
tasks []*inputTask
count int
mutex sync.Mutex
notifs chan struct{}
}
func newFIFOQueue() *fifoQueue {
q := &fifoQueue{
notifs: make(chan struct{}, 1),
}
return q
}
func (q *fifoQueue) push(frame *inputTask) {
q.mutex.Lock()
defer q.mutex.Unlock()
q.tasks = append(q.tasks, frame)
q.count++
select {
case q.notifs <- struct{}{}:
default:
}
}
// pop returns the first item of the queue, if there is one.
// The second return value will indicate if a task was returned.
// You must check this value, even after calling wait().
func (q *fifoQueue) pop() (*inputTask, bool) {
q.mutex.Lock()
defer q.mutex.Unlock()
if q.count == 0 {
return nil, false
}
frame := q.tasks[0]
q.tasks[0] = nil
q.tasks = q.tasks[1:]
q.count--
if q.count == 0 {
// Force a GC of the underlying array, since it might have
// grown significantly if the queue was hammered for some reason
q.tasks = nil
}
return frame, true
}
// wait returns a channel which can be used to detect when an
// item is waiting in the queue.
func (q *fifoQueue) wait() <-chan struct{} {
q.mutex.Lock()
defer q.mutex.Unlock()
if q.count > 0 && len(q.notifs) == 0 {
ch := make(chan struct{})
close(ch)
return ch
}
return q.notifs
}
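The wait()/pop() contract deserves emphasis: wait() hands back an already-closed channel when items are pending but the notification was already consumed, and pop() must still be checked because a notification can race with another consumer. A typical consumer loop, mirroring inputWorker.start above, with q, done and handle as stand-ins:

```go
for {
	select {
	case <-q.wait():
		task, ok := q.pop()
		if !ok {
			continue // raced with another consumer; wait again
		}
		handle(task) // stand-in for real task processing
	case <-done:
		return
	}
}
```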

@ -117,13 +117,17 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
_roomserver_state_block.event_nid
FROM
_roomserver_state_snapshots
JOIN _roomserver_state_block ON _roomserver_state_block.state_block_nid = ANY (_roomserver_state_snapshots.state_block_nids)
LEFT JOIN _roomserver_state_block ON _roomserver_state_block.state_block_nid = ANY (_roomserver_state_snapshots.state_block_nids)
WHERE
_roomserver_state_snapshots.state_snapshot_nid = ANY ( SELECT DISTINCT
_roomserver_state_snapshots.state_snapshot_nid = ANY (
SELECT
_roomserver_state_snapshots.state_snapshot_nid
FROM
_roomserver_state_snapshots
LIMIT $1 OFFSET $2)) AS _roomserver_state_block
ORDER BY _roomserver_state_snapshots.state_snapshot_nid ASC
LIMIT $1 OFFSET $2
)
) AS _roomserver_state_block
GROUP BY
state_snapshot_nid,
room_nid,
@ -136,22 +140,49 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
logrus.Warnf("Rewriting snapshots %d-%d of %d...", batchoffset, batchoffset+batchsize, snapshotcount)
var snapshots []stateBlockData
var badCreateSnapshots []stateBlockData
for snapshotrows.Next() {
var snapshot stateBlockData
var eventsarray pq.Int64Array
if err = snapshotrows.Scan(&snapshot.StateSnapshotNID, &snapshot.RoomNID, &snapshot.StateBlockNID, &eventsarray); err != nil {
var eventsarray []sql.NullInt64
var nulStateBlockNID sql.NullInt64
if err = snapshotrows.Scan(&snapshot.StateSnapshotNID, &snapshot.RoomNID, &nulStateBlockNID, pq.Array(&eventsarray)); err != nil {
return fmt.Errorf("rows.Scan: %w", err)
}
if nulStateBlockNID.Valid {
snapshot.StateBlockNID = types.StateBlockNID(nulStateBlockNID.Int64)
}
// Dendrite v0.1.0 would not make a state block for the create event, resulting in [NULL] from the query above.
// Remember the snapshot; we'll fill it in after we close this cursor, as we can't have two queries running at the same time.
if len(eventsarray) == 1 && !eventsarray[0].Valid {
badCreateSnapshots = append(badCreateSnapshots, snapshot)
continue
}
for _, e := range eventsarray {
snapshot.EventNIDs = append(snapshot.EventNIDs, types.EventNID(e))
if e.Valid {
snapshot.EventNIDs = append(snapshot.EventNIDs, types.EventNID(e.Int64))
}
}
snapshot.EventNIDs = snapshot.EventNIDs[:util.SortAndUnique(snapshot.EventNIDs)]
snapshots = append(snapshots, snapshot)
}
if err = snapshotrows.Close(); err != nil {
return fmt.Errorf("snapshots.Close: %w", err)
}
// fill in bad create snapshots
for _, s := range badCreateSnapshots {
var createEventNID types.EventNID
err = tx.QueryRow(
`SELECT event_nid FROM roomserver_events WHERE state_snapshot_nid = $1 AND event_type_nid = 1`, s.StateSnapshotNID,
).Scan(&createEventNID)
if err != nil {
return fmt.Errorf("cannot xref null state block with snapshot %d: %s", s.StateSnapshotNID, err)
}
if createEventNID == 0 {
return fmt.Errorf("cannot xref null state block with snapshot %d, no create event", s.StateSnapshotNID)
}
s.EventNIDs = append(s.EventNIDs, createEventNID)
snapshots = append(snapshots, s)
}
newsnapshots := map[stateSnapshotData]types.StateBlockNIDs{}
@ -202,6 +233,23 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
}
// By this point we should have no more state_snapshot_nids below maxsnapshotid in either roomserver_rooms or roomserver_events
// If we do, this is a problem if Dendrite tries to load the snapshot as it will not exist
// in roomserver_state_snapshots
var count int64
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
return fmt.Errorf("%d events exist in roomserver_events which have not been converted to a new state_snapshot_nid; this is a bug, please report", count)
}
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
return fmt.Errorf("%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report", count)
}
if _, err = tx.Exec(`
DROP TABLE _roomserver_state_snapshots;
DROP SEQUENCE roomserver_state_snapshot_nid_seq;

@ -156,7 +156,7 @@ func (d *Database) AddState(
stateBlockNIDs []types.StateBlockNID,
state []types.StateEntry,
) (stateNID types.StateSnapshotNID, err error) {
if len(stateBlockNIDs) > 0 {
if len(stateBlockNIDs) > 0 && len(state) > 0 {
// Check to see if the event already appears in any of the existing state
// blocks. If it does then we should not add it again, as this will just
// result in excess state blocks and snapshots.

@ -31,6 +31,7 @@ func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
// nolint:gocyclo
func UpStateBlocksRefactor(tx *sql.Tx) error {
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
defer logrus.Warn("State storage upgrade complete")
@ -45,6 +46,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
maxsnapshotid++
maxblockid++
oldMaxSnapshotID := maxsnapshotid
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
@ -133,6 +135,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
if jerr != nil {
return fmt.Errorf("json.Marshal (new blocks): %w", jerr)
}
var newsnapshot types.StateSnapshotNID
err = tx.QueryRow(`
INSERT INTO roomserver_state_snapshots (state_snapshot_nid, state_snapshot_hash, room_nid, state_block_nids)
@ -144,7 +147,8 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
}
maxsnapshotid++
if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
_, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid)
if err != nil {
return fmt.Errorf("tx.Exec (update events): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
@ -153,6 +157,23 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
}
// By this point we should have no more state_snapshot_nids below oldMaxSnapshotID in either roomserver_rooms or roomserver_events
// If we do, this is a problem if Dendrite tries to load the snapshot as it will not exist
// in roomserver_state_snapshots
var count int64
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
return fmt.Errorf("%d events exist in roomserver_events which have not been converted to a new state_snapshot_nid; this is a bug, please report", count)
}
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
return fmt.Errorf("%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report", count)
}
if _, err = tx.Exec(`DROP TABLE _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
}

@ -101,7 +101,7 @@ func (a *ApplicationService) IsInterestedInRoomID(
) bool {
if namespaceSlice, ok := a.NamespaceMap["rooms"]; ok {
for _, namespace := range namespaceSlice {
if namespace.RegexpObject.MatchString(roomID) {
if namespace.RegexpObject != nil && namespace.RegexpObject.MatchString(roomID) {
return true
}
}
@ -222,6 +222,10 @@ func setupRegexps(asAPI *AppServiceAPI, derived *Derived) (err error) {
case "aliases":
appendExclusiveNamespaceRegexs(&exclusiveAliasStrings, namespaceSlice)
}
if err = compileNamespaceRegexes(namespaceSlice); err != nil {
return fmt.Errorf("invalid regex in appservice %q, namespace %q: %w", appservice.ID, key, err)
}
}
}
@ -258,18 +262,31 @@ func setupRegexps(asAPI *AppServiceAPI, derived *Derived) (err error) {
func appendExclusiveNamespaceRegexs(
exclusiveStrings *[]string, namespaces []ApplicationServiceNamespace,
) {
for index, namespace := range namespaces {
for _, namespace := range namespaces {
if namespace.Exclusive {
// We append parentheses to later separate each regex when we compile
// i.e. "app1.*", "app2.*" -> "(app1.*)|(app2.*)"
*exclusiveStrings = append(*exclusiveStrings, "("+namespace.Regex+")")
}
// Compile this regex into a Regexp object for later use
namespaces[index].RegexpObject, _ = regexp.Compile(namespace.Regex)
}
}
// compileNamespaceRegexes turns namespace regex strings into compiled
// Regexp objects and returns an error if any of them are invalid
func compileNamespaceRegexes(namespaces []ApplicationServiceNamespace) (err error) {
for index, namespace := range namespaces {
// Compile this regex into a Regexp object for later use
r, err := regexp.Compile(namespace.Regex)
if err != nil {
return fmt.Errorf("regex at namespace %d: %w", index, err)
}
namespaces[index].RegexpObject = r
}
return nil
}
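Previously the regexp.Compile error was discarded, leaving RegexpObject nil and forcing the nil guard added to IsInterestedInRoomID above; now a bad pattern fails config load instead. A small sketch of the failure mode, with a hypothetical appservice ID:

```go
re, err := regexp.Compile("[invalid") // unbalanced bracket
if err != nil {
	// New behaviour: fail fast at config load instead of leaving
	// a nil RegexpObject whose MatchString would panic later.
	log.Fatalf("invalid regex in appservice %q: %v", "my-bridge", err)
}
_ = re
```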
// checkErrors checks for any configuration errors amongst the loaded
// application services according to the application service spec.
func checkErrors(config *AppServiceAPI, derived *Derived) (err error) {

@ -2,6 +2,7 @@ package config
import (
"fmt"
"math"
)
type MediaAPI struct {
@ -57,6 +58,11 @@ func (c *MediaAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
checkNotEmpty(configErrs, "media_api.database.connection_string", string(c.Database.ConnectionString))
checkNotEmpty(configErrs, "media_api.base_path", string(c.BasePath))
// allow "unlimited" file size
if c.MaxFileSizeBytes != nil && *c.MaxFileSizeBytes <= 0 {
unlimitedSize := FileSizeBytes(math.MaxInt64 - 1)
c.MaxFileSizeBytes = &unlimitedSize
}
checkPositive(configErrs, "media_api.max_file_size_bytes", int64(*c.MaxFileSizeBytes))
checkPositive(configErrs, "media_api.max_thumbnail_generators", int64(c.MaxThumbnailGenerators))
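The "unlimited" sentinel is math.MaxInt64 - 1 rather than math.MaxInt64 because doUpload computes *cfg.MaxFileSizeBytes + 1 for its LimitReader; with MaxInt64 that addition would overflow to a negative length, which is exactly what the new <= 0 guard rejects. Illustrated with body as a stand-in reader:

```go
// Why math.MaxInt64-1: the upload path reads limit+1 bytes through
// io.LimitReader, so "unlimited" must leave headroom for the +1.
limit := int64(math.MaxInt64 - 1)
lr := io.LimitReader(body, limit+1) // limit+1 is still positive
_ = lr
```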

@ -68,7 +68,7 @@ func (m *Monolith) AddAllPublicRoutes(process *process.ProcessContext, csMux, ss
federationapi.AddPublicRoutes(
ssMux, keyMux, &m.Config.FederationAPI, m.UserAPI, m.FedClient,
m.KeyRing, m.RoomserverAPI, m.FederationSenderAPI,
m.EDUInternalAPI, m.KeyAPI, &m.Config.MSCs,
m.EDUInternalAPI, m.KeyAPI, &m.Config.MSCs, nil,
)
mediaapi.AddPublicRoutes(mediaMux, &m.Config.MediaAPI, m.UserAPI, m.Client)
syncapi.AddPublicRoutes(

@ -39,9 +39,9 @@ import (
)
const (
ConstCreateEventContentKey = "org.matrix.msc1772.type"
ConstSpaceChildEventType = "org.matrix.msc1772.space.child"
ConstSpaceParentEventType = "org.matrix.msc1772.space.parent"
ConstCreateEventContentKey = "type"
ConstSpaceChildEventType = "m.space.child"
ConstSpaceParentEventType = "m.space.parent"
)
// Defaults sets the request defaults