forked from MirrorHub/mautrix-whatsapp
Fix things in legacy backfill
This commit is contained in:
parent 5892169dd0
commit c850f6f373
3 changed files with 24 additions and 4 deletions

@@ -65,7 +65,7 @@ func (user *User) HandleBackfillRequestsLoop(backfillTypes []database.BackfillTy
 		req := user.BackfillQueue.GetNextBackfill(user.MXID, backfillTypes, waitForBackfillTypes, reCheckChannel)
 		user.log.Infofln("Handling backfill request %s", req)
 
-		conv := user.bridge.DB.HistorySync.GetConversation(user.MXID, req.Portal)
+		conv := user.bridge.DB.HistorySync.GetConversation(user.MXID, *req.Portal)
 		if conv == nil {
 			user.log.Debugfln("Could not find history sync conversation data for %s", req.Portal.String())
 			req.MarkDone()
@@ -183,7 +183,7 @@ func (hsq *HistorySyncQuery) GetNMostRecentConversations(userID id.UserID, n int
 	return
 }
 
-func (hsq *HistorySyncQuery) GetConversation(userID id.UserID, portalKey *PortalKey) (conversation *HistorySyncConversation) {
+func (hsq *HistorySyncQuery) GetConversation(userID id.UserID, portalKey PortalKey) (conversation *HistorySyncConversation) {
 	rows, err := hsq.db.Query(getConversationByPortal, userID, portalKey.JID, portalKey.Receiver)
 	defer rows.Close()
 	if err != nil || rows == nil {
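The *req.Portal dereference in the first hunk follows from this signature change: GetConversation now takes the PortalKey by value, so a caller holding a *PortalKey dereferences it. A minimal self-contained sketch of that calling pattern, using simplified stand-in types and a hypothetical helper rather than the bridge's real code:

// Sketch only: PortalKey here is a simplified stand-in for database.PortalKey
// and getConversation is a hypothetical helper mirroring the new by-value
// signature of GetConversation.
package main

import "fmt"

type PortalKey struct {
	JID      string
	Receiver string
}

// getConversation takes the key by value, like the changed method above.
func getConversation(portalKey PortalKey) string {
	return fmt.Sprintf("lookup %s/%s", portalKey.JID, portalKey.Receiver)
}

func main() {
	// The backfill request holds a *PortalKey, hence the *req.Portal
	// dereference in the first hunk.
	portal := &PortalKey{JID: "123456@g.us", Receiver: "987654@s.whatsapp.net"}
	fmt.Println(getConversation(*portal))
}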
@@ -323,3 +323,14 @@ func (hsq *HistorySyncQuery) DeleteAllMessagesForPortal(userID id.UserID, portal
 		hsq.log.Warnfln("Failed to delete historical messages for %s/%s: %v", userID, portalKey.JID, err)
 	}
 }
+
+func (hsq *HistorySyncQuery) DeleteConversation(userID id.UserID, jid string) {
+	// This will also clear history_sync_message as there's a foreign key constraint
+	_, err := hsq.db.Exec(`
+		DELETE FROM history_sync_conversation
+		WHERE user_mxid=$1 AND conversation_id=$2
+	`, userID, jid)
+	if err != nil {
+		hsq.log.Warnfln("Failed to delete historical messages for %s/%s: %v", userID, jid, err)
+	}
+}
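The comment inside DeleteConversation assumes history_sync_message references history_sync_conversation with ON DELETE CASCADE; that schema lives in the bridge's migrations, not in this diff. The sketch below only illustrates the cascade that the later hunks rely on when they call DeleteConversation in place of DeleteAllMessagesForPortal. Column names beyond user_mxid and conversation_id are assumptions, and it runs against an in-memory SQLite database via github.com/mattn/go-sqlite3 rather than the bridge's own database layer.

// Illustrative sketch of the cascade behind DeleteConversation, not the real schema.
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Keep a single connection so the in-memory database and the PRAGMA persist
	// across statements.
	db.SetMaxOpenConns(1)

	// SQLite enforces foreign keys only when this pragma is enabled.
	mustExec(db, `PRAGMA foreign_keys = ON`)
	mustExec(db, `CREATE TABLE history_sync_conversation (
		user_mxid       TEXT,
		conversation_id TEXT,
		PRIMARY KEY (user_mxid, conversation_id)
	)`)
	mustExec(db, `CREATE TABLE history_sync_message (
		user_mxid       TEXT,
		conversation_id TEXT,
		message_id      TEXT,
		PRIMARY KEY (user_mxid, conversation_id, message_id),
		FOREIGN KEY (user_mxid, conversation_id)
			REFERENCES history_sync_conversation (user_mxid, conversation_id)
			ON DELETE CASCADE
	)`)
	mustExec(db, `INSERT INTO history_sync_conversation VALUES ('@user:example.com', '123456@g.us')`)
	mustExec(db, `INSERT INTO history_sync_message VALUES ('@user:example.com', '123456@g.us', 'MSG1')`)

	// Deleting the conversation row also removes its messages via the cascade,
	// which is what the comment in DeleteConversation relies on.
	mustExec(db, `DELETE FROM history_sync_conversation WHERE user_mxid=? AND conversation_id=?`,
		"@user:example.com", "123456@g.us")

	var remaining int
	if err := db.QueryRow(`SELECT COUNT(*) FROM history_sync_message`).Scan(&remaining); err != nil {
		panic(err)
	}
	fmt.Println("messages left after deleting the conversation:", remaining) // prints 0
}

func mustExec(db *sql.DB, query string, args ...interface{}) {
	if _, err := db.Exec(query, args...); err != nil {
		panic(err)
	}
}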
@@ -151,7 +151,7 @@ func (user *User) backfillAll() {
 			user.zlog.Debug().
 				Str("portal_jid", portal.Key.JID.String()).
 				Msg("Chat already has a room, deleting messages from database")
-			user.bridge.DB.HistorySync.DeleteAllMessagesForPortal(user.MXID, portal.Key)
+			user.bridge.DB.HistorySync.DeleteConversation(user.MXID, portal.Key.JID.String())
 		} else if i < user.bridge.Config.Bridge.HistorySync.MaxInitialConversations {
 			err = portal.CreateMatrixRoom(user, nil, true, true)
 			if err != nil {
@@ -173,6 +173,7 @@ func (portal *Portal) legacyBackfill(user *User) {
 		Str("portal_jid", portal.Key.JID.String()).
 		Str("action", "legacy backfill").
 		Logger()
+	conv := user.bridge.DB.HistorySync.GetConversation(user.MXID, portal.Key)
 	messages := user.bridge.DB.HistorySync.GetMessagesBetween(user.MXID, portal.Key.JID.String(), nil, nil, portal.bridge.Config.Bridge.HistorySync.MessageCount)
 	log.Debug().Int("message_count", len(messages)).Msg("Got messages to backfill from database")
 	for i := len(messages) - 1; i >= 0; i-- {
@@ -187,8 +188,16 @@ func (portal *Portal) legacyBackfill(user *User) {
 		}
 		portal.handleMessage(user, msgEvt)
 	}
+	if conv != nil {
+		isUnread := conv.MarkedAsUnread || conv.UnreadCount > 0
+		isTooOld := user.bridge.Config.Bridge.HistorySync.UnreadHoursThreshold > 0 && conv.LastMessageTimestamp.Before(time.Now().Add(time.Duration(-user.bridge.Config.Bridge.HistorySync.UnreadHoursThreshold)*time.Hour))
+		shouldMarkAsRead := !isUnread || isTooOld
+		if shouldMarkAsRead {
+			user.markSelfReadFull(portal)
+		}
+	}
 	log.Debug().Msg("Backfill complete, deleting leftover messages from database")
-	user.bridge.DB.HistorySync.DeleteAllMessagesForPortal(user.MXID, portal.Key)
+	user.bridge.DB.HistorySync.DeleteConversation(user.MXID, portal.Key.JID.String())
 }
 
 func (user *User) dailyMediaRequestLoop() {
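The mark-as-read check added at the end of legacyBackfill can be read on its own: a chat that is not unread, or whose last message is older than the configured unread hours threshold, gets marked as read on the Matrix side. Below is a rough standalone restatement of that check; the function name, parameter types, and sample values are illustrative, not anything from the commit, which reads conv and the config directly.

// Standalone sketch of the mark-as-read decision added to legacyBackfill.
package main

import (
	"fmt"
	"time"
)

// shouldMarkAsRead reports whether the freshly backfilled chat should be
// marked as read (the real code then calls user.markSelfReadFull).
func shouldMarkAsRead(markedAsUnread bool, unreadCount uint32, lastMessage time.Time, unreadHoursThreshold int) bool {
	// The chat counts as unread if WhatsApp flagged it or it has unread messages.
	isUnread := markedAsUnread || unreadCount > 0
	// A threshold of 0 disables the age check; otherwise chats whose last
	// message is older than the threshold are treated as read anyway.
	isTooOld := unreadHoursThreshold > 0 &&
		lastMessage.Before(time.Now().Add(time.Duration(-unreadHoursThreshold)*time.Hour))
	return !isUnread || isTooOld
}

func main() {
	lastWeek := time.Now().Add(-7 * 24 * time.Hour)
	// Unread chat, but the last message is older than a 48-hour threshold,
	// so it is marked as read anyway.
	fmt.Println(shouldMarkAsRead(false, 3, lastWeek, 48)) // true
	// Unread chat with a recent message and the same threshold stays unread.
	fmt.Println(shouldMarkAsRead(true, 0, time.Now(), 48)) // false
}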