2020-02-03 00:19:58 +01:00
|
|
|
// Copyright 2020 The Gitea Authors. All rights reserved.
|
2022-11-27 19:20:29 +01:00
|
|
|
// SPDX-License-Identifier: MIT
|
2020-02-03 00:19:58 +01:00
|
|
|
|
|
|
|
package queue
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2022-03-31 19:01:43 +02:00
|
|
|
"runtime/pprof"
|
2020-02-03 00:19:58 +01:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"code.gitea.io/gitea/modules/log"
|
|
|
|
)
|
|
|
|
|
|
|
|
// PersistableChannelUniqueQueueType is the Type identifier registered for the
// persistable channel unique queue implementation.
const PersistableChannelUniqueQueueType Type = "unique-persistable-channel"
|
|
|
|
|
|
|
|
// PersistableChannelUniqueQueueConfiguration is the configuration for a PersistableChannelUniqueQueue
type PersistableChannelUniqueQueueConfiguration struct {
	Name         string        // Base queue name; "-channel" and "-level" suffixed names are derived from it
	DataDir      string        // Directory used by the persistent level queue backend
	BatchLength  int           // Number of items handed to the handler per batch
	QueueLength  int           // Capacity of the in-memory channel queue
	Timeout      time.Duration // Timeout between retries when creating the level queue lazily
	MaxAttempts  int           // Maximum attempts for the lazy level-queue creation
	Workers      int           // Initial number of workers for the channel queue
	MaxWorkers   int           // Upper bound on channel-queue workers (including boosted ones)
	BlockTimeout time.Duration // How long a push may block before workers are boosted
	BoostTimeout time.Duration // How long boosted workers remain active
	BoostWorkers int           // Number of additional workers added per boost
}
|
|
|
|
|
|
|
|
// PersistableChannelUniqueQueue wraps a channel queue and level queue together
//
// Please note that this Queue does not guarantee that a particular
// task cannot be processed twice or more at the same time. Uniqueness is
// only guaranteed whilst the task is waiting in the queue.
type PersistableChannelUniqueQueue struct {
	channelQueue *ChannelUniqueQueue // in-memory queue used while the process is running
	delayedStarter                   // embeds the persistent level queue (q.internal) and its lazy-start machinery
	lock   sync.Mutex                // guards q.internal during lazy creation and shutdown
	closed chan struct{}             // closed when Shutdown begins; pushes are then redirected to the level queue
}
|
|
|
|
|
|
|
|
// NewPersistableChannelUniqueQueue creates a wrapped batched channel queue with persistable level queue backend when shutting down
|
|
|
|
// This differs from a wrapped queue in that the persistent queue is only used to persist at shutdown/terminate
|
|
|
|
func NewPersistableChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
|
|
|
|
configInterface, err := toConfig(PersistableChannelUniqueQueueConfiguration{}, cfg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
config := configInterface.(PersistableChannelUniqueQueueConfiguration)
|
|
|
|
|
2022-01-22 22:22:14 +01:00
|
|
|
queue := &PersistableChannelUniqueQueue{
|
|
|
|
closed: make(chan struct{}),
|
|
|
|
}
|
|
|
|
|
|
|
|
wrappedHandle := func(data ...Data) (failed []Data) {
|
|
|
|
for _, unhandled := range handle(data...) {
|
|
|
|
if fail := queue.PushBack(unhandled); fail != nil {
|
|
|
|
failed = append(failed, fail)
|
|
|
|
}
|
|
|
|
}
|
2022-06-20 12:02:49 +02:00
|
|
|
return failed
|
2022-01-22 22:22:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
channelUniqueQueue, err := NewChannelUniqueQueue(wrappedHandle, ChannelUniqueQueueConfiguration{
|
2020-02-03 00:19:58 +01:00
|
|
|
WorkerPoolConfiguration: WorkerPoolConfiguration{
|
|
|
|
QueueLength: config.QueueLength,
|
|
|
|
BatchLength: config.BatchLength,
|
|
|
|
BlockTimeout: config.BlockTimeout,
|
|
|
|
BoostTimeout: config.BoostTimeout,
|
|
|
|
BoostWorkers: config.BoostWorkers,
|
|
|
|
MaxWorkers: config.MaxWorkers,
|
2022-03-31 19:01:43 +02:00
|
|
|
Name: config.Name + "-channel",
|
2020-02-03 00:19:58 +01:00
|
|
|
},
|
|
|
|
Workers: config.Workers,
|
|
|
|
}, exemplar)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// the level backend only needs temporary workers to catch up with the previously dropped work
|
|
|
|
levelCfg := LevelUniqueQueueConfiguration{
|
|
|
|
ByteFIFOQueueConfiguration: ByteFIFOQueueConfiguration{
|
|
|
|
WorkerPoolConfiguration: WorkerPoolConfiguration{
|
|
|
|
QueueLength: config.QueueLength,
|
|
|
|
BatchLength: config.BatchLength,
|
2021-05-02 09:22:30 +02:00
|
|
|
BlockTimeout: 1 * time.Second,
|
|
|
|
BoostTimeout: 5 * time.Minute,
|
|
|
|
BoostWorkers: 1,
|
|
|
|
MaxWorkers: 5,
|
2022-03-31 19:01:43 +02:00
|
|
|
Name: config.Name + "-level",
|
2020-02-03 00:19:58 +01:00
|
|
|
},
|
2021-05-02 09:22:30 +02:00
|
|
|
Workers: 0,
|
2020-02-03 00:19:58 +01:00
|
|
|
},
|
|
|
|
DataDir: config.DataDir,
|
|
|
|
}
|
|
|
|
|
2022-01-22 22:22:14 +01:00
|
|
|
queue.channelQueue = channelUniqueQueue.(*ChannelUniqueQueue)
|
2020-02-03 00:19:58 +01:00
|
|
|
|
2022-01-22 22:22:14 +01:00
|
|
|
levelQueue, err := NewLevelUniqueQueue(func(data ...Data) []Data {
|
2020-02-03 00:19:58 +01:00
|
|
|
for _, datum := range data {
|
|
|
|
err := queue.Push(datum)
|
|
|
|
if err != nil && err != ErrAlreadyInQueue {
|
|
|
|
log.Error("Unable push to channelled queue: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2022-01-22 22:22:14 +01:00
|
|
|
return nil
|
2020-02-03 00:19:58 +01:00
|
|
|
}, levelCfg, exemplar)
|
|
|
|
if err == nil {
|
|
|
|
queue.delayedStarter = delayedStarter{
|
|
|
|
internal: levelQueue.(*LevelUniqueQueue),
|
|
|
|
name: config.Name,
|
|
|
|
}
|
|
|
|
|
|
|
|
_ = GetManager().Add(queue, PersistableChannelUniqueQueueType, config, exemplar)
|
|
|
|
return queue, nil
|
|
|
|
}
|
|
|
|
if IsErrInvalidConfiguration(err) {
|
|
|
|
// Retrying ain't gonna make this any better...
|
|
|
|
return nil, ErrInvalidConfiguration{cfg: cfg}
|
|
|
|
}
|
|
|
|
|
|
|
|
queue.delayedStarter = delayedStarter{
|
|
|
|
cfg: levelCfg,
|
|
|
|
underlying: LevelUniqueQueueType,
|
|
|
|
timeout: config.Timeout,
|
|
|
|
maxAttempts: config.MaxAttempts,
|
|
|
|
name: config.Name,
|
|
|
|
}
|
|
|
|
_ = GetManager().Add(queue, PersistableChannelUniqueQueueType, config, exemplar)
|
|
|
|
return queue, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Name returns the name of this queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) Name() string {
|
|
|
|
return q.delayedStarter.name
|
|
|
|
}
|
|
|
|
|
|
|
|
// Push will push the indexer data to queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) Push(data Data) error {
|
|
|
|
return q.PushFunc(data, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// PushFunc will push the indexer data to queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) PushFunc(data Data, fn func() error) error {
|
|
|
|
select {
|
|
|
|
case <-q.closed:
|
|
|
|
return q.internal.(UniqueQueue).PushFunc(data, fn)
|
|
|
|
default:
|
2021-05-15 16:22:26 +02:00
|
|
|
return q.channelQueue.PushFunc(data, fn)
|
2020-02-03 00:19:58 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-22 22:22:14 +01:00
|
|
|
// PushBack will push the indexer data to queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) PushBack(data Data) error {
|
|
|
|
select {
|
|
|
|
case <-q.closed:
|
|
|
|
if pbr, ok := q.internal.(PushBackable); ok {
|
|
|
|
return pbr.PushBack(data)
|
|
|
|
}
|
|
|
|
return q.internal.Push(data)
|
|
|
|
default:
|
|
|
|
return q.channelQueue.Push(data)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-03 00:19:58 +01:00
|
|
|
// Has will test if the queue has the data
|
|
|
|
func (q *PersistableChannelUniqueQueue) Has(data Data) (bool, error) {
|
|
|
|
// This is more difficult...
|
2021-05-15 16:22:26 +02:00
|
|
|
has, err := q.channelQueue.Has(data)
|
2020-02-03 00:19:58 +01:00
|
|
|
if err != nil || has {
|
|
|
|
return has, err
|
|
|
|
}
|
2021-02-13 20:02:09 +01:00
|
|
|
q.lock.Lock()
|
|
|
|
defer q.lock.Unlock()
|
|
|
|
if q.internal == nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
2020-02-03 00:19:58 +01:00
|
|
|
return q.internal.(UniqueQueue).Has(data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Run starts to run the queue
|
2021-05-15 16:22:26 +02:00
|
|
|
func (q *PersistableChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) {
|
2022-03-31 19:01:43 +02:00
|
|
|
pprof.SetGoroutineLabels(q.channelQueue.baseCtx)
|
2020-02-03 00:19:58 +01:00
|
|
|
log.Debug("PersistableChannelUniqueQueue: %s Starting", q.delayedStarter.name)
|
|
|
|
|
|
|
|
q.lock.Lock()
|
|
|
|
if q.internal == nil {
|
2022-01-22 22:22:14 +01:00
|
|
|
err := q.setInternal(atShutdown, func(data ...Data) []Data {
|
2020-02-03 00:19:58 +01:00
|
|
|
for _, datum := range data {
|
|
|
|
err := q.Push(datum)
|
|
|
|
if err != nil && err != ErrAlreadyInQueue {
|
|
|
|
log.Error("Unable push to channelled queue: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2022-01-22 22:22:14 +01:00
|
|
|
return nil
|
2021-05-15 16:22:26 +02:00
|
|
|
}, q.channelQueue.exemplar)
|
2020-02-03 00:19:58 +01:00
|
|
|
q.lock.Unlock()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal("Unable to create internal queue for %s Error: %v", q.Name(), err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
q.lock.Unlock()
|
|
|
|
}
|
2021-05-15 16:22:26 +02:00
|
|
|
atShutdown(q.Shutdown)
|
|
|
|
atTerminate(q.Terminate)
|
|
|
|
_ = q.channelQueue.AddWorkers(q.channelQueue.workers, 0)
|
2020-02-03 00:19:58 +01:00
|
|
|
|
2021-05-15 16:22:26 +02:00
|
|
|
if luq, ok := q.internal.(*LevelUniqueQueue); ok && luq.ByteFIFOUniqueQueue.byteFIFO.Len(luq.shutdownCtx) != 0 {
|
|
|
|
// Just run the level queue - we shut it down once it's flushed
|
|
|
|
go q.internal.Run(func(_ func()) {}, func(_ func()) {})
|
|
|
|
go func() {
|
|
|
|
_ = q.internal.Flush(0)
|
2021-06-05 16:23:22 +02:00
|
|
|
log.Debug("LevelUniqueQueue: %s flushed so shutting down", q.internal.(*LevelUniqueQueue).Name())
|
2021-05-15 16:22:26 +02:00
|
|
|
q.internal.(*LevelUniqueQueue).Shutdown()
|
|
|
|
GetManager().Remove(q.internal.(*LevelUniqueQueue).qid)
|
|
|
|
}()
|
|
|
|
} else {
|
|
|
|
log.Debug("PersistableChannelUniqueQueue: %s Skipping running the empty level queue", q.delayedStarter.name)
|
|
|
|
q.internal.(*LevelUniqueQueue).Shutdown()
|
|
|
|
GetManager().Remove(q.internal.(*LevelUniqueQueue).qid)
|
|
|
|
}
|
2020-02-03 00:19:58 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Flush flushes the queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) Flush(timeout time.Duration) error {
|
2021-05-15 16:22:26 +02:00
|
|
|
return q.channelQueue.Flush(timeout)
|
|
|
|
}
|
|
|
|
|
|
|
|
// FlushWithContext flushes the queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) FlushWithContext(ctx context.Context) error {
|
|
|
|
return q.channelQueue.FlushWithContext(ctx)
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsEmpty checks if a queue is empty
|
|
|
|
func (q *PersistableChannelUniqueQueue) IsEmpty() bool {
|
|
|
|
return q.channelQueue.IsEmpty()
|
2020-02-03 00:19:58 +01:00
|
|
|
}
|
|
|
|
|
2022-01-24 23:54:35 +01:00
|
|
|
// IsPaused will return if the pool or queue is paused
|
|
|
|
func (q *PersistableChannelUniqueQueue) IsPaused() bool {
|
|
|
|
return q.channelQueue.IsPaused()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pause will pause the pool or queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) Pause() {
|
|
|
|
q.channelQueue.Pause()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Resume will resume the pool or queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) Resume() {
|
|
|
|
q.channelQueue.Resume()
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsPausedIsResumed will return a bool indicating if the pool or queue is paused and a channel that will be closed when it is resumed
|
|
|
|
func (q *PersistableChannelUniqueQueue) IsPausedIsResumed() (paused, resumed <-chan struct{}) {
|
|
|
|
return q.channelQueue.IsPausedIsResumed()
|
|
|
|
}
|
|
|
|
|
2020-02-03 00:19:58 +01:00
|
|
|
// Shutdown processing this queue
// Idempotent: a second call returns immediately. On the first call it shuts
// down the level queue, closes q.closed (redirecting future pushes to the
// level queue), cancels and waits for both worker pools, then drains any
// data still buffered in the channel queue into the persistent level queue.
func (q *PersistableChannelUniqueQueue) Shutdown() {
	log.Trace("PersistableChannelUniqueQueue: %s Shutting down", q.delayedStarter.name)
	q.lock.Lock()
	select {
	case <-q.closed:
		// Already shut down - nothing to do.
		q.lock.Unlock()
		return
	default:
		if q.internal != nil {
			q.internal.(*LevelUniqueQueue).Shutdown()
		}
		close(q.closed)
		q.lock.Unlock()
	}

	log.Trace("PersistableChannelUniqueQueue: %s Cancelling pools", q.delayedStarter.name)
	// NOTE(review): q.internal is dereferenced unconditionally below even
	// though the branch above guarded it with a nil check - presumably Run
	// is always called before Shutdown so q.internal is set; verify.
	q.internal.(*LevelUniqueQueue).baseCtxCancel()
	q.channelQueue.baseCtxCancel()
	log.Trace("PersistableChannelUniqueQueue: %s Waiting til done", q.delayedStarter.name)
	q.channelQueue.Wait()
	q.internal.(*LevelUniqueQueue).Wait()
	// Redirect all remaining data in the chan to the internal channel
	close(q.channelQueue.dataChan)
	log.Trace("PersistableChannelUniqueQueue: %s Redirecting remaining data", q.delayedStarter.name)
	for data := range q.channelQueue.dataChan {
		_ = q.internal.Push(data)
	}
	log.Trace("PersistableChannelUniqueQueue: %s Done Redirecting remaining data", q.delayedStarter.name)

	log.Debug("PersistableChannelUniqueQueue: %s Shutdown", q.delayedStarter.name)
}
|
|
|
|
|
|
|
|
// Terminate this queue and close the queue
|
|
|
|
func (q *PersistableChannelUniqueQueue) Terminate() {
|
|
|
|
log.Trace("PersistableChannelUniqueQueue: %s Terminating", q.delayedStarter.name)
|
|
|
|
q.Shutdown()
|
|
|
|
q.lock.Lock()
|
|
|
|
defer q.lock.Unlock()
|
|
|
|
if q.internal != nil {
|
|
|
|
q.internal.(*LevelUniqueQueue).Terminate()
|
|
|
|
}
|
2022-03-31 19:01:43 +02:00
|
|
|
q.channelQueue.baseCtxFinished()
|
2020-02-03 00:19:58 +01:00
|
|
|
log.Debug("PersistableChannelUniqueQueue: %s Terminated", q.delayedStarter.name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// init registers the persistable channel unique queue constructor in the
// package-level queue type registry.
func init() {
	queuesMap[PersistableChannelUniqueQueueType] = NewPersistableChannelUniqueQueue
}
|