Mirror of https://github.com/go-gitea/gitea, synced 2024-11-21 21:01:08 +01:00
Improve testing and try to fix MySQL hanging (#32515)
With some CI fine-tuning of the `run tests` step, the SQLite and MSSQL jobs now complete in about 12-13 minutes (previously more than 14), and the MySQL job completes in about 18 minutes (previously about 23, sometimes more than 30).

Major changes:

1. Use tmpfs for MySQL storage.
2. Run `make test-mysql` instead of `make integration-test-coverage`, because the code coverage is not really used at the moment.
3. Refactor testlogger to make it more reliable and able to report the stacktrace of stuck tests.
4. Do not requeue failed items when a queue is being flushed (failed items would keep failing and prevent the flush from completing).
5. Reduce the file sizes used for testing.
6. Use a math/rand ChaCha8 stream instead of crypto/rand for test data (for testing purposes only); a sketch of the pattern follows below the commit metadata.
7. No need to `DeleteRepository` in `TestLinguist`.
8. Other related refactoring to make the code easier to maintain.
This commit is contained in:
parent a0c0cb3a2c
commit ecbb03dc6d
15 changed files with 201 additions and 246 deletions
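For change 6, the point is that test fixtures do not need cryptographically secure randomness: a deterministic ChaCha8 stream from `math/rand/v2` is much cheaper than `crypto/rand` and is enough to defeat git/LFS deduplication in tests. Below is a minimal, hedged sketch of the pattern used in the diff; the helper name and the standalone file handling are illustrative, not the actual Gitea helper (which writes into a temp file inside the test repository). Requires Go 1.23+ for `ChaCha8.Read`.

```go
package main

import (
	"io"
	mathRand "math/rand/v2"
	"os"
)

// writeRandomTestFile fills a file with pseudo-random bytes.
// A zero-seeded ChaCha8 stream is deterministic and fast, which is
// fine for test data (do not use this for anything security-related).
func writeRandomTestFile(path string, size int64) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	var seed [32]byte // zero seed: reproducible test content
	rander := mathRand.NewChaCha8(seed)
	_, err = io.CopyN(f, rander, size) // *ChaCha8 implements io.Reader
	return err
}
```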
.github/workflows/pull-db-tests.yml (vendored): 10 changes

@@ -154,12 +154,15 @@ jobs:
     runs-on: ubuntu-latest
     services:
       mysql:
-        image: mysql:8.0
+        # the bitnami mysql image has more options than the official one, it's easier to customize
+        image: bitnami/mysql:8.0
         env:
-          MYSQL_ALLOW_EMPTY_PASSWORD: true
+          ALLOW_EMPTY_PASSWORD: true
           MYSQL_DATABASE: testgitea
         ports:
           - "3306:3306"
+        options: >-
+          --mount type=tmpfs,destination=/bitnami/mysql/data
       elasticsearch:
         image: elasticsearch:7.5.0
         env:
@@ -188,7 +191,8 @@ jobs:
       - name: run migration tests
        run: make test-mysql-migration
       - name: run tests
-        run: make integration-test-coverage
+        # run: make integration-test-coverage (at the moment, no coverage is really handled)
+        run: make test-mysql
        env:
          TAGS: bindata
          RACE_ENABLED: true
@@ -15,7 +15,6 @@ import (
 	"code.gitea.io/gitea/models/unittest"
 	"code.gitea.io/gitea/modules/base"
 	"code.gitea.io/gitea/modules/git"
-	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/testlogger"
 
@@ -91,12 +90,11 @@ func PrepareTestEnv(t *testing.T, skip int, syncModels ...any) (*xorm.Engine, fu
 }
 
 func MainTest(m *testing.M) {
-	log.RegisterEventWriter("test", testlogger.NewTestLoggerWriter)
+	testlogger.Init()
 
 	giteaRoot := base.SetupGiteaRoot()
 	if giteaRoot == "" {
-		fmt.Println("Environment variable $GITEA_ROOT not set")
-		os.Exit(1)
+		testlogger.Fatalf("Environment variable $GITEA_ROOT not set\n")
 	}
 	giteaBinary := "gitea"
 	if runtime.GOOS == "windows" {
@@ -104,8 +102,7 @@ func MainTest(m *testing.M) {
 	}
 	setting.AppPath = filepath.Join(giteaRoot, giteaBinary)
 	if _, err := os.Stat(setting.AppPath); err != nil {
-		fmt.Printf("Could not find gitea binary at %s\n", setting.AppPath)
-		os.Exit(1)
+		testlogger.Fatalf("Could not find gitea binary at %s\n", setting.AppPath)
 	}
 
 	giteaConf := os.Getenv("GITEA_CONF")
@@ -122,8 +119,7 @@ func MainTest(m *testing.M) {
 
 	tmpDataPath, err := os.MkdirTemp("", "data")
 	if err != nil {
-		fmt.Printf("Unable to create temporary data path %v\n", err)
-		os.Exit(1)
+		testlogger.Fatalf("Unable to create temporary data path %v\n", err)
 	}
 
 	setting.CustomPath = filepath.Join(setting.AppWorkPath, "custom")
@@ -131,8 +127,7 @@ func MainTest(m *testing.M) {
 
 	unittest.InitSettings()
 	if err = git.InitFull(context.Background()); err != nil {
-		fmt.Printf("Unable to InitFull: %v\n", err)
-		os.Exit(1)
+		testlogger.Fatalf("Unable to InitFull: %v\n", err)
 	}
 	setting.LoadDBSetting()
 	setting.InitLoggersForTest()
@@ -86,6 +86,8 @@ type ColoredValue struct {
 	colors []ColorAttribute
 }
 
+var _ fmt.Formatter = (*ColoredValue)(nil)
+
 func (c *ColoredValue) Format(f fmt.State, verb rune) {
 	_, _ = f.Write(ColorBytes(c.colors...))
 	s := fmt.Sprintf(fmt.FormatString(f, verb), c.v)
@@ -93,6 +95,10 @@ func (c *ColoredValue) Format(f fmt.State, verb rune) {
 	_, _ = f.Write(resetBytes)
 }
 
+func (c *ColoredValue) Value() any {
+	return c.v
+}
+
 func NewColoredValue(v any, color ...ColorAttribute) *ColoredValue {
 	return &ColoredValue{v: v, colors: color}
 }
@@ -23,7 +23,7 @@ var (
 )
 
 func init() {
-	unhandledItemRequeueDuration.Store(int64(5 * time.Second))
+	unhandledItemRequeueDuration.Store(int64(time.Second))
 }
 
 // workerGroup is a group of workers to work with a WorkerPoolQueue
@@ -104,7 +104,12 @@ func (q *WorkerPoolQueue[T]) doWorkerHandle(batch []T) {
 	// if none of the items were handled, it should back-off for a few seconds
 	// in this case the handler (eg: document indexer) may have encountered some errors/failures
 	if len(unhandled) == len(batch) && unhandledItemRequeueDuration.Load() != 0 {
+		if q.isFlushing.Load() {
+			return // do not requeue items when flushing, since all items failed, requeue them will continue failing.
+		}
 		log.Error("Queue %q failed to handle batch of %d items, backoff for a few seconds", q.GetName(), len(batch))
+		// TODO: ideally it shouldn't "sleep" here (blocks the worker, then blocks flush).
+		// It could debounce the requeue operation, and try to requeue the items in the future.
 		select {
 		case <-q.ctxRun.Done():
 		case <-time.After(time.Duration(unhandledItemRequeueDuration.Load())):
@@ -193,6 +198,9 @@ func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
 // doFlush flushes the queue: it tries to read all items from the queue and handles them.
 // It is for testing purpose only. It's not designed to work for a cluster.
 func (q *WorkerPoolQueue[T]) doFlush(wg *workerGroup[T], flush flushType) {
+	q.isFlushing.Store(true)
+	defer q.isFlushing.Store(false)
+
 	log.Debug("Queue %q starts flushing", q.GetName())
 	defer log.Debug("Queue %q finishes flushing", q.GetName())
 
@@ -236,6 +244,9 @@ loop:
 	emptyCounter := 0
 	for {
 		select {
+		case <-q.ctxRun.Done():
+			log.Debug("Queue %q is shutting down", q.GetName())
+			return
 		case data, dataOk := <-wg.popItemChan:
 			if !dataOk {
 				return
@@ -251,9 +262,6 @@ loop:
 				log.Error("Failed to pop item from queue %q (doFlush): %v", q.GetName(), err)
 			}
 			return
-		case <-q.ctxRun.Done():
-			log.Debug("Queue %q is shutting down", q.GetName())
-			return
 		case <-time.After(20 * time.Millisecond):
 			// There is no reliable way to make sure all queue items are consumed by the Flush, there always might be some items stored in some buffers/temp variables.
 			// If we run Gitea in a cluster, we can even not guarantee all items are consumed in a deterministic instance.
@@ -331,6 +339,15 @@ func (q *WorkerPoolQueue[T]) doRun() {
 	var batchDispatchC <-chan time.Time = infiniteTimerC
 	for {
 		select {
+		case flush := <-q.flushChan:
+			// before flushing, it needs to try to dispatch the batch to worker first, in case there is no worker running
+			// after the flushing, there is at least one worker running, so "doFlush" could wait for workers to finish
+			// since we are already in a "flush" operation, so the dispatching function shouldn't read the flush chan.
+			q.doDispatchBatchToWorker(wg, skipFlushChan)
+			q.doFlush(wg, flush)
+		case <-q.ctxRun.Done():
+			log.Debug("Queue %q is shutting down", q.GetName())
+			return
 		case data, dataOk := <-wg.popItemChan:
 			if !dataOk {
 				return
@@ -349,20 +366,11 @@ func (q *WorkerPoolQueue[T]) doRun() {
 		case <-batchDispatchC:
 			batchDispatchC = infiniteTimerC
 			q.doDispatchBatchToWorker(wg, q.flushChan)
-		case flush := <-q.flushChan:
-			// before flushing, it needs to try to dispatch the batch to worker first, in case there is no worker running
-			// after the flushing, there is at least one worker running, so "doFlush" could wait for workers to finish
-			// since we are already in a "flush" operation, so the dispatching function shouldn't read the flush chan.
-			q.doDispatchBatchToWorker(wg, skipFlushChan)
-			q.doFlush(wg, flush)
 		case err := <-wg.popItemErr:
 			if !q.isCtxRunCanceled() {
 				log.Error("Failed to pop item from queue %q (doRun): %v", q.GetName(), err)
 			}
 			return
-		case <-q.ctxRun.Done():
-			log.Debug("Queue %q is shutting down", q.GetName())
-			return
 		}
 	}
 }
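The queue change above is the "do not requeue failed items while flushing" fix: `doFlush` now marks the queue as flushing via an atomic flag, and the batch handler drops the back-off/requeue path while that flag is set, so a flush can finish even when every item in a batch keeps failing. Below is a minimal, self-contained sketch of that guard pattern, not the actual `WorkerPoolQueue` (which is generic and has more state); the type and method names here are illustrative.

```go
package main

import (
	"log"
	"sync/atomic"
	"time"
)

// flushGuardQueue sketches the flush-aware back-off guard.
type flushGuardQueue struct {
	name       string
	isFlushing atomic.Bool
}

// afterBatch decides what to do when an entire batch failed.
func (q *flushGuardQueue) afterBatch(batch, unhandled []string) {
	if len(unhandled) != len(batch) {
		return // at least one item succeeded, nothing special to do
	}
	if q.isFlushing.Load() {
		// while flushing, requeueing items that keep failing would
		// prevent the flush from ever completing, so just drop the back-off
		return
	}
	log.Printf("queue %q failed to handle batch of %d items, backing off", q.name, len(batch))
	time.Sleep(time.Second) // simplified stand-in for the requeue back-off
}

// flush wraps the flush body so the flag is always cleared again.
func (q *flushGuardQueue) flush(doFlush func()) {
	q.isFlushing.Store(true)
	defer q.isFlushing.Store(false)
	doFlush()
}
```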
@@ -32,8 +32,9 @@ type WorkerPoolQueue[T any] struct {
 	baseConfig *BaseConfig
 	baseQueue  baseQueue
 
 	batchChan chan []T
 	flushChan chan flushType
+	isFlushing atomic.Bool
 
 	batchLength int
 	workerNum   int
@@ -19,9 +19,10 @@ import (
 )
 
 var (
 	prefix string
-	SlowTest  = 10 * time.Second
-	SlowFlush = 5 * time.Second
+	TestTimeout   = 10 * time.Minute
+	TestSlowRun   = 10 * time.Second
+	TestSlowFlush = 1 * time.Second
 )
 
 var WriterCloser = &testLoggerWriterCloser{}
@@ -89,79 +90,97 @@ func (w *testLoggerWriterCloser) Reset() {
 	w.Unlock()
 }
 
+// Printf takes a format and args and prints the string to os.Stdout
+func Printf(format string, args ...any) {
+	if !log.CanColorStdout {
+		for i := 0; i < len(args); i++ {
+			if c, ok := args[i].(*log.ColoredValue); ok {
+				args[i] = c.Value()
+			}
+		}
+	}
+	_, _ = fmt.Fprintf(os.Stdout, format, args...)
+}
+
 // PrintCurrentTest prints the current test to os.Stdout
 func PrintCurrentTest(t testing.TB, skip ...int) func() {
 	t.Helper()
-	start := time.Now()
+	runStart := time.Now()
 	actualSkip := util.OptionalArg(skip) + 1
 	_, filename, line, _ := runtime.Caller(actualSkip)
 
-	if log.CanColorStdout {
-		_, _ = fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line)
-	} else {
-		_, _ = fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line)
-	}
+	Printf("=== %s (%s:%d)\n", log.NewColoredValue(t.Name()), strings.TrimPrefix(filename, prefix), line)
 	WriterCloser.pushT(t)
-	return func() {
-		took := time.Since(start)
-		if took > SlowTest {
-			if log.CanColorStdout {
-				_, _ = fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgYellow)), fmt.Formatter(log.NewColoredValue(took, log.Bold, log.FgYellow)))
-			} else {
-				_, _ = fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", t.Name(), took)
-			}
-		}
-		timer := time.AfterFunc(SlowFlush, func() {
-			if log.CanColorStdout {
-				_, _ = fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), SlowFlush)
-			} else {
-				_, _ = fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", t.Name(), SlowFlush)
-			}
-		})
+	timeoutChecker := time.AfterFunc(TestTimeout, func() {
+		l := 128 * 1024
+		var stack []byte
+		for {
+			stack = make([]byte, l)
+			n := runtime.Stack(stack, true)
+			if n <= l {
+				stack = stack[:n]
+				break
+			}
+			l = n
+		}
+		Printf("!!! %s ... timeout: %v ... stacktrace:\n%s\n\n", log.NewColoredValue(t.Name(), log.Bold, log.FgRed), TestTimeout, string(stack))
+	})
+	return func() {
+		flushStart := time.Now()
+		slowFlushChecker := time.AfterFunc(TestSlowFlush, func() {
+			Printf("+++ %s ... still flushing after %v ...\n", log.NewColoredValue(t.Name(), log.Bold, log.FgRed), TestSlowFlush)
+		})
 		if err := queue.GetManager().FlushAll(context.Background(), -1); err != nil {
 			t.Errorf("Flushing queues failed with error %v", err)
 		}
-		timer.Stop()
-		flushTook := time.Since(start) - took
-		if flushTook > SlowFlush {
-			if log.CanColorStdout {
-				_, _ = fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), fmt.Formatter(log.NewColoredValue(flushTook, log.Bold, log.FgRed)))
-			} else {
-				_, _ = fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", t.Name(), flushTook)
-			}
-		}
+		slowFlushChecker.Stop()
+		timeoutChecker.Stop()
+
+		runDuration := time.Since(runStart)
+		flushDuration := time.Since(flushStart)
+		if runDuration > TestSlowRun {
+			Printf("+++ %s is a slow test (run: %v, flush: %v)\n", log.NewColoredValue(t.Name(), log.Bold, log.FgYellow), runDuration, flushDuration)
+		}
 		WriterCloser.popT()
 	}
 }
 
-// Printf takes a format and args and prints the string to os.Stdout
-func Printf(format string, args ...any) {
-	if log.CanColorStdout {
-		for i := 0; i < len(args); i++ {
-			args[i] = log.NewColoredValue(args[i])
-		}
-	}
-	_, _ = fmt.Fprintf(os.Stdout, "\t"+format, args...)
-}
-
 // TestLogEventWriter is a logger which will write to the testing log
 type TestLogEventWriter struct {
 	*log.EventWriterBaseImpl
 }
 
-// NewTestLoggerWriter creates a TestLogEventWriter as a log.LoggerProvider
-func NewTestLoggerWriter(name string, mode log.WriterMode) log.EventWriter {
+// newTestLoggerWriter creates a TestLogEventWriter as a log.LoggerProvider
+func newTestLoggerWriter(name string, mode log.WriterMode) log.EventWriter {
 	w := &TestLogEventWriter{}
 	w.EventWriterBaseImpl = log.NewEventWriterBase(name, "test-log-writer", mode)
 	w.OutputWriteCloser = WriterCloser
 	return w
 }
 
-func init() {
+func Init() {
 	const relFilePath = "modules/testlogger/testlogger.go"
 	_, filename, _, _ := runtime.Caller(0)
 	if !strings.HasSuffix(filename, relFilePath) {
 		panic("source code file path doesn't match expected: " + relFilePath)
 	}
 	prefix = strings.TrimSuffix(filename, relFilePath)
 
+	log.RegisterEventWriter("test", newTestLoggerWriter)
+
+	duration, err := time.ParseDuration(os.Getenv("GITEA_TEST_SLOW_RUN"))
+	if err == nil && duration > 0 {
+		TestSlowRun = duration
+	}
+
+	duration, err = time.ParseDuration(os.Getenv("GITEA_TEST_SLOW_FLUSH"))
+	if err == nil && duration > 0 {
+		TestSlowFlush = duration
+	}
+}
+
+func Fatalf(format string, args ...any) {
+	Printf(format+"\n", args...)
+	os.Exit(1)
 }
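The core of the testlogger refactor above is a per-test watchdog: `PrintCurrentTest` arms a `time.AfterFunc` timer that, if the test is still running after `TestTimeout`, captures a full goroutine dump with `runtime.Stack(buf, true)` and prints it, which is what makes hanging MySQL tests report a stacktrace instead of silently stalling the CI job. Here is a minimal standalone sketch of that pattern; the function name is illustrative and the printing is simplified compared to the diff.

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

// watchForStuckTest arms a watchdog that dumps all goroutine stacks if the
// caller does not call the returned stop function before the timeout fires.
func watchForStuckTest(name string, timeout time.Duration) (stop func()) {
	checker := time.AfterFunc(timeout, func() {
		// grow the buffer until the dump of all goroutines fits
		l := 128 * 1024
		var stack []byte
		for {
			stack = make([]byte, l)
			n := runtime.Stack(stack, true)
			if n <= l {
				stack = stack[:n]
				break
			}
			l = n
		}
		fmt.Printf("!!! %s ... timeout: %v ... stacktrace:\n%s\n", name, timeout, stack)
	})
	return func() { checker.Stop() }
}
```

Typical usage in a test would be `defer watchForStuckTest(t.Name(), 10*time.Minute)()`, mirroring how the real `PrintCurrentTest` stops the checker in its returned cleanup function.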
@@ -99,18 +99,8 @@ We appreciate that some testing machines may not be very powerful and
 the default timeouts for declaring a slow test or a slow clean-up flush
 may not be appropriate.
 
-You can either:
+You can set the following environment variables:
 
-* Within the test ini file set the following section:
-
-```ini
-[integration-tests]
-SLOW_TEST = 10s ; 10s is the default value
-SLOW_FLUSH = 5S ; 5s is the default value
-```
-
-* Set the following environment variables:
-
 ```bash
-GITEA_SLOW_TEST_TIME="10s" GITEA_SLOW_FLUSH_TIME="5s" make test-sqlite
+GITEA_TEST_SLOW_RUN="10s" GITEA_TEST_SLOW_FLUSH="1s" make test-sqlite
 ```
@@ -39,11 +39,11 @@ func TestAPIGetRawFileOrLFS(t *testing.T) {
 
 		t.Run("Partial Clone", doPartialGitClone(dstPath2, u))
 
-		lfs := lfsCommitAndPushTest(t, dstPath, littleSize)[0]
+		lfs := lfsCommitAndPushTest(t, dstPath, testFileSizeSmall)[0]
 
 		reqLFS := NewRequest(t, "GET", "/api/v1/repos/user2/repo1/media/"+lfs)
 		respLFS := MakeRequestNilResponseRecorder(t, reqLFS, http.StatusOK)
-		assert.Equal(t, littleSize, respLFS.Length)
+		assert.Equal(t, testFileSizeSmall, respLFS.Length)
 
 		doAPIDeleteRepository(httpContext)
 	})
@@ -4,9 +4,10 @@
 package integration
 
 import (
-	"crypto/rand"
 	"encoding/hex"
 	"fmt"
+	"io"
+	mathRand "math/rand/v2"
 	"net/http"
 	"net/url"
 	"os"
@@ -34,8 +35,8 @@ import (
 )
 
 const (
-	littleSize = 1024 // 1K
-	bigSize    = 128 * 1024 * 1024 // 128M
+	testFileSizeSmall = 10
+	testFileSizeLarge = 10 * 1024 * 1024 // 10M
 )
 
 func TestGitGeneral(t *testing.T) {
@@ -73,8 +74,8 @@ func testGitGeneral(t *testing.T, u *url.URL) {
 
 		t.Run("Partial Clone", doPartialGitClone(dstPath2, u))
 
-		pushedFilesStandard := standardCommitAndPushTest(t, dstPath, littleSize, bigSize)
-		pushedFilesLFS := lfsCommitAndPushTest(t, dstPath, littleSize, bigSize)
+		pushedFilesStandard := standardCommitAndPushTest(t, dstPath, testFileSizeSmall, testFileSizeLarge)
+		pushedFilesLFS := lfsCommitAndPushTest(t, dstPath, testFileSizeSmall, testFileSizeLarge)
 		rawTest(t, &httpContext, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
 		mediaTest(t, &httpContext, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
 
@@ -114,8 +115,8 @@ func testGitGeneral(t *testing.T, u *url.URL) {
 
 		t.Run("Clone", doGitClone(dstPath, sshURL))
 
-		pushedFilesStandard := standardCommitAndPushTest(t, dstPath, littleSize, bigSize)
-		pushedFilesLFS := lfsCommitAndPushTest(t, dstPath, littleSize, bigSize)
+		pushedFilesStandard := standardCommitAndPushTest(t, dstPath, testFileSizeSmall, testFileSizeLarge)
+		pushedFilesLFS := lfsCommitAndPushTest(t, dstPath, testFileSizeSmall, testFileSizeLarge)
 		rawTest(t, &sshContext, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
 		mediaTest(t, &sshContext, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
 
@@ -202,14 +203,14 @@ func rawTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS s
 		// Request raw paths
 		req := NewRequest(t, "GET", path.Join("/", username, reponame, "/raw/branch/master/", little))
 		resp := session.MakeRequestNilResponseRecorder(t, req, http.StatusOK)
-		assert.Equal(t, littleSize, resp.Length)
+		assert.Equal(t, testFileSizeSmall, resp.Length)
 
 		if setting.LFS.StartServer {
 			req = NewRequest(t, "GET", path.Join("/", username, reponame, "/raw/branch/master/", littleLFS))
 			resp := session.MakeRequest(t, req, http.StatusOK)
-			assert.NotEqual(t, littleSize, resp.Body.Len())
+			assert.NotEqual(t, testFileSizeSmall, resp.Body.Len())
 			assert.LessOrEqual(t, resp.Body.Len(), 1024)
-			if resp.Body.Len() != littleSize && resp.Body.Len() <= 1024 {
+			if resp.Body.Len() != testFileSizeSmall && resp.Body.Len() <= 1024 {
 				assert.Contains(t, resp.Body.String(), lfs.MetaFileIdentifier)
 			}
 		}
@@ -217,13 +218,13 @@ func rawTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS s
 		if !testing.Short() {
 			req = NewRequest(t, "GET", path.Join("/", username, reponame, "/raw/branch/master/", big))
 			resp := session.MakeRequestNilResponseRecorder(t, req, http.StatusOK)
-			assert.Equal(t, bigSize, resp.Length)
+			assert.Equal(t, testFileSizeLarge, resp.Length)
 
 			if setting.LFS.StartServer {
 				req = NewRequest(t, "GET", path.Join("/", username, reponame, "/raw/branch/master/", bigLFS))
 				resp := session.MakeRequest(t, req, http.StatusOK)
-				assert.NotEqual(t, bigSize, resp.Body.Len())
-				if resp.Body.Len() != bigSize && resp.Body.Len() <= 1024 {
+				assert.NotEqual(t, testFileSizeLarge, resp.Body.Len())
+				if resp.Body.Len() != testFileSizeLarge && resp.Body.Len() <= 1024 {
 					assert.Contains(t, resp.Body.String(), lfs.MetaFileIdentifier)
 				}
 			}
@@ -243,21 +244,21 @@ func mediaTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS
 		// Request media paths
 		req := NewRequest(t, "GET", path.Join("/", username, reponame, "/media/branch/master/", little))
 		resp := session.MakeRequestNilResponseRecorder(t, req, http.StatusOK)
-		assert.Equal(t, littleSize, resp.Length)
+		assert.Equal(t, testFileSizeSmall, resp.Length)
 
 		req = NewRequest(t, "GET", path.Join("/", username, reponame, "/media/branch/master/", littleLFS))
 		resp = session.MakeRequestNilResponseRecorder(t, req, http.StatusOK)
-		assert.Equal(t, littleSize, resp.Length)
+		assert.Equal(t, testFileSizeSmall, resp.Length)
 
 		if !testing.Short() {
 			req = NewRequest(t, "GET", path.Join("/", username, reponame, "/media/branch/master/", big))
 			resp = session.MakeRequestNilResponseRecorder(t, req, http.StatusOK)
-			assert.Equal(t, bigSize, resp.Length)
+			assert.Equal(t, testFileSizeLarge, resp.Length)
 
 			if setting.LFS.StartServer {
 				req = NewRequest(t, "GET", path.Join("/", username, reponame, "/media/branch/master/", bigLFS))
 				resp = session.MakeRequestNilResponseRecorder(t, req, http.StatusOK)
-				assert.Equal(t, bigSize, resp.Length)
+				assert.Equal(t, testFileSizeLarge, resp.Length)
 			}
 		}
 	})
@@ -287,35 +288,19 @@ func doCommitAndPush(t *testing.T, size int, repoPath, prefix string) string {
 }
 
 func generateCommitWithNewData(size int, repoPath, email, fullName, prefix string) (string, error) {
-	// Generate random file
-	bufSize := 4 * 1024
-	if bufSize > size {
-		bufSize = size
-	}
-
-	buffer := make([]byte, bufSize)
-
 	tmpFile, err := os.CreateTemp(repoPath, prefix)
 	if err != nil {
 		return "", err
 	}
 	defer tmpFile.Close()
-	written := 0
-	for written < size {
-		n := size - written
-		if n > bufSize {
-			n = bufSize
-		}
-		_, err := rand.Read(buffer[:n])
-		if err != nil {
-			return "", err
-		}
-		n, err = tmpFile.Write(buffer[:n])
-		if err != nil {
-			return "", err
-		}
-		written += n
-	}
+	var seed [32]byte
+	rander := mathRand.NewChaCha8(seed) // for testing only, no need to seed
+	_, err = io.CopyN(tmpFile, rander, int64(size))
+	if err != nil {
+		return "", err
+	}
+	_ = tmpFile.Close()
 
 	// Commit
 	// Now here we should explicitly allow lfs filters to run
@@ -355,7 +340,7 @@ func doBranchProtectPRMerge(baseCtx *APITestContext, dstPath string) func(t *tes
 
 		// Try to push without permissions, which should fail
 		t.Run("TryPushWithoutPermissions", func(t *testing.T) {
-			_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-")
+			_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-")
 			assert.NoError(t, err)
 			doGitPushTestRepositoryFail(dstPath, "origin", "protected")
 		})
@@ -367,7 +352,7 @@ func doBranchProtectPRMerge(baseCtx *APITestContext, dstPath string) func(t *tes
 
 		// Normal push should work
 		t.Run("NormalPushWithPermissions", func(t *testing.T) {
-			_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-")
+			_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-")
 			assert.NoError(t, err)
 			doGitPushTestRepository(dstPath, "origin", "protected")
 		})
@@ -376,7 +361,7 @@ func doBranchProtectPRMerge(baseCtx *APITestContext, dstPath string) func(t *tes
 		t.Run("ForcePushWithoutForcePermissions", func(t *testing.T) {
 			t.Run("CreateDivergentHistory", func(t *testing.T) {
 				git.NewCommand(git.DefaultContext, "reset", "--hard", "HEAD~1").Run(&git.RunOpts{Dir: dstPath})
-				_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-new")
+				_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-new")
 				assert.NoError(t, err)
 			})
 			doGitPushTestRepositoryFail(dstPath, "-f", "origin", "protected")
@@ -411,7 +396,7 @@ func doBranchProtectPRMerge(baseCtx *APITestContext, dstPath string) func(t *tes
 			assert.NoError(t, err)
 		})
 		t.Run("GenerateCommit", func(t *testing.T) {
-			_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-")
+			_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-")
 			assert.NoError(t, err)
 		})
 		t.Run("PushToUnprotectedBranch", doGitPushTestRepository(dstPath, "origin", "protected:unprotected-2"))
@@ -426,7 +411,7 @@ func doBranchProtectPRMerge(baseCtx *APITestContext, dstPath string) func(t *tes
 
 		t.Run("ProtectProtectedBranchUnprotectedFilePaths", doProtectBranch(ctx, "protected", "", "", "unprotected-file-*"))
 		t.Run("GenerateCommit", func(t *testing.T) {
-			_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "unprotected-file-")
+			_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "unprotected-file-")
 			assert.NoError(t, err)
 		})
 		t.Run("PushUnprotectedFilesToProtectedBranch", doGitPushTestRepository(dstPath, "origin", "protected"))
@@ -436,7 +421,7 @@ func doBranchProtectPRMerge(baseCtx *APITestContext, dstPath string) func(t *tes
 		t.Run("CheckoutMaster", doGitCheckoutBranch(dstPath, "master"))
 		t.Run("CreateBranchForced", doGitCreateBranch(dstPath, "toforce"))
 		t.Run("GenerateCommit", func(t *testing.T) {
-			_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-")
+			_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-")
 			assert.NoError(t, err)
 		})
 		t.Run("FailToForcePushToProtectedBranch", doGitPushTestRepositoryFail(dstPath, "-f", "origin", "toforce:protected"))
@@ -649,7 +634,7 @@ func doAutoPRMerge(baseCtx *APITestContext, dstPath string) func(t *testing.T) {
 		t.Run("CheckoutProtected", doGitCheckoutBranch(dstPath, "protected"))
 		t.Run("PullProtected", doGitPull(dstPath, "origin", "protected"))
 		t.Run("GenerateCommit", func(t *testing.T) {
-			_, err := generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-")
+			_, err := generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-")
 			assert.NoError(t, err)
 		})
 		t.Run("PushToUnprotectedBranch", doGitPushTestRepository(dstPath, "origin", "protected:unprotected3"))
@@ -98,7 +98,7 @@ func TestAgitPullPush(t *testing.T) {
 		doGitCreateBranch(dstPath, "test-agit-push")
 
 		// commit 1
-		_, err = generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-")
+		_, err = generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-")
 		assert.NoError(t, err)
 
 		// push to create an agit pull request
@@ -115,7 +115,7 @@ func TestAgitPullPush(t *testing.T) {
 		assert.Equal(t, "test-description", pr.Issue.Content)
 
 		// commit 2
-		_, err = generateCommitWithNewData(littleSize, dstPath, "user2@example.com", "User Two", "branch-data-file-2-")
+		_, err = generateCommitWithNewData(testFileSizeSmall, dstPath, "user2@example.com", "User Two", "branch-data-file-2-")
 		assert.NoError(t, err)
 
 		// push 2
@@ -20,7 +20,6 @@ import (
 	"strings"
 	"sync/atomic"
 	"testing"
-	"time"
 
 	"code.gitea.io/gitea/models/auth"
 	"code.gitea.io/gitea/models/unittest"
@@ -28,7 +27,6 @@ import (
 	"code.gitea.io/gitea/modules/json"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
-	"code.gitea.io/gitea/modules/testlogger"
 	"code.gitea.io/gitea/modules/util"
 	"code.gitea.io/gitea/modules/web"
 	"code.gitea.io/gitea/routers"
@@ -90,27 +88,6 @@ func TestMain(m *testing.M) {
 	tests.InitTest(true)
 	testWebRoutes = routers.NormalRoutes()
 
-	// integration test settings...
-	if setting.CfgProvider != nil {
-		testingCfg := setting.CfgProvider.Section("integration-tests")
-		testlogger.SlowTest = testingCfg.Key("SLOW_TEST").MustDuration(testlogger.SlowTest)
-		testlogger.SlowFlush = testingCfg.Key("SLOW_FLUSH").MustDuration(testlogger.SlowFlush)
-	}
-
-	if os.Getenv("GITEA_SLOW_TEST_TIME") != "" {
-		duration, err := time.ParseDuration(os.Getenv("GITEA_SLOW_TEST_TIME"))
-		if err == nil {
-			testlogger.SlowTest = duration
-		}
-	}
-
-	if os.Getenv("GITEA_SLOW_FLUSH_TIME") != "" {
-		duration, err := time.ParseDuration(os.Getenv("GITEA_SLOW_FLUSH_TIME"))
-		if err == nil {
-			testlogger.SlowFlush = duration
-		}
-	}
-
 	os.Unsetenv("GIT_AUTHOR_NAME")
 	os.Unsetenv("GIT_AUTHOR_EMAIL")
 	os.Unsetenv("GIT_AUTHOR_DATE")
@@ -132,8 +109,6 @@ func TestMain(m *testing.M) {
 	// Instead, "No tests were found", last nonsense log is "According to the configuration, subsequent logs will not be printed to the console"
 	exitCode := m.Run()
 
-	testlogger.WriterCloser.Reset()
-
 	if err = util.RemoveAll(setting.Indexer.IssuePath); err != nil {
 		fmt.Printf("util.RemoveAll: %v\n", err)
 		os.Exit(1)
@@ -6,6 +6,7 @@ package integration
 import (
 	"context"
 	"net/url"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -19,6 +20,7 @@ import (
 	"code.gitea.io/gitea/modules/queue"
 	repo_service "code.gitea.io/gitea/services/repository"
 	files_service "code.gitea.io/gitea/services/repository/files"
+	"code.gitea.io/gitea/tests"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -218,42 +220,43 @@ func TestLinguist(t *testing.T) {
 	}
 
 	for i, c := range cases {
-		repo, err := repo_service.CreateRepository(db.DefaultContext, user, user, repo_service.CreateRepoOptions{
-			Name: "linguist-test",
-		})
-		assert.NoError(t, err)
-
-		files := []*files_service.ChangeRepoFile{
-			{
-				TreePath:      ".gitattributes",
-				ContentReader: strings.NewReader(c.GitAttributesContent),
-			},
-		}
-		files = append(files, c.FilesToAdd...)
-		for _, f := range files {
-			f.Operation = "create"
-		}
-
-		_, err = files_service.ChangeRepoFiles(git.DefaultContext, repo, user, &files_service.ChangeRepoFilesOptions{
-			Files:     files,
-			OldBranch: repo.DefaultBranch,
-			NewBranch: repo.DefaultBranch,
-		})
-		assert.NoError(t, err)
-
-		assert.NoError(t, stats.UpdateRepoIndexer(repo))
-		assert.NoError(t, queue.GetManager().FlushAll(context.Background(), 10*time.Second))
-
-		stats, err := repo_model.GetTopLanguageStats(db.DefaultContext, repo, len(c.FilesToAdd))
-		assert.NoError(t, err)
-
-		languages := make([]string, 0, len(stats))
-		for _, s := range stats {
-			languages = append(languages, s.Language)
-		}
-		assert.Equal(t, c.ExpectedLanguageOrder, languages, "case %d: unexpected language stats", i)
-
-		assert.NoError(t, repo_service.DeleteRepository(db.DefaultContext, user, repo, false))
+		t.Run("Case-"+strconv.Itoa(i), func(t *testing.T) {
+			defer tests.PrintCurrentTest(t)()
+			repo, err := repo_service.CreateRepository(db.DefaultContext, user, user, repo_service.CreateRepoOptions{
+				Name: "linguist-test-" + strconv.Itoa(i),
+			})
+			assert.NoError(t, err)
+
+			files := []*files_service.ChangeRepoFile{
+				{
+					TreePath:      ".gitattributes",
+					ContentReader: strings.NewReader(c.GitAttributesContent),
+				},
+			}
+			files = append(files, c.FilesToAdd...)
+			for _, f := range files {
+				f.Operation = "create"
+			}
+
+			_, err = files_service.ChangeRepoFiles(git.DefaultContext, repo, user, &files_service.ChangeRepoFilesOptions{
+				Files:     files,
+				OldBranch: repo.DefaultBranch,
+				NewBranch: repo.DefaultBranch,
+			})
+			assert.NoError(t, err)
+
+			assert.NoError(t, stats.UpdateRepoIndexer(repo))
+			assert.NoError(t, queue.GetManager().FlushAll(context.Background(), 10*time.Second))
+
+			stats, err := repo_model.GetTopLanguageStats(db.DefaultContext, repo, len(c.FilesToAdd))
+			assert.NoError(t, err)
+
+			languages := make([]string, 0, len(stats))
+			for _, s := range stats {
+				languages = append(languages, s.Language)
+			}
+			assert.Equal(t, c.ExpectedLanguageOrder, languages, "case %d: unexpected language stats", i)
+		})
 	}
 })
}
@@ -28,33 +28,29 @@ import (
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/testlogger"
 	"code.gitea.io/gitea/modules/util"
-	"code.gitea.io/gitea/tests"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"xorm.io/xorm"
 )
 
 var currentEngine *xorm.Engine
 
 func initMigrationTest(t *testing.T) func() {
-	log.RegisterEventWriter("test", testlogger.NewTestLoggerWriter)
+	testlogger.Init()
 
-	deferFn := tests.PrintCurrentTest(t, 2)
+	deferFn := testlogger.PrintCurrentTest(t, 2)
 	giteaRoot := base.SetupGiteaRoot()
 	if giteaRoot == "" {
-		tests.Printf("Environment variable $GITEA_ROOT not set\n")
-		os.Exit(1)
+		testlogger.Fatalf("Environment variable $GITEA_ROOT not set\n")
 	}
 	setting.AppPath = path.Join(giteaRoot, "gitea")
 	if _, err := os.Stat(setting.AppPath); err != nil {
-		tests.Printf("Could not find gitea binary at %s\n", setting.AppPath)
-		os.Exit(1)
+		testlogger.Fatalf(fmt.Sprintf("Could not find gitea binary at %s\n", setting.AppPath))
 	}
 
 	giteaConf := os.Getenv("GITEA_CONF")
 	if giteaConf == "" {
-		tests.Printf("Environment variable $GITEA_CONF not set\n")
-		os.Exit(1)
+		testlogger.Fatalf("Environment variable $GITEA_CONF not set\n")
 	} else if !path.IsAbs(giteaConf) {
 		setting.CustomConf = path.Join(giteaRoot, giteaConf)
 	} else {
@@ -123,13 +119,10 @@ func readSQLFromFile(version string) (string, error) {
 	return string(charset.MaybeRemoveBOM(bytes, charset.ConvertOpts{})), nil
 }
 
-func restoreOldDB(t *testing.T, version string) bool {
+func restoreOldDB(t *testing.T, version string) {
 	data, err := readSQLFromFile(version)
-	assert.NoError(t, err)
-	if len(data) == 0 {
-		tests.Printf("No db found to restore for %s version: %s\n", setting.Database.Type, version)
-		return false
-	}
+	require.NoError(t, err)
+	require.NotEmpty(t, data, "No data found for %s version: %s", setting.Database.Type, version)
 
 	switch {
 	case setting.Database.Type.IsSQLite3():
@@ -197,15 +190,12 @@ func restoreOldDB(t *testing.T, version string) bool {
 		db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
 			setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
 		}
-		if !assert.NoError(t, err) {
-			return false
-		}
+		require.NoError(t, err)
 		defer db.Close()
 
 		schrows, err := db.Query(fmt.Sprintf("SELECT 1 FROM information_schema.schemata WHERE schema_name = '%s'", setting.Database.Schema))
-		if !assert.NoError(t, err) || !assert.NotEmpty(t, schrows) {
-			return false
-		}
+		require.NoError(t, err)
+		require.NotEmpty(t, schrows)
 
 		if !schrows.Next() {
 			// Create and setup a DB schema
@@ -260,7 +250,6 @@ func restoreOldDB(t *testing.T, version string) bool {
 		}
 		db.Close()
 	}
-	return true
 }
 
 func wrappedMigrate(x *xorm.Engine) error {
@@ -269,11 +258,8 @@ func wrappedMigrate(x *xorm.Engine) error {
 }
 
 func doMigrationTest(t *testing.T, version string) {
-	defer tests.PrintCurrentTest(t)()
-	tests.Printf("Performing migration test for %s version: %s\n", setting.Database.Type, version)
-	if !restoreOldDB(t, version) {
-		return
-	}
+	defer testlogger.PrintCurrentTest(t)()
+	restoreOldDB(t, version)
 
 	setting.InitSQLLoggersForCli(log.INFO)
 
@@ -305,14 +291,9 @@ func TestMigrations(t *testing.T) {
 
 	dialect := setting.Database.Type
 	versions, err := availableVersions()
-	assert.NoError(t, err)
-
-	if len(versions) == 0 {
-		tests.Printf("No old database versions available to migration test for %s\n", dialect)
-		return
-	}
-
-	tests.Printf("Preparing to test %d migrations for %s\n", len(versions), dialect)
+	require.NoError(t, err)
+	require.NotEmpty(t, versions, "No old database versions available to migration test for %s", dialect)
+
 	for _, version := range versions {
 		t.Run(fmt.Sprintf("Migrate-%s-%s", dialect, version), func(t *testing.T) {
 			doMigrationTest(t, version)
@@ -209,8 +209,6 @@ func checkRecentlyPushedNewBranches(t *testing.T, session *TestSession, repoPath
 }
 
 func TestRecentlyPushedNewBranches(t *testing.T) {
-	defer tests.PrepareTestEnv(t)()
-
 	onGiteaRun(t, func(t *testing.T, u *url.URL) {
 		user1Session := loginUser(t, "user1")
 		user2Session := loginUser(t, "user2")
@@ -29,17 +29,12 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-func exitf(format string, args ...any) {
-	fmt.Printf(format+"\n", args...)
-	os.Exit(1)
-}
-
 func InitTest(requireGitea bool) {
-	log.RegisterEventWriter("test", testlogger.NewTestLoggerWriter)
+	testlogger.Init()
 
 	giteaRoot := base.SetupGiteaRoot()
 	if giteaRoot == "" {
-		exitf("Environment variable $GITEA_ROOT not set")
+		testlogger.Fatalf("Environment variable $GITEA_ROOT not set\n")
 	}
 
 	// TODO: Speedup tests that rely on the event source ticker, confirm whether there is any bug or failure.
@@ -54,7 +49,7 @@ func InitTest(requireGitea bool) {
 		}
 		setting.AppPath = filepath.Join(giteaRoot, giteaBinary)
 		if _, err := os.Stat(setting.AppPath); err != nil {
-			exitf("Could not find gitea binary at %s", setting.AppPath)
+			testlogger.Fatalf("Could not find gitea binary at %s\n", setting.AppPath)
 		}
 	}
 	giteaConf := os.Getenv("GITEA_CONF")
@@ -66,7 +61,7 @@ func InitTest(requireGitea bool) {
 		_ = os.Setenv("GITEA_CONF", giteaConf)
 		fmt.Printf("Environment variable $GITEA_CONF not set, use default: %s\n", giteaConf)
 		if !setting.EnableSQLite3 {
-			exitf(`sqlite3 requires: import _ "github.com/mattn/go-sqlite3" or -tags sqlite,sqlite_unlock_notify`)
+			testlogger.Fatalf(`sqlite3 requires: import _ "github.com/mattn/go-sqlite3" or -tags sqlite,sqlite_unlock_notify` + "\n")
 		}
 	}
 	if !filepath.IsAbs(giteaConf) {
@@ -85,7 +80,7 @@ func InitTest(requireGitea bool) {
 
 	setting.LoadDBSetting()
 	if err := storage.Init(); err != nil {
-		exitf("Init storage failed: %v", err)
+		testlogger.Fatalf("Init storage failed: %v\n", err)
 	}
 
 	switch {
@@ -258,8 +253,3 @@ func PrintCurrentTest(t testing.TB, skip ...int) func() {
 	t.Helper()
 	return testlogger.PrintCurrentTest(t, util.OptionalArg(skip)+1)
 }
-
-// Printf takes a format and args and prints the string to os.Stdout
-func Printf(format string, args ...any) {
-	testlogger.Printf(format, args...)
-}