2019-05-07 03:12:51 +02:00
|
|
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
|
|
|
// Copyright 2018 Jonas Franz. All rights reserved.
|
2022-11-27 19:20:29 +01:00
|
|
|
// SPDX-License-Identifier: MIT
|
2019-05-07 03:12:51 +02:00
|
|
|
|
|
|
|
package migrations
|
|
|
|
|
|
|
|
import (
|
2019-12-17 05:16:54 +01:00
|
|
|
"context"
|
2019-10-13 15:23:14 +02:00
|
|
|
"fmt"
|
2020-11-29 01:37:58 +01:00
|
|
|
"net"
|
|
|
|
"net/url"
|
2021-03-15 22:52:11 +01:00
|
|
|
"path/filepath"
|
2020-11-29 01:37:58 +01:00
|
|
|
"strings"
|
2019-10-13 15:23:14 +02:00
|
|
|
|
2019-05-07 03:12:51 +02:00
|
|
|
"code.gitea.io/gitea/models"
|
2021-12-10 02:27:50 +01:00
|
|
|
repo_model "code.gitea.io/gitea/models/repo"
|
2022-10-17 01:29:26 +02:00
|
|
|
system_model "code.gitea.io/gitea/models/system"
|
2021-11-24 10:49:20 +01:00
|
|
|
user_model "code.gitea.io/gitea/models/user"
|
2021-11-20 10:34:05 +01:00
|
|
|
"code.gitea.io/gitea/modules/hostmatcher"
|
2019-05-07 03:12:51 +02:00
|
|
|
"code.gitea.io/gitea/modules/log"
|
2021-11-16 16:25:33 +01:00
|
|
|
base "code.gitea.io/gitea/modules/migration"
|
2019-11-16 09:30:06 +01:00
|
|
|
"code.gitea.io/gitea/modules/setting"
|
2021-03-15 22:52:11 +01:00
|
|
|
"code.gitea.io/gitea/modules/util"
|
2019-05-07 03:12:51 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// MigrateOptions is equal to base.MigrateOptions; it is aliased here so that
// users of this package can refer to the migration options without importing
// the base migration module themselves.
type MigrateOptions = base.MigrateOptions
|
|
|
|
|
|
|
|
var (
	// factories holds all registered downloader factories; newDownloader
	// walks this list to find one matching the requested git service type.
	factories []base.DownloaderFactory

	// allowList and blockList restrict which remote hosts/IPs migrations may
	// clone from; they are consulted by checkByAllowBlockList.
	allowList *hostmatcher.HostMatchList
	blockList *hostmatcher.HostMatchList
)
|
|
|
|
|
|
|
|
// RegisterDownloaderFactory registers a downloader factory
|
|
|
|
func RegisterDownloaderFactory(factory base.DownloaderFactory) {
|
|
|
|
factories = append(factories, factory)
|
|
|
|
}
|
|
|
|
|
2021-03-15 22:52:11 +01:00
|
|
|
// IsMigrateURLAllowed checks if an URL is allowed to be migrated from
|
2021-11-24 10:49:20 +01:00
|
|
|
func IsMigrateURLAllowed(remoteURL string, doer *user_model.User) error {
|
2021-03-15 22:52:11 +01:00
|
|
|
// Remote address can be HTTP/HTTPS/Git URL or local path.
|
2021-03-18 14:58:47 +01:00
|
|
|
u, err := url.Parse(remoteURL)
|
2020-11-29 01:37:58 +01:00
|
|
|
if err != nil {
|
2022-06-12 07:43:27 +02:00
|
|
|
return &models.ErrInvalidCloneAddr{IsURLError: true, Host: remoteURL}
|
2020-11-29 01:37:58 +01:00
|
|
|
}
|
|
|
|
|
2021-03-15 22:52:11 +01:00
|
|
|
if u.Scheme == "file" || u.Scheme == "" {
|
|
|
|
if !doer.CanImportLocal() {
|
|
|
|
return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsPermissionDenied: true, LocalPath: true}
|
2020-11-29 01:37:58 +01:00
|
|
|
}
|
2021-03-15 22:52:11 +01:00
|
|
|
isAbs := filepath.IsAbs(u.Host + u.Path)
|
|
|
|
if !isAbs {
|
|
|
|
return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsInvalidPath: true, LocalPath: true}
|
|
|
|
}
|
|
|
|
isDir, err := util.IsDir(u.Host + u.Path)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to check if %s is a directory: %v", u.Host+u.Path, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !isDir {
|
|
|
|
return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsInvalidPath: true, LocalPath: true}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if u.Scheme == "git" && u.Port() != "" && (strings.Contains(remoteURL, "%0d") || strings.Contains(remoteURL, "%0a")) {
|
|
|
|
return &models.ErrInvalidCloneAddr{Host: u.Host, IsURLError: true}
|
2020-11-29 01:37:58 +01:00
|
|
|
}
|
|
|
|
|
2021-03-15 22:52:11 +01:00
|
|
|
if u.Opaque != "" || u.Scheme != "" && u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "git" {
|
|
|
|
return &models.ErrInvalidCloneAddr{Host: u.Host, IsProtocolInvalid: true, IsPermissionDenied: true, IsURLError: true}
|
|
|
|
}
|
|
|
|
|
2021-11-20 10:34:05 +01:00
|
|
|
hostName, _, err := net.SplitHostPort(u.Host)
|
|
|
|
if err != nil {
|
|
|
|
// u.Host can be "host" or "host:port"
|
|
|
|
err = nil //nolint
|
|
|
|
hostName = u.Host
|
|
|
|
}
|
2022-05-02 06:02:17 +02:00
|
|
|
|
|
|
|
// some users only use proxy, there is no DNS resolver. it's safe to ignore the LookupIP error
|
|
|
|
addrList, _ := net.LookupIP(hostName)
|
2022-07-13 03:07:16 +02:00
|
|
|
return checkByAllowBlockList(hostName, addrList)
|
|
|
|
}
|
2021-03-08 14:10:17 +01:00
|
|
|
|
2022-07-13 03:07:16 +02:00
|
|
|
func checkByAllowBlockList(hostName string, addrList []net.IP) error {
|
2021-11-20 10:34:05 +01:00
|
|
|
var ipAllowed bool
|
|
|
|
var ipBlocked bool
|
|
|
|
for _, addr := range addrList {
|
|
|
|
ipAllowed = ipAllowed || allowList.MatchIPAddr(addr)
|
|
|
|
ipBlocked = ipBlocked || blockList.MatchIPAddr(addr)
|
|
|
|
}
|
|
|
|
var blockedError error
|
|
|
|
if blockList.MatchHostName(hostName) || ipBlocked {
|
2022-07-13 03:07:16 +02:00
|
|
|
blockedError = &models.ErrInvalidCloneAddr{Host: hostName, IsPermissionDenied: true}
|
2021-11-20 10:34:05 +01:00
|
|
|
}
|
2022-07-13 03:07:16 +02:00
|
|
|
// if we have an allow-list, check the allow-list before return to get the more accurate error
|
2021-11-20 10:34:05 +01:00
|
|
|
if !allowList.IsEmpty() {
|
|
|
|
if !allowList.MatchHostName(hostName) && !ipAllowed {
|
2022-07-13 03:07:16 +02:00
|
|
|
return &models.ErrInvalidCloneAddr{Host: hostName, IsPermissionDenied: true}
|
2020-11-29 01:37:58 +01:00
|
|
|
}
|
|
|
|
}
|
2021-11-20 10:34:05 +01:00
|
|
|
// otherwise, we always follow the blocked list
|
|
|
|
return blockedError
|
2020-11-29 01:37:58 +01:00
|
|
|
}
|
|
|
|
|
2019-05-07 03:12:51 +02:00
|
|
|
// MigrateRepository migrate repository according MigrateOptions
|
2021-12-10 02:27:50 +01:00
|
|
|
func MigrateRepository(ctx context.Context, doer *user_model.User, ownerName string, opts base.MigrateOptions, messenger base.Messenger) (*repo_model.Repository, error) {
|
2021-03-15 22:52:11 +01:00
|
|
|
err := IsMigrateURLAllowed(opts.CloneAddr, doer)
|
2020-11-29 01:37:58 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-04-09 00:25:57 +02:00
|
|
|
if opts.LFS && len(opts.LFSEndpoint) > 0 {
|
|
|
|
err := IsMigrateURLAllowed(opts.LFSEndpoint, doer)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2020-12-27 04:34:19 +01:00
|
|
|
downloader, err := newDownloader(ctx, ownerName, opts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-01-20 18:46:10 +01:00
|
|
|
uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName)
|
2020-12-27 04:34:19 +01:00
|
|
|
uploader.gitServiceType = opts.GitServiceType
|
|
|
|
|
2022-08-21 15:28:15 +02:00
|
|
|
if err := migrateRepository(doer, downloader, uploader, opts, messenger); err != nil {
|
2020-12-27 04:34:19 +01:00
|
|
|
if err1 := uploader.Rollback(); err1 != nil {
|
|
|
|
log.Error("rollback failed: %v", err1)
|
|
|
|
}
|
2022-10-17 01:29:26 +02:00
|
|
|
if err2 := system_model.CreateRepositoryNotice(fmt.Sprintf("Migrate repository from %s failed: %v", opts.OriginalURL, err)); err2 != nil {
|
2020-12-27 04:34:19 +01:00
|
|
|
log.Error("create respotiry notice failed: ", err2)
|
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return uploader.repo, nil
|
|
|
|
}
|
2020-11-29 01:37:58 +01:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
func newDownloader(ctx context.Context, ownerName string, opts base.MigrateOptions) (base.Downloader, error) {
|
2019-05-07 03:12:51 +02:00
|
|
|
var (
|
|
|
|
downloader base.Downloader
|
2020-12-27 04:34:19 +01:00
|
|
|
err error
|
2019-05-07 03:12:51 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
for _, factory := range factories {
|
2020-08-28 03:36:37 +02:00
|
|
|
if factory.GitServiceType() == opts.GitServiceType {
|
2020-09-02 19:49:25 +02:00
|
|
|
downloader, err = factory.New(ctx, opts)
|
2019-05-07 03:12:51 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if downloader == nil {
|
|
|
|
opts.Wiki = true
|
|
|
|
opts.Milestones = false
|
|
|
|
opts.Labels = false
|
|
|
|
opts.Releases = false
|
|
|
|
opts.Comments = false
|
|
|
|
opts.Issues = false
|
|
|
|
opts.PullRequests = false
|
2019-10-13 15:23:14 +02:00
|
|
|
downloader = NewPlainGitDownloader(ownerName, opts.RepoName, opts.CloneAddr)
|
2019-12-18 22:49:56 +01:00
|
|
|
log.Trace("Will migrate from git: %s", opts.OriginalURL)
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2019-11-16 09:30:06 +01:00
|
|
|
if setting.Migrations.MaxAttempts > 1 {
|
2020-09-02 19:49:25 +02:00
|
|
|
downloader = base.NewRetryDownloader(ctx, downloader, setting.Migrations.MaxAttempts, setting.Migrations.RetryBackoff)
|
2019-11-16 09:30:06 +01:00
|
|
|
}
|
2020-12-27 04:34:19 +01:00
|
|
|
return downloader, nil
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2020-04-20 14:30:46 +02:00
|
|
|
// migrateRepository will download information and then upload it to Uploader, this is a simple
|
2019-05-07 03:12:51 +02:00
|
|
|
// process for small repository. For a big repository, save all the data to disk
|
|
|
|
// before upload is better
|
2022-08-21 15:28:15 +02:00
|
|
|
func migrateRepository(doer *user_model.User, downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions, messenger base.Messenger) error {
|
2021-06-17 00:02:24 +02:00
|
|
|
if messenger == nil {
|
|
|
|
messenger = base.NilMessenger
|
|
|
|
}
|
|
|
|
|
2019-05-07 03:12:51 +02:00
|
|
|
repo, err := downloader.GetRepoInfo()
|
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Info("migrating repo infos is not supported, ignored")
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
repo.IsPrivate = opts.Private
|
|
|
|
repo.IsMirror = opts.Mirror
|
2019-05-20 14:43:43 +02:00
|
|
|
if opts.Description != "" {
|
|
|
|
repo.Description = opts.Description
|
|
|
|
}
|
2021-01-21 20:33:58 +01:00
|
|
|
if repo.CloneURL, err = downloader.FormatCloneURL(opts, repo.CloneURL); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-09-04 12:47:56 +02:00
|
|
|
// SECURITY: If the downloader is not a RepositoryRestorer then we need to recheck the CloneURL
|
2022-08-21 15:28:15 +02:00
|
|
|
if _, ok := downloader.(*RepositoryRestorer); !ok {
|
|
|
|
// Now the clone URL can be rewritten by the downloader so we must recheck
|
|
|
|
if err := IsMigrateURLAllowed(repo.CloneURL, doer); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-09-04 12:47:56 +02:00
|
|
|
// SECURITY: Ensure that we haven't been redirected from an external to a local filesystem
|
|
|
|
// Now we know all of these must parse
|
|
|
|
cloneAddrURL, _ := url.Parse(opts.CloneAddr)
|
|
|
|
cloneURL, _ := url.Parse(repo.CloneURL)
|
|
|
|
|
|
|
|
if cloneURL.Scheme == "file" || cloneURL.Scheme == "" {
|
|
|
|
if cloneAddrURL.Scheme != "file" && cloneAddrURL.Scheme != "" {
|
|
|
|
return fmt.Errorf("repo info has changed from external to local filesystem")
|
2022-08-21 15:28:15 +02:00
|
|
|
}
|
|
|
|
}
|
2022-09-04 12:47:56 +02:00
|
|
|
|
|
|
|
// We don't actually need to check the OriginalURL as it isn't used anywhere
|
2022-08-21 15:28:15 +02:00
|
|
|
}
|
|
|
|
|
2021-06-04 15:14:20 +02:00
|
|
|
log.Trace("migrating git data from %s", repo.CloneURL)
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_git")
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.CreateRepo(repo, opts); err != nil {
|
2019-05-07 03:12:51 +02:00
|
|
|
return err
|
|
|
|
}
|
2019-11-13 08:01:19 +01:00
|
|
|
defer uploader.Close()
|
2019-05-07 03:12:51 +02:00
|
|
|
|
2019-08-14 08:16:12 +02:00
|
|
|
log.Trace("migrating topics")
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_topics")
|
2019-08-14 08:16:12 +02:00
|
|
|
topics, err := downloader.GetTopics()
|
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating topics is not supported, ignored")
|
2019-08-14 08:16:12 +02:00
|
|
|
}
|
2021-01-21 20:33:58 +01:00
|
|
|
if len(topics) != 0 {
|
|
|
|
if err = uploader.CreateTopics(topics...); err != nil {
|
2019-08-14 08:16:12 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-07 03:12:51 +02:00
|
|
|
if opts.Milestones {
|
|
|
|
log.Trace("migrating milestones")
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_milestones")
|
2019-05-07 03:12:51 +02:00
|
|
|
milestones, err := downloader.GetMilestones()
|
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating milestones is not supported, ignored")
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2019-07-06 21:24:50 +02:00
|
|
|
msBatchSize := uploader.MaxBatchInsertSize("milestone")
|
|
|
|
for len(milestones) > 0 {
|
|
|
|
if len(milestones) < msBatchSize {
|
|
|
|
msBatchSize = len(milestones)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := uploader.CreateMilestones(milestones...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
milestones = milestones[msBatchSize:]
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if opts.Labels {
|
|
|
|
log.Trace("migrating labels")
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_labels")
|
2019-05-07 03:12:51 +02:00
|
|
|
labels, err := downloader.GetLabels()
|
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating labels is not supported, ignored")
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2019-07-06 21:24:50 +02:00
|
|
|
lbBatchSize := uploader.MaxBatchInsertSize("label")
|
|
|
|
for len(labels) > 0 {
|
|
|
|
if len(labels) < lbBatchSize {
|
|
|
|
lbBatchSize = len(labels)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := uploader.CreateLabels(labels...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
labels = labels[lbBatchSize:]
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if opts.Releases {
|
|
|
|
log.Trace("migrating releases")
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_releases")
|
2019-05-07 03:12:51 +02:00
|
|
|
releases, err := downloader.GetReleases()
|
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating releases is not supported, ignored")
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2019-07-06 21:24:50 +02:00
|
|
|
relBatchSize := uploader.MaxBatchInsertSize("release")
|
|
|
|
for len(releases) > 0 {
|
2019-12-12 01:20:11 +01:00
|
|
|
if len(releases) < relBatchSize {
|
|
|
|
relBatchSize = len(releases)
|
2019-07-06 21:24:50 +02:00
|
|
|
}
|
|
|
|
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
|
2019-07-06 21:24:50 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
releases = releases[relBatchSize:]
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
2019-12-12 01:20:11 +01:00
|
|
|
|
|
|
|
// Once all releases (if any) are inserted, sync any remaining non-release tags
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.SyncTags(); err != nil {
|
2019-12-12 01:20:11 +01:00
|
|
|
return err
|
|
|
|
}
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2020-01-23 18:28:15 +01:00
|
|
|
var (
|
|
|
|
commentBatchSize = uploader.MaxBatchInsertSize("comment")
|
|
|
|
reviewBatchSize = uploader.MaxBatchInsertSize("review")
|
|
|
|
)
|
2019-07-06 21:24:50 +02:00
|
|
|
|
2021-06-30 09:23:49 +02:00
|
|
|
supportAllComments := downloader.SupportGetRepoComments()
|
|
|
|
|
2019-05-07 03:12:51 +02:00
|
|
|
if opts.Issues {
|
|
|
|
log.Trace("migrating issues and comments")
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_issues")
|
2022-01-20 18:46:10 +01:00
|
|
|
issueBatchSize := uploader.MaxBatchInsertSize("issue")
|
2019-07-06 21:24:50 +02:00
|
|
|
|
2019-05-30 22:26:57 +02:00
|
|
|
for i := 1; ; i++ {
|
2019-07-06 21:24:50 +02:00
|
|
|
issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
|
2019-05-07 03:12:51 +02:00
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating issues is not supported, ignored")
|
|
|
|
break
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2019-06-29 15:38:22 +02:00
|
|
|
if err := uploader.CreateIssues(issues...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-05-07 03:12:51 +02:00
|
|
|
|
2021-06-30 09:23:49 +02:00
|
|
|
if opts.Comments && !supportAllComments {
|
2022-01-20 18:46:10 +01:00
|
|
|
allComments := make([]*base.Comment, 0, commentBatchSize)
|
2020-12-27 04:34:19 +01:00
|
|
|
for _, issue := range issues {
|
|
|
|
log.Trace("migrating issue %d's comments", issue.Number)
|
Store the foreign ID of issues during migration (#18446)
Storing the foreign identifier of an imported issue in the database is a prerequisite to implement idempotent migrations or mirror for issues. It is a baby step towards mirroring that introduces a new table.
At the moment when an issue is created by the Gitea uploader, it fails if the issue already exists. The Gitea uploader could be modified so that, instead of failing, it looks up the database to find an existing issue. And if it does it would update the issue instead of creating a new one. However this is not currently possible because an information is missing from the database: the foreign identifier that uniquely represents the issue being migrated is not persisted. With this change, the foreign identifier is stored in the database and the Gitea uploader will then be able to run a query to figure out if a given issue being imported already exists.
The implementation of mirroring for issues, pull requests, releases, etc. can be done in three steps:
1. Store an identifier for the element being mirrored (issue, pull request...) in the database (this is the purpose of these changes)
2. Modify the Gitea uploader to be able to update an existing repository with all it contains (issues, pull request...) instead of failing if it exists
3. Optimize the Gitea uploader to speed up the updates, when possible.
The second step creates code that does not yet exist to enable idempotent migrations with the Gitea uploader. When a migration is done for the first time, the behavior is not changed. But when a migration is done for a repository that already exists, this new code is used to update it.
The third step can use the code created in the second step to optimize and speed up migrations. For instance, when a migration is resumed, an issue that has an update time that is not more recent can be skipped and only newly created issues or updated ones will be updated. Another example of optimization could be that a webhook notifies Gitea when an issue is updated. The code triggered by the webhook would download only this issue and call the code created in the second step to update the issue, as if it was in the process of an idempotent migration.
The ForeignReferences table is added to contain local and foreign ID pairs relative to a given repository. It can later be used for pull requests and other artifacts that can be mirrored. Although the foreign id could be added as a single field in issues or pull requests, it would need to be added to all tables that represent something that can be mirrored. Creating a new table makes for a simpler and more generic design. The drawback is that it requires an extra lookup to obtain the information. However, this extra information is only required during migration or mirroring and does not impact the way Gitea currently works.
The foreign identifier of an issue or pull request is similar to the identifier of an external user, which is stored in reactions, issues, etc. as OriginalPosterID and so on. The representation of a user is however different and the ability of users to link their account to an external user at a later time is also a logic that is different from what is involved in mirroring or migrations. For these reasons, despite some commonalities, it is unclear at this time how the two tables (foreign reference and external user) could be merged together.
The ForeignID field is extracted from the issue migration context so that it can be dumped in files with dump-repo and later restored via restore-repo.
The GetAllComments downloader method is introduced to simplify the implementation and not overload the Context for the purpose of pagination. It also clarifies in which context the comments are paginated and in which context they are not.
The Context interface is no longer useful for the purpose of retrieving the LocalID and ForeignID since they are now both available from the PullRequest and Issue struct. The Reviewable and Commentable interfaces replace and serve the same purpose.
The Context data member of PullRequest and Issue becomes a DownloaderContext to clarify that its purpose is not to support in memory operations while the current downloader is acting but is not otherwise persisted. It is, for instance, used by the GitLab downloader to store the IsMergeRequest boolean and sort out issues.
---
[source](https://lab.forgefriends.org/forgefriends/forgefriends/-/merge_requests/36)
Signed-off-by: Loïc Dachary <loic@dachary.org>
Co-authored-by: Loïc Dachary <loic@dachary.org>
2022-03-17 18:08:35 +01:00
|
|
|
comments, _, err := downloader.GetComments(issue)
|
2020-12-27 04:34:19 +01:00
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating comments is not supported, ignored")
|
2020-12-27 04:34:19 +01:00
|
|
|
}
|
2019-05-07 03:12:51 +02:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
allComments = append(allComments, comments...)
|
2019-07-08 04:14:12 +02:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
if len(allComments) >= commentBatchSize {
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
|
2020-12-27 04:34:19 +01:00
|
|
|
return err
|
|
|
|
}
|
2019-06-29 15:38:22 +02:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
allComments = allComments[commentBatchSize:]
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
2019-06-29 15:38:22 +02:00
|
|
|
}
|
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
if len(allComments) > 0 {
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.CreateComments(allComments...); err != nil {
|
2020-12-27 04:34:19 +01:00
|
|
|
return err
|
|
|
|
}
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-30 22:26:57 +02:00
|
|
|
if isEnd {
|
2019-05-07 03:12:51 +02:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if opts.PullRequests {
|
|
|
|
log.Trace("migrating pull requests and comments")
|
2021-06-17 00:02:24 +02:00
|
|
|
messenger("repo.migrate.migrating_pulls")
|
2022-01-20 18:46:10 +01:00
|
|
|
prBatchSize := uploader.MaxBatchInsertSize("pullrequest")
|
2019-05-30 22:26:57 +02:00
|
|
|
for i := 1; ; i++ {
|
2020-10-14 06:06:00 +02:00
|
|
|
prs, isEnd, err := downloader.GetPullRequests(i, prBatchSize)
|
2019-05-07 03:12:51 +02:00
|
|
|
if err != nil {
|
2021-01-21 20:33:58 +01:00
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating pull requests is not supported, ignored")
|
|
|
|
break
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
|
|
|
|
2019-06-29 15:38:22 +02:00
|
|
|
if err := uploader.CreatePullRequests(prs...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-05-07 03:12:51 +02:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
if opts.Comments {
|
2021-06-30 09:23:49 +02:00
|
|
|
if !supportAllComments {
|
|
|
|
// plain comments
|
2022-01-20 18:46:10 +01:00
|
|
|
allComments := make([]*base.Comment, 0, commentBatchSize)
|
2021-06-30 09:23:49 +02:00
|
|
|
for _, pr := range prs {
|
|
|
|
log.Trace("migrating pull request %d's comments", pr.Number)
|
Store the foreign ID of issues during migration (#18446)
Storing the foreign identifier of an imported issue in the database is a prerequisite to implement idempotent migrations or mirror for issues. It is a baby step towards mirroring that introduces a new table.
At the moment when an issue is created by the Gitea uploader, it fails if the issue already exists. The Gitea uploader could be modified so that, instead of failing, it looks up the database to find an existing issue. And if it does it would update the issue instead of creating a new one. However this is not currently possible because an information is missing from the database: the foreign identifier that uniquely represents the issue being migrated is not persisted. With this change, the foreign identifier is stored in the database and the Gitea uploader will then be able to run a query to figure out if a given issue being imported already exists.
The implementation of mirroring for issues, pull requests, releases, etc. can be done in three steps:
1. Store an identifier for the element being mirrored (issue, pull request...) in the database (this is the purpose of these changes)
2. Modify the Gitea uploader to be able to update an existing repository with all it contains (issues, pull request...) instead of failing if it exists
3. Optimize the Gitea uploader to speed up the updates, when possible.
The second step creates code that does not yet exist to enable idempotent migrations with the Gitea uploader. When a migration is done for the first time, the behavior is not changed. But when a migration is done for a repository that already exists, this new code is used to update it.
The third step can use the code created in the second step to optimize and speed up migrations. For instance, when a migration is resumed, an issue that has an update time that is not more recent can be skipped and only newly created issues or updated ones will be updated. Another example of optimization could be that a webhook notifies Gitea when an issue is updated. The code triggered by the webhook would download only this issue and call the code created in the second step to update the issue, as if it was in the process of an idempotent migration.
The ForeignReferences table is added to contain local and foreign ID pairs relative to a given repository. It can later be used for pull requests and other artifacts that can be mirrored. Although the foreign id could be added as a single field in issues or pull requests, it would need to be added to all tables that represent something that can be mirrored. Creating a new table makes for a simpler and more generic design. The drawback is that it requires an extra lookup to obtain the information. However, this extra information is only required during migration or mirroring and does not impact the way Gitea currently works.
The foreign identifier of an issue or pull request is similar to the identifier of an external user, which is stored in reactions, issues, etc. as OriginalPosterID and so on. The representation of a user is however different and the ability of users to link their account to an external user at a later time is also a logic that is different from what is involved in mirroring or migrations. For these reasons, despite some commonalities, it is unclear at this time how the two tables (foreign reference and external user) could be merged together.
The ForeignID field is extracted from the issue migration context so that it can be dumped in files with dump-repo and later restored via restore-repo.
The GetAllComments downloader method is introduced to simplify the implementation and not overload the Context for the purpose of pagination. It also clarifies in which context the comments are paginated and in which context they are not.
The Context interface is no longer useful for the purpose of retrieving the LocalID and ForeignID since they are now both available from the PullRequest and Issue struct. The Reviewable and Commentable interfaces replace and serve the same purpose.
The Context data member of PullRequest and Issue becomes a DownloaderContext to clarify that its purpose is not to support in memory operations while the current downloader is acting but is not otherwise persisted. It is, for instance, used by the GitLab downloader to store the IsMergeRequest boolean and sort out issues.
---
[source](https://lab.forgefriends.org/forgefriends/forgefriends/-/merge_requests/36)
Signed-off-by: Loïc Dachary <loic@dachary.org>
Co-authored-by: Loïc Dachary <loic@dachary.org>
2022-03-17 18:08:35 +01:00
|
|
|
comments, _, err := downloader.GetComments(pr)
|
2021-06-30 09:23:49 +02:00
|
|
|
if err != nil {
|
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating comments is not supported, ignored")
|
2021-01-21 20:33:58 +01:00
|
|
|
}
|
2019-06-29 15:38:22 +02:00
|
|
|
|
2021-06-30 09:23:49 +02:00
|
|
|
allComments = append(allComments, comments...)
|
2019-06-29 15:38:22 +02:00
|
|
|
|
2021-06-30 09:23:49 +02:00
|
|
|
if len(allComments) >= commentBatchSize {
|
|
|
|
if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
allComments = allComments[commentBatchSize:]
|
2020-12-27 04:34:19 +01:00
|
|
|
}
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
2021-06-30 09:23:49 +02:00
|
|
|
if len(allComments) > 0 {
|
|
|
|
if err = uploader.CreateComments(allComments...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-12-27 04:34:19 +01:00
|
|
|
}
|
2019-06-29 15:38:22 +02:00
|
|
|
}
|
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
// migrate reviews
|
2022-01-20 18:46:10 +01:00
|
|
|
allReviews := make([]*base.Review, 0, reviewBatchSize)
|
2020-12-27 04:34:19 +01:00
|
|
|
for _, pr := range prs {
|
Store the foreign ID of issues during migration (#18446)
Storing the foreign identifier of an imported issue in the database is a prerequisite to implement idempotent migrations or mirror for issues. It is a baby step towards mirroring that introduces a new table.
At the moment when an issue is created by the Gitea uploader, it fails if the issue already exists. The Gitea uploader could be modified so that, instead of failing, it looks up the database to find an existing issue. And if it does, it would update the issue instead of creating a new one. However this is not currently possible because a piece of information is missing from the database: the foreign identifier that uniquely represents the issue being migrated is not persisted. With this change, the foreign identifier is stored in the database and the Gitea uploader will then be able to run a query to figure out if a given issue being imported already exists.
The implementation of mirroring for issues, pull requests, releases, etc. can be done in three steps:
1. Store an identifier for the element being mirrored (issue, pull request...) in the database (this is the purpose of these changes)
2. Modify the Gitea uploader to be able to update an existing repository with all it contains (issues, pull request...) instead of failing if it exists
3. Optimize the Gitea uploader to speed up the updates, when possible.
The second step creates code that does not yet exist to enable idempotent migrations with the Gitea uploader. When a migration is done for the first time, the behavior is not changed. But when a migration is done for a repository that already exists, this new code is used to update it.
The third step can use the code created in the second step to optimize and speed up migrations. For instance, when a migration is resumed, an issue that has an update time that is not more recent can be skipped and only newly created issues or updated ones will be updated. Another example of optimization could be that a webhook notifies Gitea when an issue is updated. The code triggered by the webhook would download only this issue and call the code created in the second step to update the issue, as if it was in the process of an idempotent migration.
The ForeignReferences table is added to contain local and foreign ID pairs relative to a given repository. It can later be used for pull requests and other artifacts that can be mirrored. Although the foreign id could be added as a single field in issues or pull requests, it would need to be added to all tables that represent something that can be mirrored. Creating a new table makes for a simpler and more generic design. The drawback is that it requires an extra lookup to obtain the information. However, this extra information is only required during migration or mirroring and does not impact the way Gitea currently works.
The foreign identifier of an issue or pull request is similar to the identifier of an external user, which is stored in reactions, issues, etc. as OriginalPosterID and so on. The representation of a user is however different and the ability of users to link their account to an external user at a later time is also a logic that is different from what is involved in mirroring or migrations. For these reasons, despite some commonalities, it is unclear at this time how the two tables (foreign reference and external user) could be merged together.
The ForeignID field is extracted from the issue migration context so that it can be dumped in files with dump-repo and later restored via restore-repo.
The GetAllComments downloader method is introduced to simplify the implementation and not overload the Context for the purpose of pagination. It also clarifies in which context the comments are paginated and in which context they are not.
The Context interface is no longer useful for the purpose of retrieving the LocalID and ForeignID since they are now both available from the PullRequest and Issue struct. The Reviewable and Commentable interfaces replace and serve the same purpose.
The Context data member of PullRequest and Issue becomes a DownloaderContext to clarify that its purpose is to support in-memory operations while the current downloader is running, and that it is not otherwise persisted. It is, for instance, used by the GitLab downloader to store the IsMergeRequest boolean and sort out issues.
---
[source](https://lab.forgefriends.org/forgefriends/forgefriends/-/merge_requests/36)
Signed-off-by: Loïc Dachary <loic@dachary.org>
Co-authored-by: Loïc Dachary <loic@dachary.org>
2022-03-17 18:08:35 +01:00
|
|
|
reviews, err := downloader.GetReviews(pr)
|
2021-01-21 20:33:58 +01:00
|
|
|
if err != nil {
|
|
|
|
if !base.IsErrNotSupported(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Warn("migrating reviews is not supported, ignored")
|
|
|
|
break
|
|
|
|
}
|
2020-01-23 18:28:15 +01:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
allReviews = append(allReviews, reviews...)
|
2020-01-23 18:28:15 +01:00
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
if len(allReviews) >= reviewBatchSize {
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.CreateReviews(allReviews[:reviewBatchSize]...); err != nil {
|
2020-12-27 04:34:19 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
allReviews = allReviews[reviewBatchSize:]
|
2020-01-23 18:28:15 +01:00
|
|
|
}
|
|
|
|
}
|
2020-12-27 04:34:19 +01:00
|
|
|
if len(allReviews) > 0 {
|
2021-01-21 20:33:58 +01:00
|
|
|
if err = uploader.CreateReviews(allReviews...); err != nil {
|
2020-12-27 04:34:19 +01:00
|
|
|
return err
|
|
|
|
}
|
2020-01-23 18:28:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-14 06:06:00 +02:00
|
|
|
if isEnd {
|
2019-05-07 03:12:51 +02:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-30 09:23:49 +02:00
|
|
|
if opts.Comments && supportAllComments {
|
|
|
|
log.Trace("migrating comments")
|
|
|
|
for i := 1; ; i++ {
|
Store the foreign ID of issues during migration (#18446)
Storing the foreign identifier of an imported issue in the database is a prerequisite to implement idempotent migrations or mirror for issues. It is a baby step towards mirroring that introduces a new table.
At the moment when an issue is created by the Gitea uploader, it fails if the issue already exists. The Gitea uploader could be modified so that, instead of failing, it looks up the database to find an existing issue. And if it does, it would update the issue instead of creating a new one. However this is not currently possible because a piece of information is missing from the database: the foreign identifier that uniquely represents the issue being migrated is not persisted. With this change, the foreign identifier is stored in the database and the Gitea uploader will then be able to run a query to figure out if a given issue being imported already exists.
The implementation of mirroring for issues, pull requests, releases, etc. can be done in three steps:
1. Store an identifier for the element being mirrored (issue, pull request...) in the database (this is the purpose of these changes)
2. Modify the Gitea uploader to be able to update an existing repository with all it contains (issues, pull request...) instead of failing if it exists
3. Optimize the Gitea uploader to speed up the updates, when possible.
The second step creates code that does not yet exist to enable idempotent migrations with the Gitea uploader. When a migration is done for the first time, the behavior is not changed. But when a migration is done for a repository that already exists, this new code is used to update it.
The third step can use the code created in the second step to optimize and speed up migrations. For instance, when a migration is resumed, an issue that has an update time that is not more recent can be skipped and only newly created issues or updated ones will be updated. Another example of optimization could be that a webhook notifies Gitea when an issue is updated. The code triggered by the webhook would download only this issue and call the code created in the second step to update the issue, as if it was in the process of an idempotent migration.
The ForeignReferences table is added to contain local and foreign ID pairs relative to a given repository. It can later be used for pull requests and other artifacts that can be mirrored. Although the foreign id could be added as a single field in issues or pull requests, it would need to be added to all tables that represent something that can be mirrored. Creating a new table makes for a simpler and more generic design. The drawback is that it requires an extra lookup to obtain the information. However, this extra information is only required during migration or mirroring and does not impact the way Gitea currently works.
The foreign identifier of an issue or pull request is similar to the identifier of an external user, which is stored in reactions, issues, etc. as OriginalPosterID and so on. The representation of a user is however different and the ability of users to link their account to an external user at a later time is also a logic that is different from what is involved in mirroring or migrations. For these reasons, despite some commonalities, it is unclear at this time how the two tables (foreign reference and external user) could be merged together.
The ForeignID field is extracted from the issue migration context so that it can be dumped in files with dump-repo and later restored via restore-repo.
The GetAllComments downloader method is introduced to simplify the implementation and not overload the Context for the purpose of pagination. It also clarifies in which context the comments are paginated and in which context they are not.
The Context interface is no longer useful for the purpose of retrieving the LocalID and ForeignID since they are now both available from the PullRequest and Issue struct. The Reviewable and Commentable interfaces replace and serve the same purpose.
The Context data member of PullRequest and Issue becomes a DownloaderContext to clarify that its purpose is to support in-memory operations while the current downloader is running, and that it is not otherwise persisted. It is, for instance, used by the GitLab downloader to store the IsMergeRequest boolean and sort out issues.
---
[source](https://lab.forgefriends.org/forgefriends/forgefriends/-/merge_requests/36)
Signed-off-by: Loïc Dachary <loic@dachary.org>
Co-authored-by: Loïc Dachary <loic@dachary.org>
2022-03-17 18:08:35 +01:00
|
|
|
comments, isEnd, err := downloader.GetAllComments(i, commentBatchSize)
|
2021-06-30 09:23:49 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := uploader.CreateComments(comments...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if isEnd {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-27 04:34:19 +01:00
|
|
|
return uploader.Finish()
|
2019-05-07 03:12:51 +02:00
|
|
|
}
|
2020-11-29 01:37:58 +01:00
|
|
|
|
|
|
|
// Init migrations service
|
|
|
|
func Init() error {
|
2021-11-20 10:34:05 +01:00
|
|
|
// TODO: maybe we can deprecate these legacy ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS/BLOCKED_DOMAINS, use ALLOWED_HOST_LIST/BLOCKED_HOST_LIST instead
|
2020-11-29 01:37:58 +01:00
|
|
|
|
2021-11-20 10:34:05 +01:00
|
|
|
blockList = hostmatcher.ParseSimpleMatchList("migrations.BLOCKED_DOMAINS", setting.Migrations.BlockedDomains)
|
2020-11-29 01:37:58 +01:00
|
|
|
|
2021-11-20 10:34:05 +01:00
|
|
|
allowList = hostmatcher.ParseSimpleMatchList("migrations.ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS", setting.Migrations.AllowedDomains)
|
|
|
|
if allowList.IsEmpty() {
|
|
|
|
// the default policy is that migration module can access external hosts
|
|
|
|
allowList.AppendBuiltin(hostmatcher.MatchBuiltinExternal)
|
|
|
|
}
|
|
|
|
if setting.Migrations.AllowLocalNetworks {
|
|
|
|
allowList.AppendBuiltin(hostmatcher.MatchBuiltinPrivate)
|
|
|
|
allowList.AppendBuiltin(hostmatcher.MatchBuiltinLoopback)
|
|
|
|
}
|
2022-07-13 03:07:16 +02:00
|
|
|
// TODO: at the moment, if ALLOW_LOCALNETWORKS=false, ALLOWED_DOMAINS=domain.com, and domain.com has IP 127.0.0.1, then it's still allowed.
|
|
|
|
// if we want to block such case, the private&loopback should be added to the blockList when ALLOW_LOCALNETWORKS=false
|
2022-08-17 02:15:54 +02:00
|
|
|
|
|
|
|
if setting.Proxy.Enabled && setting.Proxy.ProxyURLFixed != nil {
|
|
|
|
allowList.AppendPattern(setting.Proxy.ProxyURLFixed.Host)
|
|
|
|
}
|
|
|
|
|
2020-11-29 01:37:58 +01:00
|
|
|
return nil
|
|
|
|
}
|