// Copyright 2019 The Gitea Authors. All rights reserved.
// Copyright 2018 Jonas Franz. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package migrations

import (
	"context"
	"fmt"
	"net"
	"net/url"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/matchlist"
	"code.gitea.io/gitea/modules/migrations/base"
	"code.gitea.io/gitea/modules/setting"
)

// MigrateOptions is equal to base.MigrateOptions
type MigrateOptions = base.MigrateOptions

var (
	factories []base.DownloaderFactory

	allowList *matchlist.Matchlist
	blockList *matchlist.Matchlist
)

// RegisterDownloaderFactory registers a downloader factory
func RegisterDownloaderFactory(factory base.DownloaderFactory) {
	factories = append(factories, factory)
}
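
// isMigrateURLAllowed checks whether the given remote URL is acceptable as a
// migration source: for http/https URLs the host must pass the configured
// allow/block domain lists, and unless local networks are explicitly allowed
// the host must not resolve to a private or otherwise non-global-unicast address.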
func isMigrateURLAllowed(remoteURL string) error {
	u, err := url.Parse(strings.ToLower(remoteURL))
	if err != nil {
		return err
	}

	if strings.EqualFold(u.Scheme, "http") || strings.EqualFold(u.Scheme, "https") {
		if len(setting.Migrations.AllowedDomains) > 0 {
			if !allowList.Match(u.Host) {
				return &models.ErrMigrationNotAllowed{Host: u.Host}
			}
		} else {
			if blockList.Match(u.Host) {
				return &models.ErrMigrationNotAllowed{Host: u.Host}
			}
		}
	}

	if !setting.Migrations.AllowLocalNetworks {
		addrList, err := net.LookupIP(strings.Split(u.Host, ":")[0])
		if err != nil {
			return &models.ErrMigrationNotAllowed{Host: u.Host, NotResolvedIP: true}
		}
		for _, addr := range addrList {
			if isIPPrivate(addr) || !addr.IsGlobalUnicast() {
				return &models.ErrMigrationNotAllowed{Host: u.Host, PrivateNet: addr.String()}
			}
		}
	}

	return nil
}

// MigrateRepository migrates a repository according to the given MigrateOptions
func MigrateRepository(ctx context.Context, doer *models.User, ownerName string, opts base.MigrateOptions) (*models.Repository, error) {
	err := isMigrateURLAllowed(opts.CloneAddr)
	if err != nil {
		return nil, err
	}
	downloader, err := newDownloader(ctx, ownerName, opts)
	if err != nil {
		return nil, err
	}

	var uploader = NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName)
	uploader.gitServiceType = opts.GitServiceType

	if err := migrateRepository(downloader, uploader, opts); err != nil {
		if err1 := uploader.Rollback(); err1 != nil {
			log.Error("rollback failed: %v", err1)
		}
		if err2 := models.CreateRepositoryNotice(fmt.Sprintf("Migrate repository from %s failed: %v", opts.OriginalURL, err)); err2 != nil {
			log.Error("create repository notice failed: %v", err2)
		}
		return nil, err
	}
	return uploader.repo, nil
}
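
// newDownloader creates a Downloader via the factory registered for opts.GitServiceType.
// If no factory matches, it falls back to a plain git downloader, which can only migrate
// the repository (and wiki) contents, so the other migration options are switched off.
// When Migrations.MaxAttempts is greater than one, the downloader is wrapped in a retry
// downloader.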
func newDownloader(ctx context.Context, ownerName string, opts base.MigrateOptions) (base.Downloader, error) {
	var (
		downloader base.Downloader
		err        error
	)

	for _, factory := range factories {
		if factory.GitServiceType() == opts.GitServiceType {
			downloader, err = factory.New(ctx, opts)
			if err != nil {
				return nil, err
			}
			break
		}
	}

	if downloader == nil {
		opts.Wiki = true
		opts.Milestones = false
		opts.Labels = false
		opts.Releases = false
		opts.Comments = false
		opts.Issues = false
		opts.PullRequests = false
		downloader = NewPlainGitDownloader(ownerName, opts.RepoName, opts.CloneAddr)
		log.Trace("Will migrate from git: %s", opts.OriginalURL)
	}

	if setting.Migrations.MaxAttempts > 1 {
		downloader = base.NewRetryDownloader(ctx, downloader, setting.Migrations.MaxAttempts, setting.Migrations.RetryBackoff)
	}
	return downloader, nil
}

// migrateRepository will download information and then upload it to Uploader; this is a
// simple process for a small repository. For a big repository, it would be better to save
// all the data to disk before uploading.
func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions) error {
	repo, err := downloader.GetRepoInfo()
	if err != nil {
		return err
	}
	repo.IsPrivate = opts.Private
	repo.IsMirror = opts.Mirror
	if opts.Description != "" {
		repo.Description = opts.Description
	}
	log.Trace("migrating git data")
	if err := uploader.CreateRepo(repo, opts); err != nil {
		return err
	}
	defer uploader.Close()

	log.Trace("migrating topics")
	topics, err := downloader.GetTopics()
	if err != nil {
		return err
	}
	if len(topics) > 0 {
		if err := uploader.CreateTopics(topics...); err != nil {
			return err
		}
	}
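
	// Milestones, labels and releases below are inserted in batches of at most
	// the uploader's MaxBatchInsertSize for the respective type.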
	if opts.Milestones {
		log.Trace("migrating milestones")
		milestones, err := downloader.GetMilestones()
		if err != nil {
			return err
		}

		msBatchSize := uploader.MaxBatchInsertSize("milestone")
		for len(milestones) > 0 {
			if len(milestones) < msBatchSize {
				msBatchSize = len(milestones)
			}

			if err := uploader.CreateMilestones(milestones[:msBatchSize]...); err != nil {
				return err
			}
			milestones = milestones[msBatchSize:]
		}
	}

	if opts.Labels {
		log.Trace("migrating labels")
		labels, err := downloader.GetLabels()
		if err != nil {
			return err
		}

		lbBatchSize := uploader.MaxBatchInsertSize("label")
		for len(labels) > 0 {
			if len(labels) < lbBatchSize {
				lbBatchSize = len(labels)
			}

			if err := uploader.CreateLabels(labels[:lbBatchSize]...); err != nil {
				return err
			}
			labels = labels[lbBatchSize:]
		}
	}

	if opts.Releases {
		log.Trace("migrating releases")
		releases, err := downloader.GetReleases()
		if err != nil {
			return err
		}

		relBatchSize := uploader.MaxBatchInsertSize("release")
		for len(releases) > 0 {
			if len(releases) < relBatchSize {
				relBatchSize = len(releases)
			}

			if err := uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
				return err
			}
			releases = releases[relBatchSize:]
		}

		// Once all releases (if any) are inserted, sync any remaining non-release tags
		if err := uploader.SyncTags(); err != nil {
			return err
		}
	}

	var (
		commentBatchSize = uploader.MaxBatchInsertSize("comment")
		reviewBatchSize  = uploader.MaxBatchInsertSize("review")
	)
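
	// Issues are fetched page by page until the downloader reports the last page;
	// their comments are buffered and flushed whenever a full comment batch is collected.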
	if opts.Issues {
		log.Trace("migrating issues and comments")
		var issueBatchSize = uploader.MaxBatchInsertSize("issue")

		for i := 1; ; i++ {
			issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
			if err != nil {
				return err
			}

			if err := uploader.CreateIssues(issues...); err != nil {
				return err
			}

			if opts.Comments {
				var allComments = make([]*base.Comment, 0, commentBatchSize)
				for _, issue := range issues {
					log.Trace("migrating issue %d's comments", issue.Number)
					comments, err := downloader.GetComments(issue.Number)
					if err != nil {
						return err
					}

					allComments = append(allComments, comments...)

					if len(allComments) >= commentBatchSize {
						if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
							return err
						}

						allComments = allComments[commentBatchSize:]
					}
				}

				if len(allComments) > 0 {
					if err := uploader.CreateComments(allComments...); err != nil {
						return err
					}
				}
			}

			if isEnd {
				break
			}
		}
	}
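
	// Pull requests follow the same pattern as issues: fetched page by page, with
	// their comments and reviews buffered and uploaded in batches.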
	if opts.PullRequests {
		log.Trace("migrating pull requests and comments")
		var prBatchSize = uploader.MaxBatchInsertSize("pullrequest")
		for i := 1; ; i++ {
			prs, isEnd, err := downloader.GetPullRequests(i, prBatchSize)
			if err != nil {
				return err
			}

			if err := uploader.CreatePullRequests(prs...); err != nil {
				return err
			}

			if opts.Comments {
				// plain comments
				var allComments = make([]*base.Comment, 0, commentBatchSize)
				for _, pr := range prs {
					log.Trace("migrating pull request %d's comments", pr.Number)
					comments, err := downloader.GetComments(pr.Number)
					if err != nil {
						return err
					}

					allComments = append(allComments, comments...)

					if len(allComments) >= commentBatchSize {
						if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
							return err
						}
						allComments = allComments[commentBatchSize:]
					}
				}
				if len(allComments) > 0 {
					if err := uploader.CreateComments(allComments...); err != nil {
						return err
					}
				}

				// migrate reviews
				var allReviews = make([]*base.Review, 0, reviewBatchSize)
				for _, pr := range prs {
					number := pr.Number

					// on GitLab migrations the pull request number can change,
					// so fetch reviews by the original number
					if pr.OriginalNumber > 0 {
						number = pr.OriginalNumber
					}

					reviews, err := downloader.GetReviews(number)
					if err != nil {
						return err
					}
					if pr.OriginalNumber > 0 {
						for i := range reviews {
							reviews[i].IssueIndex = pr.Number
						}
					}

					allReviews = append(allReviews, reviews...)

					if len(allReviews) >= reviewBatchSize {
						if err := uploader.CreateReviews(allReviews[:reviewBatchSize]...); err != nil {
							return err
						}
						allReviews = allReviews[reviewBatchSize:]
					}
				}
				if len(allReviews) > 0 {
					if err := uploader.CreateReviews(allReviews...); err != nil {
						return err
					}
				}
			}

			if isEnd {
				break
			}
		}
	}

	return uploader.Finish()
}

// Init migrations service
func Init() error {
	var err error
	allowList, err = matchlist.NewMatchlist(setting.Migrations.AllowedDomains...)
	if err != nil {
		return fmt.Errorf("init migration allowList domains failed: %v", err)
	}

	blockList, err = matchlist.NewMatchlist(setting.Migrations.BlockedDomains...)
	if err != nil {
		return fmt.Errorf("init migration blockList domains failed: %v", err)
	}

	return nil
}

// isIPPrivate reports whether ip is a private address, according to
// RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses).
// from https://github.com/golang/go/pull/42793
// TODO remove if https://github.com/golang/go/issues/29146 got resolved
func isIPPrivate(ip net.IP) bool {
	if ip4 := ip.To4(); ip4 != nil {
		return ip4[0] == 10 ||
			(ip4[0] == 172 && ip4[1]&0xf0 == 16) ||
			(ip4[0] == 192 && ip4[1] == 168)
	}
	return len(ip) == net.IPv6len && ip[0]&0xfe == 0xfc
}