// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package repository

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/models/organization"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/lfs"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/migration"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/util"
)

/*
GitHub, GitLab, Gogs: *.wiki.git
BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}

// WikiRemoteURL returns an accessible repository URL for the wiki if one exists.
// Otherwise, it returns an empty string.
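//
// For example (illustrative only; the clone address below is hypothetical):
//
//	WikiRemoteURL(ctx, "https://example.com/owner/repo.git")
//
// probes "https://example.com/owner/repo.wiki.git" and
// "https://example.com/owner/repo.git/wiki" and returns the first URL that is
// actually accessible.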
func WikiRemoteURL(ctx context.Context, remote string) string {
	remote = strings.TrimSuffix(remote, ".git")
	for _, suffix := range commonWikiURLSuffixes {
		wikiURL := remote + suffix
		if git.IsRepoURLAccessible(ctx, wikiURL) {
			return wikiURL
		}
	}
	return ""
}

// MigrateRepositoryGitData starts migrating git-related data after the migrating repository record has been created.
func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
	repo *repo_model.Repository, opts migration.MigrateOptions,
	httpTransport *http.Transport,
) (*repo_model.Repository, error) {
	repoPath := repo_model.RepoPath(u.Name, opts.RepoName)

	if u.IsOrganization() {
		t, err := organization.OrgFromUser(u).GetOwnerTeam(ctx)
		if err != nil {
			return nil, err
		}
		repo.NumWatches = t.NumMembers
	} else {
		repo.NumWatches = 1
	}

	migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second

	var err error
	if err = util.RemoveAll(repoPath); err != nil {
		return repo, fmt.Errorf("Failed to remove %s: %w", repoPath, err)
	}

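	// Clone the source as a bare mirror so that all branches and tags are fetched
	// in one pass; the timeout comes from [git.timeout] MIGRATE in app.ini.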
	if err = git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{
		Mirror:        true,
		Quiet:         true,
		Timeout:       migrateTimeout,
		SkipTLSVerify: setting.Migrations.SkipTLSVerify,
	}); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return repo, fmt.Errorf("Clone timed out. Consider increasing [git.timeout] MIGRATE in app.ini. Underlying Error: %w", err)
		}
		return repo, fmt.Errorf("Clone: %w", err)
	}

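	// Write the commit-graph up front so that later history traversals on the
	// freshly migrated repository are cheaper.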
	if err := git.WriteCommitGraph(ctx, repoPath); err != nil {
		return repo, err
	}

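	// Optionally migrate the wiki: probe the common wiki URL suffixes and, if an
	// accessible wiki repository is found, mirror-clone it next to the main repository.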
	if opts.Wiki {
		wikiPath := repo_model.WikiPath(u.Name, opts.RepoName)
		wikiRemotePath := WikiRemoteURL(ctx, opts.CloneAddr)
		if len(wikiRemotePath) > 0 {
			if err := util.RemoveAll(wikiPath); err != nil {
				return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
			}

			if err := git.Clone(ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{
				Mirror:        true,
				Quiet:         true,
				Timeout:       migrateTimeout,
				Branch:        "master",
				SkipTLSVerify: setting.Migrations.SkipTLSVerify,
			}); err != nil {
				log.Warn("Clone wiki: %v", err)
				if err := util.RemoveAll(wikiPath); err != nil {
					return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
				}
			} else {
				if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
					return repo, err
				}
			}
		}
	}

	if repo.OwnerID == u.ID {
		repo.Owner = u
	}

	if err = CheckDaemonExportOK(ctx, repo); err != nil {
		return repo, fmt.Errorf("checkDaemonExportOK: %w", err)
	}

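	// "git update-server-info" refreshes the auxiliary info files (info/refs,
	// objects/info/packs) that are needed to serve the repository over the dumb HTTP protocol.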
	if stdout, _, err := git.NewCommand(ctx, "update-server-info").
		SetDescription(fmt.Sprintf("MigrateRepositoryGitData(git update-server-info): %s", repoPath)).
		RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
		log.Error("MigrateRepositoryGitData(git update-server-info) in %v: Stdout: %s\nError: %v", repo, stdout, err)
		return repo, fmt.Errorf("error in MigrateRepositoryGitData(git update-server-info): %w", err)
	}

	gitRepo, err := git.OpenRepository(ctx, repoPath)
	if err != nil {
		return repo, fmt.Errorf("OpenRepository: %w", err)
	}
	defer gitRepo.Close()

	repo.IsEmpty, err = gitRepo.IsEmpty()
	if err != nil {
		return repo, fmt.Errorf("git.IsEmpty: %w", err)
	}

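	// For non-empty repositories: pick a default branch from HEAD if none is set,
	// optionally synchronize tags into the release table, and optionally fetch missing LFS objects.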
	if !repo.IsEmpty {
		if len(repo.DefaultBranch) == 0 {
			// Try to get HEAD branch and set it as default branch.
			headBranch, err := gitRepo.GetHEADBranch()
			if err != nil {
				return repo, fmt.Errorf("GetHEADBranch: %w", err)
			}
			if headBranch != nil {
				repo.DefaultBranch = headBranch.Name
			}
		}

		if !opts.Releases {
			// note: this will greatly improve release (tag) sync
			// for pull-mirrors with many tags
			repo.IsMirror = opts.Mirror
			if err = SyncReleasesWithTags(repo, gitRepo); err != nil {
				log.Error("Failed to synchronize tags to releases for repository: %v", err)
			}
		}

		if opts.LFS {
			endpoint := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
			lfsClient := lfs.NewClient(endpoint, httpTransport)
			if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, lfsClient); err != nil {
				log.Error("Failed to store missing LFS objects for repository: %v", err)
			}
		}
	}

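	// The remaining bookkeeping (mirror record, repository flags, clean-up) runs in
	// a single database transaction that is committed at the end of this function.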
	ctx, committer, err := db.TxContext(db.DefaultContext)
	if err != nil {
		return nil, err
	}
	defer committer.Close()

	if opts.Mirror {
		mirrorModel := repo_model.Mirror{
			RepoID:         repo.ID,
			Interval:       setting.Mirror.DefaultInterval,
			EnablePrune:    true,
			NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
			LFS:            opts.LFS,
		}
		if opts.LFS {
			mirrorModel.LFSEndpoint = opts.LFSEndpoint
		}

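		// An explicit interval from the migration request overrides the default:
		// zero disables periodic updates, and anything below setting.Mirror.MinInterval is rejected.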
		if opts.MirrorInterval != "" {
			parsedInterval, err := time.ParseDuration(opts.MirrorInterval)
			if err != nil {
				log.Error("Failed to set Interval: %v", err)
				return repo, err
			}
			if parsedInterval == 0 {
				mirrorModel.Interval = 0
				mirrorModel.NextUpdateUnix = 0
			} else if parsedInterval < setting.Mirror.MinInterval {
				err := fmt.Errorf("interval %s is set below Minimum Interval of %s", parsedInterval, setting.Mirror.MinInterval)
				log.Error("Interval: %s is too frequent", opts.MirrorInterval)
				return repo, err
			} else {
				mirrorModel.Interval = parsedInterval
				mirrorModel.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(parsedInterval)
			}
		}

		if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil {
			return repo, fmt.Errorf("InsertOne: %w", err)
		}

		repo.IsMirror = true
		if err = UpdateRepository(ctx, repo, false); err != nil {
			return nil, err
		}

		// this is necessary to sync local tags from the remote
		configName := fmt.Sprintf("remote.%s.fetch", mirrorModel.GetRemoteName())
		if stdout, _, err := git.NewCommand(ctx, "config").
			AddOptionValues("--add", configName, `+refs/tags/*:refs/tags/*`).
			RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
			log.Error("MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*) in %v: Stdout: %s\nError: %v", repo, stdout, err)
			return repo, fmt.Errorf("error in MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*): %w", err)
		}
	} else {
		if err = UpdateRepoSize(ctx, repo); err != nil {
			log.Error("Failed to update size for repository: %v", err)
		}
		if repo, err = CleanUpMigrateInfo(ctx, repo); err != nil {
			return nil, err
		}
	}

	return repo, committer.Commit()
}

// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials.
func cleanUpMigrateGitConfig(ctx context.Context, repoPath string) error {
	cmd := git.NewCommand(ctx, "remote", "rm", "origin")
	_, stderr, err := cmd.RunStdString(&git.RunOpts{
		Dir: repoPath,
	})
	// tolerate the failure if the origin remote does not exist
	if err != nil && !strings.HasPrefix(stderr, "fatal: No such remote") {
		return err
	}
	return nil
}

// CleanUpMigrateInfo finishes migrating repository and/or wiki with things that don't need to be done for mirrors.
func CleanUpMigrateInfo(ctx context.Context, repo *repo_model.Repository) (*repo_model.Repository, error) {
	repoPath := repo.RepoPath()
	if err := createDelegateHooks(repoPath); err != nil {
		return repo, fmt.Errorf("createDelegateHooks: %w", err)
	}
	if repo.HasWiki() {
		if err := createDelegateHooks(repo.WikiPath()); err != nil {
			return repo, fmt.Errorf("createDelegateHooks.(wiki): %w", err)
		}
	}

	_, _, err := git.NewCommand(ctx, "remote", "rm", "origin").RunStdString(&git.RunOpts{Dir: repoPath})
	if err != nil && !strings.HasPrefix(err.Error(), "exit status 128 - fatal: No such remote ") {
		return repo, fmt.Errorf("CleanUpMigrateInfo: %w", err)
	}

	if repo.HasWiki() {
		if err := cleanUpMigrateGitConfig(ctx, repo.WikiPath()); err != nil {
			return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %w", err)
		}
	}

	return repo, UpdateRepository(ctx, repo, false)
}

// SyncReleasesWithTags synchronizes release table with repository tags
func SyncReleasesWithTags(repo *repo_model.Repository, gitRepo *git.Repository) error {
	log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)

	// optimized procedure for pull-mirrors which saves a lot of time (in
	// particular for repos with many tags).
	if repo.IsMirror {
		return pullMirrorReleaseSync(repo, gitRepo)
	}

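	// First pass: page through the existing releases, delete the ones whose tag no
	// longer exists (or points at a different commit), and remember the tags that are still valid.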
	existingRelTags := make(container.Set[string])
	opts := repo_model.FindReleasesOptions{
		IncludeDrafts: true,
		IncludeTags:   true,
		ListOptions:   db.ListOptions{PageSize: 50},
	}
	for page := 1; ; page++ {
		opts.Page = page
		rels, err := repo_model.GetReleasesByRepoID(gitRepo.Ctx, repo.ID, opts)
		if err != nil {
			return fmt.Errorf("unable to GetReleasesByRepoID in Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}
		if len(rels) == 0 {
			break
		}
		for _, rel := range rels {
			if rel.IsDraft {
				continue
			}
			commitID, err := gitRepo.GetTagCommitID(rel.TagName)
			if err != nil && !git.IsErrNotExist(err) {
				return fmt.Errorf("unable to GetTagCommitID for %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
			}
			if git.IsErrNotExist(err) || commitID != rel.Sha1 {
				if err := repo_model.PushUpdateDeleteTag(repo, rel.TagName); err != nil {
					return fmt.Errorf("unable to PushUpdateDeleteTag: %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
				}
			} else {
				existingRelTags.Add(strings.ToLower(rel.TagName))
			}
		}
	}

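	// Second pass: walk every tag ref in the git repository and create a release
	// entry for each tag that does not have one yet.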
	_, err := gitRepo.WalkReferences(git.ObjectTag, 0, 0, func(sha1, refname string) error {
		tagName := strings.TrimPrefix(refname, git.TagPrefix)
		if existingRelTags.Contains(strings.ToLower(tagName)) {
			return nil
		}

		if err := PushUpdateAddTag(db.DefaultContext, repo, gitRepo, tagName, sha1, refname); err != nil {
			return fmt.Errorf("unable to PushUpdateAddTag: %q to Repo[%d:%s/%s]: %w", tagName, repo.ID, repo.OwnerName, repo.Name, err)
		}

		return nil
	})
	return err
}

// PushUpdateAddTag must be called for any push actions to add a tag
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 14:37:34 +01:00
|
|
|
func PushUpdateAddTag(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, tagName, sha1, refname string) error {
	tag, err := gitRepo.GetTagWithID(sha1, tagName)
	if err != nil {
		return fmt.Errorf("unable to GetTag: %w", err)
	}
	commit, err := tag.Commit(gitRepo)
	if err != nil {
		return fmt.Errorf("unable to get tag Commit: %w", err)
	}

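	// Prefer the tagger of an annotated tag; for lightweight tags fall back to the
	// commit author, then the committer.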
	sig := tag.Tagger
	if sig == nil {
		sig = commit.Author
	}
	if sig == nil {
		sig = commit.Committer
	}

	var author *user_model.User
	createdAt := time.Unix(1, 0)

	if sig != nil {
		author, err = user_model.GetUserByEmail(ctx, sig.Email)
		if err != nil && !user_model.IsErrUserNotExist(err) {
			return fmt.Errorf("unable to GetUserByEmail for %q: %w", sig.Email, err)
		}
		createdAt = sig.When
	}

	commitsCount, err := commit.CommitsCount()
	if err != nil {
		return fmt.Errorf("unable to get CommitsCount: %w", err)
	}

	rel := repo_model.Release{
		RepoID:       repo.ID,
		TagName:      tagName,
		LowerTagName: strings.ToLower(tagName),
		Sha1:         commit.ID.String(),
		NumCommits:   commitsCount,
		CreatedUnix:  timeutil.TimeStamp(createdAt.Unix()),
		IsTag:        true,
	}
	if author != nil {
		rel.PublisherID = author.ID
	}

	return repo_model.SaveOrUpdateTag(repo, &rel)
}

// StoreMissingLfsObjectsInRepository downloads missing LFS objects
func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, lfsClient lfs.Client) error {
	contentStore := lfs.NewContentStore()

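	// SearchPointerBlobs scans the repository in the background and streams every
	// LFS pointer it finds over pointerChan; a scan error, if any, arrives on errChan.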
	pointerChan := make(chan lfs.PointerBlob)
	errChan := make(chan error, 1)
	go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)

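	// downloadObjects fetches one batch of pointers from the remote LFS server and,
	// for each object, records the meta object in the database and stores the content;
	// if storing the content fails, the meta object is removed again.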
	downloadObjects := func(pointers []lfs.Pointer) error {
		err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
			if objectError != nil {
				return objectError
			}

			defer content.Close()

			_, err := git_model.NewLFSMetaObject(ctx, &git_model.LFSMetaObject{Pointer: p, RepositoryID: repo.ID})
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, p, err)
				return err
			}

			if err := contentStore.Put(p, content); err != nil {
				log.Error("Repo[%-v]: Error storing content for LFS meta object %-v: %v", repo, p, err)
				if _, err2 := git_model.RemoveLFSMetaObjectByOid(ctx, repo.ID, p.Oid); err2 != nil {
					log.Error("Repo[%-v]: Error removing LFS meta object %-v: %v", repo, p, err2)
				}
				return err
			}
			return nil
		})
		if err != nil {
			select {
			case <-ctx.Done():
				return nil
			default:
			}
		}
		return err
	}

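	// Collect pointers that are not yet tracked by this repository and download them
	// in batches of lfsClient.BatchSize(); oversized objects (above LFS_MAX_FILE_SIZE) are skipped.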
	var batch []lfs.Pointer
	for pointerBlob := range pointerChan {
		meta, err := git_model.GetLFSMetaObjectByOid(ctx, repo.ID, pointerBlob.Oid)
		if err != nil && err != git_model.ErrLFSObjectNotExist {
			log.Error("Repo[%-v]: Error querying LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
			return err
		}
		if meta != nil {
			log.Trace("Repo[%-v]: Skipping already known LFS meta object %-v", repo, pointerBlob.Pointer)
			continue
		}

		log.Trace("Repo[%-v]: LFS object %-v not present in repository", repo, pointerBlob.Pointer)

		exist, err := contentStore.Exists(pointerBlob.Pointer)
		if err != nil {
			log.Error("Repo[%-v]: Error checking if LFS object %-v exists: %v", repo, pointerBlob.Pointer, err)
			return err
		}

		if exist {
			log.Trace("Repo[%-v]: LFS object %-v already present; creating meta object", repo, pointerBlob.Pointer)
			_, err := git_model.NewLFSMetaObject(ctx, &git_model.LFSMetaObject{Pointer: pointerBlob.Pointer, RepositoryID: repo.ID})
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
				return err
			}
		} else {
			if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
				log.Info("Repo[%-v]: LFS object %-v download denied because of LFS_MAX_FILE_SIZE=%d < size %d", repo, pointerBlob.Pointer, setting.LFS.MaxFileSize, pointerBlob.Size)
				continue
			}

			batch = append(batch, pointerBlob.Pointer)
			if len(batch) >= lfsClient.BatchSize() {
				if err := downloadObjects(batch); err != nil {
					return err
				}
				batch = nil
			}
		}
	}
	if len(batch) > 0 {
		if err := downloadObjects(batch); err != nil {
			return err
		}
	}

	err, has := <-errChan
	if has {
		log.Error("Repo[%-v]: Error enumerating LFS objects for repository: %v", repo, err)
		return err
	}

	return nil
}

// pullMirrorReleaseSync is a pull-mirror specific tag<->release table
// synchronization which overwrites all Releases from the repository tags. This
// can be relied on since a pull-mirror is always identical to its
// upstream. Hence, after each sync we want the pull-mirror release set to be
// identical to the upstream tag set. This is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
func pullMirrorReleaseSync(repo *repo_model.Repository, gitRepo *git.Repository) error {
	log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
	tags, numTags, err := gitRepo.GetTagInfos(0, 0)
	if err != nil {
		return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}
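	// Rebuild the release table atomically: inside one transaction, delete every
	// existing release for this repository and insert one tag-type release per upstream tag.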
	err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
		//
		// clear out existing releases
		//
		if _, err := db.DeleteByBean(ctx, &repo_model.Release{RepoID: repo.ID}); err != nil {
			return fmt.Errorf("unable to clear releases for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}
		//
		// make release set identical to upstream tags
		//
		for _, tag := range tags {
			release := repo_model.Release{
				RepoID:       repo.ID,
				TagName:      tag.Name,
				LowerTagName: strings.ToLower(tag.Name),
				Sha1:         tag.Object.String(),
				// NOTE: ignored, since NumCommits are unused
				// for pull-mirrors (only relevant when
				// displaying releases, IsTag: false)
				NumCommits:  -1,
				CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
				IsTag:       true,
			}
			if err := db.Insert(ctx, release); err != nil {
				return fmt.Errorf("unable to insert tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to rebuild release table for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}

	log.Trace("pullMirrorReleaseSync: done rebuilding %d releases", numTags)
	return nil
}