// Copyright 2019 The Gitea Authors. All rights reserved.
// Copyright 2024 The Forgejo Authors c/o Codeberg e.V.. All rights reserved.
// SPDX-License-Identifier: MIT

package repository

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/models/organization"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/gitrepo"
	"code.gitea.io/gitea/modules/lfs"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/migration"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/util"
)

/*
GitHub, GitLab, Gogs: *.wiki.git
BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}

// WikiRemoteURL returns an accessible repository URL for the wiki, if one exists.
// Otherwise, it returns an empty string.
func WikiRemoteURL(ctx context.Context, remote string) string {
	remote = strings.TrimSuffix(remote, ".git")
	for _, suffix := range commonWikiURLSuffixes {
		wikiURL := remote + suffix
		if git.IsRepoURLAccessible(ctx, wikiURL) {
			return wikiURL
		}
	}
	return ""
}
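
// A minimal usage sketch (hypothetical URL; any request context works): for a
// GitHub-style remote, the ".wiki.git" suffix is probed first, so
//
//	wikiURL := WikiRemoteURL(ctx, "https://github.com/owner/repo.git")
//
// returns "https://github.com/owner/repo.wiki.git" when that URL is
// accessible, and "" when neither common suffix responds.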

// MigrateRepositoryGitData starts migrating git-related data after the
// migrating repository record has been created.
func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
	repo *repo_model.Repository, opts migration.MigrateOptions,
	httpTransport *http.Transport,
) (*repo_model.Repository, error) {
	repoPath := repo_model.RepoPath(u.Name, opts.RepoName)

	if u.IsOrganization() {
		t, err := organization.OrgFromUser(u).GetOwnerTeam(ctx)
		if err != nil {
			return nil, err
		}
		repo.NumWatches = t.NumMembers
	} else {
		repo.NumWatches = 1
	}

	migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second

	var err error
	if err = util.RemoveAll(repoPath); err != nil {
		return repo, fmt.Errorf("Failed to remove %s: %w", repoPath, err)
	}

	if err = git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{
		Mirror:        true,
		Quiet:         true,
		Timeout:       migrateTimeout,
		SkipTLSVerify: setting.Migrations.SkipTLSVerify,
	}); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return repo, fmt.Errorf("Clone timed out. Consider increasing [git.timeout] MIGRATE in app.ini. Underlying Error: %w", err)
		}
		return repo, fmt.Errorf("Clone: %w", err)
	}

	if err := git.WriteCommitGraph(ctx, repoPath); err != nil {
		return repo, err
	}

	if opts.Wiki {
		wikiPath := repo_model.WikiPath(u.Name, opts.RepoName)
		wikiRemotePath := WikiRemoteURL(ctx, opts.CloneAddr)
		if len(wikiRemotePath) > 0 {
			if err := util.RemoveAll(wikiPath); err != nil {
				return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
			}

			if err := git.Clone(ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{
				Mirror:        true,
				Quiet:         true,
				Timeout:       migrateTimeout,
				SkipTLSVerify: setting.Migrations.SkipTLSVerify,
			}); err != nil {
				log.Warn("Clone wiki: %v", err)
				if err := util.RemoveAll(wikiPath); err != nil {
					return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
				}
			} else {
				// Figure out the branch of the wiki we just cloned. We assume
				// that the default branch is to be used, and we'll use the
				// same name as the source.
				gitRepo, err := git.OpenRepository(ctx, wikiPath)
				if err != nil {
					log.Warn("Failed to open wiki repository during migration: %v", err)
					if err := util.RemoveAll(wikiPath); err != nil {
						return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
					}
					return repo, err
				}
				defer gitRepo.Close()

				branch, err := gitRepo.GetDefaultBranch()
				if err != nil {
					log.Warn("Failed to get the default branch of a migrated wiki repo: %v", err)
					if err := util.RemoveAll(wikiPath); err != nil {
						return repo, fmt.Errorf("Failed to remove %s: %w", wikiPath, err)
					}

					return repo, err
				}
				// Keep the same wiki branch name the source repository used.
				repo.WikiBranch = branch

				if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
					return repo, err
				}
			}
		}
	}

	if repo.OwnerID == u.ID {
		repo.Owner = u
	}

	if err = CheckDaemonExportOK(ctx, repo); err != nil {
		return repo, fmt.Errorf("checkDaemonExportOK: %w", err)
	}

	if stdout, _, err := git.NewCommand(ctx, "update-server-info").
		SetDescription(fmt.Sprintf("MigrateRepositoryGitData(git update-server-info): %s", repoPath)).
		RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
		log.Error("MigrateRepositoryGitData(git update-server-info) in %v: Stdout: %s\nError: %v", repo, stdout, err)
		return repo, fmt.Errorf("error in MigrateRepositoryGitData(git update-server-info): %w", err)
	}

	gitRepo, err := git.OpenRepository(ctx, repoPath)
	if err != nil {
		return repo, fmt.Errorf("OpenRepository: %w", err)
	}
	defer gitRepo.Close()

	repo.IsEmpty, err = gitRepo.IsEmpty()
	if err != nil {
		return repo, fmt.Errorf("git.IsEmpty: %w", err)
	}

	if !repo.IsEmpty {
		if len(repo.DefaultBranch) == 0 {
			// Try to get HEAD branch and set it as default branch.
			headBranch, err := gitRepo.GetHEADBranch()
			if err != nil {
				return repo, fmt.Errorf("GetHEADBranch: %w", err)
			}
			if headBranch != nil {
				repo.DefaultBranch = headBranch.Name
			}
		}

		if _, err := SyncRepoBranchesWithRepo(ctx, repo, gitRepo, u.ID); err != nil {
			return repo, fmt.Errorf("SyncRepoBranchesWithRepo: %w", err)
		}

		if !opts.Releases {
			// note: this will greatly improve release (tag) sync
			// for pull-mirrors with many tags
			repo.IsMirror = opts.Mirror
			if err = SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
				log.Error("Failed to synchronize tags to releases for repository: %v", err)
			}
		}

		if opts.LFS {
			endpoint := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
			lfsClient := lfs.NewClient(endpoint, httpTransport)
			if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, lfsClient); err != nil {
				log.Error("Failed to store missing LFS objects for repository: %v", err)
			}
		}
	}

	ctx, committer, err := db.TxContext(ctx)
	if err != nil {
		return nil, err
	}
	defer committer.Close()

	if opts.Mirror {
		remoteAddress, err := util.SanitizeURL(opts.CloneAddr)
		if err != nil {
			return repo, err
		}
		mirrorModel := repo_model.Mirror{
			RepoID:         repo.ID,
			Interval:       setting.Mirror.DefaultInterval,
			EnablePrune:    true,
			NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
			LFS:            opts.LFS,
			RemoteAddress:  remoteAddress,
		}
		if opts.LFS {
			mirrorModel.LFSEndpoint = opts.LFSEndpoint
		}

		if opts.MirrorInterval != "" {
			parsedInterval, err := time.ParseDuration(opts.MirrorInterval)
			if err != nil {
				log.Error("Failed to set Interval: %v", err)
				return repo, err
			}
			if parsedInterval == 0 {
				mirrorModel.Interval = 0
				mirrorModel.NextUpdateUnix = 0
			} else if parsedInterval < setting.Mirror.MinInterval {
				err := fmt.Errorf("interval %s is set below Minimum Interval of %s", parsedInterval, setting.Mirror.MinInterval)
				log.Error("Interval: %s is too frequent", opts.MirrorInterval)
				return repo, err
			} else {
				mirrorModel.Interval = parsedInterval
				mirrorModel.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(parsedInterval)
			}
		}

		if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil {
			return repo, fmt.Errorf("InsertMirror: %w", err)
		}

		repo.IsMirror = true
		if err = UpdateRepository(ctx, repo, false); err != nil {
			return nil, err
		}

		// this is necessary to sync local tags from the remote
		configName := fmt.Sprintf("remote.%s.fetch", mirrorModel.GetRemoteName())
		if stdout, _, err := git.NewCommand(ctx, "config").
			AddOptionValues("--add", configName, `+refs/tags/*:refs/tags/*`).
			RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
			log.Error("MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*) in %v: Stdout: %s\nError: %v", repo, stdout, err)
			return repo, fmt.Errorf("error in MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*): %w", err)
		}
	} else {
		if err = UpdateRepoSize(ctx, repo); err != nil {
			log.Error("Failed to update size for repository: %v", err)
		}
		if repo, err = CleanUpMigrateInfo(ctx, repo); err != nil {
			return nil, err
		}
	}

	return repo, committer.Commit()
}
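
// A minimal caller sketch (hypothetical; "doer", "repo", and the option
// values are assumptions, not part of this file), only to illustrate the
// parameters:
//
//	opts := migration.MigrateOptions{
//		CloneAddr: "https://example.com/owner/repo.git",
//		RepoName:  "repo",
//		Mirror:    true,
//		Wiki:      true,
//		LFS:       true,
//	}
//	repo, err := MigrateRepositoryGitData(ctx, doer, repo, opts, httpTransport)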

// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials.
func cleanUpMigrateGitConfig(ctx context.Context, repoPath string) error {
	cmd := git.NewCommand(ctx, "remote", "rm", "origin")
	// ignore the resulting error if the origin remote does not exist
	_, stderr, err := cmd.RunStdString(&git.RunOpts{
		Dir: repoPath,
	})
	if err != nil && !strings.HasPrefix(stderr, "fatal: No such remote") {
		return err
	}
	return nil
}
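
// The helper above is roughly equivalent to running, inside repoPath:
//
//	git remote rm origin
//
// with a "fatal: No such remote" failure treated as success.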

// CleanUpMigrateInfo finishes the migration of a repository and/or its wiki
// with steps that don't need to be done for mirrors.
func CleanUpMigrateInfo(ctx context.Context, repo *repo_model.Repository) (*repo_model.Repository, error) {
	repoPath := repo.RepoPath()
	if err := CreateDelegateHooks(repoPath); err != nil {
		return repo, fmt.Errorf("createDelegateHooks: %w", err)
	}
	if repo.HasWiki() {
		if err := CreateDelegateHooks(repo.WikiPath()); err != nil {
			return repo, fmt.Errorf("createDelegateHooks.(wiki): %w", err)
		}
	}

	_, _, err := git.NewCommand(ctx, "remote", "rm", "origin").RunStdString(&git.RunOpts{Dir: repoPath})
	if err != nil && !strings.HasPrefix(err.Error(), "exit status 128 - fatal: No such remote ") {
		return repo, fmt.Errorf("CleanUpMigrateInfo: %w", err)
	}

	if repo.HasWiki() {
		if err := cleanUpMigrateGitConfig(ctx, repo.WikiPath()); err != nil {
			return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %w", err)
		}
	}

	return repo, UpdateRepository(ctx, repo, false)
}
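
// CreateDelegateHooks, defined elsewhere in this package, installs the
// server-side hook scripts; a freshly migrated repository needs them because
// the clone copied only git data, never the source server's hooks.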

// SyncRepoTags synchronizes the releases table with the repository's tags
func SyncRepoTags(ctx context.Context, repoID int64) error {
	repo, err := repo_model.GetRepositoryByID(ctx, repoID)
	if err != nil {
		return err
	}

	gitRepo, err := gitrepo.OpenRepository(ctx, repo)
	if err != nil {
		return err
	}
	defer gitRepo.Close()

	return SyncReleasesWithTags(ctx, repo, gitRepo)
}
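
// Note the split in how repositories are opened (see the #28937 refactor):
// repositories backed by a database record go through
// gitrepo.OpenRepository(ctx, repo), while plain on-disk paths (such as the
// freshly cloned wiki in MigrateRepositoryGitData) still use
// git.OpenRepository(ctx, diskPath).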

// SyncReleasesWithTags synchronizes the release table with the repository's tags
func SyncReleasesWithTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
	log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)

	// optimized procedure for pull-mirrors which saves a lot of time (in
	// particular for repos with many tags).
	if repo.IsMirror {
		return pullMirrorReleaseSync(ctx, repo, gitRepo)
	}

	existingRelTags := make(container.Set[string])
	opts := repo_model.FindReleasesOptions{
		IncludeDrafts: true,
		IncludeTags:   true,
		ListOptions:   db.ListOptions{PageSize: 50},
		RepoID:        repo.ID,
	}
	for page := 1; ; page++ {
		opts.Page = page
		rels, err := db.Find[repo_model.Release](ctx, opts)
		if err != nil {
			return fmt.Errorf("unable to GetReleasesByRepoID in Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}
		if len(rels) == 0 {
			break
		}
		for _, rel := range rels {
			if rel.IsDraft {
				continue
			}
			commitID, err := gitRepo.GetTagCommitID(rel.TagName)
			if err != nil && !git.IsErrNotExist(err) {
				return fmt.Errorf("unable to GetTagCommitID for %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
			}
			if git.IsErrNotExist(err) || commitID != rel.Sha1 {
				if err := repo_model.PushUpdateDeleteTag(ctx, repo, rel.TagName); err != nil {
					return fmt.Errorf("unable to PushUpdateDeleteTag: %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
				}
			} else {
				existingRelTags.Add(strings.ToLower(rel.TagName))
			}
		}
	}

	_, err := gitRepo.WalkReferences(git.ObjectTag, 0, 0, func(sha1, refname string) error {
		tagName := strings.TrimPrefix(refname, git.TagPrefix)
		if existingRelTags.Contains(strings.ToLower(tagName)) {
			return nil
		}

		if err := PushUpdateAddTag(ctx, repo, gitRepo, tagName, sha1, refname); err != nil {
			return fmt.Errorf("unable to PushUpdateAddTag: %q to Repo[%d:%s/%s]: %w", tagName, repo.ID, repo.OwnerName, repo.Name, err)
		}

		return nil
	})
	return err
}
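
// The reconciliation above runs in two passes: the paged loop first walks the
// releases already in the database, removing (via PushUpdateDeleteTag) those
// whose tag vanished or now points at a different commit, and remembering the
// ones that still match; the WalkReferences pass then adds a release for
// every git tag not in that remembered set. A retagged v1.1 is therefore
// dropped in pass one and re-added with its new sha in pass two.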

// PushUpdateAddTag must be called for any push actions that add a tag
func PushUpdateAddTag(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, tagName, sha1, refname string) error {
	tag, err := gitRepo.GetTagWithID(sha1, tagName)
	if err != nil {
		return fmt.Errorf("unable to GetTag: %w", err)
	}
	commit, err := tag.Commit(gitRepo)
	if err != nil {
		return fmt.Errorf("unable to get tag Commit: %w", err)
	}

	sig := tag.Tagger
	if sig == nil {
		sig = commit.Author
	}
	if sig == nil {
		sig = commit.Committer
	}

	var author *user_model.User
	createdAt := time.Unix(1, 0)

	if sig != nil {
		author, err = user_model.GetUserByEmail(ctx, sig.Email)
		if err != nil && !user_model.IsErrUserNotExist(err) {
			return fmt.Errorf("unable to GetUserByEmail for %q: %w", sig.Email, err)
		}
		createdAt = sig.When
	}

	commitsCount, err := commit.CommitsCount()
	if err != nil {
		return fmt.Errorf("unable to get CommitsCount: %w", err)
	}

	rel := repo_model.Release{
		RepoID:       repo.ID,
		TagName:      tagName,
		LowerTagName: strings.ToLower(tagName),
		Sha1:         commit.ID.String(),
		NumCommits:   commitsCount,
		CreatedUnix:  timeutil.TimeStamp(createdAt.Unix()),
		IsTag:        true,
	}
	if author != nil {
		rel.PublisherID = author.ID
	}

	return repo_model.SaveOrUpdateTag(ctx, repo, &rel)
}
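
// Annotated tags carry their own Tagger signature; lightweight tags do not,
// which is why sig falls back to the commit's Author and then its Committer,
// and createdAt stays at the time.Unix(1, 0) placeholder when no signature is
// available at all.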

// StoreMissingLfsObjectsInRepository downloads missing LFS objects
func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, lfsClient lfs.Client) error {
	contentStore := lfs.NewContentStore()

	pointerChan := make(chan lfs.PointerBlob)
	errChan := make(chan error, 1)
	go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)

	downloadObjects := func(pointers []lfs.Pointer) error {
		err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
			if objectError != nil {
				return objectError
			}

			defer content.Close()

			_, err := git_model.NewLFSMetaObject(ctx, repo.ID, p)
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, p, err)
				return err
			}

			if err := contentStore.Put(p, content); err != nil {
				log.Error("Repo[%-v]: Error storing content for LFS meta object %-v: %v", repo, p, err)
				if _, err2 := git_model.RemoveLFSMetaObjectByOid(ctx, repo.ID, p.Oid); err2 != nil {
					log.Error("Repo[%-v]: Error removing LFS meta object %-v: %v", repo, p, err2)
				}
				return err
			}
			return nil
		})
		if err != nil {
			select {
			case <-ctx.Done():
				return nil
			default:
			}
		}
		return err
	}

	var batch []lfs.Pointer
	for pointerBlob := range pointerChan {
		meta, err := git_model.GetLFSMetaObjectByOid(ctx, repo.ID, pointerBlob.Oid)
		if err != nil && err != git_model.ErrLFSObjectNotExist {
			log.Error("Repo[%-v]: Error querying LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
			return err
		}
		if meta != nil {
			log.Trace("Repo[%-v]: Skipping LFS meta object %-v, already present", repo, pointerBlob.Pointer)
			continue
		}

		log.Trace("Repo[%-v]: LFS object %-v not present in repository", repo, pointerBlob.Pointer)

		exist, err := contentStore.Exists(pointerBlob.Pointer)
		if err != nil {
			log.Error("Repo[%-v]: Error checking if LFS object %-v exists: %v", repo, pointerBlob.Pointer, err)
			return err
		}

		if exist {
			log.Trace("Repo[%-v]: LFS object %-v already present; creating meta object", repo, pointerBlob.Pointer)
			_, err := git_model.NewLFSMetaObject(ctx, repo.ID, pointerBlob.Pointer)
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
				return err
			}
		} else {
			if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
				log.Info("Repo[%-v]: LFS object %-v download denied because of LFS_MAX_FILE_SIZE=%d < size %d", repo, pointerBlob.Pointer, setting.LFS.MaxFileSize, pointerBlob.Size)
				continue
			}

			batch = append(batch, pointerBlob.Pointer)
			if len(batch) >= lfsClient.BatchSize() {
				if err := downloadObjects(batch); err != nil {
					return err
				}
				batch = nil
			}
		}
	}
	if len(batch) > 0 {
		if err := downloadObjects(batch); err != nil {
			return err
		}
	}

	err, has := <-errChan
	if has {
		log.Error("Repo[%-v]: Error enumerating LFS objects for repository: %v", repo, err)
		return err
	}

	return nil
}
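
// StoreMissingLfsObjectsInRepository is a producer/consumer pipeline:
// SearchPointerBlobs streams every LFS pointer found in the git history over
// pointerChan, and the consumer loop batches unknown pointers up to
// lfsClient.BatchSize() per Download call. With a batch size of 20, for
// example, 45 missing objects are fetched in three requests (20 + 20 + 5).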

// shortRelease is a memory-saving subset of repo_model.Release; it can stand
// in for the full struct wherever only the tag-sync fields are needed.
type shortRelease struct {
	ID      int64
	TagName string
	Sha1    string
	IsTag   bool
}

func (shortRelease) TableName() string {
	return "release"
}
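
// TableName follows the xorm convention for mapping a struct onto an existing
// table: without it, shortRelease would be mapped to a "short_release" table
// rather than the "release" table it is meant to read from.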

// pullMirrorReleaseSync is a pull-mirror specific tag<->release table
// synchronization which overwrites all Releases from the repository tags. This
// can be relied on since a pull-mirror is always identical to its
// upstream. Hence, after each sync we want the pull-mirror release set to be
// identical to the upstream tag set. This is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
	log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
	tags, numTags, err := gitRepo.GetTagInfos(0, 0)
	if err != nil {
		return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}
	err = db.WithTx(ctx, func(ctx context.Context) error {
		dbReleases, err := db.Find[shortRelease](ctx, repo_model.FindReleasesOptions{
			RepoID:        repo.ID,
			IncludeDrafts: true,
			IncludeTags:   true,
		})
		if err != nil {
			return fmt.Errorf("unable to FindReleases in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}

		inserts, deletes, updates := calcSync(tags, dbReleases)
		//
		// make release set identical to upstream tags
		//
		for _, tag := range inserts {
			release := repo_model.Release{
				RepoID:       repo.ID,
				TagName:      tag.Name,
				LowerTagName: strings.ToLower(tag.Name),
				Sha1:         tag.Object.String(),
				// NOTE: ignored, since NumCommits are unused
				// for pull-mirrors (only relevant when
				// displaying releases, IsTag: false)
				NumCommits:  -1,
				CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
				IsTag:       true,
			}
			if err := db.Insert(ctx, release); err != nil {
				return fmt.Errorf("unable to insert tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		// only delete releases that represent plain tags
		if len(deletes) > 0 {
			if _, err := db.GetEngine(ctx).Where("repo_id=?", repo.ID).
				In("id", deletes).
				Delete(&repo_model.Release{}); err != nil {
				return fmt.Errorf("unable to delete tags for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		for _, tag := range updates {
			if _, err := db.GetEngine(ctx).Where("repo_id = ? AND lower_tag_name = ?", repo.ID, strings.ToLower(tag.Name)).
				Cols("sha1").
				Update(&repo_model.Release{
					Sha1: tag.Object.String(),
				}); err != nil {
				return fmt.Errorf("unable to update tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to rebuild release table for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}

	log.Trace("pullMirrorReleaseSync: done rebuilding %d releases", numTags)
	return nil
}

// calcSync computes the three-way diff between the tags present in git
// (destTags) and the releases recorded in the database (dbTags): tags to
// insert, release IDs to delete, and tags whose commit changed and need a
// sha1 update.
func calcSync(destTags []*git.Tag, dbTags []*shortRelease) ([]*git.Tag, []int64, []*git.Tag) {
	destTagMap := make(map[string]*git.Tag)
	for _, tag := range destTags {
		destTagMap[tag.Name] = tag
	}
	dbTagMap := make(map[string]*shortRelease)
	for _, rel := range dbTags {
		dbTagMap[rel.TagName] = rel
	}

	inserted := make([]*git.Tag, 0, 10)
	updated := make([]*git.Tag, 0, 10)
	for _, tag := range destTags {
		rel := dbTagMap[tag.Name]
		if rel == nil {
			inserted = append(inserted, tag)
		} else if rel.Sha1 != tag.Object.String() {
			updated = append(updated, tag)
		}
	}
	deleted := make([]int64, 0, 10)
	for _, tag := range dbTags {
		if destTagMap[tag.TagName] == nil && tag.IsTag {
			deleted = append(deleted, tag.ID)
		}
	}
	return inserted, deleted, updated
}
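
// A worked example of calcSync (hypothetical tags): with git tags
// {v1: sha-a, v2: sha-b} and DB releases {v1: sha-a, v2: sha-x, v3: sha-c},
// v1 matches and is left alone, v2 differs and lands in updates, the absent
// v3 lands in deletes (provided it is a plain tag release), and a git tag
// with no DB row at all would land in inserts.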