2020-01-12 13:11:17 +01:00
|
|
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
2022-11-27 19:20:29 +01:00
|
|
|
// SPDX-License-Identifier: MIT
|
2020-01-12 13:11:17 +01:00
|
|
|
|
|
|
|
package repository
|
|
|
|
|
|
|
|
import (
|
2021-09-23 17:45:36 +02:00
|
|
|
"context"
|
2020-01-12 13:11:17 +01:00
|
|
|
"fmt"
|
2022-06-06 10:01:49 +02:00
|
|
|
"os"
|
|
|
|
"path"
|
2023-01-13 19:54:02 +01:00
|
|
|
"path/filepath"
|
2020-01-12 13:11:17 +01:00
|
|
|
"strings"
|
|
|
|
|
|
|
|
"code.gitea.io/gitea/models"
|
2022-08-25 04:31:57 +02:00
|
|
|
activities_model "code.gitea.io/gitea/models/activities"
|
2021-09-19 13:49:59 +02:00
|
|
|
"code.gitea.io/gitea/models/db"
|
2022-06-12 17:51:54 +02:00
|
|
|
git_model "code.gitea.io/gitea/models/git"
|
2022-08-25 04:31:57 +02:00
|
|
|
"code.gitea.io/gitea/models/organization"
|
|
|
|
"code.gitea.io/gitea/models/perm"
|
2022-06-06 10:01:49 +02:00
|
|
|
access_model "code.gitea.io/gitea/models/perm/access"
|
2021-12-10 02:27:50 +01:00
|
|
|
repo_model "code.gitea.io/gitea/models/repo"
|
2022-08-25 04:31:57 +02:00
|
|
|
"code.gitea.io/gitea/models/unit"
|
2021-11-24 10:49:20 +01:00
|
|
|
user_model "code.gitea.io/gitea/models/user"
|
2022-08-25 04:31:57 +02:00
|
|
|
"code.gitea.io/gitea/models/webhook"
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
|
2020-01-12 13:11:17 +01:00
|
|
|
"code.gitea.io/gitea/modules/log"
|
|
|
|
"code.gitea.io/gitea/modules/setting"
|
2022-06-06 10:01:49 +02:00
|
|
|
api "code.gitea.io/gitea/modules/structs"
|
2020-08-11 22:05:34 +02:00
|
|
|
"code.gitea.io/gitea/modules/util"
|
2020-01-12 13:11:17 +01:00
|
|
|
)
|
|
|
|
|
2022-08-25 04:31:57 +02:00
|
|
|
// CreateRepositoryByExample creates a repository for the user/organization.
|
2023-02-04 07:48:38 +01:00
|
|
|
func CreateRepositoryByExample(ctx context.Context, doer, u *user_model.User, repo *repo_model.Repository, overwriteOrAdopt, isFork bool) (err error) {
|
2022-08-25 04:31:57 +02:00
|
|
|
if err = repo_model.IsUsableRepoName(repo.Name); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-04-28 20:14:26 +02:00
|
|
|
has, err := repo_model.IsRepositoryModelExist(ctx, u, repo.Name)
|
2022-08-25 04:31:57 +02:00
|
|
|
if err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("IsRepositoryExist: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
} else if has {
|
|
|
|
return repo_model.ErrRepoAlreadyExist{
|
|
|
|
Uname: u.Name,
|
|
|
|
Name: repo.Name,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
repoPath := repo_model.RepoPath(u.Name, repo.Name)
|
|
|
|
isExist, err := util.IsExist(repoPath)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to check if %s exists. Error: %v", repoPath, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !overwriteOrAdopt && isExist {
|
|
|
|
log.Error("Files already exist in %s and we are not going to adopt or delete.", repoPath)
|
|
|
|
return repo_model.ErrRepoFilesAlreadyExist{
|
|
|
|
Uname: u.Name,
|
|
|
|
Name: repo.Name,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = db.Insert(ctx, repo); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err = repo_model.DeleteRedirect(ctx, u.ID, repo.Name); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// insert units for repo
|
2023-02-04 07:48:38 +01:00
|
|
|
defaultUnits := unit.DefaultRepoUnits
|
|
|
|
if isFork {
|
|
|
|
defaultUnits = unit.DefaultForkRepoUnits
|
|
|
|
}
|
|
|
|
units := make([]repo_model.RepoUnit, 0, len(defaultUnits))
|
|
|
|
for _, tp := range defaultUnits {
|
2022-08-25 04:31:57 +02:00
|
|
|
if tp == unit.TypeIssues {
|
|
|
|
units = append(units, repo_model.RepoUnit{
|
|
|
|
RepoID: repo.ID,
|
|
|
|
Type: tp,
|
|
|
|
Config: &repo_model.IssuesConfig{
|
|
|
|
EnableTimetracker: setting.Service.DefaultEnableTimetracking,
|
|
|
|
AllowOnlyContributorsToTrackTime: setting.Service.DefaultAllowOnlyContributorsToTrackTime,
|
|
|
|
EnableDependencies: setting.Service.DefaultEnableDependencies,
|
|
|
|
},
|
|
|
|
})
|
|
|
|
} else if tp == unit.TypePullRequests {
|
|
|
|
units = append(units, repo_model.RepoUnit{
|
|
|
|
RepoID: repo.ID,
|
|
|
|
Type: tp,
|
2024-02-12 23:37:23 +01:00
|
|
|
Config: &repo_model.PullRequestsConfig{
|
|
|
|
AllowMerge: true, AllowRebase: true, AllowRebaseMerge: true, AllowSquash: true, AllowFastForwardOnly: true,
|
|
|
|
DefaultMergeStyle: repo_model.MergeStyle(setting.Repository.PullRequest.DefaultMergeStyle),
|
|
|
|
AllowRebaseUpdate: true,
|
|
|
|
},
|
2022-08-25 04:31:57 +02:00
|
|
|
})
|
2024-03-04 03:56:52 +01:00
|
|
|
} else if tp == unit.TypeProjects {
|
|
|
|
units = append(units, repo_model.RepoUnit{
|
|
|
|
RepoID: repo.ID,
|
|
|
|
Type: tp,
|
|
|
|
Config: &repo_model.ProjectsConfig{ProjectsMode: repo_model.ProjectsModeAll},
|
|
|
|
})
|
2022-08-25 04:31:57 +02:00
|
|
|
} else {
|
|
|
|
units = append(units, repo_model.RepoUnit{
|
|
|
|
RepoID: repo.ID,
|
|
|
|
Type: tp,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = db.Insert(ctx, units); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remember visibility preference.
|
|
|
|
u.LastRepoVisibility = repo.IsPrivate
|
|
|
|
if err = user_model.UpdateUserCols(ctx, u, "last_repo_visibility"); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("UpdateUserCols: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if err = user_model.IncrUserRepoNum(ctx, u.ID); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("IncrUserRepoNum: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
u.NumRepos++
|
|
|
|
|
|
|
|
// Give access to all members in teams with access to all repositories.
|
|
|
|
if u.IsOrganization() {
|
|
|
|
teams, err := organization.FindOrgTeams(ctx, u.ID)
|
|
|
|
if err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("FindOrgTeams: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
for _, t := range teams {
|
|
|
|
if t.IncludesAllRepositories {
|
|
|
|
if err := models.AddRepository(ctx, t, repo); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("AddRepository: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if isAdmin, err := access_model.IsUserRepoAdmin(ctx, repo, doer); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("IsUserRepoAdmin: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
} else if !isAdmin {
|
|
|
|
// Make creator repo admin if it wasn't assigned automatically
|
2022-12-10 03:46:31 +01:00
|
|
|
if err = AddCollaborator(ctx, repo, doer); err != nil {
|
|
|
|
return fmt.Errorf("AddCollaborator: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
2022-12-10 03:46:31 +01:00
|
|
|
if err = repo_model.ChangeCollaborationAccessMode(ctx, repo, doer.ID, perm.AccessModeAdmin); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("ChangeCollaborationAccessModeCtx: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if err = access_model.RecalculateAccesses(ctx, repo); err != nil {
|
|
|
|
// Organization automatically called this in AddRepository method.
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("RecalculateAccesses: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if setting.Service.AutoWatchNewRepos {
|
2024-03-04 09:16:03 +01:00
|
|
|
if err = repo_model.WatchRepo(ctx, doer, repo, true); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("WatchRepo: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = webhook.CopyDefaultWebhooksToRepo(ctx, repo.ID); err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("CopyDefaultWebhooksToRepo: %w", err)
|
2022-08-25 04:31:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-13 19:54:02 +01:00
|
|
|
const notRegularFileMode = os.ModeSymlink | os.ModeNamedPipe | os.ModeSocket | os.ModeDevice | os.ModeCharDevice | os.ModeIrregular
|
|
|
|
|
|
|
|
// getDirectorySize returns the disk consumption for a given path
|
|
|
|
func getDirectorySize(path string) (int64, error) {
|
|
|
|
var size int64
|
2023-11-29 06:08:58 +01:00
|
|
|
err := filepath.WalkDir(path, func(_ string, entry os.DirEntry, err error) error {
|
|
|
|
if os.IsNotExist(err) { // ignore the error because some files (like temp/lock file) may be deleted during traversing.
|
|
|
|
return nil
|
|
|
|
} else if err != nil {
|
2023-01-13 19:54:02 +01:00
|
|
|
return err
|
|
|
|
}
|
2023-11-29 06:08:58 +01:00
|
|
|
if entry.IsDir() {
|
2023-01-13 19:54:02 +01:00
|
|
|
return nil
|
|
|
|
}
|
2023-11-29 06:08:58 +01:00
|
|
|
info, err := entry.Info()
|
|
|
|
if os.IsNotExist(err) { // ignore the error as above
|
|
|
|
return nil
|
|
|
|
} else if err != nil {
|
2023-01-13 19:54:02 +01:00
|
|
|
return err
|
|
|
|
}
|
2023-11-29 06:08:58 +01:00
|
|
|
if (info.Mode() & notRegularFileMode) == 0 {
|
|
|
|
size += info.Size()
|
2023-01-13 19:54:02 +01:00
|
|
|
}
|
2023-11-29 06:08:58 +01:00
|
|
|
return nil
|
2023-01-13 19:54:02 +01:00
|
|
|
})
|
|
|
|
return size, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateRepoSize updates the repository size, calculating it using getDirectorySize
|
2022-06-06 10:01:49 +02:00
|
|
|
func UpdateRepoSize(ctx context.Context, repo *repo_model.Repository) error {
|
2023-01-13 19:54:02 +01:00
|
|
|
size, err := getDirectorySize(repo.RepoPath())
|
2022-06-06 10:01:49 +02:00
|
|
|
if err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("updateSize: %w", err)
|
2022-06-06 10:01:49 +02:00
|
|
|
}
|
|
|
|
|
2022-06-12 17:51:54 +02:00
|
|
|
lfsSize, err := git_model.GetRepoLFSSize(ctx, repo.ID)
|
2022-06-06 10:01:49 +02:00
|
|
|
if err != nil {
|
2022-10-24 21:29:17 +02:00
|
|
|
return fmt.Errorf("updateSize: GetLFSMetaObjects: %w", err)
|
2022-06-06 10:01:49 +02:00
|
|
|
}
|
|
|
|
|
2023-06-29 00:41:02 +02:00
|
|
|
return repo_model.UpdateRepoSize(ctx, repo.ID, size, lfsSize)
|
2022-06-06 10:01:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// CheckDaemonExportOK creates/removes git-daemon-export-ok for git-daemon...
|
|
|
|
func CheckDaemonExportOK(ctx context.Context, repo *repo_model.Repository) error {
|
2023-02-18 13:11:03 +01:00
|
|
|
if err := repo.LoadOwner(ctx); err != nil {
|
2022-06-06 10:01:49 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create/Remove git-daemon-export-ok for git-daemon...
|
|
|
|
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
|
|
|
|
|
|
|
isExist, err := util.IsExist(daemonExportFile)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to check if %s exists. Error: %v", daemonExportFile, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
isPublic := !repo.IsPrivate && repo.Owner.Visibility == api.VisibleTypePublic
|
|
|
|
if !isPublic && isExist {
|
|
|
|
if err = util.Remove(daemonExportFile); err != nil {
|
|
|
|
log.Error("Failed to remove %s: %v", daemonExportFile, err)
|
|
|
|
}
|
|
|
|
} else if isPublic && !isExist {
|
|
|
|
if f, err := os.Create(daemonExportFile); err != nil {
|
|
|
|
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
|
|
|
} else {
|
|
|
|
f.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateRepository updates a repository with db context
//
// It persists all columns of repo. When visibilityChanged is true it also
// cascades the consequences of a public/private flip: team access recalculation
// for org-owned repositories, marking existing actions private, clearing stars,
// the git-daemon-export-ok marker file, recursive visibility updates on forks,
// and an issue-indexer refresh.
func UpdateRepository(ctx context.Context, repo *repo_model.Repository, visibilityChanged bool) (err error) {
	// Keep the lookup column in sync with the display name.
	repo.LowerName = strings.ToLower(repo.Name)

	e := db.GetEngine(ctx)

	if _, err = e.ID(repo.ID).AllCols().Update(repo); err != nil {
		return fmt.Errorf("update: %w", err)
	}

	// Size recomputation failures are logged but deliberately non-fatal.
	if err = UpdateRepoSize(ctx, repo); err != nil {
		log.Error("Failed to update size for repository: %v", err)
	}

	if visibilityChanged {
		if err = repo.LoadOwner(ctx); err != nil {
			return fmt.Errorf("LoadOwner: %w", err)
		}
		if repo.Owner.IsOrganization() {
			// Organization repository need to recalculate access table when visibility is changed.
			if err = access_model.RecalculateTeamAccesses(ctx, repo, 0); err != nil {
				return fmt.Errorf("recalculateTeamAccesses: %w", err)
			}
		}

		// If repo has become private, we need to set its actions to private.
		if repo.IsPrivate {
			_, err = e.Where("repo_id = ?", repo.ID).Cols("is_private").Update(&activities_model.Action{
				IsPrivate: true,
			})
			if err != nil {
				return err
			}

			// A now-private repository also loses its stars.
			if err = repo_model.ClearRepoStars(ctx, repo.ID); err != nil {
				return err
			}
		}

		// Create/Remove git-daemon-export-ok for git-daemon...
		if err := CheckDaemonExportOK(ctx, repo); err != nil {
			return err
		}

		// Forks follow the effective visibility of their base repository, so
		// recurse into each fork with visibilityChanged=true.
		forkRepos, err := repo_model.GetRepositoriesByForkID(ctx, repo.ID)
		if err != nil {
			return fmt.Errorf("getRepositoriesByForkID: %w", err)
		}
		for i := range forkRepos {
			forkRepos[i].IsPrivate = repo.IsPrivate || repo.Owner.Visibility == api.VisibleTypePrivate
			if err = UpdateRepository(ctx, forkRepos[i], true); err != nil {
				return fmt.Errorf("updateRepository[%d]: %w", forkRepos[i].ID, err)
			}
		}

		// If visibility is changed, we need to update the issue indexer.
		// Since the data in the issue indexer have field to indicate if the repo is public or not.
		issue_indexer.UpdateRepoIndexer(ctx, repo.ID)
	}

	return nil
}
|