gitea/services/repository/files/upload.go
Mihir Joshi b8270240bf
Fix reverting a merge commit failing (#28794)
Fixes #22236

---
The error currently occurring while trying to revert a commit using the `read-tree -m` approach:
> 2022/12/26 16:04:43 ...rvices/pull/patch.go:240:AttemptThreeWayMerge() [E] [63a9c61a] Unable to run read-tree -m! Error: exit status 128 - fatal: this operation must be run in a work tree
> 	 - fatal: this operation must be run in a work tree

We need to clone a non-bare repository for `git read-tree -m` to work.

Commit bb371aee6e adds support for creating a non-bare clone of the temporary upload repository.
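
As an illustration only (the real helper lives in `services/repository/files` and drives git through `code.gitea.io/gitea/modules/git`, so the function name, `srcRepoPath`, and `tmpDir` below are invented for this sketch), a non-bare shared clone of the repository into a temporary directory looks roughly like this:

```go
package sketch

import (
	"fmt"
	"os/exec"
)

// cloneTemporaryWorkTree clones the repository into a temporary directory
// *with* a work tree, because `git read-tree -m` refuses to run in a bare
// repository ("this operation must be run in a work tree").
func cloneTemporaryWorkTree(srcRepoPath, tmpDir, branch string) error {
	// --shared reuses the source object store instead of copying it;
	// omitting --bare is the part that matters for the three-way merge.
	cmd := exec.Command("git", "clone", "--shared", "--branch", branch, srcRepoPath, tmpDir)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("clone into %s: %w (%s)", tmpDir, err, out)
	}
	return nil
}
```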

After cloning a non-bare temporary upload repository, we [set the default
index](https://github.com/go-gitea/gitea/blob/main/services/repository/files/cherry_pick.go#L37)
(`git read-tree HEAD`).
This operation ends up resetting the git index file (see the investigation
details below), so we need to call `git update-index --refresh` afterwards.
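
A minimal sketch of that sequence, again using plain `os/exec` rather than Gitea's git module (the helper name and `tmpDir` are invented for the example):

```go
package sketch

import (
	"fmt"
	"os/exec"
)

// setDefaultIndexAndRefresh mirrors the fix: `git read-tree HEAD` (what
// SetDefaultIndex runs) zeroes the ctime/mtime stat data in the index, and
// `git update-index --refresh` restores it so a later `git read-tree -m`
// no longer fails with "Entry '...' not uptodate. Cannot merge."
func setDefaultIndexAndRefresh(tmpDir string) error {
	for _, args := range [][]string{
		{"read-tree", "HEAD"},         // reset the index to HEAD
		{"update-index", "--refresh"}, // the extra step this change adds
	} {
		cmd := exec.Command("git", args...)
		cmd.Dir = tmpDir
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("git %v in %s: %w (%s)", args, tmpDir, err, out)
		}
	}
	return nil
}
```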


Here's the diff of the index file before and after we execute
SetDefaultIndex: https://www.diffchecker.com/hyOP3eJy/

Notice that **ctime** and **mtime** are set to 0 after SetDefaultIndex.

You can reproduce the same behavior using these steps:
```bash
$ git clone https://try.gitea.io/me-heer/test.git -s -b main
$ cd test
$ git read-tree HEAD
$ git read-tree -m 1f085d7ed8 1f085d7ed8 9933caed00
error: Entry '1' not uptodate. Cannot merge.
```

After that, we can fix it like this:
```bash
$ git update-index --refresh
$ git read-tree -m 1f085d7ed8 1f085d7ed8 9933caed00
```
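
In the services code this means calling the refresh right after SetDefaultIndex (the `git read-tree HEAD` step linked above), so the index carries fresh stat information before the revert's `git read-tree -m` runs.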

// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package files

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"

	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/lfs"
	"code.gitea.io/gitea/modules/setting"
)

// UploadRepoFileOptions contains the uploaded repository file options
type UploadRepoFileOptions struct {
	LastCommitID string
	OldBranch    string
	NewBranch    string
	TreePath     string
	Message      string
	Files        []string // In UUID format.
	Signoff      bool
}

type uploadInfo struct {
	upload        *repo_model.Upload
	lfsMetaObject *git_model.LFSMetaObject
}

func cleanUpAfterFailure(ctx context.Context, infos *[]uploadInfo, t *TemporaryUploadRepository, original error) error {
	for _, info := range *infos {
		if info.lfsMetaObject == nil {
			continue
		}
		if !info.lfsMetaObject.Existing {
			if _, err := git_model.RemoveLFSMetaObjectByOid(ctx, t.repo.ID, info.lfsMetaObject.Oid); err != nil {
				original = fmt.Errorf("%w, %v", original, err) // We wrap the original error - as this is the underlying error that required the fallback
			}
		}
	}
	return original
}

// UploadRepoFiles uploads files to the given repository
func UploadRepoFiles(ctx context.Context, repo *repo_model.Repository, doer *user_model.User, opts *UploadRepoFileOptions) error {
	if len(opts.Files) == 0 {
		return nil
	}

	uploads, err := repo_model.GetUploadsByUUIDs(ctx, opts.Files)
	if err != nil {
		return fmt.Errorf("GetUploadsByUUIDs [uuids: %v]: %w", opts.Files, err)
	}

	names := make([]string, len(uploads))
	infos := make([]uploadInfo, len(uploads))
	for i, upload := range uploads {
		// Check file is not lfs locked, will return nil if lock setting not enabled
		filepath := path.Join(opts.TreePath, upload.Name)
		lfsLock, err := git_model.GetTreePathLock(ctx, repo.ID, filepath)
		if err != nil {
			return err
		}
		if lfsLock != nil && lfsLock.OwnerID != doer.ID {
			u, err := user_model.GetUserByID(ctx, lfsLock.OwnerID)
			if err != nil {
				return err
			}
			return git_model.ErrLFSFileLocked{RepoID: repo.ID, Path: filepath, UserName: u.Name}
		}

		names[i] = upload.Name
		infos[i] = uploadInfo{upload: upload}
	}

	t, err := NewTemporaryUploadRepository(ctx, repo)
	if err != nil {
		return err
	}
	defer t.Close()

	hasOldBranch := true
	if err = t.Clone(opts.OldBranch, true); err != nil {
		// Only fall back to a fresh init when the old branch is missing in an empty repository
		if !git.IsErrBranchNotExist(err) || !repo.IsEmpty {
			return err
		}
		if err = t.Init(repo.ObjectFormatName); err != nil {
			return err
		}
		hasOldBranch = false
		opts.LastCommitID = ""
	}
	if hasOldBranch {
		// Populate the index from the cloned branch (SetDefaultIndex runs `git read-tree HEAD`)
		if err = t.SetDefaultIndex(); err != nil {
			return err
		}
	}

	var filename2attribute2info map[string]map[string]string
	if setting.LFS.StartServer {
		filename2attribute2info, err = t.gitRepo.CheckAttribute(git.CheckAttributeOpts{
			Attributes: []string{"filter"},
			Filenames:  names,
			CachedOnly: true,
		})
		if err != nil {
			return err
		}
	}

	// Copy uploaded files into repository.
	for i := range infos {
		if err := copyUploadedLFSFileIntoRepository(&infos[i], filename2attribute2info, t, opts.TreePath); err != nil {
			return err
		}
	}

	// Now write the tree
	treeHash, err := t.WriteTree()
	if err != nil {
		return err
	}

	// make author and committer the doer
	author := doer
	committer := doer

	// Now commit the tree
	commitHash, err := t.CommitTree(opts.LastCommitID, author, committer, treeHash, opts.Message, opts.Signoff)
	if err != nil {
		return err
	}

	// Now deal with LFS objects
	for i := range infos {
		if infos[i].lfsMetaObject == nil {
			continue
		}
		infos[i].lfsMetaObject, err = git_model.NewLFSMetaObject(ctx, infos[i].lfsMetaObject.RepositoryID, infos[i].lfsMetaObject.Pointer)
		if err != nil {
			// OK Now we need to cleanup
			return cleanUpAfterFailure(ctx, &infos, t, err)
		}
		// Don't move the files yet - we need to ensure that
		// everything can be inserted first
	}

	// OK now we can insert the data into the store - there's no way to clean up the store
	// once it's in there, it's in there.
	contentStore := lfs.NewContentStore()
	for _, info := range infos {
		if err := uploadToLFSContentStore(info, contentStore); err != nil {
			return cleanUpAfterFailure(ctx, &infos, t, err)
		}
	}

	// Then push this tree to NewBranch
	if err := t.Push(doer, commitHash, opts.NewBranch); err != nil {
		return err
	}

	return repo_model.DeleteUploads(ctx, uploads...)
}

func copyUploadedLFSFileIntoRepository(info *uploadInfo, filename2attribute2info map[string]map[string]string, t *TemporaryUploadRepository, treePath string) error {
	file, err := os.Open(info.upload.LocalPath())
	if err != nil {
		return err
	}
	defer file.Close()

	var objectHash string
	if setting.LFS.StartServer && filename2attribute2info[info.upload.Name] != nil && filename2attribute2info[info.upload.Name]["filter"] == "lfs" {
		// Handle LFS
		// FIXME: Inefficient! this should probably happen in models.Upload
		pointer, err := lfs.GeneratePointer(file)
		if err != nil {
			return err
		}
		info.lfsMetaObject = &git_model.LFSMetaObject{Pointer: pointer, RepositoryID: t.repo.ID}

		if objectHash, err = t.HashObject(strings.NewReader(pointer.StringContent())); err != nil {
			return err
		}
	} else if objectHash, err = t.HashObject(file); err != nil {
		return err
	}

	// Add the object to the index
	return t.AddObjectToIndex("100644", objectHash, path.Join(treePath, info.upload.Name))
}

func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) error {
	if info.lfsMetaObject == nil {
		return nil
	}
	exist, err := contentStore.Exists(info.lfsMetaObject.Pointer)
	if err != nil {
		return err
	}
	if !exist {
		file, err := os.Open(info.upload.LocalPath())
		if err != nil {
			return err
		}
		defer file.Close()

		// FIXME: Put regenerates the hash and copies the file over.
		// I guess this strictly ensures the soundness of the store but this is inefficient.
		if err := contentStore.Put(info.lfsMetaObject.Pointer, file); err != nil {
			// OK Now we need to cleanup
			// Can't clean up the store, once uploaded there they're there.
			return err
		}
	}
	return nil
}