gitea/modules/repofiles/upload.go (mirror of https://github.com/go-gitea/gitea)
Latest commit c03e488e14 by KN4CK3R (2021-04-08 18:25:57 -04:00): Add LFS Migration and Mirror (#14726)

* Implemented LFS client.
* Implemented scanning for pointer files.
* Implemented downloading of lfs files.
* Moved model-dependent code into services.
* Removed models dependency. Added TryReadPointerFromBuffer.
* Migrated code from service to module.
* Centralised storage creation.
* Removed dependency from models.
* Moved ContentStore into modules.
* Share structs between server and client.
* Moved method to services.
* Implemented lfs download on clone.
* Implemented LFS sync on clone and mirror update.
* Added form fields.
* Updated templates.
* Fixed condition.
* Use alternate endpoint.
* Added missing methods.
* Fixed typo and make linter happy.
* Detached pointer parser from gogit dependency.
* Fixed TestGetLFSRange test.
* Added context to support cancellation.
* Use ReadFull to probably read more data.
* Removed duplicated code from models.
* Moved scan implementation into pointer_scanner_nogogit.
* Changed method name.
* Added comments.
* Added more/specific log/error messages.
* Embedded lfs.Pointer into models.LFSMetaObject.
* Moved code from models to module.
* Moved code from models to module.
* Moved code from models to module.
* Reduced pointer usage.
* Embedded type.
* Use promoted fields.
* Fixed unexpected eof.
* Added unit tests.
* Implemented migration of local file paths.
* Show an error on invalid LFS endpoints.
* Hide settings if not used.
* Added LFS info to mirror struct.
* Fixed comment.
* Check LFS endpoint.
* Manage LFS settings from mirror page.
* Fixed selector.
* Adjusted selector.
* Added more tests.
* Added local filesystem migration test.
* Fixed typo.
* Reset settings.
* Added special windows path handling.
* Added unit test for HTTPClient.
* Added unit test for BasicTransferAdapter.
* Moved into util package.
* Test if LFS endpoint is allowed.
* Added support for git://
* Just use a static placeholder as the displayed url may be invalid.
* Reverted to original code.
* Added "Advanced Settings".
* Updated wording.
* Added discovery info link.
* Implemented suggestion.
* Fixed missing format parameter.
* Added Pointer.IsValid().
* Always remove model on error.
* Added suggestions.
* Use channel instead of array.
* Update routers/repo/migrate.go
* fmt

Signed-off-by: Andrew Thornton <art27@cantab.net>
Co-authored-by: zeripath <art27@cantab.net>

207 lines · 5.5 KiB · Go
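One piece of context the code below assumes: when a file is tracked by LFS, only a small pointer file is committed to the Git tree, while the real contents go into the content store. The text produced by pointer.StringContent() (which copyUploadedLFSFileIntoRepository hashes into the index) follows the standard Git LFS pointer format, roughly like this, with an illustrative OID and size:

version https://git-lfs.github.com/spec/v1
oid sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03
size 1038

The object contents themselves are written separately, which is what uploadToLFSContentStore does at the end of the upload flow.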

// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repofiles

import (
	"fmt"
	"os"
	"path"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/lfs"
	"code.gitea.io/gitea/modules/setting"
)

// UploadRepoFileOptions contains the uploaded repository file options
type UploadRepoFileOptions struct {
	LastCommitID string
	OldBranch    string
	NewBranch    string
	TreePath     string
	Message      string
	Files        []string // In UUID format.
	Signoff      bool
}

// uploadInfo pairs an upload record with the LFS metadata created for it (if any).
type uploadInfo struct {
	upload        *models.Upload
	lfsMetaObject *models.LFSMetaObject
}

// cleanUpAfterFailure removes any newly created LFS metadata rows when the upload
// fails part-way through, folding cleanup errors into the original error.
func cleanUpAfterFailure(infos *[]uploadInfo, t *TemporaryUploadRepository, original error) error {
	for _, info := range *infos {
		if info.lfsMetaObject == nil {
			continue
		}
		if !info.lfsMetaObject.Existing {
			if _, err := t.repo.RemoveLFSMetaObjectByOid(info.lfsMetaObject.Oid); err != nil {
				original = fmt.Errorf("%v, %v", original, err)
			}
		}
	}
	return original
}

// UploadRepoFiles uploads files to the given repository
func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRepoFileOptions) error {
	if len(opts.Files) == 0 {
		return nil
	}

	uploads, err := models.GetUploadsByUUIDs(opts.Files)
	if err != nil {
		return fmt.Errorf("GetUploadsByUUIDs [uuids: %v]: %v", opts.Files, err)
	}

	names := make([]string, len(uploads))
	infos := make([]uploadInfo, len(uploads))
	for i, upload := range uploads {
		// Check file is not lfs locked, will return nil if lock setting not enabled
		filepath := path.Join(opts.TreePath, upload.Name)
		lfsLock, err := repo.GetTreePathLock(filepath)
		if err != nil {
			return err
		}
		if lfsLock != nil && lfsLock.OwnerID != doer.ID {
			return models.ErrLFSFileLocked{RepoID: repo.ID, Path: filepath, UserName: lfsLock.Owner.Name}
		}

		names[i] = upload.Name
		infos[i] = uploadInfo{upload: upload}
	}

	t, err := NewTemporaryUploadRepository(repo)
	if err != nil {
		return err
	}
	defer t.Close()
	if err := t.Clone(opts.OldBranch); err != nil {
		return err
	}
	if err := t.SetDefaultIndex(); err != nil {
		return err
	}

	var filename2attribute2info map[string]map[string]string
	if setting.LFS.StartServer {
		filename2attribute2info, err = t.gitRepo.CheckAttribute(git.CheckAttributeOpts{
			Attributes: []string{"filter"},
			Filenames:  names,
		})
		if err != nil {
			return err
		}
	}

	// Copy uploaded files into repository.
	for i := range infos {
		if err := copyUploadedLFSFileIntoRepository(&infos[i], filename2attribute2info, t, opts.TreePath); err != nil {
			return err
		}
	}

	// Now write the tree
	treeHash, err := t.WriteTree()
	if err != nil {
		return err
	}

	// make author and committer the doer
	author := doer
	committer := doer

	// Now commit the tree
	commitHash, err := t.CommitTree(author, committer, treeHash, opts.Message, opts.Signoff)
	if err != nil {
		return err
	}

	// Now deal with LFS objects
	for i := range infos {
		if infos[i].lfsMetaObject == nil {
			continue
		}
		infos[i].lfsMetaObject, err = models.NewLFSMetaObject(infos[i].lfsMetaObject)
		if err != nil {
			// OK Now we need to cleanup
			return cleanUpAfterFailure(&infos, t, err)
		}
		// Don't move the files yet - we need to ensure that
		// everything can be inserted first
	}

	// OK now we can insert the data into the store - there's no way to clean up the store
	// once it's in there, it's in there.
	contentStore := lfs.NewContentStore()
	for _, info := range infos {
		if err := uploadToLFSContentStore(info, contentStore); err != nil {
			return cleanUpAfterFailure(&infos, t, err)
		}
	}

	// Then push this tree to NewBranch
	if err := t.Push(doer, commitHash, opts.NewBranch); err != nil {
		return err
	}

	return models.DeleteUploads(uploads...)
}

// copyUploadedLFSFileIntoRepository hashes the uploaded file (or, for LFS-tracked
// files, its generated pointer) and adds the resulting object to the index of the
// temporary repository.
func copyUploadedLFSFileIntoRepository(info *uploadInfo, filename2attribute2info map[string]map[string]string, t *TemporaryUploadRepository, treePath string) error {
	file, err := os.Open(info.upload.LocalPath())
	if err != nil {
		return err
	}
	defer file.Close()

	var objectHash string
	if setting.LFS.StartServer && filename2attribute2info[info.upload.Name] != nil && filename2attribute2info[info.upload.Name]["filter"] == "lfs" {
		// Handle LFS
		// FIXME: Inefficient! this should probably happen in models.Upload
		pointer, err := lfs.GeneratePointer(file)
		if err != nil {
			return err
		}

		info.lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: t.repo.ID}

		if objectHash, err = t.HashObject(strings.NewReader(pointer.StringContent())); err != nil {
			return err
		}
	} else if objectHash, err = t.HashObject(file); err != nil {
		return err
	}

	// Add the object to the index
	return t.AddObjectToIndex("100644", objectHash, path.Join(treePath, info.upload.Name))
}

// uploadToLFSContentStore writes the uploaded file into the LFS content store,
// unless an object with the same OID already exists there.
func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) error {
	if info.lfsMetaObject == nil {
		return nil
	}
	exist, err := contentStore.Exists(info.lfsMetaObject.Pointer)
	if err != nil {
		return err
	}
	if !exist {
		file, err := os.Open(info.upload.LocalPath())
		if err != nil {
			return err
		}
		defer file.Close()

		// FIXME: Put regenerates the hash and copies the file over.
		// I guess this strictly ensures the soundness of the store but this is inefficient.
		if err := contentStore.Put(info.lfsMetaObject.Pointer, file); err != nil {
			// OK Now we need to cleanup
			// Can't clean up the store, once uploaded there they're there.
			return err
		}
	}
	return nil
}
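
For orientation, here is a minimal sketch (not part of upload.go) of how this API might be called from outside the package. The wrapper function, its package name, and the choice of branch and commit message are assumptions for illustration only:

package example // hypothetical caller package, for illustration only

import (
	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/repofiles"
)

// uploadToDefaultBranch commits previously stored uploads (identified by their
// UUIDs) to the repository's default branch under the given tree path.
// repo and doer are assumed to have been loaded by the caller.
func uploadToDefaultBranch(repo *models.Repository, doer *models.User, uuids []string, treePath string) error {
	opts := &repofiles.UploadRepoFileOptions{
		OldBranch: repo.DefaultBranch,
		NewBranch: repo.DefaultBranch,
		TreePath:  treePath,
		Message:   "Add uploaded files",
		Files:     uuids, // UUIDs of models.Upload records created beforehand
		Signoff:   false,
	}
	return repofiles.UploadRepoFiles(repo, doer, opts)
}

The UUIDs are expected to refer to existing models.Upload records, which is what UploadRepoFiles itself resolves via models.GetUploadsByUUIDs before building the commit.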