Mirror of https://codeberg.org/forgejo/forgejo.git (synced 2024-11-20 14:14:39 +01:00)
Add LFS Migration and Mirror (#14726)
* Implemented LFS client.
* Implemented scanning for pointer files.
* Implemented downloading of lfs files.
* Moved model-dependent code into services.
* Removed models dependency. Added TryReadPointerFromBuffer.
* Migrated code from service to module.
* Centralised storage creation.
* Removed dependency from models.
* Moved ContentStore into modules.
* Share structs between server and client.
* Moved method to services.
* Implemented lfs download on clone.
* Implemented LFS sync on clone and mirror update.
* Added form fields.
* Updated templates.
* Fixed condition.
* Use alternate endpoint.
* Added missing methods.
* Fixed typo and make linter happy.
* Detached pointer parser from gogit dependency.
* Fixed TestGetLFSRange test.
* Added context to support cancellation.
* Use ReadFull to probably read more data.
* Removed duplicated code from models.
* Moved scan implementation into pointer_scanner_nogogit.
* Changed method name.
* Added comments.
* Added more/specific log/error messages.
* Embedded lfs.Pointer into models.LFSMetaObject.
* Moved code from models to module.
* Moved code from models to module.
* Moved code from models to module.
* Reduced pointer usage.
* Embedded type.
* Use promoted fields.
* Fixed unexpected eof.
* Added unit tests.
* Implemented migration of local file paths.
* Show an error on invalid LFS endpoints.
* Hide settings if not used.
* Added LFS info to mirror struct.
* Fixed comment.
* Check LFS endpoint.
* Manage LFS settings from mirror page.
* Fixed selector.
* Adjusted selector.
* Added more tests.
* Added local filesystem migration test.
* Fixed typo.
* Reset settings.
* Added special windows path handling.
* Added unit test for HTTPClient.
* Added unit test for BasicTransferAdapter.
* Moved into util package.
* Test if LFS endpoint is allowed.
* Added support for git://
* Just use a static placeholder as the displayed url may be invalid.
* Reverted to original code.
* Added "Advanced Settings".
* Updated wording.
* Added discovery info link.
* Implemented suggestion.
* Fixed missing format parameter.
* Added Pointer.IsValid().
* Always remove model on error.
* Added suggestions.
* Use channel instead of array.
* Update routers/repo/migrate.go
* fmt

Signed-off-by: Andrew Thornton <art27@cantab.net>
Co-authored-by: zeripath <art27@cantab.net>
This commit is contained in: parent f544414a23, commit c03e488e14
75 changed files with 2159 additions and 711 deletions
@@ -17,11 +17,11 @@ import (
     "time"

     "code.gitea.io/gitea/models"
-    "code.gitea.io/gitea/modules/lfs"
     "code.gitea.io/gitea/modules/log"
     "code.gitea.io/gitea/modules/pprof"
     "code.gitea.io/gitea/modules/private"
     "code.gitea.io/gitea/modules/setting"
+    "code.gitea.io/gitea/services/lfs"

     "github.com/dgrijalva/jwt-go"
     jsoniter "github.com/json-iterator/go"
integrations/api_repo_lfs_migrate_test.go (new file, 49 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
    "net/http"
    "path"
    "testing"

    "code.gitea.io/gitea/models"
    "code.gitea.io/gitea/modules/lfs"
    "code.gitea.io/gitea/modules/setting"
    api "code.gitea.io/gitea/modules/structs"

    "github.com/stretchr/testify/assert"
)

func TestAPIRepoLFSMigrateLocal(t *testing.T) {
    defer prepareTestEnv(t)()

    oldImportLocalPaths := setting.ImportLocalPaths
    oldAllowLocalNetworks := setting.Migrations.AllowLocalNetworks
    setting.ImportLocalPaths = true
    setting.Migrations.AllowLocalNetworks = true

    user := models.AssertExistsAndLoadBean(t, &models.User{ID: 1}).(*models.User)
    session := loginUser(t, user.Name)
    token := getTokenForLoggedInUser(t, session)

    req := NewRequestWithJSON(t, "POST", "/api/v1/repos/migrate?token="+token, &api.MigrateRepoOptions{
        CloneAddr:   path.Join(setting.RepoRootPath, "migration/lfs-test.git"),
        RepoOwnerID: user.ID,
        RepoName:    "lfs-test-local",
        LFS:         true,
    })
    resp := MakeRequest(t, req, NoExpectedStatus)
    assert.EqualValues(t, http.StatusCreated, resp.Code)

    store := lfs.NewContentStore()
    ok, _ := store.Verify(lfs.Pointer{Oid: "fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041", Size: 6})
    assert.True(t, ok)
    ok, _ = store.Verify(lfs.Pointer{Oid: "d6f175817f886ec6fbbc1515326465fa96c3bfd54a4ea06cfd6dbbd8340e0152", Size: 6})
    assert.True(t, ok)

    setting.ImportLocalPaths = oldImportLocalPaths
    setting.Migrations.AllowLocalNetworks = oldAllowLocalNetworks
}
@@ -18,6 +18,7 @@ import (

     "code.gitea.io/gitea/models"
     "code.gitea.io/gitea/modules/git"
+    "code.gitea.io/gitea/modules/lfs"
     "code.gitea.io/gitea/modules/setting"
     api "code.gitea.io/gitea/modules/structs"
     "code.gitea.io/gitea/modules/util"

@@ -218,7 +219,7 @@ func rawTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS s
         assert.NotEqual(t, littleSize, resp.Body.Len())
         assert.LessOrEqual(t, resp.Body.Len(), 1024)
         if resp.Body.Len() != littleSize && resp.Body.Len() <= 1024 {
-            assert.Contains(t, resp.Body.String(), models.LFSMetaFileIdentifier)
+            assert.Contains(t, resp.Body.String(), lfs.MetaFileIdentifier)
         }
     }

@@ -232,7 +233,7 @@ func rawTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS s
     resp := session.MakeRequest(t, req, http.StatusOK)
     assert.NotEqual(t, bigSize, resp.Body.Len())
     if resp.Body.Len() != bigSize && resp.Body.Len() <= 1024 {
-        assert.Contains(t, resp.Body.String(), models.LFSMetaFileIdentifier)
+        assert.Contains(t, resp.Body.String(), lfs.MetaFileIdentifier)
     }
     }
 }
@@ -0,0 +1 @@
ref: refs/heads/master

@@ -0,0 +1,7 @@
[core]
    bare = false
    repositoryformatversion = 0
    filemode = false
    symlinks = false
    ignorecase = true
    logallrefupdates = true

@@ -0,0 +1 @@
Unnamed repository; edit this file 'description' to name the repository.

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-checkout.\n"; exit 2; }
git lfs post-checkout "$@"

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-commit.\n"; exit 2; }
git lfs post-commit "$@"

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-merge.\n"; exit 2; }
git lfs post-merge "$@"

@@ -0,0 +1,3 @@
#!/bin/sh
command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/pre-push.\n"; exit 2; }
git lfs pre-push "$@"

Binary file not shown.

@@ -0,0 +1 @@
dummy2

@@ -0,0 +1 @@
dummy1

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -0,0 +1 @@
546244003622c64b2fc3c2cd544d7a29882c8383
@@ -7,9 +7,6 @@ package integrations
 import (
     "archive/zip"
     "bytes"
-    "crypto/sha256"
-    "encoding/hex"
-    "io"
     "io/ioutil"
     "net/http"
     "net/http/httptest"

@@ -18,46 +15,36 @@ import (
     "code.gitea.io/gitea/models"
     "code.gitea.io/gitea/modules/lfs"
     "code.gitea.io/gitea/modules/setting"
-    "code.gitea.io/gitea/modules/storage"
     "code.gitea.io/gitea/routers/routes"

     gzipp "github.com/klauspost/compress/gzip"
     "github.com/stretchr/testify/assert"
 )

-func GenerateLFSOid(content io.Reader) (string, error) {
-    h := sha256.New()
-    if _, err := io.Copy(h, content); err != nil {
-        return "", err
-    }
-    sum := h.Sum(nil)
-    return hex.EncodeToString(sum), nil
-}
-
 var lfsID = int64(20000)

 func storeObjectInRepo(t *testing.T, repositoryID int64, content *[]byte) string {
-    oid, err := GenerateLFSOid(bytes.NewReader(*content))
+    pointer, err := lfs.GeneratePointer(bytes.NewReader(*content))
     assert.NoError(t, err)
     var lfsMetaObject *models.LFSMetaObject

     if setting.Database.UsePostgreSQL {
-        lfsMetaObject = &models.LFSMetaObject{ID: lfsID, Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
+        lfsMetaObject = &models.LFSMetaObject{ID: lfsID, Pointer: pointer, RepositoryID: repositoryID}
     } else {
-        lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
+        lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: repositoryID}
     }

     lfsID++
     lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject)
     assert.NoError(t, err)
-    contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
+    contentStore := lfs.NewContentStore()
-    exist, err := contentStore.Exists(lfsMetaObject)
+    exist, err := contentStore.Exists(pointer)
     assert.NoError(t, err)
     if !exist {
-        err := contentStore.Put(lfsMetaObject, bytes.NewReader(*content))
+        err := contentStore.Put(pointer, bytes.NewReader(*content))
         assert.NoError(t, err)
     }
-    return oid
+    return pointer.Oid
 }

 func storeAndGetLfs(t *testing.T, content *[]byte, extraHeader *http.Header, expectedStatus int) *httptest.ResponseRecorder {
integrations/lfs_local_endpoint_test.go (new file, 117 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
    "fmt"
    "io/ioutil"
    "net/url"
    "os"
    "path/filepath"
    "testing"

    "code.gitea.io/gitea/modules/lfs"

    "github.com/stretchr/testify/assert"
)

func str2url(raw string) *url.URL {
    u, _ := url.Parse(raw)
    return u
}

func TestDetermineLocalEndpoint(t *testing.T) {
    defer prepareTestEnv(t)()

    root, _ := ioutil.TempDir("", "lfs_test")
    defer os.RemoveAll(root)

    rootdotgit, _ := ioutil.TempDir("", "lfs_test")
    defer os.RemoveAll(rootdotgit)
    os.Mkdir(filepath.Join(rootdotgit, ".git"), 0700)

    lfsroot, _ := ioutil.TempDir("", "lfs_test")
    defer os.RemoveAll(lfsroot)

    // Test cases
    var cases = []struct {
        cloneurl string
        lfsurl   string
        expected *url.URL
    }{
        // case 0
        {
            cloneurl: root,
            lfsurl:   "",
            expected: str2url(fmt.Sprintf("file://%s", root)),
        },
        // case 1
        {
            cloneurl: root,
            lfsurl:   lfsroot,
            expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
        },
        // case 2
        {
            cloneurl: "https://git.com/repo.git",
            lfsurl:   lfsroot,
            expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
        },
        // case 3
        {
            cloneurl: rootdotgit,
            lfsurl:   "",
            expected: str2url(fmt.Sprintf("file://%s", filepath.Join(rootdotgit, ".git"))),
        },
        // case 4
        {
            cloneurl: "",
            lfsurl:   rootdotgit,
            expected: str2url(fmt.Sprintf("file://%s", filepath.Join(rootdotgit, ".git"))),
        },
        // case 5
        {
            cloneurl: rootdotgit,
            lfsurl:   rootdotgit,
            expected: str2url(fmt.Sprintf("file://%s", filepath.Join(rootdotgit, ".git"))),
        },
        // case 6
        {
            cloneurl: fmt.Sprintf("file://%s", root),
            lfsurl:   "",
            expected: str2url(fmt.Sprintf("file://%s", root)),
        },
        // case 7
        {
            cloneurl: fmt.Sprintf("file://%s", root),
            lfsurl:   fmt.Sprintf("file://%s", lfsroot),
            expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
        },
        // case 8
        {
            cloneurl: root,
            lfsurl:   fmt.Sprintf("file://%s", lfsroot),
            expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
        },
        // case 9
        {
            cloneurl: "",
            lfsurl:   "/does/not/exist",
            expected: nil,
        },
        // case 10
        {
            cloneurl: "",
            lfsurl:   "file:///does/not/exist",
            expected: str2url("file:///does/not/exist"),
        },
    }

    for n, c := range cases {
        ep := lfs.DetermineEndpoint(c.cloneurl, c.lfsurl)

        assert.Equal(t, c.expected, ep, "case %d: error should match", n)
    }
}
@@ -5,13 +5,9 @@
 package models

 import (
-    "crypto/sha256"
-    "encoding/hex"
     "errors"
-    "fmt"
-    "io"
-    "path"

+    "code.gitea.io/gitea/modules/lfs"
     "code.gitea.io/gitea/modules/timeutil"

     "xorm.io/builder"

@@ -19,28 +15,13 @@ import (

 // LFSMetaObject stores metadata for LFS tracked files.
 type LFSMetaObject struct {
     ID           int64 `xorm:"pk autoincr"`
-    Oid          string `xorm:"UNIQUE(s) INDEX NOT NULL"`
-    Size         int64 `xorm:"NOT NULL"`
+    lfs.Pointer  `xorm:"extends"`
     RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
     Existing     bool `xorm:"-"`
     CreatedUnix  timeutil.TimeStamp `xorm:"created"`
 }

-// RelativePath returns the relative path of the lfs object
-func (m *LFSMetaObject) RelativePath() string {
-    if len(m.Oid) < 5 {
-        return m.Oid
-    }
-
-    return path.Join(m.Oid[0:2], m.Oid[2:4], m.Oid[4:])
-}
-
-// Pointer returns the string representation of an LFS pointer file
-func (m *LFSMetaObject) Pointer() string {
-    return fmt.Sprintf("%s\n%s%s\nsize %d\n", LFSMetaFileIdentifier, LFSMetaFileOidPrefix, m.Oid, m.Size)
-}
-
 // LFSTokenResponse defines the JSON structure in which the JWT token is stored.
 // This structure is fetched via SSH and passed by the Git LFS client to the server
 // endpoint for authorization.

@@ -53,15 +34,6 @@ type LFSTokenResponse struct {
 // to differentiate between database and missing object errors.
 var ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")

-const (
-    // LFSMetaFileIdentifier is the string appearing at the first line of LFS pointer files.
-    // https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
-    LFSMetaFileIdentifier = "version https://git-lfs.github.com/spec/v1"
-
-    // LFSMetaFileOidPrefix appears in LFS pointer files on a line before the sha256 hash.
-    LFSMetaFileOidPrefix = "oid sha256:"
-)
-
 // NewLFSMetaObject stores a given populated LFSMetaObject structure in the database
 // if it is not already present.
 func NewLFSMetaObject(m *LFSMetaObject) (*LFSMetaObject, error) {

@@ -90,16 +62,6 @@ func NewLFSMetaObject(m *LFSMetaObject) (*LFSMetaObject, error) {
     return m, sess.Commit()
 }

-// GenerateLFSOid generates a Sha256Sum to represent an oid for arbitrary content
-func GenerateLFSOid(content io.Reader) (string, error) {
-    h := sha256.New()
-    if _, err := io.Copy(h, content); err != nil {
-        return "", err
-    }
-    sum := h.Sum(nil)
-    return hex.EncodeToString(sum), nil
-}
-
 // GetLFSMetaObjectByOid selects a LFSMetaObject entry from database by its OID.
 // It may return ErrLFSObjectNotExist or a database error. If the error is nil,
 // the returned pointer is a valid LFSMetaObject.

@@ -108,7 +70,7 @@ func (repo *Repository) GetLFSMetaObjectByOid(oid string) (*LFSMetaObject, error
         return nil, ErrLFSObjectNotExist
     }

-    m := &LFSMetaObject{Oid: oid, RepositoryID: repo.ID}
+    m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repo.ID}
     has, err := x.Get(m)
     if err != nil {
         return nil, err

@@ -131,12 +93,12 @@ func (repo *Repository) RemoveLFSMetaObjectByOid(oid string) (int64, error) {
         return -1, err
     }

-    m := &LFSMetaObject{Oid: oid, RepositoryID: repo.ID}
+    m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repo.ID}
     if _, err := sess.Delete(m); err != nil {
         return -1, err
     }

-    count, err := sess.Count(&LFSMetaObject{Oid: oid})
+    count, err := sess.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
     if err != nil {
         return count, err
     }

@@ -168,11 +130,11 @@ func (repo *Repository) CountLFSMetaObjects() (int64, error) {
 // LFSObjectAccessible checks if a provided Oid is accessible to the user
 func LFSObjectAccessible(user *User, oid string) (bool, error) {
     if user.IsAdmin {
-        count, err := x.Count(&LFSMetaObject{Oid: oid})
+        count, err := x.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
         return (count > 0), err
     }
     cond := accessibleRepositoryCondition(user)
-    count, err := x.Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Oid: oid})
+    count, err := x.Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
     return (count > 0), err
 }
@@ -302,6 +302,8 @@ var migrations = []Migration{
     NewMigration("Remove invalid labels from comments", removeInvalidLabels),
     // v177 -> v178
     NewMigration("Delete orphaned IssueLabels", deleteOrphanedIssueLabels),
+    // v178 -> v179
+    NewMigration("Add LFS columns to Mirror", addLFSMirrorColumns),
 }

 // GetCurrentDBVersion returns the current db version
models/migrations/v178.go (new file, 18 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package migrations

import (
    "xorm.io/xorm"
)

func addLFSMirrorColumns(x *xorm.Engine) error {
    type Mirror struct {
        LFS         bool   `xorm:"lfs_enabled NOT NULL DEFAULT false"`
        LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
    }

    return x.Sync2(new(Mirror))
}
@@ -25,6 +25,7 @@ import (
     "strings"
     "time"

+    "code.gitea.io/gitea/modules/lfs"
     "code.gitea.io/gitea/modules/log"
     "code.gitea.io/gitea/modules/markup"
     "code.gitea.io/gitea/modules/options"

@@ -1531,7 +1532,7 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
     }

     for _, v := range lfsObjects {
-        count, err := sess.Count(&LFSMetaObject{Oid: v.Oid})
+        count, err := sess.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: v.Oid}})
         if err != nil {
             return err
         }
@@ -25,6 +25,9 @@ type Mirror struct {
     UpdatedUnix    timeutil.TimeStamp `xorm:"INDEX"`
     NextUpdateUnix timeutil.TimeStamp `xorm:"INDEX"`

+    LFS         bool   `xorm:"lfs_enabled NOT NULL DEFAULT false"`
+    LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
+
     Address string `xorm:"-"`
 }
modules/lfs/client.go (new file, 24 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "context"
    "io"
    "net/url"
)

// Client is used to communicate with a LFS source
type Client interface {
    Download(ctx context.Context, oid string, size int64) (io.ReadCloser, error)
}

// NewClient creates a LFS client
func NewClient(endpoint *url.URL) Client {
    if endpoint.Scheme == "file" {
        return newFilesystemClient(endpoint)
    }
    return newHTTPClient(endpoint)
}
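This Client interface is what the migration and mirror code programs against. As a rough usage sketch (not code from this commit; the clone URL and error handling are placeholder assumptions), downloading one object could look like this:

package main

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"

    "code.gitea.io/gitea/modules/lfs"
)

// downloadOne is an illustrative caller of the Client interface above.
// The clone URL is a placeholder; real callers derive it from the mirror/migration settings.
func downloadOne(ctx context.Context, pointer lfs.Pointer) error {
    endpoint := lfs.DetermineEndpoint("https://git.com/repo.git", "")
    if endpoint == nil {
        return fmt.Errorf("no usable LFS endpoint")
    }

    // file:// endpoints yield a FilesystemClient, everything else an HTTPClient.
    client := lfs.NewClient(endpoint)

    content, err := client.Download(ctx, pointer.Oid, pointer.Size)
    if err != nil {
        return err
    }
    defer content.Close()

    // A real caller would stream this into the content store,
    // e.g. lfs.NewContentStore().Put(pointer, content).
    _, err = io.Copy(ioutil.Discard, content)
    return err
}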
modules/lfs/client_test.go (new file, 23 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "net/url"

    "testing"

    "github.com/stretchr/testify/assert"
)

func TestNewClient(t *testing.T) {
    u, _ := url.Parse("file:///test")
    c := NewClient(u)
    assert.IsType(t, &FilesystemClient{}, c)

    u, _ = url.Parse("https://test.com/lfs")
    c = NewClient(u)
    assert.IsType(t, &HTTPClient{}, c)
}
@@ -13,14 +13,15 @@ import (
     "io"
     "os"

-    "code.gitea.io/gitea/models"
     "code.gitea.io/gitea/modules/log"
     "code.gitea.io/gitea/modules/storage"
 )

 var (
-    errHashMismatch = errors.New("Content hash does not match OID")
-    errSizeMismatch = errors.New("Content size does not match")
+    // ErrHashMismatch occurs if the content has does not match OID
+    ErrHashMismatch = errors.New("Content hash does not match OID")
+    // ErrSizeMismatch occurs if the content size does not match
+    ErrSizeMismatch = errors.New("Content size does not match")
 )

 // ErrRangeNotSatisfiable represents an error which request range is not satisfiable.

@@ -28,61 +29,67 @@ type ErrRangeNotSatisfiable struct {
     FromByte int64
 }

-func (err ErrRangeNotSatisfiable) Error() string {
-    return fmt.Sprintf("Requested range %d is not satisfiable", err.FromByte)
-}
-
 // IsErrRangeNotSatisfiable returns true if the error is an ErrRangeNotSatisfiable
 func IsErrRangeNotSatisfiable(err error) bool {
     _, ok := err.(ErrRangeNotSatisfiable)
     return ok
 }

+func (err ErrRangeNotSatisfiable) Error() string {
+    return fmt.Sprintf("Requested range %d is not satisfiable", err.FromByte)
+}
+
 // ContentStore provides a simple file system based storage.
 type ContentStore struct {
     storage.ObjectStorage
 }

+// NewContentStore creates the default ContentStore
+func NewContentStore() *ContentStore {
+    contentStore := &ContentStore{ObjectStorage: storage.LFS}
+    return contentStore
+}
+
 // Get takes a Meta object and retrieves the content from the store, returning
 // it as an io.ReadSeekCloser.
-func (s *ContentStore) Get(meta *models.LFSMetaObject) (storage.Object, error) {
-    f, err := s.Open(meta.RelativePath())
+func (s *ContentStore) Get(pointer Pointer) (storage.Object, error) {
+    f, err := s.Open(pointer.RelativePath())
     if err != nil {
-        log.Error("Whilst trying to read LFS OID[%s]: Unable to open Error: %v", meta.Oid, err)
+        log.Error("Whilst trying to read LFS OID[%s]: Unable to open Error: %v", pointer.Oid, err)
         return nil, err
     }
     return f, err
 }

 // Put takes a Meta object and an io.Reader and writes the content to the store.
-func (s *ContentStore) Put(meta *models.LFSMetaObject, r io.Reader) error {
-    p := meta.RelativePath()
+func (s *ContentStore) Put(pointer Pointer, r io.Reader) error {
+    p := pointer.RelativePath()

     // Wrap the provided reader with an inline hashing and size checker
-    wrappedRd := newHashingReader(meta.Size, meta.Oid, r)
+    wrappedRd := newHashingReader(pointer.Size, pointer.Oid, r)

     // now pass the wrapped reader to Save - if there is a size mismatch or hash mismatch then
     // the errors returned by the newHashingReader should percolate up to here
-    written, err := s.Save(p, wrappedRd, meta.Size)
+    written, err := s.Save(p, wrappedRd, pointer.Size)
     if err != nil {
-        log.Error("Whilst putting LFS OID[%s]: Failed to copy to tmpPath: %s Error: %v", meta.Oid, p, err)
+        log.Error("Whilst putting LFS OID[%s]: Failed to copy to tmpPath: %s Error: %v", pointer.Oid, p, err)
         return err
     }

     // This shouldn't happen but it is sensible to test
-    if written != meta.Size {
+    if written != pointer.Size {
         if err := s.Delete(p); err != nil {
-            log.Error("Cleaning the LFS OID[%s] failed: %v", meta.Oid, err)
+            log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, err)
         }
-        return errSizeMismatch
+        return ErrSizeMismatch
     }

     return nil
 }

 // Exists returns true if the object exists in the content store.
-func (s *ContentStore) Exists(meta *models.LFSMetaObject) (bool, error) {
-    _, err := s.ObjectStorage.Stat(meta.RelativePath())
+func (s *ContentStore) Exists(pointer Pointer) (bool, error) {
+    _, err := s.ObjectStorage.Stat(pointer.RelativePath())
     if err != nil {
         if os.IsNotExist(err) {
             return false, nil

@@ -93,19 +100,25 @@ func (s *ContentStore) Exists(meta *models.LFSMetaObject) (bool, error) {
 }

 // Verify returns true if the object exists in the content store and size is correct.
-func (s *ContentStore) Verify(meta *models.LFSMetaObject) (bool, error) {
-    p := meta.RelativePath()
+func (s *ContentStore) Verify(pointer Pointer) (bool, error) {
+    p := pointer.RelativePath()
     fi, err := s.ObjectStorage.Stat(p)
-    if os.IsNotExist(err) || (err == nil && fi.Size() != meta.Size) {
+    if os.IsNotExist(err) || (err == nil && fi.Size() != pointer.Size) {
         return false, nil
     } else if err != nil {
-        log.Error("Unable stat file: %s for LFS OID[%s] Error: %v", p, meta.Oid, err)
+        log.Error("Unable stat file: %s for LFS OID[%s] Error: %v", p, pointer.Oid, err)
         return false, err
     }

     return true, nil
 }

+// ReadMetaObject will read a models.LFSMetaObject and return a reader
+func ReadMetaObject(pointer Pointer) (io.ReadCloser, error) {
+    contentStore := NewContentStore()
+    return contentStore.Get(pointer)
+}
+
 type hashingReader struct {
     internal    io.Reader
     currentSize int64

@@ -127,12 +140,12 @@ func (r *hashingReader) Read(b []byte) (int, error) {

     if err != nil && err == io.EOF {
         if r.currentSize != r.expectedSize {
-            return n, errSizeMismatch
+            return n, ErrSizeMismatch
         }

         shaStr := hex.EncodeToString(r.hash.Sum(nil))
         if shaStr != r.expectedHash {
-            return n, errHashMismatch
+            return n, ErrHashMismatch
         }
     }
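For orientation, an illustrative sketch (not commit code) of the Pointer-based store API after this change; note that Put now fails with the exported ErrSizeMismatch/ErrHashMismatch when the streamed content does not match the pointer:

package main

import (
    "bytes"

    "code.gitea.io/gitea/modules/lfs"
)

// storeBytes shows the Pointer-based ContentStore calls; the content value is a placeholder.
func storeBytes(content []byte) error {
    pointer, err := lfs.GeneratePointer(bytes.NewReader(content))
    if err != nil {
        return err
    }

    store := lfs.NewContentStore()
    exists, err := store.Exists(pointer)
    if err != nil {
        return err
    }
    if !exists {
        // Put hashes and size-checks the stream while writing.
        if err := store.Put(pointer, bytes.NewReader(content)); err != nil {
            return err
        }
    }

    _, err = store.Verify(pointer)
    return err
}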
modules/lfs/endpoint.go (new file, 106 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "fmt"
    "net/url"
    "os"
    "path"
    "path/filepath"
    "strings"

    "code.gitea.io/gitea/modules/log"
)

// DetermineEndpoint determines an endpoint from the clone url or uses the specified LFS url.
func DetermineEndpoint(cloneurl, lfsurl string) *url.URL {
    if len(lfsurl) > 0 {
        return endpointFromURL(lfsurl)
    }
    return endpointFromCloneURL(cloneurl)
}

func endpointFromCloneURL(rawurl string) *url.URL {
    ep := endpointFromURL(rawurl)
    if ep == nil {
        return ep
    }

    if strings.HasSuffix(ep.Path, "/") {
        ep.Path = ep.Path[:len(ep.Path)-1]
    }

    if ep.Scheme == "file" {
        return ep
    }

    if path.Ext(ep.Path) == ".git" {
        ep.Path += "/info/lfs"
    } else {
        ep.Path += ".git/info/lfs"
    }

    return ep
}

func endpointFromURL(rawurl string) *url.URL {
    if strings.HasPrefix(rawurl, "/") {
        return endpointFromLocalPath(rawurl)
    }

    u, err := url.Parse(rawurl)
    if err != nil {
        log.Error("lfs.endpointFromUrl: %v", err)
        return nil
    }

    switch u.Scheme {
    case "http", "https":
        return u
    case "git":
        u.Scheme = "https"
        return u
    case "file":
        return u
    default:
        if _, err := os.Stat(rawurl); err == nil {
            return endpointFromLocalPath(rawurl)
        }

        log.Error("lfs.endpointFromUrl: unknown url")
        return nil
    }
}

func endpointFromLocalPath(path string) *url.URL {
    var slash string
    if abs, err := filepath.Abs(path); err == nil {
        if !strings.HasPrefix(abs, "/") {
            slash = "/"
        }
        path = abs
    }

    var gitpath string
    if filepath.Base(path) == ".git" {
        gitpath = path
        path = filepath.Dir(path)
    } else {
        gitpath = filepath.Join(path, ".git")
    }

    if _, err := os.Stat(gitpath); err == nil {
        path = gitpath
    } else if _, err := os.Stat(path); err != nil {
        return nil
    }

    path = fmt.Sprintf("file://%s%s", slash, filepath.ToSlash(path))

    u, _ := url.Parse(path)

    return u
}
modules/lfs/endpoint_test.go (new file, 75 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "net/url"
    "testing"

    "github.com/stretchr/testify/assert"
)

func str2url(raw string) *url.URL {
    u, _ := url.Parse(raw)
    return u
}

func TestDetermineEndpoint(t *testing.T) {
    // Test cases
    var cases = []struct {
        cloneurl string
        lfsurl   string
        expected *url.URL
    }{
        // case 0
        {
            cloneurl: "",
            lfsurl:   "",
            expected: nil,
        },
        // case 1
        {
            cloneurl: "https://git.com/repo",
            lfsurl:   "",
            expected: str2url("https://git.com/repo.git/info/lfs"),
        },
        // case 2
        {
            cloneurl: "https://git.com/repo.git",
            lfsurl:   "",
            expected: str2url("https://git.com/repo.git/info/lfs"),
        },
        // case 3
        {
            cloneurl: "",
            lfsurl:   "https://gitlfs.com/repo",
            expected: str2url("https://gitlfs.com/repo"),
        },
        // case 4
        {
            cloneurl: "https://git.com/repo.git",
            lfsurl:   "https://gitlfs.com/repo",
            expected: str2url("https://gitlfs.com/repo"),
        },
        // case 5
        {
            cloneurl: "git://git.com/repo.git",
            lfsurl:   "",
            expected: str2url("https://git.com/repo.git/info/lfs"),
        },
        // case 6
        {
            cloneurl: "",
            lfsurl:   "git://gitlfs.com/repo",
            expected: str2url("https://gitlfs.com/repo"),
        },
    }

    for n, c := range cases {
        ep := DetermineEndpoint(c.cloneurl, c.lfsurl)

        assert.Equal(t, c.expected, ep, "case %d: error should match", n)
    }
}
modules/lfs/filesystem_client.go (new file, 50 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "context"
    "io"
    "net/url"
    "os"
    "path/filepath"

    "code.gitea.io/gitea/modules/util"
)

// FilesystemClient is used to read LFS data from a filesystem path
type FilesystemClient struct {
    lfsdir string
}

func newFilesystemClient(endpoint *url.URL) *FilesystemClient {
    path, _ := util.FileURLToPath(endpoint)

    lfsdir := filepath.Join(path, "lfs", "objects")

    client := &FilesystemClient{lfsdir}

    return client
}

func (c *FilesystemClient) objectPath(oid string) string {
    return filepath.Join(c.lfsdir, oid[0:2], oid[2:4], oid)
}

// Download reads the specific LFS object from the target repository
func (c *FilesystemClient) Download(ctx context.Context, oid string, size int64) (io.ReadCloser, error) {
    objectPath := c.objectPath(oid)

    if _, err := os.Stat(objectPath); os.IsNotExist(err) {
        return nil, err
    }

    file, err := os.Open(objectPath)
    if err != nil {
        return nil, err
    }

    return file, nil
}
modules/lfs/http_client.go (new file, 129 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strings"

    "code.gitea.io/gitea/modules/log"
)

// HTTPClient is used to communicate with the LFS server
// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
type HTTPClient struct {
    client    *http.Client
    endpoint  string
    transfers map[string]TransferAdapter
}

func newHTTPClient(endpoint *url.URL) *HTTPClient {
    hc := &http.Client{}

    client := &HTTPClient{
        client:    hc,
        endpoint:  strings.TrimSuffix(endpoint.String(), "/"),
        transfers: make(map[string]TransferAdapter),
    }

    basic := &BasicTransferAdapter{hc}

    client.transfers[basic.Name()] = basic

    return client
}

func (c *HTTPClient) transferNames() []string {
    keys := make([]string, len(c.transfers))

    i := 0
    for k := range c.transfers {
        keys[i] = k
        i++
    }

    return keys
}

func (c *HTTPClient) batch(ctx context.Context, operation string, objects []Pointer) (*BatchResponse, error) {
    url := fmt.Sprintf("%s/objects/batch", c.endpoint)

    request := &BatchRequest{operation, c.transferNames(), nil, objects}

    payload := new(bytes.Buffer)
    err := json.NewEncoder(payload).Encode(request)
    if err != nil {
        return nil, fmt.Errorf("lfs.HTTPClient.batch json.Encode: %w", err)
    }

    log.Trace("lfs.HTTPClient.batch NewRequestWithContext: %s", url)

    req, err := http.NewRequestWithContext(ctx, "POST", url, payload)
    if err != nil {
        return nil, fmt.Errorf("lfs.HTTPClient.batch http.NewRequestWithContext: %w", err)
    }
    req.Header.Set("Content-type", MediaType)
    req.Header.Set("Accept", MediaType)

    res, err := c.client.Do(req)
    if err != nil {
        select {
        case <-ctx.Done():
            return nil, ctx.Err()
        default:
        }
        return nil, fmt.Errorf("lfs.HTTPClient.batch http.Do: %w", err)
    }
    defer res.Body.Close()

    if res.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("lfs.HTTPClient.batch: Unexpected servers response: %s", res.Status)
    }

    var response BatchResponse
    err = json.NewDecoder(res.Body).Decode(&response)
    if err != nil {
        return nil, fmt.Errorf("lfs.HTTPClient.batch json.Decode: %w", err)
    }

    if len(response.Transfer) == 0 {
        response.Transfer = "basic"
    }

    return &response, nil
}

// Download reads the specific LFS object from the LFS server
func (c *HTTPClient) Download(ctx context.Context, oid string, size int64) (io.ReadCloser, error) {
    var objects []Pointer
    objects = append(objects, Pointer{oid, size})

    result, err := c.batch(ctx, "download", objects)
    if err != nil {
        return nil, err
    }

    transferAdapter, ok := c.transfers[result.Transfer]
    if !ok {
        return nil, fmt.Errorf("lfs.HTTPClient.Download Transferadapter not found: %s", result.Transfer)
    }

    if len(result.Objects) == 0 {
        return nil, errors.New("lfs.HTTPClient.Download: No objects in result")
    }

    content, err := transferAdapter.Download(ctx, result.Objects[0])
    if err != nil {
        return nil, err
    }
    return content, nil
}
modules/lfs/http_client_test.go (new file, 144 lines)

// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "io/ioutil"
    "net/http"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
)

type RoundTripFunc func(req *http.Request) *http.Response

func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
    return f(req), nil
}

type DummyTransferAdapter struct {
}

func (a *DummyTransferAdapter) Name() string {
    return "dummy"
}

func (a *DummyTransferAdapter) Download(ctx context.Context, r *ObjectResponse) (io.ReadCloser, error) {
    return ioutil.NopCloser(bytes.NewBufferString("dummy")), nil
}

func TestHTTPClientDownload(t *testing.T) {
    oid := "fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041"
    size := int64(6)

    roundTripHandler := func(req *http.Request) *http.Response {
        url := req.URL.String()
        if strings.Contains(url, "status-not-ok") {
            return &http.Response{StatusCode: http.StatusBadRequest}
        }
        if strings.Contains(url, "invalid-json-response") {
            return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewBufferString("invalid json"))}
        }
        if strings.Contains(url, "valid-batch-request-download") {
            assert.Equal(t, "POST", req.Method)
            assert.Equal(t, MediaType, req.Header.Get("Content-type"), "case %s: error should match", url)
            assert.Equal(t, MediaType, req.Header.Get("Accept"), "case %s: error should match", url)

            var batchRequest BatchRequest
            err := json.NewDecoder(req.Body).Decode(&batchRequest)
            assert.NoError(t, err)

            assert.Equal(t, "download", batchRequest.Operation)
            assert.Equal(t, 1, len(batchRequest.Objects))
            assert.Equal(t, oid, batchRequest.Objects[0].Oid)
            assert.Equal(t, size, batchRequest.Objects[0].Size)

            batchResponse := &BatchResponse{
                Transfer: "dummy",
                Objects:  make([]*ObjectResponse, 1),
            }

            payload := new(bytes.Buffer)
            json.NewEncoder(payload).Encode(batchResponse)

            return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(payload)}
        }
        if strings.Contains(url, "invalid-response-no-objects") {
            batchResponse := &BatchResponse{Transfer: "dummy"}

            payload := new(bytes.Buffer)
            json.NewEncoder(payload).Encode(batchResponse)

            return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(payload)}
        }
        if strings.Contains(url, "unknown-transfer-adapter") {
            batchResponse := &BatchResponse{Transfer: "unknown_adapter"}

            payload := new(bytes.Buffer)
            json.NewEncoder(payload).Encode(batchResponse)

            return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(payload)}
        }

        t.Errorf("Unknown test case: %s", url)

        return nil
    }

    hc := &http.Client{Transport: RoundTripFunc(roundTripHandler)}
    dummy := &DummyTransferAdapter{}

    var cases = []struct {
        endpoint      string
        expectederror string
    }{
        // case 0
        {
            endpoint:      "https://status-not-ok.io",
            expectederror: "Unexpected servers response: ",
        },
        // case 1
        {
            endpoint:      "https://invalid-json-response.io",
            expectederror: "json.Decode: ",
        },
        // case 2
        {
            endpoint:      "https://valid-batch-request-download.io",
            expectederror: "",
        },
        // case 3
        {
            endpoint:      "https://invalid-response-no-objects.io",
            expectederror: "No objects in result",
        },
        // case 4
        {
            endpoint:      "https://unknown-transfer-adapter.io",
            expectederror: "Transferadapter not found: ",
        },
    }

    for n, c := range cases {
        client := &HTTPClient{
            client:    hc,
            endpoint:  c.endpoint,
            transfers: make(map[string]TransferAdapter),
        }
        client.transfers["dummy"] = dummy

        _, err := client.Download(context.Background(), oid, size)
        if len(c.expectederror) > 0 {
            assert.True(t, strings.Contains(err.Error(), c.expectederror), "case %d: '%s' should contain '%s'", n, err.Error(), c.expectederror)
        } else {
            assert.NoError(t, err, "case %d", n)
        }
    }
}
123
modules/lfs/pointer.go
Normal file
123
modules/lfs/pointer.go
Normal file
|
@ -0,0 +1,123 @@
|
||||||
|
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package lfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
	blobSizeCutoff = 1024

	// MetaFileIdentifier is the string appearing at the first line of LFS pointer files.
	// https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
	MetaFileIdentifier = "version https://git-lfs.github.com/spec/v1"

	// MetaFileOidPrefix appears in LFS pointer files on a line before the sha256 hash.
	MetaFileOidPrefix = "oid sha256:"
)

var (
	// ErrMissingPrefix occurs if the content lacks the LFS prefix
	ErrMissingPrefix = errors.New("Content lacks the LFS prefix")

	// ErrInvalidStructure occurs if the content has an invalid structure
	ErrInvalidStructure = errors.New("Content has an invalid structure")

	// ErrInvalidOIDFormat occurs if the oid has an invalid format
	ErrInvalidOIDFormat = errors.New("OID has an invalid format")
)

// ReadPointer tries to read LFS pointer data from the reader
func ReadPointer(reader io.Reader) (Pointer, error) {
	buf := make([]byte, blobSizeCutoff)
	n, err := io.ReadFull(reader, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		return Pointer{}, err
	}
	buf = buf[:n]

	return ReadPointerFromBuffer(buf)
}

var oidPattern = regexp.MustCompile(`^[a-f\d]{64}$`)

// ReadPointerFromBuffer will return a pointer if the provided byte slice is a pointer file or an error otherwise.
func ReadPointerFromBuffer(buf []byte) (Pointer, error) {
	var p Pointer

	headString := string(buf)
	if !strings.HasPrefix(headString, MetaFileIdentifier) {
		return p, ErrMissingPrefix
	}

	splitLines := strings.Split(headString, "\n")
	if len(splitLines) < 3 {
		return p, ErrInvalidStructure
	}

	oid := strings.TrimPrefix(splitLines[1], MetaFileOidPrefix)
	if len(oid) != 64 || !oidPattern.MatchString(oid) {
		return p, ErrInvalidOIDFormat
	}
	size, err := strconv.ParseInt(strings.TrimPrefix(splitLines[2], "size "), 10, 64)
	if err != nil {
		return p, err
	}

	p.Oid = oid
	p.Size = size

	return p, nil
}

// IsValid checks if the pointer has a valid structure.
// It doesn't check if the pointed-to-content exists.
func (p Pointer) IsValid() bool {
	if len(p.Oid) != 64 {
		return false
	}
	if !oidPattern.MatchString(p.Oid) {
		return false
	}
	if p.Size < 0 {
		return false
	}
	return true
}

// StringContent returns the string representation of the pointer
// https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#the-pointer
func (p Pointer) StringContent() string {
	return fmt.Sprintf("%s\n%s%s\nsize %d\n", MetaFileIdentifier, MetaFileOidPrefix, p.Oid, p.Size)
}

// RelativePath returns the relative storage path of the pointer
func (p Pointer) RelativePath() string {
	if len(p.Oid) < 5 {
		return p.Oid
	}

	return path.Join(p.Oid[0:2], p.Oid[2:4], p.Oid[4:])
}

// GeneratePointer generates a pointer for arbitrary content
func GeneratePointer(content io.Reader) (Pointer, error) {
	h := sha256.New()
	c, err := io.Copy(h, content)
	if err != nil {
		return Pointer{}, err
	}
	sum := h.Sum(nil)
	return Pointer{Oid: hex.EncodeToString(sum), Size: c}, nil
}
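A minimal usage sketch of the pointer API above (assuming only the modules/lfs functions shown in this change; the content string is illustrative):

package main

import (
	"fmt"
	"strings"

	"code.gitea.io/gitea/modules/lfs"
)

func main() {
	// Generate a pointer for some arbitrary content.
	p, err := lfs.GeneratePointer(strings.NewReader("Gitea"))
	if err != nil {
		panic(err)
	}

	// StringContent renders the canonical pointer file for that content.
	raw := p.StringContent()
	fmt.Print(raw)

	// Parsing the rendered pointer file yields the same OID and size back.
	parsed, err := lfs.ReadPointerFromBuffer([]byte(raw))
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.IsValid(), parsed.Oid == p.Oid, parsed.RelativePath())
}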
modules/lfs/pointer_scanner_gogit.go (new file, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

// +build gogit

package lfs

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/git"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// SearchPointerBlobs scans the whole repository for LFS pointer files
func SearchPointerBlobs(ctx context.Context, repo *git.Repository, pointerChan chan<- PointerBlob, errChan chan<- error) {
	gitRepo := repo.GoGitRepo()

	err := func() error {
		blobs, err := gitRepo.BlobObjects()
		if err != nil {
			return fmt.Errorf("lfs.SearchPointerBlobs BlobObjects: %w", err)
		}

		return blobs.ForEach(func(blob *object.Blob) error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}

			if blob.Size > blobSizeCutoff {
				return nil
			}

			reader, err := blob.Reader()
			if err != nil {
				return fmt.Errorf("lfs.SearchPointerBlobs blob.Reader: %w", err)
			}
			defer reader.Close()

			pointer, _ := ReadPointer(reader)
			if pointer.IsValid() {
				pointerChan <- PointerBlob{Hash: blob.Hash.String(), Pointer: pointer}
			}

			return nil
		})
	}()

	if err != nil {
		select {
		case <-ctx.Done():
		default:
			errChan <- err
		}
	}

	close(pointerChan)
	close(errChan)
}
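A sketch of the intended consumer pattern for SearchPointerBlobs, mirroring how the migration code later in this change drains the two channels (the helper name is hypothetical):

package main

import (
	"context"

	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/lfs"
	"code.gitea.io/gitea/modules/log"
)

// collectPointers drains the scanner's result channel and then checks the error channel.
func collectPointers(ctx context.Context, gitRepo *git.Repository) ([]lfs.PointerBlob, error) {
	pointerChan := make(chan lfs.PointerBlob)
	errChan := make(chan error, 1)
	go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)

	var blobs []lfs.PointerBlob
	for pointerBlob := range pointerChan {
		// Each result pairs the Git blob hash with the parsed pointer (OID and size).
		log.Trace("found LFS pointer %s (oid %s, %d bytes)", pointerBlob.Hash, pointerBlob.Oid, pointerBlob.Size)
		blobs = append(blobs, pointerBlob)
	}

	// The scanner closes errChan; a pending value signals a failure during the scan.
	if err, ok := <-errChan; ok {
		return nil, err
	}
	return blobs, nil
}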
modules/lfs/pointer_scanner_nogogit.go (new file, 110 lines)
@@ -0,0 +1,110 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

// +build !gogit

package lfs

import (
	"bufio"
	"context"
	"io"
	"strconv"
	"sync"

	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/git/pipeline"
)

// SearchPointerBlobs scans the whole repository for LFS pointer files
func SearchPointerBlobs(ctx context.Context, repo *git.Repository, pointerChan chan<- PointerBlob, errChan chan<- error) {
	basePath := repo.Path

	catFileCheckReader, catFileCheckWriter := io.Pipe()
	shasToBatchReader, shasToBatchWriter := io.Pipe()
	catFileBatchReader, catFileBatchWriter := io.Pipe()

	wg := sync.WaitGroup{}
	wg.Add(4)

	// Create the go-routines in reverse order.

	// 4. Take the output of cat-file --batch and check if each file in turn
	// to see if they're pointers to files in the LFS store
	go createPointerResultsFromCatFileBatch(ctx, catFileBatchReader, &wg, pointerChan)

	// 3. Take the shas of the blobs and batch read them
	go pipeline.CatFileBatch(shasToBatchReader, catFileBatchWriter, &wg, basePath)

	// 2. From the provided objects restrict to blobs <=1k
	go pipeline.BlobsLessThan1024FromCatFileBatchCheck(catFileCheckReader, shasToBatchWriter, &wg)

	// 1. Run batch-check on all objects in the repository
	if git.CheckGitVersionAtLeast("2.6.0") != nil {
		revListReader, revListWriter := io.Pipe()
		shasToCheckReader, shasToCheckWriter := io.Pipe()
		wg.Add(2)
		go pipeline.CatFileBatchCheck(shasToCheckReader, catFileCheckWriter, &wg, basePath)
		go pipeline.BlobsFromRevListObjects(revListReader, shasToCheckWriter, &wg)
		go pipeline.RevListAllObjects(revListWriter, &wg, basePath, errChan)
	} else {
		go pipeline.CatFileBatchCheckAllObjects(catFileCheckWriter, &wg, basePath, errChan)
	}
	wg.Wait()

	close(pointerChan)
	close(errChan)
}

func createPointerResultsFromCatFileBatch(ctx context.Context, catFileBatchReader *io.PipeReader, wg *sync.WaitGroup, pointerChan chan<- PointerBlob) {
	defer wg.Done()
	defer catFileBatchReader.Close()

	bufferedReader := bufio.NewReader(catFileBatchReader)
	buf := make([]byte, 1025)

loop:
	for {
		select {
		case <-ctx.Done():
			break loop
		default:
		}

		// File descriptor line: sha
		sha, err := bufferedReader.ReadString(' ')
		if err != nil {
			_ = catFileBatchReader.CloseWithError(err)
			break
		}
		// Throw away the blob
		if _, err := bufferedReader.ReadString(' '); err != nil {
			_ = catFileBatchReader.CloseWithError(err)
			break
		}
		sizeStr, err := bufferedReader.ReadString('\n')
		if err != nil {
			_ = catFileBatchReader.CloseWithError(err)
			break
		}
		size, err := strconv.Atoi(sizeStr[:len(sizeStr)-1])
		if err != nil {
			_ = catFileBatchReader.CloseWithError(err)
			break
		}
		pointerBuf := buf[:size+1]
		if _, err := io.ReadFull(bufferedReader, pointerBuf); err != nil {
			_ = catFileBatchReader.CloseWithError(err)
			break
		}
		pointerBuf = pointerBuf[:size]
		// Now we need to check if the pointerBuf is an LFS pointer
		pointer, _ := ReadPointerFromBuffer(pointerBuf)
		if !pointer.IsValid() {
			continue
		}

		pointerChan <- PointerBlob{Hash: sha, Pointer: pointer}
	}
}
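The parser above consumes the raw git cat-file --batch stream. A standalone sketch of the record format it expects ("<sha> <type> <size>\n<contents>\n"); the record below is synthetic:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

func main() {
	record := "0123456789abcdef0123456789abcdef01234567 blob 5\nhello\n"
	r := bufio.NewReader(strings.NewReader(record))

	sha, _ := r.ReadString(' ')      // "<sha> "
	_, _ = r.ReadString(' ')         // discard the object type
	sizeStr, _ := r.ReadString('\n') // "<size>\n"
	size, _ := strconv.Atoi(strings.TrimSpace(sizeStr))

	content := make([]byte, size+1) // object content plus the trailing newline
	_, _ = io.ReadFull(r, content)

	fmt.Printf("sha=%s size=%d content=%q\n", strings.TrimSpace(sha), size, string(content[:size]))
}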
modules/lfs/pointer_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
	"path"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestStringContent(t *testing.T) {
	p := Pointer{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", Size: 1234}
	expected := "version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"
	assert.Equal(t, p.StringContent(), expected)
}

func TestRelativePath(t *testing.T) {
	p := Pointer{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393"}
	expected := path.Join("4d", "7a", "214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
	assert.Equal(t, p.RelativePath(), expected)

	p2 := Pointer{Oid: "4d7a"}
	assert.Equal(t, p2.RelativePath(), "4d7a")
}

func TestIsValid(t *testing.T) {
	p := Pointer{}
	assert.False(t, p.IsValid())

	p = Pointer{Oid: "123"}
	assert.False(t, p.IsValid())

	p = Pointer{Oid: "z4cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc"}
	assert.False(t, p.IsValid())

	p = Pointer{Oid: "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc"}
	assert.True(t, p.IsValid())

	p = Pointer{Oid: "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc", Size: -1}
	assert.False(t, p.IsValid())
}

func TestGeneratePointer(t *testing.T) {
	p, err := GeneratePointer(strings.NewReader("Gitea"))
	assert.NoError(t, err)
	assert.True(t, p.IsValid())
	assert.Equal(t, p.Oid, "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc")
	assert.Equal(t, p.Size, int64(5))
}

func TestReadPointerFromBuffer(t *testing.T) {
	p, err := ReadPointerFromBuffer([]byte{})
	assert.ErrorIs(t, err, ErrMissingPrefix)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("test"))
	assert.ErrorIs(t, err, ErrMissingPrefix)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\n"))
	assert.ErrorIs(t, err, ErrInvalidStructure)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a\nsize 1234\n"))
	assert.ErrorIs(t, err, ErrInvalidOIDFormat)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a2146z4ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
	assert.ErrorIs(t, err, ErrInvalidOIDFormat)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\ntest 1234\n"))
	assert.Error(t, err)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize test\n"))
	assert.Error(t, err)
	assert.False(t, p.IsValid())

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
	assert.NoError(t, err)
	assert.True(t, p.IsValid())
	assert.Equal(t, p.Oid, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
	assert.Equal(t, p.Size, int64(1234))

	p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\ntest"))
	assert.NoError(t, err)
	assert.True(t, p.IsValid())
	assert.Equal(t, p.Oid, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
	assert.Equal(t, p.Size, int64(1234))
}

func TestReadPointer(t *testing.T) {
	p, err := ReadPointer(strings.NewReader("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
	assert.NoError(t, err)
	assert.True(t, p.IsValid())
	assert.Equal(t, p.Oid, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
	assert.Equal(t, p.Size, int64(1234))
}
Deleted file (71 lines):
@@ -1,71 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
	"io"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
)

// ReadPointerFile will return a partially filled LFSMetaObject if the provided reader is a pointer file
func ReadPointerFile(reader io.Reader) (*models.LFSMetaObject, *[]byte) {
	if !setting.LFS.StartServer {
		return nil, nil
	}

	buf := make([]byte, 1024)
	n, _ := reader.Read(buf)
	buf = buf[:n]

	if isTextFile := base.IsTextFile(buf); !isTextFile {
		return nil, nil
	}

	return IsPointerFile(&buf), &buf
}

// IsPointerFile will return a partially filled LFSMetaObject if the provided byte slice is a pointer file
func IsPointerFile(buf *[]byte) *models.LFSMetaObject {
	if !setting.LFS.StartServer {
		return nil
	}

	headString := string(*buf)
	if !strings.HasPrefix(headString, models.LFSMetaFileIdentifier) {
		return nil
	}

	splitLines := strings.Split(headString, "\n")
	if len(splitLines) < 3 {
		return nil
	}

	oid := strings.TrimPrefix(splitLines[1], models.LFSMetaFileOidPrefix)
	size, err := strconv.ParseInt(strings.TrimPrefix(splitLines[2], "size "), 10, 64)
	if len(oid) != 64 || err != nil {
		return nil
	}

	contentStore := &ContentStore{ObjectStorage: storage.LFS}
	meta := &models.LFSMetaObject{Oid: oid, Size: size}
	exist, err := contentStore.Exists(meta)
	if err != nil || !exist {
		return nil
	}

	return meta
}

// ReadMetaObject will read a models.LFSMetaObject and return a reader
func ReadMetaObject(meta *models.LFSMetaObject) (io.ReadCloser, error) {
	contentStore := &ContentStore{ObjectStorage: storage.LFS}
	return contentStore.Get(meta)
}
modules/lfs/shared.go (new file, 69 lines)
@@ -0,0 +1,69 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
	"time"
)

const (
	// MediaType contains the media type for LFS server requests
	MediaType = "application/vnd.git-lfs+json"
)

// BatchRequest contains multiple requests processed in one batch operation.
// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#requests
type BatchRequest struct {
	Operation string     `json:"operation"`
	Transfers []string   `json:"transfers,omitempty"`
	Ref       *Reference `json:"ref,omitempty"`
	Objects   []Pointer  `json:"objects"`
}

// Reference contains a git reference.
// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#ref-property
type Reference struct {
	Name string `json:"name"`
}

// Pointer contains LFS pointer data
type Pointer struct {
	Oid  string `json:"oid" xorm:"UNIQUE(s) INDEX NOT NULL"`
	Size int64  `json:"size" xorm:"NOT NULL"`
}

// BatchResponse contains multiple object metadata Representation structures
// for use with the batch API.
// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#successful-responses
type BatchResponse struct {
	Transfer string            `json:"transfer,omitempty"`
	Objects  []*ObjectResponse `json:"objects"`
}

// ObjectResponse is object metadata as seen by clients of the LFS server.
type ObjectResponse struct {
	Pointer
	Actions map[string]*Link `json:"actions"`
	Error   *ObjectError     `json:"error,omitempty"`
}

// Link provides a structure used to build a hypermedia representation of an HTTP link.
type Link struct {
	Href      string            `json:"href"`
	Header    map[string]string `json:"header,omitempty"`
	ExpiresAt time.Time         `json:"expires_at,omitempty"`
}

// ObjectError defines the JSON structure returned to the client in case of an error
type ObjectError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// PointerBlob associates a Git blob with a Pointer.
type PointerBlob struct {
	Hash string
	Pointer
}
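A sketch of the batch request body these structs serialize to, following the Git LFS batch API; the OID and size are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"code.gitea.io/gitea/modules/lfs"
)

func main() {
	req := lfs.BatchRequest{
		Operation: "download",
		Transfers: []string{"basic"},
		Objects: []lfs.Pointer{
			{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", Size: 1234},
		},
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	// Such a body would be POSTed to <endpoint>/objects/batch with
	// Content-Type and Accept set to lfs.MediaType.
	fmt.Println(string(body))
}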
modules/lfs/transferadapter.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
)

// TransferAdapter represents an adapter for downloading/uploading LFS objects
type TransferAdapter interface {
	Name() string
	Download(ctx context.Context, r *ObjectResponse) (io.ReadCloser, error)
	//Upload(ctx context.Context, reader io.Reader) error
}

// BasicTransferAdapter implements the "basic" adapter
type BasicTransferAdapter struct {
	client *http.Client
}

// Name returns the name of the adapter
func (a *BasicTransferAdapter) Name() string {
	return "basic"
}

// Download reads the download location and downloads the data
func (a *BasicTransferAdapter) Download(ctx context.Context, r *ObjectResponse) (io.ReadCloser, error) {
	download, ok := r.Actions["download"]
	if !ok {
		return nil, errors.New("lfs.BasicTransferAdapter.Download: Action 'download' not found")
	}

	req, err := http.NewRequestWithContext(ctx, "GET", download.Href, nil)
	if err != nil {
		return nil, fmt.Errorf("lfs.BasicTransferAdapter.Download http.NewRequestWithContext: %w", err)
	}
	for key, value := range download.Header {
		req.Header.Set(key, value)
	}

	res, err := a.client.Do(req)
	if err != nil {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
		return nil, fmt.Errorf("lfs.BasicTransferAdapter.Download http.Do: %w", err)
	}

	return res.Body, nil
}
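A sketch (written as in-package code, like the test that follows) of how the adapter is driven from a single batch-response object; the URL, token and helper name are placeholders:

package lfs

import (
	"context"
	"io"
	"net/http"
)

// downloadOne is a hypothetical in-package helper illustrating the adapter call.
func downloadOne(ctx context.Context) (io.ReadCloser, error) {
	adapter := &BasicTransferAdapter{client: http.DefaultClient}

	resp := &ObjectResponse{
		Actions: map[string]*Link{
			"download": {
				Href:   "https://example.com/lfs/objects/some-oid", // placeholder
				Header: map[string]string{"Authorization": "Bearer <token>"},
			},
		},
	}

	// Download issues the GET with the action headers and returns the body stream,
	// which the caller can hand to the content store.
	return adapter.Download(ctx, resp)
}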
modules/lfs/transferadapter_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package lfs

import (
	"bytes"
	"context"
	"io/ioutil"
	"net/http"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestBasicTransferAdapterName(t *testing.T) {
	a := &BasicTransferAdapter{}

	assert.Equal(t, "basic", a.Name())
}

func TestBasicTransferAdapterDownload(t *testing.T) {
	roundTripHandler := func(req *http.Request) *http.Response {
		url := req.URL.String()
		if strings.Contains(url, "valid-download-request") {
			assert.Equal(t, "GET", req.Method)
			assert.Equal(t, "test-value", req.Header.Get("test-header"))

			return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewBufferString("dummy"))}
		}

		t.Errorf("Unknown test case: %s", url)

		return nil
	}

	hc := &http.Client{Transport: RoundTripFunc(roundTripHandler)}
	a := &BasicTransferAdapter{hc}

	var cases = []struct {
		response      *ObjectResponse
		expectederror string
	}{
		// case 0
		{
			response:      &ObjectResponse{},
			expectederror: "Action 'download' not found",
		},
		// case 1
		{
			response: &ObjectResponse{
				Actions: map[string]*Link{"upload": nil},
			},
			expectederror: "Action 'download' not found",
		},
		// case 2
		{
			response: &ObjectResponse{
				Actions: map[string]*Link{"download": {
					Href:   "https://valid-download-request.io",
					Header: map[string]string{"test-header": "test-value"},
				}},
			},
			expectederror: "",
		},
	}

	for n, c := range cases {
		_, err := a.Download(context.Background(), c.response)
		if len(c.expectederror) > 0 {
			assert.True(t, strings.Contains(err.Error(), c.expectederror), "case %d: '%s' should contain '%s'", n, err.Error(), c.expectederror)
		} else {
			assert.NoError(t, err, "case %d", n)
		}
	}
}
@@ -20,6 +20,8 @@ type MigrateOptions struct {
 	// required: true
 	RepoName string `json:"repo_name" binding:"Required"`
 	Mirror bool `json:"mirror"`
+	LFS bool `json:"lfs"`
+	LFSEndpoint string `json:"lfs_endpoint"`
 	Private bool `json:"private"`
 	Description string `json:"description"`
 	OriginalURL string
@@ -116,6 +116,8 @@ func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.Migrate
 		OriginalURL:    repo.OriginalURL,
 		GitServiceType: opts.GitServiceType,
 		Mirror:         repo.IsMirror,
+		LFS:            opts.LFS,
+		LFSEndpoint:    opts.LFSEndpoint,
 		CloneAddr:      repo.CloneURL,
 		Private:        repo.IsPrivate,
 		Wiki:           opts.Wiki,
@@ -104,6 +104,12 @@ func MigrateRepository(ctx context.Context, doer *models.User, ownerName string,
 	if err != nil {
 		return nil, err
 	}
+	if opts.LFS && len(opts.LFSEndpoint) > 0 {
+		err := IsMigrateURLAllowed(opts.LFSEndpoint, doer)
+		if err != nil {
+			return nil, err
+		}
+	}
 	downloader, err := newDownloader(ctx, ownerName, opts)
 	if err != nil {
 		return nil, err
@@ -18,7 +18,6 @@ import (
 	"code.gitea.io/gitea/modules/log"
 	repo_module "code.gitea.io/gitea/modules/repository"
 	"code.gitea.io/gitea/modules/setting"
-	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/structs"
 
 	stdcharset "golang.org/x/net/html/charset"
@@ -70,30 +69,29 @@ func detectEncodingAndBOM(entry *git.TreeEntry, repo *models.Repository) (string
 	buf = buf[:n]
 
 	if setting.LFS.StartServer {
-		meta := lfs.IsPointerFile(&buf)
-		if meta != nil {
-			meta, err = repo.GetLFSMetaObjectByOid(meta.Oid)
+		pointer, _ := lfs.ReadPointerFromBuffer(buf)
+		if pointer.IsValid() {
+			meta, err := repo.GetLFSMetaObjectByOid(pointer.Oid)
 			if err != nil && err != models.ErrLFSObjectNotExist {
 				// return default
 				return "UTF-8", false
 			}
-		}
-		if meta != nil {
-			dataRc, err := lfs.ReadMetaObject(meta)
-			if err != nil {
-				// return default
-				return "UTF-8", false
-			}
-			defer dataRc.Close()
-			buf = make([]byte, 1024)
-			n, err = dataRc.Read(buf)
-			if err != nil {
-				// return default
-				return "UTF-8", false
+			if meta != nil {
+				dataRc, err := lfs.ReadMetaObject(pointer)
+				if err != nil {
+					// return default
+					return "UTF-8", false
+				}
+				defer dataRc.Close()
+				buf = make([]byte, 1024)
+				n, err = dataRc.Read(buf)
+				if err != nil {
+					// return default
+					return "UTF-8", false
+				}
+				buf = buf[:n]
 			}
-			buf = buf[:n]
 		}
 	}
 
 	encoding, err := charset.DetectEncoding(buf)
@@ -387,12 +385,12 @@ func CreateOrUpdateRepoFile(repo *models.Repository, doer *models.User, opts *Up
 
 	if filename2attribute2info[treePath] != nil && filename2attribute2info[treePath]["filter"] == "lfs" {
 		// OK so we are supposed to LFS this data!
-		oid, err := models.GenerateLFSOid(strings.NewReader(opts.Content))
+		pointer, err := lfs.GeneratePointer(strings.NewReader(opts.Content))
 		if err != nil {
 			return nil, err
 		}
-		lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(opts.Content)), RepositoryID: repo.ID}
-		content = lfsMetaObject.Pointer()
+		lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: repo.ID}
+		content = pointer.StringContent()
 	}
 }
 // Add the object to the database
@@ -435,13 +433,13 @@ func CreateOrUpdateRepoFile(repo *models.Repository, doer *models.User, opts *Up
 	if err != nil {
 		return nil, err
 	}
-	contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
-	exist, err := contentStore.Exists(lfsMetaObject)
+	contentStore := lfs.NewContentStore()
+	exist, err := contentStore.Exists(lfsMetaObject.Pointer)
 	if err != nil {
 		return nil, err
 	}
 	if !exist {
-		if err := contentStore.Put(lfsMetaObject, strings.NewReader(opts.Content)); err != nil {
+		if err := contentStore.Put(lfsMetaObject.Pointer, strings.NewReader(opts.Content)); err != nil {
 			if _, err2 := repo.RemoveLFSMetaObjectByOid(lfsMetaObject.Oid); err2 != nil {
 				return nil, fmt.Errorf("Error whilst removing failed inserted LFS object %s: %v (Prev Error: %v)", lfsMetaObject.Oid, err2, err)
 			}
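The two hunks above route LFS-tracked content through GeneratePointer and the content store. A condensed, hypothetical sketch of that write path (the package and helper name are illustrative, not part of the change):

package lfsutil // hypothetical package for illustration

import (
	"strings"

	"code.gitea.io/gitea/modules/lfs"
)

// storeAsLFS returns the pointer text to commit in place of the real content,
// after making sure the bytes are present in the LFS content store.
func storeAsLFS(fileContent string) (string, error) {
	pointer, err := lfs.GeneratePointer(strings.NewReader(fileContent))
	if err != nil {
		return "", err
	}

	contentStore := lfs.NewContentStore()
	exist, err := contentStore.Exists(pointer)
	if err != nil {
		return "", err
	}
	if !exist {
		// Put regenerates the hash while copying, so the stored object always matches the pointer OID.
		if err := contentStore.Put(pointer, strings.NewReader(fileContent)); err != nil {
			return "", err
		}
	}

	// Only this pointer text ends up in the Git tree; the real bytes stay in the store.
	return pointer.StringContent(), nil
}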
@@ -14,7 +14,6 @@ import (
 	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/setting"
-	"code.gitea.io/gitea/modules/storage"
 )
 
 // UploadRepoFileOptions contains the uploaded repository file options
@@ -137,7 +136,7 @@ func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRep
 
 	// OK now we can insert the data into the store - there's no way to clean up the store
 	// once it's in there, it's in there.
-	contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
+	contentStore := lfs.NewContentStore()
 	for _, info := range infos {
 		if err := uploadToLFSContentStore(info, contentStore); err != nil {
 			return cleanUpAfterFailure(&infos, t, err)
@@ -163,18 +162,14 @@ func copyUploadedLFSFileIntoRepository(info *uploadInfo, filename2attribute2info
 	if setting.LFS.StartServer && filename2attribute2info[info.upload.Name] != nil && filename2attribute2info[info.upload.Name]["filter"] == "lfs" {
 		// Handle LFS
 		// FIXME: Inefficient! this should probably happen in models.Upload
-		oid, err := models.GenerateLFSOid(file)
-		if err != nil {
-			return err
-		}
-		fileInfo, err := file.Stat()
+		pointer, err := lfs.GeneratePointer(file)
 		if err != nil {
 			return err
 		}
 
-		info.lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: fileInfo.Size(), RepositoryID: t.repo.ID}
+		info.lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: t.repo.ID}
 
-		if objectHash, err = t.HashObject(strings.NewReader(info.lfsMetaObject.Pointer())); err != nil {
+		if objectHash, err = t.HashObject(strings.NewReader(pointer.StringContent())); err != nil {
 			return err
 		}
 	} else if objectHash, err = t.HashObject(file); err != nil {
@@ -189,7 +184,7 @@ func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) er
 	if info.lfsMetaObject == nil {
 		return nil
 	}
-	exist, err := contentStore.Exists(info.lfsMetaObject)
+	exist, err := contentStore.Exists(info.lfsMetaObject.Pointer)
 	if err != nil {
 		return err
 	}
@@ -202,7 +197,7 @@ func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) er
 	defer file.Close()
 	// FIXME: Put regenerates the hash and copies the file over.
 	// I guess this strictly ensures the soundness of the store but this is inefficient.
-	if err := contentStore.Put(info.lfsMetaObject, file); err != nil {
+	if err := contentStore.Put(info.lfsMetaObject.Pointer, file); err != nil {
 		// OK Now we need to cleanup
 		// Can't clean up the store, once uploaded there they're there.
 		return err
@@ -7,12 +7,14 @@ package repository
 import (
 	"context"
 	"fmt"
+	"net/url"
 	"path"
 	"strings"
 	"time"
 
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/git"
+	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	migration "code.gitea.io/gitea/modules/migrations/base"
 	"code.gitea.io/gitea/modules/setting"
@@ -120,6 +122,13 @@ func MigrateRepositoryGitData(ctx context.Context, u *models.User, repo *models.
 			log.Error("Failed to synchronize tags to releases for repository: %v", err)
 		}
 	}
+
+	if opts.LFS {
+		ep := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
+		if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, ep); err != nil {
+			log.Error("Failed to store missing LFS objects for repository: %v", err)
+		}
+	}
 	}
 
 	if err = repo.UpdateSize(models.DefaultDBContext()); err != nil {
@@ -132,6 +141,10 @@ func MigrateRepositoryGitData(ctx context.Context, u *models.User, repo *models.
 			Interval:       setting.Mirror.DefaultInterval,
 			EnablePrune:    true,
 			NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
+			LFS:            opts.LFS,
+		}
+		if opts.LFS {
+			mirrorModel.LFSEndpoint = opts.LFSEndpoint
 		}
 
 		if opts.MirrorInterval != "" {
@@ -300,3 +313,76 @@ func PushUpdateAddTag(repo *models.Repository, gitRepo *git.Repository, tagName
 
 	return models.SaveOrUpdateTag(repo, &rel)
 }
+
+// StoreMissingLfsObjectsInRepository downloads missing LFS objects
+func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *models.Repository, gitRepo *git.Repository, endpoint *url.URL) error {
+	client := lfs.NewClient(endpoint)
+	contentStore := lfs.NewContentStore()
+
+	pointerChan := make(chan lfs.PointerBlob)
+	errChan := make(chan error, 1)
+	go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)
+
+	err := func() error {
+		for pointerBlob := range pointerChan {
+			meta, err := models.NewLFSMetaObject(&models.LFSMetaObject{Pointer: pointerBlob.Pointer, RepositoryID: repo.ID})
+			if err != nil {
+				return fmt.Errorf("StoreMissingLfsObjectsInRepository models.NewLFSMetaObject: %w", err)
+			}
+			if meta.Existing {
+				continue
+			}
+
+			log.Trace("StoreMissingLfsObjectsInRepository: LFS OID[%s] not present in repository %s", pointerBlob.Oid, repo.FullName())
+
+			err = func() error {
+				exist, err := contentStore.Exists(pointerBlob.Pointer)
+				if err != nil {
+					return fmt.Errorf("StoreMissingLfsObjectsInRepository contentStore.Exists: %w", err)
+				}
+				if !exist {
+					if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
+						log.Info("LFS OID[%s] download denied because of LFS_MAX_FILE_SIZE=%d < size %d", pointerBlob.Oid, setting.LFS.MaxFileSize, pointerBlob.Size)
+						return nil
+					}
+
+					stream, err := client.Download(ctx, pointerBlob.Oid, pointerBlob.Size)
+					if err != nil {
+						return fmt.Errorf("StoreMissingLfsObjectsInRepository: LFS OID[%s] failed to download: %w", pointerBlob.Oid, err)
+					}
+					defer stream.Close()
+
+					if err := contentStore.Put(pointerBlob.Pointer, stream); err != nil {
+						return fmt.Errorf("StoreMissingLfsObjectsInRepository LFS OID[%s] contentStore.Put: %w", pointerBlob.Oid, err)
+					}
+				} else {
+					log.Trace("StoreMissingLfsObjectsInRepository: LFS OID[%s] already present in content store", pointerBlob.Oid)
+				}
+				return nil
+			}()
+			if err != nil {
+				if _, err2 := repo.RemoveLFSMetaObjectByOid(meta.Oid); err2 != nil {
+					log.Error("StoreMissingLfsObjectsInRepository RemoveLFSMetaObjectByOid[Oid: %s]: %w", meta.Oid, err2)
+				}
+
+				select {
+				case <-ctx.Done():
+					return nil
+				default:
+				}
+				return err
+			}
+		}
+		return nil
+	}()
+	if err != nil {
+		return err
+	}
+
+	err, has := <-errChan
+	if has {
+		return err
+	}
+
+	return nil
+}
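The migration path above derives the LFS endpoint from the clone address via lfs.DetermineEndpoint, whose exact rules are outside this excerpt. Per the Git LFS server-discovery convention, the default HTTP(S) endpoint is the remote URL plus "/info/lfs"; a hypothetical derivation following that convention only (not necessarily the implementation) could look like this:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// defaultLFSEndpoint sketches the spec's default endpoint for an HTTP(S) remote.
func defaultLFSEndpoint(cloneAddr string) (*url.URL, error) {
	u, err := url.Parse(cloneAddr)
	if err != nil {
		return nil, err
	}
	p := strings.TrimSuffix(u.Path, "/")
	if !strings.HasSuffix(p, ".git") {
		p += ".git"
	}
	u.Path = p + "/info/lfs"
	return u, nil
}

func main() {
	ep, _ := defaultLFSEndpoint("https://example.com/owner/repo.git")
	fmt.Println(ep) // https://example.com/owner/repo.git/info/lfs
}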
@@ -260,6 +260,8 @@ type MigrateRepoOptions struct {
 	AuthToken string `json:"auth_token"`
 
 	Mirror bool `json:"mirror"`
+	LFS bool `json:"lfs"`
+	LFSEndpoint string `json:"lfs_endpoint"`
 	Private bool `json:"private"`
 	Description string `json:"description" binding:"MaxSize(255)"`
 	Wiki bool `json:"wiki"`
@@ -6,9 +6,12 @@ package util
 
 import (
 	"errors"
+	"net/url"
 	"os"
 	"path"
 	"path/filepath"
+	"regexp"
+	"runtime"
 	"strings"
 )
 
@@ -150,3 +153,23 @@ func StatDir(rootPath string, includeDir ...bool) ([]string, error) {
 	}
 	return statDir(rootPath, "", isIncludeDir, false, false)
 }
+
+// FileURLToPath extracts the path informations from a file://... url.
+func FileURLToPath(u *url.URL) (string, error) {
+	if u.Scheme != "file" {
+		return "", errors.New("URL scheme is not 'file': " + u.String())
+	}
+
+	path := u.Path
+
+	if runtime.GOOS != "windows" {
+		return path, nil
+	}
+
+	// If it looks like there's a Windows drive letter at the beginning, strip off the leading slash.
+	re := regexp.MustCompile("/[A-Za-z]:/")
+	if re.MatchString(path) {
+		return path[1:], nil
+	}
+	return path, nil
+}
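A small usage sketch for the new FileURLToPath helper (non-Windows case; the paths are placeholders, and the cases mirror the test file that follows):

package main

import (
	"fmt"
	"net/url"

	"code.gitea.io/gitea/modules/util"
)

func main() {
	u, _ := url.Parse("file:///var/lib/git/repo.git")
	p, err := util.FileURLToPath(u)
	fmt.Println(p, err) // "/var/lib/git/repo.git" <nil>

	u, _ = url.Parse("https://example.com/repo.git")
	_, err = util.FileURLToPath(u)
	fmt.Println(err) // URL scheme is not 'file': ...
}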
modules/util/path_test.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package util

import (
	"net/url"
	"runtime"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFileURLToPath(t *testing.T) {
	var cases = []struct {
		url      string
		expected string
		haserror bool
		windows  bool
	}{
		// case 0
		{
			url:      "",
			haserror: true,
		},
		// case 1
		{
			url:      "http://test.io",
			haserror: true,
		},
		// case 2
		{
			url:      "file:///path",
			expected: "/path",
		},
		// case 3
		{
			url:      "file:///C:/path",
			expected: "C:/path",
			windows:  true,
		},
	}

	for n, c := range cases {
		if c.windows && runtime.GOOS != "windows" {
			continue
		}
		u, _ := url.Parse(c.url)
		p, err := FileURLToPath(u)
		if c.haserror {
			assert.Error(t, err, "case %d: should return error", n)
		} else {
			assert.NoError(t, err, "case %d: should not return error", n)
			assert.Equal(t, c.expected, p, "case %d: should be equal", n)
		}
	}
}
@@ -726,6 +726,10 @@ mirror_address = Clone From URL
 mirror_address_desc = Put any required credentials in the Clone Authorization section.
 mirror_address_url_invalid = The provided url is invalid. You must escape all components of the url correctly.
 mirror_address_protocol_invalid = The provided url is invalid. Only http(s):// or git:// locations can be mirrored from.
+mirror_lfs = Large File Storage (LFS)
+mirror_lfs_desc = Activate mirroring of LFS data.
+mirror_lfs_endpoint = LFS Endpoint
+mirror_lfs_endpoint_desc = Sync will attempt to use the clone url to <a target="_blank" rel="noopener noreferrer" href="%s">determine the LFS server</a>. You can also specify a custom endpoint if the repository LFS data is stored somewhere else.
 mirror_last_synced = Last Synchronized
 watchers = Watchers
 stargazers = Stargazers
@@ -784,6 +788,11 @@ migrate_options = Migration Options
 migrate_service = Migration Service
 migrate_options_mirror_helper = This repository will be a <span class="text blue">mirror</span>
 migrate_options_mirror_disabled = Your site administrator has disabled new mirrors.
+migrate_options_lfs = Migrate LFS files
+migrate_options_lfs_endpoint.label = LFS Endpoint
+migrate_options_lfs_endpoint.description = Migration will attempt to use your Git remote to <a target="_blank" rel="noopener noreferrer" href="%s">determine the LFS server</a>. You can also specify a custom endpoint if the repository LFS data is stored somewhere else.
+migrate_options_lfs_endpoint.description.local = A local server path is supported too.
+migrate_options_lfs_endpoint.placeholder = Leave blank to derive from clone URL
 migrate_items = Migration Items
 migrate_items_wiki = Wiki
 migrate_items_milestones = Milestones
@@ -800,8 +809,8 @@ migrate.permission_denied = You are not allowed to import local repositories.
 migrate.permission_denied_blocked = You are not allowed to import from blocked hosts.
 migrate.permission_denied_private_ip = You are not allowed to import from private IPs.
 migrate.invalid_local_path = "The local path is invalid. It does not exist or is not a directory."
+migrate.invalid_lfs_endpoint = The LFS endpoint is not valid.
 migrate.failed = Migration failed: %v
-migrate.lfs_mirror_unsupported = Mirroring LFS objects is not supported - use 'git lfs fetch --all' and 'git lfs push --all' instead.
 migrate.migrate_items_options = Access Token is required to migrate additional items
 migrated_from = Migrated from <a href="%[1]s">%[2]s</a>
 migrated_from_fake = Migrated From %[1]s
@@ -15,6 +15,7 @@ import (
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/convert"
 	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/migrations"
 	"code.gitea.io/gitea/modules/migrations/base"
@@ -101,27 +102,7 @@ func Migrate(ctx *context.APIContext) {
 		err = migrations.IsMigrateURLAllowed(remoteAddr, ctx.User)
 	}
 	if err != nil {
-		if models.IsErrInvalidCloneAddr(err) {
-			addrErr := err.(*models.ErrInvalidCloneAddr)
-			switch {
-			case addrErr.IsURLError:
-				ctx.Error(http.StatusUnprocessableEntity, "", err)
-			case addrErr.IsPermissionDenied:
-				if addrErr.LocalPath {
-					ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import local repositories.")
-				} else if len(addrErr.PrivateNet) == 0 {
-					ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from blocked hosts.")
-				} else {
-					ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from private IPs.")
-				}
-			case addrErr.IsInvalidPath:
-				ctx.Error(http.StatusUnprocessableEntity, "", "Invalid local path, it does not exist or not a directory.")
-			default:
-				ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", "Unknown error type (ErrInvalidCloneAddr): "+err.Error())
-			}
-		} else {
-			ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", err)
-		}
+		handleRemoteAddrError(ctx, err)
 		return
 	}
 
@@ -137,12 +118,29 @@ func Migrate(ctx *context.APIContext) {
 		return
 	}
 
+	form.LFS = form.LFS && setting.LFS.StartServer
+
+	if form.LFS && len(form.LFSEndpoint) > 0 {
+		ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+		if ep == nil {
+			ctx.Error(http.StatusInternalServerError, "", ctx.Tr("repo.migrate.invalid_lfs_endpoint"))
+			return
+		}
+		err = migrations.IsMigrateURLAllowed(ep.String(), ctx.User)
+		if err != nil {
+			handleRemoteAddrError(ctx, err)
+			return
+		}
+	}
+
 	var opts = migrations.MigrateOptions{
 		CloneAddr:    remoteAddr,
 		RepoName:     form.RepoName,
 		Description:  form.Description,
 		Private:      form.Private || setting.Repository.ForcePrivate,
 		Mirror:       form.Mirror,
+		LFS:          form.LFS,
+		LFSEndpoint:  form.LFSEndpoint,
 		AuthUsername: form.AuthUsername,
 		AuthPassword: form.AuthPassword,
 		AuthToken:    form.AuthToken,
@@ -245,3 +243,27 @@ func handleMigrateError(ctx *context.APIContext, repoOwner *models.User, remoteA
 		}
 	}
 }
+
+func handleRemoteAddrError(ctx *context.APIContext, err error) {
+	if models.IsErrInvalidCloneAddr(err) {
+		addrErr := err.(*models.ErrInvalidCloneAddr)
+		switch {
+		case addrErr.IsURLError:
+			ctx.Error(http.StatusUnprocessableEntity, "", err)
+		case addrErr.IsPermissionDenied:
+			if addrErr.LocalPath {
+				ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import local repositories.")
+			} else if len(addrErr.PrivateNet) == 0 {
+				ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from blocked hosts.")
+			} else {
+				ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from private IPs.")
+			}
+		case addrErr.IsInvalidPath:
+			ctx.Error(http.StatusUnprocessableEntity, "", "Invalid local path, it does not exist or not a directory.")
+		default:
+			ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", "Unknown error type (ErrInvalidCloneAddr): "+err.Error())
+		}
+	} else {
+		ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", err)
+	}
+}
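A sketch of the extra fields a migration request can now carry, assuming the MigrateRepoOptions struct shown earlier lives in modules/structs as usual; required fields such as the clone address are omitted for brevity and the values are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	api "code.gitea.io/gitea/modules/structs"
)

func main() {
	opts := api.MigrateRepoOptions{
		Mirror:      true,
		LFS:         true,
		LFSEndpoint: "https://example.com/owner/repo.git/info/lfs",
	}
	body, _ := json.MarshalIndent(opts, "", "  ")
	fmt.Println(string(body)) // includes "lfs": true and "lfs_endpoint": "..."
}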
@@ -96,12 +96,13 @@ func ServeBlobOrLFS(ctx *context.Context, blob *git.Blob) error {
 		}
 	}()
 
-	if meta, _ := lfs.ReadPointerFile(dataRc); meta != nil {
-		meta, _ = ctx.Repo.Repository.GetLFSMetaObjectByOid(meta.Oid)
+	pointer, _ := lfs.ReadPointer(dataRc)
+	if pointer.IsValid() {
+		meta, _ := ctx.Repo.Repository.GetLFSMetaObjectByOid(pointer.Oid)
 		if meta == nil {
 			return ServeBlob(ctx, blob)
 		}
-		lfsDataRc, err := lfs.ReadMetaObject(meta)
+		lfsDataRc, err := lfs.ReadMetaObject(meta.Pointer)
 		if err != nil {
 			return err
 		}
@@ -5,7 +5,6 @@
 package repo
 
 import (
-	"bufio"
 	"bytes"
 	"fmt"
 	gotemplate "html/template"
@@ -15,7 +14,6 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"sync"
 
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/base"
@@ -266,7 +264,7 @@ func LFSFileGet(ctx *context.Context) {
 		return
 	}
 	ctx.Data["LFSFile"] = meta
-	dataRc, err := lfs.ReadMetaObject(meta)
+	dataRc, err := lfs.ReadMetaObject(meta.Pointer)
 	if err != nil {
 		ctx.ServerError("LFSFileGet", err)
 		return
@@ -385,9 +383,8 @@ func LFSFileFind(ctx *context.Context) {
 	ctx.Data["PageIsSettingsLFS"] = true
 	var hash git.SHA1
 	if len(sha) == 0 {
-		meta := models.LFSMetaObject{Oid: oid, Size: size}
-		pointer := meta.Pointer()
-		hash = git.ComputeBlobHash([]byte(pointer))
+		pointer := lfs.Pointer{Oid: oid, Size: size}
+		hash = git.ComputeBlobHash([]byte(pointer.StringContent()))
 		sha = hash.String()
 	} else {
 		hash = git.MustIDFromString(sha)
@ -421,158 +418,99 @@ func LFSPointerFiles(ctx *context.Context) {
 }

 ctx.Data["LFSFilesLink"] = ctx.Repo.RepoLink + "/settings/lfs"

-	basePath := ctx.Repo.Repository.RepoPath()
-
-	pointerChan := make(chan pointerResult)
-	catFileCheckReader, catFileCheckWriter := io.Pipe()
-	shasToBatchReader, shasToBatchWriter := io.Pipe()
-	catFileBatchReader, catFileBatchWriter := io.Pipe()
-	errChan := make(chan error, 1)
-	wg := sync.WaitGroup{}
-	wg.Add(5)
-
-	var numPointers, numAssociated, numNoExist, numAssociatable int
-
-	go func() {
-		defer wg.Done()
-		pointers := make([]pointerResult, 0, 50)
-		for pointer := range pointerChan {
-			pointers = append(pointers, pointer)
-			if pointer.InRepo {
-				numAssociated++
-			}
-			if !pointer.Exists {
-				numNoExist++
-			}
-			if !pointer.InRepo && pointer.Accessible {
-				numAssociatable++
-			}
-		}
-		numPointers = len(pointers)
-
-		ctx.Data["Pointers"] = pointers
-		ctx.Data["NumPointers"] = numPointers
-		ctx.Data["NumAssociated"] = numAssociated
-		ctx.Data["NumAssociatable"] = numAssociatable
-		ctx.Data["NumNoExist"] = numNoExist
-		ctx.Data["NumNotAssociated"] = numPointers - numAssociated
-	}()
-
-	go createPointerResultsFromCatFileBatch(catFileBatchReader, &wg, pointerChan, ctx.Repo.Repository, ctx.User)
-	go pipeline.CatFileBatch(shasToBatchReader, catFileBatchWriter, &wg, basePath)
-	go pipeline.BlobsLessThan1024FromCatFileBatchCheck(catFileCheckReader, shasToBatchWriter, &wg)
-
-	if git.CheckGitVersionAtLeast("2.6.0") != nil {
-		revListReader, revListWriter := io.Pipe()
-		shasToCheckReader, shasToCheckWriter := io.Pipe()
-		wg.Add(2)
-		go pipeline.CatFileBatchCheck(shasToCheckReader, catFileCheckWriter, &wg, basePath)
-		go pipeline.BlobsFromRevListObjects(revListReader, shasToCheckWriter, &wg)
-		go pipeline.RevListAllObjects(revListWriter, &wg, basePath, errChan)
-	} else {
-		go pipeline.CatFileBatchCheckAllObjects(catFileCheckWriter, &wg, basePath, errChan)
-	}
-	wg.Wait()
-
-	select {
-	case err, has := <-errChan:
-		if has {
-			ctx.ServerError("LFSPointerFiles", err)
-		}
-	default:
-	}
+	err = func() error {
+		pointerChan := make(chan lfs.PointerBlob)
+		errChan := make(chan error, 1)
+		go lfs.SearchPointerBlobs(ctx.Req.Context(), ctx.Repo.GitRepo, pointerChan, errChan)
+
+		numPointers := 0
+		var numAssociated, numNoExist, numAssociatable int
+
+		type pointerResult struct {
+			SHA        string
+			Oid        string
+			Size       int64
+			InRepo     bool
+			Exists     bool
+			Accessible bool
+		}
+
+		results := []pointerResult{}
+
+		contentStore := lfs.NewContentStore()
+		repo := ctx.Repo.Repository
+
+		for pointerBlob := range pointerChan {
+			numPointers++
+
+			result := pointerResult{
+				SHA:  pointerBlob.Hash,
+				Oid:  pointerBlob.Oid,
+				Size: pointerBlob.Size,
+			}
+
+			if _, err := repo.GetLFSMetaObjectByOid(pointerBlob.Oid); err != nil {
+				if err != models.ErrLFSObjectNotExist {
+					return err
+				}
+			} else {
+				result.InRepo = true
+			}
+
+			result.Exists, err = contentStore.Exists(pointerBlob.Pointer)
+			if err != nil {
+				return err
+			}
+
+			if result.Exists {
+				if !result.InRepo {
+					// Can we fix?
+					// OK well that's "simple"
+					// - we need to check whether current user has access to a repo that has access to the file
+					result.Accessible, err = models.LFSObjectAccessible(ctx.User, pointerBlob.Oid)
+					if err != nil {
+						return err
+					}
+				} else {
+					result.Accessible = true
+				}
+			}
+
+			if result.InRepo {
+				numAssociated++
+			}
+			if !result.Exists {
+				numNoExist++
+			}
+			if !result.InRepo && result.Accessible {
+				numAssociatable++
+			}
+
+			results = append(results, result)
+		}
+
+		err, has := <-errChan
+		if has {
+			return err
+		}
+
+		ctx.Data["Pointers"] = results
+		ctx.Data["NumPointers"] = numPointers
+		ctx.Data["NumAssociated"] = numAssociated
+		ctx.Data["NumAssociatable"] = numAssociatable
+		ctx.Data["NumNoExist"] = numNoExist
+		ctx.Data["NumNotAssociated"] = numPointers - numAssociated
+
+		return nil
+	}()
+	if err != nil {
+		ctx.ServerError("LFSPointerFiles", err)
+		return
+	}

 	ctx.HTML(http.StatusOK, tplSettingsLFSPointers)
 }
-type pointerResult struct {
-	SHA        string
-	Oid        string
-	Size       int64
-	InRepo     bool
-	Exists     bool
-	Accessible bool
-}
-
-func createPointerResultsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg *sync.WaitGroup, pointerChan chan<- pointerResult, repo *models.Repository, user *models.User) {
-	defer wg.Done()
-	defer catFileBatchReader.Close()
-	contentStore := lfs.ContentStore{ObjectStorage: storage.LFS}
-
-	bufferedReader := bufio.NewReader(catFileBatchReader)
-	buf := make([]byte, 1025)
-	for {
-		// File descriptor line: sha
-		sha, err := bufferedReader.ReadString(' ')
-		if err != nil {
-			_ = catFileBatchReader.CloseWithError(err)
-			break
-		}
-		// Throw away the blob
-		if _, err := bufferedReader.ReadString(' '); err != nil {
-			_ = catFileBatchReader.CloseWithError(err)
-			break
-		}
-		sizeStr, err := bufferedReader.ReadString('\n')
-		if err != nil {
-			_ = catFileBatchReader.CloseWithError(err)
-			break
-		}
-		size, err := strconv.Atoi(sizeStr[:len(sizeStr)-1])
-		if err != nil {
-			_ = catFileBatchReader.CloseWithError(err)
-			break
-		}
-		pointerBuf := buf[:size+1]
-		if _, err := io.ReadFull(bufferedReader, pointerBuf); err != nil {
-			_ = catFileBatchReader.CloseWithError(err)
-			break
-		}
-		pointerBuf = pointerBuf[:size]
-		// Now we need to check if the pointerBuf is an LFS pointer
-		pointer := lfs.IsPointerFile(&pointerBuf)
-		if pointer == nil {
-			continue
-		}
-
-		result := pointerResult{
-			SHA:  strings.TrimSpace(sha),
-			Oid:  pointer.Oid,
-			Size: pointer.Size,
-		}
-
-		// Then we need to check that this pointer is in the db
-		if _, err := repo.GetLFSMetaObjectByOid(pointer.Oid); err != nil {
-			if err != models.ErrLFSObjectNotExist {
-				_ = catFileBatchReader.CloseWithError(err)
-				break
-			}
-		} else {
-			result.InRepo = true
-		}
-
-		result.Exists, err = contentStore.Exists(pointer)
-		if err != nil {
-			_ = catFileBatchReader.CloseWithError(err)
-			break
-		}
-
-		if result.Exists {
-			if !result.InRepo {
-				// Can we fix?
-				// OK well that's "simple"
-				// - we need to check whether current user has access to a repo that has access to the file
-				result.Accessible, err = models.LFSObjectAccessible(user, result.Oid)
-				if err != nil {
-					_ = catFileBatchReader.CloseWithError(err)
-					break
-				}
-			} else {
-				result.Accessible = true
-			}
-		}
-		pointerChan <- result
-	}
-	close(pointerChan)
-}
-
 // LFSAutoAssociate auto associates accessible lfs files
 func LFSAutoAssociate(ctx *context.Context) {
 	if !setting.LFS.StartServer {
@@ -12,6 +12,7 @@ import (
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/base"
 	"code.gitea.io/gitea/modules/context"
+	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/migrations"
 	"code.gitea.io/gitea/modules/setting"

@@ -47,6 +48,7 @@ func Migrate(ctx *context.Context) {
 	ctx.Data["private"] = getRepoPrivate(ctx)
 	ctx.Data["mirror"] = ctx.Query("mirror") == "1"
+	ctx.Data["lfs"] = ctx.Query("lfs") == "1"
 	ctx.Data["wiki"] = ctx.Query("wiki") == "1"
 	ctx.Data["milestones"] = ctx.Query("milestones") == "1"
 	ctx.Data["labels"] = ctx.Query("labels") == "1"

@@ -114,6 +116,34 @@ func handleMigrateError(ctx *context.Context, owner *models.User, err error, nam
 	}
 }

+func handleMigrateRemoteAddrError(ctx *context.Context, err error, tpl base.TplName, form *forms.MigrateRepoForm) {
+	if models.IsErrInvalidCloneAddr(err) {
+		addrErr := err.(*models.ErrInvalidCloneAddr)
+		switch {
+		case addrErr.IsProtocolInvalid:
+			ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tpl, form)
+		case addrErr.IsURLError:
+			ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, form)
+		case addrErr.IsPermissionDenied:
+			if addrErr.LocalPath {
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tpl, form)
+			} else if len(addrErr.PrivateNet) == 0 {
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tpl, form)
+			} else {
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tpl, form)
+			}
+		case addrErr.IsInvalidPath:
+			ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tpl, form)
+		default:
+			log.Error("Error whilst updating url: %v", err)
+			ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, form)
+		}
+	} else {
+		log.Error("Error whilst updating url: %v", err)
+		ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, form)
+	}
+}
+
 // MigratePost response for migrating from external git repository
 func MigratePost(ctx *context.Context) {
 	form := web.GetForm(ctx).(*forms.MigrateRepoForm)

@@ -144,35 +174,28 @@ func MigratePost(ctx *context.Context) {
 		err = migrations.IsMigrateURLAllowed(remoteAddr, ctx.User)
 	}
 	if err != nil {
-		if models.IsErrInvalidCloneAddr(err) {
-			ctx.Data["Err_CloneAddr"] = true
-			addrErr := err.(*models.ErrInvalidCloneAddr)
-			switch {
-			case addrErr.IsProtocolInvalid:
-				ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tpl, &form)
-			case addrErr.IsURLError:
-				ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, &form)
-			case addrErr.IsPermissionDenied:
-				if addrErr.LocalPath {
-					ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tpl, &form)
-				} else if len(addrErr.PrivateNet) == 0 {
-					ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tpl, &form)
-				} else {
-					ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tpl, &form)
-				}
-			case addrErr.IsInvalidPath:
-				ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tpl, &form)
-			default:
-				log.Error("Error whilst updating url: %v", err)
-				ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, &form)
-			}
-		} else {
-			log.Error("Error whilst updating url: %v", err)
-			ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, &form)
-		}
+		ctx.Data["Err_CloneAddr"] = true
+		handleMigrateRemoteAddrError(ctx, err, tpl, form)
 		return
 	}

+	form.LFS = form.LFS && setting.LFS.StartServer
+
+	if form.LFS && len(form.LFSEndpoint) > 0 {
+		ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+		if ep == nil {
+			ctx.Data["Err_LFSEndpoint"] = true
+			ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_lfs_endpoint"), tpl, &form)
+			return
+		}
+		err = migrations.IsMigrateURLAllowed(ep.String(), ctx.User)
+		if err != nil {
+			ctx.Data["Err_LFSEndpoint"] = true
+			handleMigrateRemoteAddrError(ctx, err, tpl, form)
+			return
+		}
+	}
+
 	var opts = migrations.MigrateOptions{
 		OriginalURL:    form.CloneAddr,
 		GitServiceType: serviceType,

@@ -181,6 +204,8 @@ func MigratePost(ctx *context.Context) {
 		Description:    form.Description,
 		Private:        form.Private || setting.Repository.ForcePrivate,
 		Mirror:         form.Mirror && !setting.Repository.DisableMirrors,
+		LFS:            form.LFS,
+		LFSEndpoint:    form.LFSEndpoint,
 		AuthUsername:   form.AuthUsername,
 		AuthPassword:   form.AuthPassword,
 		AuthToken:      form.AuthToken,
@@ -17,6 +17,7 @@ import (
 	"code.gitea.io/gitea/modules/base"
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/git"
+	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/migrations"
 	"code.gitea.io/gitea/modules/repository"

@@ -170,30 +171,8 @@ func SettingsPost(ctx *context.Context) {
 			err = migrations.IsMigrateURLAllowed(address, ctx.User)
 		}
 		if err != nil {
-			if models.IsErrInvalidCloneAddr(err) {
-				ctx.Data["Err_MirrorAddress"] = true
-				addrErr := err.(*models.ErrInvalidCloneAddr)
-				switch {
-				case addrErr.IsProtocolInvalid:
-					ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tplSettingsOptions, &form)
-				case addrErr.IsURLError:
-					ctx.RenderWithErr(ctx.Tr("form.url_error"), tplSettingsOptions, &form)
-				case addrErr.IsPermissionDenied:
-					if addrErr.LocalPath {
-						ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tplSettingsOptions, &form)
-					} else if len(addrErr.PrivateNet) == 0 {
-						ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tplSettingsOptions, &form)
-					} else {
-						ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tplSettingsOptions, &form)
-					}
-				case addrErr.IsInvalidPath:
-					ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tplSettingsOptions, &form)
-				default:
-					ctx.ServerError("Unknown error", err)
-				}
-			}
 			ctx.Data["Err_MirrorAddress"] = true
-			ctx.RenderWithErr(ctx.Tr("repo.mirror_address_url_invalid"), tplSettingsOptions, &form)
+			handleSettingRemoteAddrError(ctx, err, form)
 			return
 		}

@@ -202,6 +181,30 @@ func SettingsPost(ctx *context.Context) {
 			return
 		}

+		form.LFS = form.LFS && setting.LFS.StartServer
+
+		if len(form.LFSEndpoint) > 0 {
+			ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+			if ep == nil {
+				ctx.Data["Err_LFSEndpoint"] = true
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_lfs_endpoint"), tplSettingsOptions, &form)
+				return
+			}
+			err = migrations.IsMigrateURLAllowed(ep.String(), ctx.User)
+			if err != nil {
+				ctx.Data["Err_LFSEndpoint"] = true
+				handleSettingRemoteAddrError(ctx, err, form)
+				return
+			}
+		}
+
+		ctx.Repo.Mirror.LFS = form.LFS
+		ctx.Repo.Mirror.LFSEndpoint = form.LFSEndpoint
+		if err := models.UpdateMirror(ctx.Repo.Mirror); err != nil {
+			ctx.ServerError("UpdateMirror", err)
+			return
+		}
+
 		ctx.Flash.Success(ctx.Tr("repo.settings.update_settings_success"))
 		ctx.Redirect(repo.Link() + "/settings")

@@ -615,6 +618,31 @@ func SettingsPost(ctx *context.Context) {
 	}
 }

+func handleSettingRemoteAddrError(ctx *context.Context, err error, form *forms.RepoSettingForm) {
+	if models.IsErrInvalidCloneAddr(err) {
+		addrErr := err.(*models.ErrInvalidCloneAddr)
+		switch {
+		case addrErr.IsProtocolInvalid:
+			ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tplSettingsOptions, form)
+		case addrErr.IsURLError:
+			ctx.RenderWithErr(ctx.Tr("form.url_error"), tplSettingsOptions, form)
+		case addrErr.IsPermissionDenied:
+			if addrErr.LocalPath {
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tplSettingsOptions, form)
+			} else if len(addrErr.PrivateNet) == 0 {
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tplSettingsOptions, form)
+			} else {
+				ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tplSettingsOptions, form)
+			}
+		case addrErr.IsInvalidPath:
+			ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tplSettingsOptions, form)
+		default:
+			ctx.ServerError("Unknown error", err)
+		}
+	}
+	ctx.RenderWithErr(ctx.Tr("repo.mirror_address_url_invalid"), tplSettingsOptions, form)
+}
+
 // Collaboration render a repository's collaboration page
 func Collaboration(ctx *context.Context) {
 	ctx.Data["Title"] = ctx.Tr("repo.settings")
@@ -274,43 +274,42 @@ func renderDirectory(ctx *context.Context, treeLink string) {

 		// FIXME: what happens when README file is an image?
 		if isTextFile && setting.LFS.StartServer {
-			meta := lfs.IsPointerFile(&buf)
-			if meta != nil {
-				meta, err = ctx.Repo.Repository.GetLFSMetaObjectByOid(meta.Oid)
+			pointer, _ := lfs.ReadPointerFromBuffer(buf)
+			if pointer.IsValid() {
+				meta, err := ctx.Repo.Repository.GetLFSMetaObjectByOid(pointer.Oid)
 				if err != nil && err != models.ErrLFSObjectNotExist {
 					ctx.ServerError("GetLFSMetaObject", err)
 					return
 				}
-			}
-
-			if meta != nil {
-				ctx.Data["IsLFSFile"] = true
-				isLFSFile = true
-
-				// OK read the lfs object
-				var err error
-				dataRc, err = lfs.ReadMetaObject(meta)
-				if err != nil {
-					ctx.ServerError("ReadMetaObject", err)
-					return
-				}
-				defer dataRc.Close()
-
-				buf = make([]byte, 1024)
-				n, err = dataRc.Read(buf)
-				if err != nil {
-					ctx.ServerError("Data", err)
-					return
-				}
-				buf = buf[:n]
-
-				isTextFile = base.IsTextFile(buf)
-				ctx.Data["IsTextFile"] = isTextFile
-
-				fileSize = meta.Size
-				ctx.Data["FileSize"] = meta.Size
-				filenameBase64 := base64.RawURLEncoding.EncodeToString([]byte(readmeFile.name))
-				ctx.Data["RawFileLink"] = fmt.Sprintf("%s%s.git/info/lfs/objects/%s/%s", setting.AppURL, ctx.Repo.Repository.FullName(), meta.Oid, filenameBase64)
+				if meta != nil {
+					ctx.Data["IsLFSFile"] = true
+					isLFSFile = true
+
+					// OK read the lfs object
+					var err error
+					dataRc, err = lfs.ReadMetaObject(pointer)
+					if err != nil {
+						ctx.ServerError("ReadMetaObject", err)
+						return
+					}
+					defer dataRc.Close()
+
+					buf = make([]byte, 1024)
+					n, err = dataRc.Read(buf)
+					if err != nil {
+						ctx.ServerError("Data", err)
+						return
+					}
+					buf = buf[:n]
+
+					isTextFile = base.IsTextFile(buf)
+					ctx.Data["IsTextFile"] = isTextFile
+
+					fileSize = meta.Size
+					ctx.Data["FileSize"] = meta.Size
+					filenameBase64 := base64.RawURLEncoding.EncodeToString([]byte(readmeFile.name))
+					ctx.Data["RawFileLink"] = fmt.Sprintf("%s%s.git/info/lfs/objects/%s/%s", setting.AppURL, ctx.Repo.Repository.FullName(), meta.Oid, filenameBase64)
+				}
 			}
 		}
@@ -400,39 +399,39 @@ func renderFile(ctx *context.Context, entry *git.TreeEntry, treeLink, rawLink st

 	//Check for LFS meta file
 	if isTextFile && setting.LFS.StartServer {
-		meta := lfs.IsPointerFile(&buf)
-		if meta != nil {
-			meta, err = ctx.Repo.Repository.GetLFSMetaObjectByOid(meta.Oid)
+		pointer, _ := lfs.ReadPointerFromBuffer(buf)
+		if pointer.IsValid() {
+			meta, err := ctx.Repo.Repository.GetLFSMetaObjectByOid(pointer.Oid)
 			if err != nil && err != models.ErrLFSObjectNotExist {
 				ctx.ServerError("GetLFSMetaObject", err)
 				return
 			}
-		}
-		if meta != nil {
-			isLFSFile = true
-
-			// OK read the lfs object
-			var err error
-			dataRc, err = lfs.ReadMetaObject(meta)
-			if err != nil {
-				ctx.ServerError("ReadMetaObject", err)
-				return
-			}
-			defer dataRc.Close()
-
-			buf = make([]byte, 1024)
-			n, err = dataRc.Read(buf)
-			// Error EOF don't mean there is an error, it just means we read to
-			// the end
-			if err != nil && err != io.EOF {
-				ctx.ServerError("Data", err)
-				return
-			}
-			buf = buf[:n]
-
-			isTextFile = base.IsTextFile(buf)
-			fileSize = meta.Size
-			ctx.Data["RawFileLink"] = fmt.Sprintf("%s/media/%s/%s", ctx.Repo.RepoLink, ctx.Repo.BranchNameSubURL(), ctx.Repo.TreePath)
+			if meta != nil {
+				isLFSFile = true
+
+				// OK read the lfs object
+				var err error
+				dataRc, err = lfs.ReadMetaObject(pointer)
+				if err != nil {
+					ctx.ServerError("ReadMetaObject", err)
+					return
+				}
+				defer dataRc.Close()
+
+				buf = make([]byte, 1024)
+				n, err = dataRc.Read(buf)
+				// Error EOF don't mean there is an error, it just means we read to
+				// the end
+				if err != nil && err != io.EOF {
+					ctx.ServerError("Data", err)
+					return
+				}
+				buf = buf[:n]
+
+				isTextFile = base.IsTextFile(buf)
+				fileSize = meta.Size
+				ctx.Data["RawFileLink"] = fmt.Sprintf("%s/media/%s/%s", ctx.Repo.RepoLink, ctx.Repo.BranchNameSubURL(), ctx.Repo.TreePath)
+			}
 		}
 	}
@@ -16,7 +16,6 @@ import (
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/httpcache"
-	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/metrics"
 	"code.gitea.io/gitea/modules/public"

@@ -38,6 +37,7 @@ import (
 	"code.gitea.io/gitea/routers/user"
 	userSetting "code.gitea.io/gitea/routers/user/setting"
 	"code.gitea.io/gitea/services/forms"
+	"code.gitea.io/gitea/services/lfs"
 	"code.gitea.io/gitea/services/mailer"

 	// to registers all internal adapters
@@ -71,6 +71,8 @@ type MigrateRepoForm struct {
 	// required: true
 	RepoName string `json:"repo_name" binding:"Required;AlphaDashDot;MaxSize(100)"`
 	Mirror   bool   `json:"mirror"`
+	LFS         bool   `json:"lfs"`
+	LFSEndpoint string `json:"lfs_endpoint"`
 	Private     bool   `json:"private"`
 	Description string `json:"description" binding:"MaxSize(255)"`
 	Wiki        bool   `json:"wiki"`

@@ -118,6 +120,8 @@ type RepoSettingForm struct {
 	MirrorAddress  string
 	MirrorUsername string
 	MirrorPassword string
+	LFS            bool   `form:"mirror_lfs"`
+	LFSEndpoint    string `form:"mirror_lfs_endpoint"`
 	Private        bool
 	Template       bool
 	EnablePrune    bool
@@ -25,6 +25,7 @@ import (
 	"code.gitea.io/gitea/modules/charset"
 	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/highlight"
+	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"

@@ -1077,12 +1078,12 @@ func parseHunks(curFile *DiffFile, maxLines, maxLineCharacters int, input *bufio
 			curSection.Lines[len(curSection.Lines)-1].Content = line

 			// handle LFS
-			if line[1:] == models.LFSMetaFileIdentifier {
+			if line[1:] == lfs.MetaFileIdentifier {
 				curFileLFSPrefix = true
-			} else if curFileLFSPrefix && strings.HasPrefix(line[1:], models.LFSMetaFileOidPrefix) {
-				oid := strings.TrimPrefix(line[1:], models.LFSMetaFileOidPrefix)
+			} else if curFileLFSPrefix && strings.HasPrefix(line[1:], lfs.MetaFileOidPrefix) {
+				oid := strings.TrimPrefix(line[1:], lfs.MetaFileOidPrefix)
 				if len(oid) == 64 {
-					m := &models.LFSMetaObject{Oid: oid}
+					m := &models.LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}}
 					count, err := models.Count(m)

 					if err == nil && count > 0 {
@@ -12,6 +12,7 @@ import (
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/convert"
+	lfs_module "code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
 	api "code.gitea.io/gitea/modules/structs"

@@ -26,7 +27,7 @@ func checkIsValidRequest(ctx *context.Context) bool {
 		return false
 	}
 	if !MetaMatcher(ctx.Req) {
-		log.Info("Attempt access LOCKs without accepting the correct media type: %s", metaMediaType)
+		log.Info("Attempt access LOCKs without accepting the correct media type: %s", lfs_module.MediaType)
 		writeStatus(ctx, http.StatusBadRequest)
 		return false
 	}

@@ -72,9 +73,9 @@ func GetListLockHandler(ctx *context.Context) {
 		// Status is written in checkIsValidRequest
 		return
 	}
-	ctx.Resp.Header().Set("Content-Type", metaMediaType)
+	ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)

-	rv := unpack(ctx)
+	rv, _ := unpack(ctx)

 	repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo)
 	if err != nil {

@@ -159,7 +160,7 @@ func PostLockHandler(ctx *context.Context) {
 		// Status is written in checkIsValidRequest
 		return
 	}
-	ctx.Resp.Header().Set("Content-Type", metaMediaType)
+	ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)

 	userName := ctx.Params("username")
 	repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git")

@@ -228,7 +229,7 @@ func VerifyLockHandler(ctx *context.Context) {
 		// Status is written in checkIsValidRequest
 		return
 	}
-	ctx.Resp.Header().Set("Content-Type", metaMediaType)
+	ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)

 	userName := ctx.Params("username")
 	repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git")

@@ -295,7 +296,7 @@ func UnLockHandler(ctx *context.Context) {
 		// Status is written in checkIsValidRequest
 		return
 	}
-	ctx.Resp.Header().Set("Content-Type", metaMediaType)
+	ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)

 	userName := ctx.Params("username")
 	repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git")
@@ -13,62 +13,24 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-	"time"

 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/context"
+	lfs_module "code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
-	"code.gitea.io/gitea/modules/storage"

 	"github.com/dgrijalva/jwt-go"
 	jsoniter "github.com/json-iterator/go"
 )

-const (
-	metaMediaType = "application/vnd.git-lfs+json"
-)
-
-// RequestVars contain variables from the HTTP request. Variables from routing, json body decoding, and
-// some headers are stored.
-type RequestVars struct {
-	Oid           string
-	Size          int64
-	User          string
-	Password      string
-	Repo          string
-	Authorization string
-}
-
-// BatchVars contains multiple RequestVars processed in one batch operation.
-// https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
-type BatchVars struct {
-	Transfers []string       `json:"transfers,omitempty"`
-	Operation string         `json:"operation"`
-	Objects   []*RequestVars `json:"objects"`
-}
-
-// BatchResponse contains multiple object metadata Representation structures
-// for use with the batch API.
-type BatchResponse struct {
-	Transfer string            `json:"transfer,omitempty"`
-	Objects  []*Representation `json:"objects"`
-}
-
-// Representation is object metadata as seen by clients of the lfs server.
-type Representation struct {
-	Oid     string           `json:"oid"`
-	Size    int64            `json:"size"`
-	Actions map[string]*link `json:"actions"`
-	Error   *ObjectError     `json:"error,omitempty"`
-}
-
-// ObjectError defines the JSON structure returned to the client in case of an error
-type ObjectError struct {
-	Code    int    `json:"code"`
-	Message string `json:"message"`
-}
+// requestContext contain variables from the HTTP request.
+type requestContext struct {
+	User          string
+	Repo          string
+	Authorization string
+}

 // Claims is a JWT Token Claims
 type Claims struct {
 	RepoID int64

@@ -78,20 +40,13 @@ type Claims struct {
 }

 // ObjectLink builds a URL linking to the object.
-func (v *RequestVars) ObjectLink() string {
-	return setting.AppURL + path.Join(v.User, v.Repo+".git", "info/lfs/objects", v.Oid)
+func (rc *requestContext) ObjectLink(oid string) string {
+	return setting.AppURL + path.Join(rc.User, rc.Repo+".git", "info/lfs/objects", oid)
 }

 // VerifyLink builds a URL for verifying the object.
-func (v *RequestVars) VerifyLink() string {
-	return setting.AppURL + path.Join(v.User, v.Repo+".git", "info/lfs/verify")
-}
-
-// link provides a structure used to build a hypermedia representation of an HTTP link.
-type link struct {
-	Href      string            `json:"href"`
-	Header    map[string]string `json:"header,omitempty"`
-	ExpiresAt time.Time         `json:"expires_at,omitempty"`
+func (rc *requestContext) VerifyLink() string {
+	return setting.AppURL + path.Join(rc.User, rc.Repo+".git", "info/lfs/verify")
 }

 var oidRegExp = regexp.MustCompile(`^[A-Fa-f0-9]+$`)
@@ -125,28 +80,28 @@ func ObjectOidHandler(ctx *context.Context) {
 	writeStatus(ctx, 404)
 }

-func getAuthenticatedRepoAndMeta(ctx *context.Context, rv *RequestVars, requireWrite bool) (*models.LFSMetaObject, *models.Repository) {
-	if !isOidValid(rv.Oid) {
-		log.Info("Attempt to access invalid LFS OID[%s] in %s/%s", rv.Oid, rv.User, rv.Repo)
+func getAuthenticatedRepoAndMeta(ctx *context.Context, rc *requestContext, p lfs_module.Pointer, requireWrite bool) (*models.LFSMetaObject, *models.Repository) {
+	if !isOidValid(p.Oid) {
+		log.Info("Attempt to access invalid LFS OID[%s] in %s/%s", p.Oid, rc.User, rc.Repo)
 		writeStatus(ctx, 404)
 		return nil, nil
 	}

-	repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo)
+	repository, err := models.GetRepositoryByOwnerAndName(rc.User, rc.Repo)
 	if err != nil {
-		log.Error("Unable to get repository: %s/%s Error: %v", rv.User, rv.Repo, err)
+		log.Error("Unable to get repository: %s/%s Error: %v", rc.User, rc.Repo, err)
 		writeStatus(ctx, 404)
 		return nil, nil
 	}

-	if !authenticate(ctx, repository, rv.Authorization, requireWrite) {
+	if !authenticate(ctx, repository, rc.Authorization, requireWrite) {
 		requireAuth(ctx)
 		return nil, nil
 	}

-	meta, err := repository.GetLFSMetaObjectByOid(rv.Oid)
+	meta, err := repository.GetLFSMetaObjectByOid(p.Oid)
 	if err != nil {
-		log.Error("Unable to get LFS OID[%s] Error: %v", rv.Oid, err)
+		log.Error("Unable to get LFS OID[%s] Error: %v", p.Oid, err)
 		writeStatus(ctx, 404)
 		return nil, nil
 	}

@@ -156,9 +111,9 @@ func getAuthenticatedRepoAndMeta(ctx *context.Context, rv *RequestVars, requireW

 // getContentHandler gets the content from the content store
 func getContentHandler(ctx *context.Context) {
-	rv := unpack(ctx)
+	rc, p := unpack(ctx)

-	meta, _ := getAuthenticatedRepoAndMeta(ctx, rv, false)
+	meta, _ := getAuthenticatedRepoAndMeta(ctx, rc, p, false)
 	if meta == nil {
 		// Status already written in getAuthenticatedRepoAndMeta
 		return

@@ -192,8 +147,8 @@ func getContentHandler(ctx *context.Context) {
 		}
 	}

-	contentStore := &ContentStore{ObjectStorage: storage.LFS}
-	content, err := contentStore.Get(meta)
+	contentStore := lfs_module.NewContentStore()
+	content, err := contentStore.Get(meta.Pointer)
 	if err != nil {
 		// Errors are logged in contentStore.Get
 		writeStatus(ctx, http.StatusNotFound)

@@ -233,20 +188,20 @@ func getContentHandler(ctx *context.Context) {

 // getMetaHandler retrieves metadata about the object
 func getMetaHandler(ctx *context.Context) {
-	rv := unpack(ctx)
+	rc, p := unpack(ctx)

-	meta, _ := getAuthenticatedRepoAndMeta(ctx, rv, false)
+	meta, _ := getAuthenticatedRepoAndMeta(ctx, rc, p, false)
 	if meta == nil {
 		// Status already written in getAuthenticatedRepoAndMeta
 		return
 	}

-	ctx.Resp.Header().Set("Content-Type", metaMediaType)
+	ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)

 	if ctx.Req.Method == "GET" {
 		json := jsoniter.ConfigCompatibleWithStandardLibrary
 		enc := json.NewEncoder(ctx.Resp)
-		if err := enc.Encode(Represent(rv, meta, true, false)); err != nil {
+		if err := enc.Encode(represent(rc, meta.Pointer, true, false)); err != nil {
 			log.Error("Failed to encode representation as json. Error: %v", err)
 		}
 	}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !MetaMatcher(ctx.Req) {
|
if !MetaMatcher(ctx.Req) {
|
||||||
log.Info("Attempt to POST without accepting the correct media type: %s", metaMediaType)
|
log.Info("Attempt to POST without accepting the correct media type: %s", lfs_module.MediaType)
|
||||||
writeStatus(ctx, 400)
|
writeStatus(ctx, 400)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rv := unpack(ctx)
|
rc, p := unpack(ctx)
|
||||||
|
|
||||||
repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo)
|
repository, err := models.GetRepositoryByOwnerAndName(rc.User, rc.Repo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unable to get repository: %s/%s Error: %v", rv.User, rv.Repo, err)
|
log.Error("Unable to get repository: %s/%s Error: %v", rc.User, rc.Repo, err)
|
||||||
writeStatus(ctx, 404)
|
writeStatus(ctx, 404)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !authenticate(ctx, repository, rv.Authorization, true) {
|
if !authenticate(ctx, repository, rc.Authorization, true) {
|
||||||
requireAuth(ctx)
|
requireAuth(ctx)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !isOidValid(rv.Oid) {
|
if !isOidValid(p.Oid) {
|
||||||
log.Info("Invalid LFS OID[%s] attempt to POST in %s/%s", rv.Oid, rv.User, rv.Repo)
|
log.Info("Invalid LFS OID[%s] attempt to POST in %s/%s", p.Oid, rc.User, rc.Repo)
|
||||||
writeStatus(ctx, 404)
|
writeStatus(ctx, 404)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if setting.LFS.MaxFileSize > 0 && rv.Size > setting.LFS.MaxFileSize {
|
if setting.LFS.MaxFileSize > 0 && p.Size > setting.LFS.MaxFileSize {
|
||||||
log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", rv.Oid, rv.Size, rv.User, rv.Repo, setting.LFS.MaxFileSize)
|
log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", p.Oid, p.Size, rc.User, rc.Repo, setting.LFS.MaxFileSize)
|
||||||
writeStatus(ctx, 413)
|
writeStatus(ctx, 413)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
meta, err := models.NewLFSMetaObject(&models.LFSMetaObject{Oid: rv.Oid, Size: rv.Size, RepositoryID: repository.ID})
|
meta, err := models.NewLFSMetaObject(&models.LFSMetaObject{Pointer: p, RepositoryID: repository.ID})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", rv.Oid, rv.Size, rv.User, rv.Repo, err)
|
log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", p.Oid, p.Size, rc.User, rc.Repo, err)
|
||||||
writeStatus(ctx, 404)
|
writeStatus(ctx, 404)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx.Resp.Header().Set("Content-Type", metaMediaType)
|
ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
|
||||||
|
|
||||||
sentStatus := 202
|
sentStatus := 202
|
||||||
contentStore := &ContentStore{ObjectStorage: storage.LFS}
|
contentStore := lfs_module.NewContentStore()
|
||||||
exist, err := contentStore.Exists(meta)
|
exist, err := contentStore.Exists(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", rv.Oid, rv.User, rv.Repo, err)
|
log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", p.Oid, rc.User, rc.Repo, err)
|
||||||
writeStatus(ctx, 500)
|
writeStatus(ctx, 500)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -318,7 +273,7 @@ func PostHandler(ctx *context.Context) {
|
||||||
|
|
||||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||||
enc := json.NewEncoder(ctx.Resp)
|
enc := json.NewEncoder(ctx.Resp)
|
||||||
if err := enc.Encode(Represent(rv, meta, meta.Existing, true)); err != nil {
|
if err := enc.Encode(represent(rc, meta.Pointer, meta.Existing, true)); err != nil {
|
||||||
log.Error("Failed to encode representation as json. Error: %v", err)
|
log.Error("Failed to encode representation as json. Error: %v", err)
|
||||||
}
|
}
|
||||||
logRequest(ctx.Req, sentStatus)
|
logRequest(ctx.Req, sentStatus)
|
||||||
|
@ -333,25 +288,31 @@ func BatchHandler(ctx *context.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if !MetaMatcher(ctx.Req) {
|
if !MetaMatcher(ctx.Req) {
|
||||||
log.Info("Attempt to BATCH without accepting the correct media type: %s", metaMediaType)
|
log.Info("Attempt to BATCH without accepting the correct media type: %s", lfs_module.MediaType)
|
||||||
writeStatus(ctx, 400)
|
writeStatus(ctx, 400)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bv := unpackbatch(ctx)
|
bv := unpackbatch(ctx)
|
||||||
|
|
||||||
var responseObjects []*Representation
|
reqCtx := &requestContext{
|
||||||
|
User: ctx.Params("username"),
|
||||||
|
Repo: strings.TrimSuffix(ctx.Params("reponame"), ".git"),
|
||||||
|
Authorization: ctx.Req.Header.Get("Authorization"),
|
||||||
|
}
|
||||||
|
|
||||||
|
var responseObjects []*lfs_module.ObjectResponse
|
||||||
|
|
||||||
// Create a response object
|
// Create a response object
|
||||||
for _, object := range bv.Objects {
|
for _, object := range bv.Objects {
|
||||||
if !isOidValid(object.Oid) {
|
if !isOidValid(object.Oid) {
|
||||||
log.Info("Invalid LFS OID[%s] attempt to BATCH in %s/%s", object.Oid, object.User, object.Repo)
|
log.Info("Invalid LFS OID[%s] attempt to BATCH in %s/%s", object.Oid, reqCtx.User, reqCtx.Repo)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
repository, err := models.GetRepositoryByOwnerAndName(object.User, object.Repo)
|
repository, err := models.GetRepositoryByOwnerAndName(reqCtx.User, reqCtx.Repo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unable to get repository: %s/%s Error: %v", object.User, object.Repo, err)
|
log.Error("Unable to get repository: %s/%s Error: %v", reqCtx.User, reqCtx.Repo, err)
|
||||||
writeStatus(ctx, 404)
|
writeStatus(ctx, 404)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -361,51 +322,51 @@ func BatchHandler(ctx *context.Context) {
|
||||||
requireWrite = true
|
requireWrite = true
|
||||||
}
|
}
|
||||||
|
|
||||||
if !authenticate(ctx, repository, object.Authorization, requireWrite) {
|
if !authenticate(ctx, repository, reqCtx.Authorization, requireWrite) {
|
||||||
requireAuth(ctx)
|
requireAuth(ctx)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
contentStore := &ContentStore{ObjectStorage: storage.LFS}
|
contentStore := lfs_module.NewContentStore()
|
||||||
|
|
||||||
meta, err := repository.GetLFSMetaObjectByOid(object.Oid)
|
meta, err := repository.GetLFSMetaObjectByOid(object.Oid)
|
||||||
if err == nil { // Object is found and exists
|
if err == nil { // Object is found and exists
|
||||||
exist, err := contentStore.Exists(meta)
|
exist, err := contentStore.Exists(meta.Pointer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, object.User, object.Repo, err)
|
log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, reqCtx.User, reqCtx.Repo, err)
|
||||||
writeStatus(ctx, 500)
|
writeStatus(ctx, 500)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if exist {
|
if exist {
|
||||||
responseObjects = append(responseObjects, Represent(object, meta, true, false))
|
responseObjects = append(responseObjects, represent(reqCtx, meta.Pointer, true, false))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if requireWrite && setting.LFS.MaxFileSize > 0 && object.Size > setting.LFS.MaxFileSize {
|
if requireWrite && setting.LFS.MaxFileSize > 0 && object.Size > setting.LFS.MaxFileSize {
|
||||||
log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", object.Oid, object.Size, object.User, object.Repo, setting.LFS.MaxFileSize)
|
log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", object.Oid, object.Size, reqCtx.User, reqCtx.Repo, setting.LFS.MaxFileSize)
|
||||||
writeStatus(ctx, 413)
|
writeStatus(ctx, 413)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object is not found
|
// Object is not found
|
||||||
meta, err = models.NewLFSMetaObject(&models.LFSMetaObject{Oid: object.Oid, Size: object.Size, RepositoryID: repository.ID})
|
meta, err = models.NewLFSMetaObject(&models.LFSMetaObject{Pointer: object, RepositoryID: repository.ID})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
exist, err := contentStore.Exists(meta)
|
exist, err := contentStore.Exists(meta.Pointer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, object.User, object.Repo, err)
|
log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, reqCtx.User, reqCtx.Repo, err)
|
||||||
writeStatus(ctx, 500)
|
writeStatus(ctx, 500)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
responseObjects = append(responseObjects, Represent(object, meta, meta.Existing, !exist))
|
responseObjects = append(responseObjects, represent(reqCtx, meta.Pointer, meta.Existing, !exist))
|
||||||
} else {
|
} else {
|
||||||
log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", object.Oid, object.Size, object.User, object.Repo, err)
|
log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", object.Oid, object.Size, reqCtx.User, reqCtx.Repo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx.Resp.Header().Set("Content-Type", metaMediaType)
|
ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
|
||||||
|
|
||||||
respobj := &BatchResponse{Objects: responseObjects}
|
respobj := &lfs_module.BatchResponse{Objects: responseObjects}
|
||||||
|
|
||||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||||
enc := json.NewEncoder(ctx.Resp)
|
enc := json.NewEncoder(ctx.Resp)
|
||||||
|
@ -417,26 +378,26 @@ func BatchHandler(ctx *context.Context) {
|
||||||
|
|
||||||
// PutHandler receives data from the client and puts it into the content store
|
// PutHandler receives data from the client and puts it into the content store
|
||||||
func PutHandler(ctx *context.Context) {
|
func PutHandler(ctx *context.Context) {
|
||||||
rv := unpack(ctx)
|
rc, p := unpack(ctx)
|
||||||
|
|
||||||
meta, repository := getAuthenticatedRepoAndMeta(ctx, rv, true)
|
meta, repository := getAuthenticatedRepoAndMeta(ctx, rc, p, true)
|
||||||
if meta == nil {
|
if meta == nil {
|
||||||
// Status already written in getAuthenticatedRepoAndMeta
|
// Status already written in getAuthenticatedRepoAndMeta
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
contentStore := &ContentStore{ObjectStorage: storage.LFS}
|
contentStore := lfs_module.NewContentStore()
|
||||||
defer ctx.Req.Body.Close()
|
defer ctx.Req.Body.Close()
|
||||||
if err := contentStore.Put(meta, ctx.Req.Body); err != nil {
|
if err := contentStore.Put(meta.Pointer, ctx.Req.Body); err != nil {
|
||||||
// Put will log the error itself
|
// Put will log the error itself
|
||||||
ctx.Resp.WriteHeader(500)
|
ctx.Resp.WriteHeader(500)
|
||||||
if err == errSizeMismatch || err == errHashMismatch {
|
if err == lfs_module.ErrSizeMismatch || err == lfs_module.ErrHashMismatch {
|
||||||
fmt.Fprintf(ctx.Resp, `{"message":"%s"}`, err)
|
fmt.Fprintf(ctx.Resp, `{"message":"%s"}`, err)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(ctx.Resp, `{"message":"Internal Server Error"}`)
|
fmt.Fprintf(ctx.Resp, `{"message":"Internal Server Error"}`)
|
||||||
}
|
}
|
||||||
if _, err = repository.RemoveLFSMetaObjectByOid(rv.Oid); err != nil {
|
if _, err = repository.RemoveLFSMetaObjectByOid(p.Oid); err != nil {
|
||||||
log.Error("Whilst removing metaobject for LFS OID[%s] due to preceding error there was another Error: %v", rv.Oid, err)
|
log.Error("Whilst removing metaobject for LFS OID[%s] due to preceding error there was another Error: %v", p.Oid, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -453,21 +414,21 @@ func VerifyHandler(ctx *context.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if !MetaMatcher(ctx.Req) {
|
if !MetaMatcher(ctx.Req) {
|
||||||
log.Info("Attempt to VERIFY without accepting the correct media type: %s", metaMediaType)
|
log.Info("Attempt to VERIFY without accepting the correct media type: %s", lfs_module.MediaType)
|
||||||
writeStatus(ctx, 400)
|
writeStatus(ctx, 400)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rv := unpack(ctx)
|
rc, p := unpack(ctx)
|
||||||
|
|
||||||
meta, _ := getAuthenticatedRepoAndMeta(ctx, rv, true)
|
meta, _ := getAuthenticatedRepoAndMeta(ctx, rc, p, true)
|
||||||
if meta == nil {
|
if meta == nil {
|
||||||
// Status already written in getAuthenticatedRepoAndMeta
|
// Status already written in getAuthenticatedRepoAndMeta
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
contentStore := &ContentStore{ObjectStorage: storage.LFS}
|
contentStore := lfs_module.NewContentStore()
|
||||||
ok, err := contentStore.Verify(meta)
|
ok, err := contentStore.Verify(meta.Pointer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Error will be logged in Verify
|
// Error will be logged in Verify
|
||||||
ctx.Resp.WriteHeader(500)
|
ctx.Resp.WriteHeader(500)
|
||||||
|
@ -482,30 +443,29 @@ func VerifyHandler(ctx *context.Context) {
|
||||||
logRequest(ctx.Req, 200)
|
logRequest(ctx.Req, 200)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Represent takes a RequestVars and Meta and turns it into a Representation suitable
|
// represent takes a requestContext and Meta and turns it into a ObjectResponse suitable
|
||||||
// for json encoding
|
// for json encoding
|
||||||
func Represent(rv *RequestVars, meta *models.LFSMetaObject, download, upload bool) *Representation {
|
func represent(rc *requestContext, pointer lfs_module.Pointer, download, upload bool) *lfs_module.ObjectResponse {
|
||||||
rep := &Representation{
|
rep := &lfs_module.ObjectResponse{
|
||||||
Oid: meta.Oid,
|
Pointer: pointer,
|
||||||
Size: meta.Size,
|
Actions: make(map[string]*lfs_module.Link),
|
||||||
Actions: make(map[string]*link),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
header := make(map[string]string)
|
header := make(map[string]string)
|
||||||
|
|
||||||
if rv.Authorization == "" {
|
if rc.Authorization == "" {
|
||||||
//https://github.com/github/git-lfs/issues/1088
|
//https://github.com/github/git-lfs/issues/1088
|
||||||
header["Authorization"] = "Authorization: Basic dummy"
|
header["Authorization"] = "Authorization: Basic dummy"
|
||||||
} else {
|
} else {
|
||||||
header["Authorization"] = rv.Authorization
|
header["Authorization"] = rc.Authorization
|
||||||
}
|
}
|
||||||
|
|
||||||
if download {
|
if download {
|
||||||
rep.Actions["download"] = &link{Href: rv.ObjectLink(), Header: header}
|
rep.Actions["download"] = &lfs_module.Link{Href: rc.ObjectLink(pointer.Oid), Header: header}
|
||||||
}
|
}
|
||||||
|
|
||||||
if upload {
|
if upload {
|
||||||
rep.Actions["upload"] = &link{Href: rv.ObjectLink(), Header: header}
|
+		rep.Actions["upload"] = &lfs_module.Link{Href: rc.ObjectLink(pointer.Oid), Header: header}
 	}

 	if upload && !download {
@@ -516,56 +476,56 @@ func Represent(rv *RequestVars, meta *models.LFSMetaObject, download, upload boo
 		}

 		// This is only needed to workaround https://github.com/git-lfs/git-lfs/issues/3662
-		verifyHeader["Accept"] = metaMediaType
+		verifyHeader["Accept"] = lfs_module.MediaType

-		rep.Actions["verify"] = &link{Href: rv.VerifyLink(), Header: verifyHeader}
+		rep.Actions["verify"] = &lfs_module.Link{Href: rc.VerifyLink(), Header: verifyHeader}
 	}

 	return rep
 }

 // MetaMatcher provides a mux.MatcherFunc that only allows requests that contain
-// an Accept header with the metaMediaType
+// an Accept header with the lfs_module.MediaType
 func MetaMatcher(r *http.Request) bool {
 	mediaParts := strings.Split(r.Header.Get("Accept"), ";")
 	mt := mediaParts[0]
-	return mt == metaMediaType
+	return mt == lfs_module.MediaType
 }

-func unpack(ctx *context.Context) *RequestVars {
+func unpack(ctx *context.Context) (*requestContext, lfs_module.Pointer) {
 	r := ctx.Req
-	rv := &RequestVars{
+	rc := &requestContext{
 		User: ctx.Params("username"),
 		Repo: strings.TrimSuffix(ctx.Params("reponame"), ".git"),
-		Oid: ctx.Params("oid"),
 		Authorization: r.Header.Get("Authorization"),
 	}
+	p := lfs_module.Pointer{Oid: ctx.Params("oid")}

 	if r.Method == "POST" { // Maybe also check if +json
-		var p RequestVars
+		var p2 lfs_module.Pointer
 		bodyReader := r.Body
 		defer bodyReader.Close()
 		json := jsoniter.ConfigCompatibleWithStandardLibrary
 		dec := json.NewDecoder(bodyReader)
-		err := dec.Decode(&p)
+		err := dec.Decode(&p2)
 		if err != nil {
 			// The error is logged as a WARN here because this may represent misbehaviour rather than a true error
-			log.Warn("Unable to decode POST request vars for LFS OID[%s] in %s/%s: Error: %v", rv.Oid, rv.User, rv.Repo, err)
-			return rv
+			log.Warn("Unable to decode POST request vars for LFS OID[%s] in %s/%s: Error: %v", p.Oid, rc.User, rc.Repo, err)
+			return rc, p
 		}

-		rv.Oid = p.Oid
-		rv.Size = p.Size
+		p.Oid = p2.Oid
+		p.Size = p2.Size
 	}

-	return rv
+	return rc, p
 }

 // TODO cheap hack, unify with unpack
-func unpackbatch(ctx *context.Context) *BatchVars {
+func unpackbatch(ctx *context.Context) *lfs_module.BatchRequest {

 	r := ctx.Req
-	var bv BatchVars
+	var bv lfs_module.BatchRequest

 	bodyReader := r.Body
 	defer bodyReader.Close()
@@ -578,12 +538,6 @@ func unpackbatch(ctx *context.Context) *BatchVars {
 		return &bv
 	}

-	for i := 0; i < len(bv.Objects); i++ {
-		bv.Objects[i].User = ctx.Params("username")
-		bv.Objects[i].Repo = strings.TrimSuffix(ctx.Params("reponame"), ".git")
-		bv.Objects[i].Authorization = r.Header.Get("Authorization")
-	}

 	return &bv
 }
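A note on what unpack and unpackbatch now decode: the Git LFS batch API posts a JSON body that lists the requested objects, and the per-object user/repo/authorization fields are gone because that information now comes from the request context. Below is a minimal, self-contained sketch of that decoding step; the struct names are illustrative stand-ins, not Gitea's actual lfs_module types.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Illustrative stand-ins for the batch request payload; the field names
// follow the Git LFS batch API ("operation", "objects", "oid", "size").
type pointer struct {
	Oid  string `json:"oid"`
	Size int64  `json:"size"`
}

type batchRequest struct {
	Operation string    `json:"operation"`
	Objects   []pointer `json:"objects"`
}

func main() {
	body := strings.NewReader(`{"operation":"download","objects":[{"oid":"abc123","size":42}]}`)

	var br batchRequest
	if err := json.NewDecoder(body).Decode(&br); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	// After decoding, only the pointers are needed; user, repo and
	// authorization come from the surrounding request context.
	for _, p := range br.Objects {
		fmt.Printf("%s %s (%d bytes)\n", br.Operation, p.Oid, p.Size)
	}
}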
@@ -16,6 +16,7 @@ import (
 	"code.gitea.io/gitea/modules/cache"
 	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/lfs"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/notification"
 	repo_module "code.gitea.io/gitea/modules/repository"
@@ -206,7 +207,7 @@ func parseRemoteUpdateOutput(output string) []*mirrorSyncResult {
 }

 // runSync returns true if sync finished without error.
-func runSync(m *models.Mirror) ([]*mirrorSyncResult, bool) {
+func runSync(ctx context.Context, m *models.Mirror) ([]*mirrorSyncResult, bool) {
 	repoPath := m.Repo.RepoPath()
 	wikiPath := m.Repo.WikiPath()
 	timeout := time.Duration(setting.Git.Timeout.Mirror) * time.Second
@@ -253,13 +254,21 @@ func runSync(m *models.Mirror) ([]*mirrorSyncResult, bool) {
 		log.Error("OpenRepository: %v", err)
 		return nil, false
 	}
+	defer gitRepo.Close()

 	log.Trace("SyncMirrors [repo: %-v]: syncing releases with tags...", m.Repo)
 	if err = repo_module.SyncReleasesWithTags(m.Repo, gitRepo); err != nil {
-		gitRepo.Close()
 		log.Error("Failed to synchronize tags to releases for repository: %v", err)
 	}
-	gitRepo.Close()
+
+	if m.LFS && setting.LFS.StartServer {
+		log.Trace("SyncMirrors [repo: %-v]: syncing LFS objects...", m.Repo)
+		readAddress(m)
+		ep := lfs.DetermineEndpoint(m.Address, m.LFSEndpoint)
+		if err = repo_module.StoreMissingLfsObjectsInRepository(ctx, m.Repo, gitRepo, ep); err != nil {
+			log.Error("Failed to synchronize LFS objects for repository: %v", err)
+		}
+	}

 	log.Trace("SyncMirrors [repo: %-v]: updating size of repository", m.Repo)
 	if err := m.Repo.UpdateSize(models.DefaultDBContext()); err != nil {
@@ -378,12 +387,12 @@ func SyncMirrors(ctx context.Context) {
 			mirrorQueue.Close()
 			return
 		case repoID := <-mirrorQueue.Queue():
-			syncMirror(repoID)
+			syncMirror(ctx, repoID)
 		}
 	}
 }

-func syncMirror(repoID string) {
+func syncMirror(ctx context.Context, repoID string) {
 	log.Trace("SyncMirrors [repo_id: %v]", repoID)
 	defer func() {
 		err := recover()
@@ -403,7 +412,7 @@ func syncMirror(repoID string) {
 	}

 	log.Trace("SyncMirrors [repo: %-v]: Running Sync", m.Repo)
-	results, ok := runSync(m)
+	results, ok := runSync(ctx, m)
 	if !ok {
 		return
 	}
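The endpoint handed to StoreMissingLfsObjectsInRepository follows the git-lfs server-discovery convention: if no explicit endpoint is configured for the mirror, it is derived from the remote address (for HTTP(S) remotes, `<remote>.git/info/lfs`). A rough sketch of that default rule is below; it is a simplified illustration and not Gitea's DetermineEndpoint, which also handles git://, SSH and local paths.

package main

import (
	"fmt"
	"strings"
)

// guessLFSEndpoint mimics the default server-discovery rule for HTTP(S)
// remotes: "<remote>.git/info/lfs". An explicitly configured endpoint
// always wins. Simplified illustration only.
func guessLFSEndpoint(remote, configured string) string {
	if configured != "" {
		return strings.TrimSuffix(configured, "/")
	}
	remote = strings.TrimSuffix(remote, "/")
	if !strings.HasSuffix(remote, ".git") {
		remote += ".git"
	}
	return remote + "/info/lfs"
}

func main() {
	fmt.Println(guessLFSEndpoint("https://example.com/owner/repo", ""))
	// -> https://example.com/owner/repo.git/info/lfs
	fmt.Println(guessLFSEndpoint("https://example.com/owner/repo.git", "https://lfs.example.com/owner/repo"))
	// -> https://lfs.example.com/owner/repo
}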
@@ -48,7 +48,9 @@ func TestRelease_MirrorDelete(t *testing.T) {
 	})
 	assert.NoError(t, err)

-	mirror, err := repository.MigrateRepositoryGitData(context.Background(), user, mirrorRepo, opts)
+	ctx := context.Background()
+
+	mirror, err := repository.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts)
 	assert.NoError(t, err)

 	gitRepo, err := git.OpenRepository(repoPath)
@@ -74,7 +76,7 @@ func TestRelease_MirrorDelete(t *testing.T) {
 	err = mirror.GetMirror()
 	assert.NoError(t, err)

-	_, ok := runSync(mirror.Mirror)
+	_, ok := runSync(ctx, mirror.Mirror)
 	assert.True(t, ok)

 	count, err := models.GetReleaseCountByRepoID(mirror.ID, findOptions)
@@ -85,7 +87,7 @@ func TestRelease_MirrorDelete(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NoError(t, release_service.DeleteReleaseByID(release.ID, user, true))

-	_, ok = runSync(mirror.Mirror)
+	_, ok = runSync(ctx, mirror.Mirror)
 	assert.True(t, ok)

 	count, err = models.GetReleaseCountByRepoID(mirror.ID, findOptions)
@@ -70,6 +70,8 @@ func createLFSMetaObjectsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg
 	defer wg.Done()
 	defer catFileBatchReader.Close()

+	contentStore := lfs.NewContentStore()
+
 	bufferedReader := bufio.NewReader(catFileBatchReader)
 	buf := make([]byte, 1025)
 	for {
@@ -101,10 +103,16 @@ func createLFSMetaObjectsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg
 		}
 		pointerBuf = pointerBuf[:size]
 		// Now we need to check if the pointerBuf is an LFS pointer
-		pointer := lfs.IsPointerFile(&pointerBuf)
-		if pointer == nil {
+		pointer, _ := lfs.ReadPointerFromBuffer(pointerBuf)
+		if !pointer.IsValid() {
 			continue
 		}
+
+		exist, _ := contentStore.Exists(pointer)
+		if !exist {
+			continue
+		}
+
 		// Then we need to check that this pointer is in the db
 		if _, err := pr.HeadRepo.GetLFSMetaObjectByOid(pointer.Oid); err != nil {
 			if err == models.ErrLFSObjectNotExist {
@@ -117,8 +125,9 @@ func createLFSMetaObjectsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg
 		// OK we have a pointer that is associated with the head repo
 		// and is actually a file in the LFS
 		// Therefore it should be associated with the base repo
-		pointer.RepositoryID = pr.BaseRepoID
-		if _, err := models.NewLFSMetaObject(pointer); err != nil {
+		meta := &models.LFSMetaObject{Pointer: pointer}
+		meta.RepositoryID = pr.BaseRepoID
+		if _, err := models.NewLFSMetaObject(meta); err != nil {
 			_ = catFileBatchReader.CloseWithError(err)
 			break
 		}
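ReadPointerFromBuffer and IsValid replace the old nil check by actually parsing the candidate blob as a Git LFS pointer file, which is just three lines: a spec version, an oid (`sha256:` plus 64 hex characters) and a size. A minimal sketch of that validation, independent of Gitea's lfs module:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var oidRe = regexp.MustCompile(`^sha256:[a-f0-9]{64}$`)

// parsePointer checks the three lines a Git LFS pointer file must contain:
// a spec version, an oid and a size. It returns ok=false for anything else.
func parsePointer(blob string) (oid string, size int64, ok bool) {
	lines := strings.Split(strings.TrimSpace(blob), "\n")
	if len(lines) < 3 || !strings.HasPrefix(lines[0], "version https://git-lfs.github.com/spec/") {
		return "", 0, false
	}
	if !strings.HasPrefix(lines[1], "oid ") || !oidRe.MatchString(strings.TrimPrefix(lines[1], "oid ")) {
		return "", 0, false
	}
	size, err := strconv.ParseInt(strings.TrimPrefix(lines[2], "size "), 10, 64)
	if err != nil {
		return "", 0, false
	}
	return strings.TrimPrefix(strings.TrimPrefix(lines[1], "oid "), "sha256:"), size, true
}

func main() {
	blob := "version https://git-lfs.github.com/spec/v1\noid sha256:" + strings.Repeat("ab", 32) + "\nsize 12345\n"
	oid, size, ok := parsePointer(blob)
	fmt.Println(ok, oid, size)
}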
@@ -15,7 +15,6 @@
 			<input id="clone_addr" name="clone_addr" value="{{.clone_addr}}" autofocus required>
 			<span class="help">
 				{{.i18n.Tr "repo.migrate.clone_address_desc"}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate.clone_local_path"}}{{end}}
-				{{if .LFSActive}}<br/>{{.i18n.Tr "repo.migrate.lfs_mirror_unsupported"}}{{end}}
 			</span>
 		</div>
 		<div class="inline field {{if .Err_Auth}}error{{end}}">
@@ -28,18 +27,7 @@
 			<input id="auth_password" name="auth_password" type="password" value="{{.auth_password}}">
 		</div>

-		<div class="inline field">
-			<label>{{.i18n.Tr "repo.migrate_options"}}</label>
-			<div class="ui checkbox">
-				{{if .DisableMirrors}}
-					<input id="mirror" name="mirror" type="checkbox" readonly>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_disabled"}}</label>
-				{{else}}
-					<input id="mirror" name="mirror" type="checkbox" {{if .mirror}}checked{{end}}>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_helper" | Safe}}</label>
-				{{end}}
-			</div>
-		</div>
+		{{template "repo/migrate/options" .}}

 		<div class="ui divider"></div>
@@ -15,28 +15,16 @@
 			<input id="clone_addr" name="clone_addr" value="{{.clone_addr}}" autofocus required>
 			<span class="help">
 				{{.i18n.Tr "repo.migrate.clone_address_desc"}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate.clone_local_path"}}{{end}}
-				{{if .LFSActive}}<br />{{.i18n.Tr "repo.migrate.lfs_mirror_unsupported"}}{{end}}
 			</span>
 		</div>

 		<div class="inline field {{if .Err_Auth}}error{{end}}">
 			<label for="auth_token">{{.i18n.Tr "access_token"}}</label>
 			<input id="auth_token" name="auth_token" value="{{.auth_token}}" {{if not .auth_token}} data-need-clear="true" {{end}}>
-			<a target=”_blank” href="https://docs.gitea.io/en-us/api-usage">{{svg "octicon-question"}}</a>
+			<a target="_blank" href="https://docs.gitea.io/en-us/api-usage">{{svg "octicon-question"}}</a>
 		</div>

-		<div class="inline field">
-			<label>{{.i18n.Tr "repo.migrate_options"}}</label>
-			<div class="ui checkbox">
-				{{if .DisableMirrors}}
-					<input id="mirror" name="mirror" type="checkbox" readonly>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_disabled"}}</label>
-				{{else}}
-					<input id="mirror" name="mirror" type="checkbox" {{if .mirror}} checked{{end}}>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_helper" | Safe}}</label>
-				{{end}}
-			</div>
-		</div>
+		{{template "repo/migrate/options" .}}

 		<span class="help">{{.i18n.Tr "repo.migrate.migrate_items_options"}}</span>
 		<div id="migrate_items">
@@ -15,28 +15,16 @@
 			<input id="clone_addr" name="clone_addr" value="{{.clone_addr}}" autofocus required>
 			<span class="help">
 				{{.i18n.Tr "repo.migrate.clone_address_desc"}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate.clone_local_path"}}{{end}}
-				{{if .LFSActive}}<br/>{{.i18n.Tr "repo.migrate.lfs_mirror_unsupported"}}{{end}}
 			</span>
 		</div>

 		<div class="inline field {{if .Err_Auth}}error{{end}}">
 			<label for="auth_token">{{.i18n.Tr "access_token"}}</label>
 			<input id="auth_token" name="auth_token" value="{{.auth_token}}" {{if not .auth_token}}data-need-clear="true"{{end}}>
-			<a target=”_blank” href="https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token">{{svg "octicon-question"}}</a>
+			<a target="_blank" href="https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token">{{svg "octicon-question"}}</a>
 		</div>

-		<div class="inline field">
-			<label>{{.i18n.Tr "repo.migrate_options"}}</label>
-			<div class="ui checkbox">
-				{{if .DisableMirrors}}
-					<input id="mirror" name="mirror" type="checkbox" readonly>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_disabled"}}</label>
-				{{else}}
-					<input id="mirror" name="mirror" type="checkbox" {{if .mirror}}checked{{end}}>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_helper" | Safe}}</label>
-				{{end}}
-			</div>
-		</div>
+		{{template "repo/migrate/options" .}}

 		<span class="help">{{.i18n.Tr "repo.migrate.migrate_items_options"}}</span>
 		<div id="migrate_items">
@@ -15,28 +15,16 @@
 			<input id="clone_addr" name="clone_addr" value="{{.clone_addr}}" autofocus required>
 			<span class="help">
 				{{.i18n.Tr "repo.migrate.clone_address_desc"}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate.clone_local_path"}}{{end}}
-				{{if .LFSActive}}<br/>{{.i18n.Tr "repo.migrate.lfs_mirror_unsupported"}}{{end}}
 			</span>
 		</div>

 		<div class="inline field {{if .Err_Auth}}error{{end}}">
 			<label for="auth_token">{{.i18n.Tr "access_token"}}</label>
 			<input id="auth_token" name="auth_token" value="{{.auth_token}}" {{if not .auth_token}}data-need-clear="true"{{end}}>
-			<a target=”_blank” href="https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html">{{svg "octicon-question"}}</a>
+			<a target="_blank" href="https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html">{{svg "octicon-question"}}</a>
 		</div>

-		<div class="inline field">
-			<label>{{.i18n.Tr "repo.migrate_options"}}</label>
-			<div class="ui checkbox">
-				{{if .DisableMirrors}}
-					<input id="mirror" name="mirror" type="checkbox" readonly>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_disabled"}}</label>
-				{{else}}
-					<input id="mirror" name="mirror" type="checkbox" {{if .mirror}}checked{{end}}>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_helper" | Safe}}</label>
-				{{end}}
-			</div>
-		</div>
+		{{template "repo/migrate/options" .}}

 		<span class="help">{{.i18n.Tr "repo.migrate.migrate_items_options"}}</span>
 		<div id="migrate_items">
@@ -15,28 +15,16 @@
 			<input id="clone_addr" name="clone_addr" value="{{.clone_addr}}" autofocus required>
 			<span class="help">
 				{{.i18n.Tr "repo.migrate.clone_address_desc"}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate.clone_local_path"}}{{end}}
-				{{if .LFSActive}}<br />{{.i18n.Tr "repo.migrate.lfs_mirror_unsupported"}}{{end}}
 			</span>
 		</div>

 		<div class="inline field {{if .Err_Auth}}error{{end}}">
 			<label for="auth_token">{{.i18n.Tr "access_token"}}</label>
 			<input id="auth_token" name="auth_token" value="{{.auth_token}}" {{if not .auth_token}} data-need-clear="true" {{end}}>
-			<!-- <a target=”_blank” href="https://docs.gitea.io/en-us/api-usage">{{svg "octicon-question"}}</a> -->
+			<!-- <a target="_blank" href="https://docs.gitea.io/en-us/api-usage">{{svg "octicon-question"}}</a> -->
 		</div>

-		<div class="inline field">
-			<label>{{.i18n.Tr "repo.migrate_options"}}</label>
-			<div class="ui checkbox">
-				{{if .DisableMirrors}}
-					<input id="mirror" name="mirror" type="checkbox" readonly>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_disabled"}}</label>
-				{{else}}
-					<input id="mirror" name="mirror" type="checkbox" {{if .mirror}} checked{{end}}>
-					<label>{{.i18n.Tr "repo.migrate_options_mirror_helper" | Safe}}</label>
-				{{end}}
-			</div>
-		</div>
+		{{template "repo/migrate/options" .}}

 		<span class="help">{{.i18n.Tr "repo.migrate.migrate_items_options"}}</span>
 		<div id="migrate_items">
29
templates/repo/migrate/options.tmpl
Normal file
@@ -0,0 +1,29 @@
+<div class="inline field">
+	<label>{{.i18n.Tr "repo.migrate_options"}}</label>
+	<div class="ui checkbox">
+		{{if .DisableMirrors}}
+			<input id="mirror" name="mirror" type="checkbox" readonly>
+			<label>{{.i18n.Tr "repo.migrate_options_mirror_disabled"}}</label>
+		{{else}}
+			<input id="mirror" name="mirror" type="checkbox" {{if .mirror}} checked{{end}}>
+			<label>{{.i18n.Tr "repo.migrate_options_mirror_helper" | Safe}}</label>
+		{{end}}
+	</div>
+</div>
+{{if .LFSActive}}
+	<div class="inline field">
+		<label></label>
+		<div class="ui checkbox">
+			<input id="lfs" name="lfs" type="checkbox" {{if .lfs}} checked{{end}}>
+			<label>{{.i18n.Tr "repo.migrate_options_lfs"}}</label>
+		</div>
+		<span id="lfs_settings" style="display:none">(<a id="lfs_settings_show" href="#">{{.i18n.Tr "repo.settings.advanced_settings"}}</a>)</span>
+	</div>
+	<div id="lfs_endpoint" style="display:none">
+		<span class="help">{{.i18n.Tr "repo.migrate_options_lfs_endpoint.description" "https://github.com/git-lfs/git-lfs/blob/main/docs/api/server-discovery.md#server-discovery" | Str2html}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate_options_lfs_endpoint.description.local"}}{{end}}</span>
+		<div class="inline field {{if .Err_LFSEndpoint}}error{{end}}">
+			<label>{{.i18n.Tr "repo.migrate_options_lfs_endpoint.label"}}</label>
+			<input name="lfs_endpoint" value="{{.lfs_endpoint}}" placeholder="{{.i18n.Tr "repo.migrate_options_lfs_endpoint.placeholder"}}">
+		</div>
+	</div>
+{{end}}
@@ -81,8 +81,8 @@
 				<div class="inline field {{if .Err_EnablePrune}}error{{end}}">
 					<label>{{.i18n.Tr "repo.mirror_prune"}}</label>
 					<div class="ui checkbox">
 						<input id="enable_prune" name="enable_prune" type="checkbox" {{if .MirrorEnablePrune}}checked{{end}}>
 						<label>{{.i18n.Tr "repo.mirror_prune_desc"}}</label>
 					</div>
 				</div>
 				<div class="inline field {{if .Err_Interval}}error{{end}}">
@@ -112,6 +112,21 @@
 					</div>
 				</div>

+				{{if .LFSStartServer}}
+				<div class="inline field">
+					<label>{{.i18n.Tr "repo.mirror_lfs"}}</label>
+					<div class="ui checkbox">
+						<input id="mirror_lfs" name="mirror_lfs" type="checkbox" {{if .Mirror.LFS}}checked{{end}}>
+						<label>{{.i18n.Tr "repo.mirror_lfs_desc"}}</label>
+					</div>
+				</div>
+				<div class="field {{if .Err_LFSEndpoint}}error{{end}}">
+					<label for="mirror_lfs_endpoint">{{.i18n.Tr "repo.mirror_lfs_endpoint"}}</label>
+					<input id="mirror_lfs_endpoint" name="mirror_lfs_endpoint" value="{{.Mirror.LFSEndpoint}}" placeholder="{{.i18n.Tr "repo.migrate_options_lfs_endpoint.placeholder"}}">
+					<p class="help">{{.i18n.Tr "repo.mirror_lfs_endpoint_desc" "https://github.com/git-lfs/git-lfs/blob/main/docs/api/server-discovery.md#server-discovery" | Str2html}}</p>
+				</div>
+				{{end}}
+
 				<div class="field">
 					<button class="ui green button">{{$.i18n.Tr "repo.settings.update_settings"}}</button>
 				</div>
@@ -14669,6 +14669,14 @@
           "type": "boolean",
           "x-go-name": "Labels"
         },
+        "lfs": {
+          "type": "boolean",
+          "x-go-name": "LFS"
+        },
+        "lfs_endpoint": {
+          "type": "string",
+          "x-go-name": "LFSEndpoint"
+        },
         "milestones": {
           "type": "boolean",
           "x-go-name": "Milestones"
@@ -14748,6 +14756,14 @@
           "type": "boolean",
           "x-go-name": "Labels"
         },
+        "lfs": {
+          "type": "boolean",
+          "x-go-name": "LFS"
+        },
+        "lfs_endpoint": {
+          "type": "string",
+          "x-go-name": "LFSEndpoint"
+        },
         "milestones": {
           "type": "boolean",
           "x-go-name": "Milestones"
@@ -3,15 +3,21 @@ const $user = $('#auth_username');
 const $pass = $('#auth_password');
 const $token = $('#auth_token');
 const $mirror = $('#mirror');
+const $lfs = $('#lfs');
+const $lfsSettings = $('#lfs_settings');
+const $lfsEndpoint = $('#lfs_endpoint');
 const $items = $('#migrate_items').find('input[type=checkbox]');

 export default function initMigration() {
   checkAuth();
+  setLFSSettingsVisibility();

   $user.on('keyup', () => {checkItems(false)});
   $pass.on('keyup', () => {checkItems(false)});
   $token.on('keyup', () => {checkItems(true)});
   $mirror.on('change', () => {checkItems(true)});
+  $('#lfs_settings_show').on('click', () => { $lfsEndpoint.show(); return false });
+  $lfs.on('change', setLFSSettingsVisibility);

   const $cloneAddr = $('#clone_addr');
   $cloneAddr.on('change', () => {
@@ -46,3 +52,9 @@ function checkItems(tokenAuth) {
     $items.attr('disabled', true);
   }
 }
+
+function setLFSSettingsVisibility() {
+  const visible = $lfs.is(':checked');
+  $lfsSettings.toggle(visible);
+  $lfsEndpoint.hide();
+}