diff --git a/cmd/serv.go b/cmd/serv.go
index a8db623e16..56167f63a8 100644
--- a/cmd/serv.go
+++ b/cmd/serv.go
@@ -17,11 +17,11 @@ import (
"time"
"code.gitea.io/gitea/models"
- "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/pprof"
"code.gitea.io/gitea/modules/private"
"code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/services/lfs"
"github.com/dgrijalva/jwt-go"
jsoniter "github.com/json-iterator/go"
diff --git a/integrations/api_repo_lfs_migrate_test.go b/integrations/api_repo_lfs_migrate_test.go
new file mode 100644
index 0000000000..7280658b74
--- /dev/null
+++ b/integrations/api_repo_lfs_migrate_test.go
@@ -0,0 +1,49 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package integrations
+
+import (
+ "net/http"
+ "path"
+ "testing"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/modules/lfs"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAPIRepoLFSMigrateLocal(t *testing.T) {
+ defer prepareTestEnv(t)()
+
+ oldImportLocalPaths := setting.ImportLocalPaths
+ oldAllowLocalNetworks := setting.Migrations.AllowLocalNetworks
+ setting.ImportLocalPaths = true
+ setting.Migrations.AllowLocalNetworks = true
+
+ user := models.AssertExistsAndLoadBean(t, &models.User{ID: 1}).(*models.User)
+ session := loginUser(t, user.Name)
+ token := getTokenForLoggedInUser(t, session)
+
+ req := NewRequestWithJSON(t, "POST", "/api/v1/repos/migrate?token="+token, &api.MigrateRepoOptions{
+ CloneAddr: path.Join(setting.RepoRootPath, "migration/lfs-test.git"),
+ RepoOwnerID: user.ID,
+ RepoName: "lfs-test-local",
+ LFS: true,
+ })
+ resp := MakeRequest(t, req, NoExpectedStatus)
+ assert.EqualValues(t, http.StatusCreated, resp.Code)
+
+ store := lfs.NewContentStore()
+ ok, _ := store.Verify(lfs.Pointer{Oid: "fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041", Size: 6})
+ assert.True(t, ok)
+ ok, _ = store.Verify(lfs.Pointer{Oid: "d6f175817f886ec6fbbc1515326465fa96c3bfd54a4ea06cfd6dbbd8340e0152", Size: 6})
+ assert.True(t, ok)
+
+ setting.ImportLocalPaths = oldImportLocalPaths
+ setting.Migrations.AllowLocalNetworks = oldAllowLocalNetworks
+}
diff --git a/integrations/git_test.go b/integrations/git_test.go
index 26907d848a..aa1b3ee2ac 100644
--- a/integrations/git_test.go
+++ b/integrations/git_test.go
@@ -18,6 +18,7 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/util"
@@ -218,7 +219,7 @@ func rawTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS s
assert.NotEqual(t, littleSize, resp.Body.Len())
assert.LessOrEqual(t, resp.Body.Len(), 1024)
if resp.Body.Len() != littleSize && resp.Body.Len() <= 1024 {
- assert.Contains(t, resp.Body.String(), models.LFSMetaFileIdentifier)
+ assert.Contains(t, resp.Body.String(), lfs.MetaFileIdentifier)
}
}
@@ -232,7 +233,7 @@ func rawTest(t *testing.T, ctx *APITestContext, little, big, littleLFS, bigLFS s
resp := session.MakeRequest(t, req, http.StatusOK)
assert.NotEqual(t, bigSize, resp.Body.Len())
if resp.Body.Len() != bigSize && resp.Body.Len() <= 1024 {
- assert.Contains(t, resp.Body.String(), models.LFSMetaFileIdentifier)
+ assert.Contains(t, resp.Body.String(), lfs.MetaFileIdentifier)
}
}
}
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/HEAD b/integrations/gitea-repositories-meta/migration/lfs-test.git/HEAD
new file mode 100644
index 0000000000..cb089cd89a
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/HEAD
@@ -0,0 +1 @@
+ref: refs/heads/master
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/config b/integrations/gitea-repositories-meta/migration/lfs-test.git/config
new file mode 100644
index 0000000000..3f8f41b6b4
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/config
@@ -0,0 +1,7 @@
+[core]
+ bare = false
+ repositoryformatversion = 0
+ filemode = false
+ symlinks = false
+ ignorecase = true
+ logallrefupdates = true
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/description b/integrations/gitea-repositories-meta/migration/lfs-test.git/description
new file mode 100644
index 0000000000..498b267a8c
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/description
@@ -0,0 +1 @@
+Unnamed repository; edit this file 'description' to name the repository.
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-checkout b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-checkout
new file mode 100644
index 0000000000..cab40f2649
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-checkout
@@ -0,0 +1,3 @@
+#!/bin/sh
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-checkout.\n"; exit 2; }
+git lfs post-checkout "$@"
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-commit b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-commit
new file mode 100644
index 0000000000..9443f4161a
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-commit
@@ -0,0 +1,3 @@
+#!/bin/sh
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-commit.\n"; exit 2; }
+git lfs post-commit "$@"
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-merge b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-merge
new file mode 100644
index 0000000000..828b70891e
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/post-merge
@@ -0,0 +1,3 @@
+#!/bin/sh
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-merge.\n"; exit 2; }
+git lfs post-merge "$@"
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/pre-push b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/pre-push
new file mode 100644
index 0000000000..81a9cc6398
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/hooks/pre-push
@@ -0,0 +1,3 @@
+#!/bin/sh
+command -v git-lfs >/dev/null 2>&1 || { echo >&2 "\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/pre-push.\n"; exit 2; }
+git lfs pre-push "$@"
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/index b/integrations/gitea-repositories-meta/migration/lfs-test.git/index
new file mode 100644
index 0000000000..13f8e26966
Binary files /dev/null and b/integrations/gitea-repositories-meta/migration/lfs-test.git/index differ
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/lfs/objects/d6/f1/d6f175817f886ec6fbbc1515326465fa96c3bfd54a4ea06cfd6dbbd8340e0152 b/integrations/gitea-repositories-meta/migration/lfs-test.git/lfs/objects/d6/f1/d6f175817f886ec6fbbc1515326465fa96c3bfd54a4ea06cfd6dbbd8340e0152
new file mode 100644
index 0000000000..e9b0a4e8a0
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/lfs/objects/d6/f1/d6f175817f886ec6fbbc1515326465fa96c3bfd54a4ea06cfd6dbbd8340e0152
@@ -0,0 +1 @@
+dummy2
\ No newline at end of file
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/lfs/objects/fb/8f/fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041 b/integrations/gitea-repositories-meta/migration/lfs-test.git/lfs/objects/fb/8f/fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041
new file mode 100644
index 0000000000..71676cd9c0
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/lfs/objects/fb/8f/fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041
@@ -0,0 +1 @@
+dummy1
\ No newline at end of file
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/54/6244003622c64b2fc3c2cd544d7a29882c8383 b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/54/6244003622c64b2fc3c2cd544d7a29882c8383
new file mode 100644
index 0000000000..0db52afbca
Binary files /dev/null and b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/54/6244003622c64b2fc3c2cd544d7a29882c8383 differ
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/6a/6ccf5d874fec134ee712572cc03a0f2dd7afec b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/6a/6ccf5d874fec134ee712572cc03a0f2dd7afec
new file mode 100644
index 0000000000..8a96927e57
Binary files /dev/null and b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/6a/6ccf5d874fec134ee712572cc03a0f2dd7afec differ
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/a6/7134b8484c2abe9fa954e1fd83b39b271383ed b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/a6/7134b8484c2abe9fa954e1fd83b39b271383ed
new file mode 100644
index 0000000000..122f87efcc
Binary files /dev/null and b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/a6/7134b8484c2abe9fa954e1fd83b39b271383ed differ
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/b7/01ed6ffe410f0c3ac204b929ea47cfec6cef54 b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/b7/01ed6ffe410f0c3ac204b929ea47cfec6cef54
new file mode 100644
index 0000000000..554b7f05b0
Binary files /dev/null and b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/b7/01ed6ffe410f0c3ac204b929ea47cfec6cef54 differ
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/f2/07b74f55cd7f9e800b7550d587cbc488f6eaf1 b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/f2/07b74f55cd7f9e800b7550d587cbc488f6eaf1
new file mode 100644
index 0000000000..ae6fdce5a2
Binary files /dev/null and b/integrations/gitea-repositories-meta/migration/lfs-test.git/objects/f2/07b74f55cd7f9e800b7550d587cbc488f6eaf1 differ
diff --git a/integrations/gitea-repositories-meta/migration/lfs-test.git/refs/heads/master b/integrations/gitea-repositories-meta/migration/lfs-test.git/refs/heads/master
new file mode 100644
index 0000000000..cd602fb935
--- /dev/null
+++ b/integrations/gitea-repositories-meta/migration/lfs-test.git/refs/heads/master
@@ -0,0 +1 @@
+546244003622c64b2fc3c2cd544d7a29882c8383
diff --git a/integrations/lfs_getobject_test.go b/integrations/lfs_getobject_test.go
index f364349ef1..789c7572a7 100644
--- a/integrations/lfs_getobject_test.go
+++ b/integrations/lfs_getobject_test.go
@@ -7,9 +7,6 @@ package integrations
import (
"archive/zip"
"bytes"
- "crypto/sha256"
- "encoding/hex"
- "io"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -18,46 +15,36 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/routers/routes"
gzipp "github.com/klauspost/compress/gzip"
"github.com/stretchr/testify/assert"
)
-func GenerateLFSOid(content io.Reader) (string, error) {
- h := sha256.New()
- if _, err := io.Copy(h, content); err != nil {
- return "", err
- }
- sum := h.Sum(nil)
- return hex.EncodeToString(sum), nil
-}
-
var lfsID = int64(20000)
func storeObjectInRepo(t *testing.T, repositoryID int64, content *[]byte) string {
- oid, err := GenerateLFSOid(bytes.NewReader(*content))
+ pointer, err := lfs.GeneratePointer(bytes.NewReader(*content))
assert.NoError(t, err)
var lfsMetaObject *models.LFSMetaObject
if setting.Database.UsePostgreSQL {
- lfsMetaObject = &models.LFSMetaObject{ID: lfsID, Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
+ lfsMetaObject = &models.LFSMetaObject{ID: lfsID, Pointer: pointer, RepositoryID: repositoryID}
} else {
- lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
+ lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: repositoryID}
}
lfsID++
lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject)
assert.NoError(t, err)
- contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
- exist, err := contentStore.Exists(lfsMetaObject)
+ contentStore := lfs.NewContentStore()
+ exist, err := contentStore.Exists(pointer)
assert.NoError(t, err)
if !exist {
- err := contentStore.Put(lfsMetaObject, bytes.NewReader(*content))
+ err := contentStore.Put(pointer, bytes.NewReader(*content))
assert.NoError(t, err)
}
- return oid
+ return pointer.Oid
}
func storeAndGetLfs(t *testing.T, content *[]byte, extraHeader *http.Header, expectedStatus int) *httptest.ResponseRecorder {
diff --git a/integrations/lfs_local_endpoint_test.go b/integrations/lfs_local_endpoint_test.go
new file mode 100644
index 0000000000..eda418c429
--- /dev/null
+++ b/integrations/lfs_local_endpoint_test.go
@@ -0,0 +1,117 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package integrations
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/modules/lfs"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func str2url(raw string) *url.URL {
+ u, _ := url.Parse(raw)
+ return u
+}
+
+func TestDetermineLocalEndpoint(t *testing.T) {
+ defer prepareTestEnv(t)()
+
+ root, _ := ioutil.TempDir("", "lfs_test")
+ defer os.RemoveAll(root)
+
+ rootdotgit, _ := ioutil.TempDir("", "lfs_test")
+ defer os.RemoveAll(rootdotgit)
+ os.Mkdir(filepath.Join(rootdotgit, ".git"), 0700)
+
+ lfsroot, _ := ioutil.TempDir("", "lfs_test")
+ defer os.RemoveAll(lfsroot)
+
+ // Test cases
+ var cases = []struct {
+ cloneurl string
+ lfsurl string
+ expected *url.URL
+ }{
+ // case 0
+ {
+ cloneurl: root,
+ lfsurl: "",
+ expected: str2url(fmt.Sprintf("file://%s", root)),
+ },
+ // case 1
+ {
+ cloneurl: root,
+ lfsurl: lfsroot,
+ expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
+ },
+ // case 2
+ {
+ cloneurl: "https://git.com/repo.git",
+ lfsurl: lfsroot,
+ expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
+ },
+ // case 3
+ {
+ cloneurl: rootdotgit,
+ lfsurl: "",
+ expected: str2url(fmt.Sprintf("file://%s", filepath.Join(rootdotgit, ".git"))),
+ },
+ // case 4
+ {
+ cloneurl: "",
+ lfsurl: rootdotgit,
+ expected: str2url(fmt.Sprintf("file://%s", filepath.Join(rootdotgit, ".git"))),
+ },
+ // case 5
+ {
+ cloneurl: rootdotgit,
+ lfsurl: rootdotgit,
+ expected: str2url(fmt.Sprintf("file://%s", filepath.Join(rootdotgit, ".git"))),
+ },
+ // case 6
+ {
+ cloneurl: fmt.Sprintf("file://%s", root),
+ lfsurl: "",
+ expected: str2url(fmt.Sprintf("file://%s", root)),
+ },
+ // case 7
+ {
+ cloneurl: fmt.Sprintf("file://%s", root),
+ lfsurl: fmt.Sprintf("file://%s", lfsroot),
+ expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
+ },
+ // case 8
+ {
+ cloneurl: root,
+ lfsurl: fmt.Sprintf("file://%s", lfsroot),
+ expected: str2url(fmt.Sprintf("file://%s", lfsroot)),
+ },
+ // case 9
+ {
+ cloneurl: "",
+ lfsurl: "/does/not/exist",
+ expected: nil,
+ },
+ // case 10
+ {
+ cloneurl: "",
+ lfsurl: "file:///does/not/exist",
+ expected: str2url("file:///does/not/exist"),
+ },
+ }
+
+ for n, c := range cases {
+ ep := lfs.DetermineEndpoint(c.cloneurl, c.lfsurl)
+
+ assert.Equal(t, c.expected, ep, "case %d: endpoint should match", n)
+ }
+}
diff --git a/models/lfs.go b/models/lfs.go
index 019d85545e..90f6523d4a 100644
--- a/models/lfs.go
+++ b/models/lfs.go
@@ -5,13 +5,9 @@
package models
import (
- "crypto/sha256"
- "encoding/hex"
"errors"
- "fmt"
- "io"
- "path"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
@@ -19,28 +15,13 @@ import (
// LFSMetaObject stores metadata for LFS tracked files.
type LFSMetaObject struct {
- ID int64 `xorm:"pk autoincr"`
- Oid string `xorm:"UNIQUE(s) INDEX NOT NULL"`
- Size int64 `xorm:"NOT NULL"`
+ ID int64 `xorm:"pk autoincr"`
+ lfs.Pointer `xorm:"extends"`
RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
Existing bool `xorm:"-"`
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}
-// RelativePath returns the relative path of the lfs object
-func (m *LFSMetaObject) RelativePath() string {
- if len(m.Oid) < 5 {
- return m.Oid
- }
-
- return path.Join(m.Oid[0:2], m.Oid[2:4], m.Oid[4:])
-}
-
-// Pointer returns the string representation of an LFS pointer file
-func (m *LFSMetaObject) Pointer() string {
- return fmt.Sprintf("%s\n%s%s\nsize %d\n", LFSMetaFileIdentifier, LFSMetaFileOidPrefix, m.Oid, m.Size)
-}
-
// LFSTokenResponse defines the JSON structure in which the JWT token is stored.
// This structure is fetched via SSH and passed by the Git LFS client to the server
// endpoint for authorization.
@@ -53,15 +34,6 @@ type LFSTokenResponse struct {
// to differentiate between database and missing object errors.
var ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")
-const (
- // LFSMetaFileIdentifier is the string appearing at the first line of LFS pointer files.
- // https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
- LFSMetaFileIdentifier = "version https://git-lfs.github.com/spec/v1"
-
- // LFSMetaFileOidPrefix appears in LFS pointer files on a line before the sha256 hash.
- LFSMetaFileOidPrefix = "oid sha256:"
-)
-
// NewLFSMetaObject stores a given populated LFSMetaObject structure in the database
// if it is not already present.
func NewLFSMetaObject(m *LFSMetaObject) (*LFSMetaObject, error) {
@@ -90,16 +62,6 @@ func NewLFSMetaObject(m *LFSMetaObject) (*LFSMetaObject, error) {
return m, sess.Commit()
}
-// GenerateLFSOid generates a Sha256Sum to represent an oid for arbitrary content
-func GenerateLFSOid(content io.Reader) (string, error) {
- h := sha256.New()
- if _, err := io.Copy(h, content); err != nil {
- return "", err
- }
- sum := h.Sum(nil)
- return hex.EncodeToString(sum), nil
-}
-
// GetLFSMetaObjectByOid selects a LFSMetaObject entry from database by its OID.
// It may return ErrLFSObjectNotExist or a database error. If the error is nil,
// the returned pointer is a valid LFSMetaObject.
@@ -108,7 +70,7 @@ func (repo *Repository) GetLFSMetaObjectByOid(oid string) (*LFSMetaObject, error
return nil, ErrLFSObjectNotExist
}
- m := &LFSMetaObject{Oid: oid, RepositoryID: repo.ID}
+ m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repo.ID}
has, err := x.Get(m)
if err != nil {
return nil, err
@@ -131,12 +93,12 @@ func (repo *Repository) RemoveLFSMetaObjectByOid(oid string) (int64, error) {
return -1, err
}
- m := &LFSMetaObject{Oid: oid, RepositoryID: repo.ID}
+ m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repo.ID}
if _, err := sess.Delete(m); err != nil {
return -1, err
}
- count, err := sess.Count(&LFSMetaObject{Oid: oid})
+ count, err := sess.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
if err != nil {
return count, err
}
@@ -168,11 +130,11 @@ func (repo *Repository) CountLFSMetaObjects() (int64, error) {
// LFSObjectAccessible checks if a provided Oid is accessible to the user
func LFSObjectAccessible(user *User, oid string) (bool, error) {
if user.IsAdmin {
- count, err := x.Count(&LFSMetaObject{Oid: oid})
+ count, err := x.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
return (count > 0), err
}
cond := accessibleRepositoryCondition(user)
- count, err := x.Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Oid: oid})
+ count, err := x.Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
return (count > 0), err
}
diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go
index c8e687b382..e9d4927ae6 100644
--- a/models/migrations/migrations.go
+++ b/models/migrations/migrations.go
@@ -302,6 +302,8 @@ var migrations = []Migration{
NewMigration("Remove invalid labels from comments", removeInvalidLabels),
// v177 -> v178
NewMigration("Delete orphaned IssueLabels", deleteOrphanedIssueLabels),
+ // v178 -> v179
+ NewMigration("Add LFS columns to Mirror", addLFSMirrorColumns),
}
// GetCurrentDBVersion returns the current db version
diff --git a/models/migrations/v178.go b/models/migrations/v178.go
new file mode 100644
index 0000000000..c2a9af618e
--- /dev/null
+++ b/models/migrations/v178.go
@@ -0,0 +1,18 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package migrations
+
+import (
+ "xorm.io/xorm"
+)
+
+func addLFSMirrorColumns(x *xorm.Engine) error {
+ type Mirror struct {
+ LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"`
+ LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
+ }
+
+ return x.Sync2(new(Mirror))
+}
diff --git a/models/repo.go b/models/repo.go
index 7f2ec1f742..bdb84ee00d 100644
--- a/models/repo.go
+++ b/models/repo.go
@@ -25,6 +25,7 @@ import (
"strings"
"time"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/options"
@@ -1531,7 +1532,7 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
}
for _, v := range lfsObjects {
- count, err := sess.Count(&LFSMetaObject{Oid: v.Oid})
+ count, err := sess.Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: v.Oid}})
if err != nil {
return err
}
diff --git a/models/repo_mirror.go b/models/repo_mirror.go
index 10b0a7b139..2c37b54aa9 100644
--- a/models/repo_mirror.go
+++ b/models/repo_mirror.go
@@ -25,6 +25,9 @@ type Mirror struct {
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX"`
NextUpdateUnix timeutil.TimeStamp `xorm:"INDEX"`
+ LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"`
+ LFSEndpoint string `xorm:"lfs_endpoint TEXT"`
+
Address string `xorm:"-"`
}
diff --git a/modules/lfs/client.go b/modules/lfs/client.go
new file mode 100644
index 0000000000..ae35919d77
--- /dev/null
+++ b/modules/lfs/client.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "context"
+ "io"
+ "net/url"
+)
+
+// Client is used to communicate with a LFS source
+type Client interface {
+ Download(ctx context.Context, oid string, size int64) (io.ReadCloser, error)
+}
+
+// NewClient creates a LFS client
+func NewClient(endpoint *url.URL) Client {
+ if endpoint.Scheme == "file" {
+ return newFilesystemClient(endpoint)
+ }
+ return newHTTPClient(endpoint)
+}
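
The client_test.go below only asserts which concrete type NewClient returns. As a rough usage sketch (not code from this PR), mirror or migration code could resolve an endpoint and stream a single object as follows; the clone URL is an illustrative placeholder, and the OID/size are the dummy1 fixture values used by the integration test above:

// Sketch only: illustrates the intended call pattern of the new lfs package.
package main

import (
	"context"
	"io"
	"io/ioutil"

	"code.gitea.io/gitea/modules/lfs"
)

func main() {
	// Prefer an explicitly configured LFS endpoint; otherwise derive one from the clone URL.
	endpoint := lfs.DetermineEndpoint("https://git.example.com/owner/repo.git", "")
	if endpoint == nil {
		return // no usable endpoint could be determined
	}

	// file:// endpoints get a FilesystemClient, everything else an HTTPClient.
	client := lfs.NewClient(endpoint)

	// OID and size would normally come from a parsed pointer file.
	content, err := client.Download(context.Background(),
		"fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041", 6)
	if err != nil {
		return
	}
	defer content.Close()
	_, _ = io.Copy(ioutil.Discard, content) // stream the object
}
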
diff --git a/modules/lfs/client_test.go b/modules/lfs/client_test.go
new file mode 100644
index 0000000000..d4eb005469
--- /dev/null
+++ b/modules/lfs/client_test.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "net/url"
+
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewClient(t *testing.T) {
+ u, _ := url.Parse("file:///test")
+ c := NewClient(u)
+ assert.IsType(t, &FilesystemClient{}, c)
+
+ u, _ = url.Parse("https://test.com/lfs")
+ c = NewClient(u)
+ assert.IsType(t, &HTTPClient{}, c)
+}
diff --git a/modules/lfs/content_store.go b/modules/lfs/content_store.go
index 520caa4c99..9fa2c7e3b2 100644
--- a/modules/lfs/content_store.go
+++ b/modules/lfs/content_store.go
@@ -13,14 +13,15 @@ import (
"io"
"os"
- "code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
)
var (
- errHashMismatch = errors.New("Content hash does not match OID")
- errSizeMismatch = errors.New("Content size does not match")
+ // ErrHashMismatch occurs if the content hash does not match the OID
+ ErrHashMismatch = errors.New("Content hash does not match OID")
+ // ErrSizeMismatch occurs if the content size does not match
+ ErrSizeMismatch = errors.New("Content size does not match")
)
// ErrRangeNotSatisfiable represents an error which request range is not satisfiable.
@@ -28,61 +29,67 @@ type ErrRangeNotSatisfiable struct {
FromByte int64
}
-func (err ErrRangeNotSatisfiable) Error() string {
- return fmt.Sprintf("Requested range %d is not satisfiable", err.FromByte)
-}
-
// IsErrRangeNotSatisfiable returns true if the error is an ErrRangeNotSatisfiable
func IsErrRangeNotSatisfiable(err error) bool {
_, ok := err.(ErrRangeNotSatisfiable)
return ok
}
+func (err ErrRangeNotSatisfiable) Error() string {
+ return fmt.Sprintf("Requested range %d is not satisfiable", err.FromByte)
+}
+
// ContentStore provides a simple file system based storage.
type ContentStore struct {
storage.ObjectStorage
}
+// NewContentStore creates the default ContentStore
+func NewContentStore() *ContentStore {
+ contentStore := &ContentStore{ObjectStorage: storage.LFS}
+ return contentStore
+}
+
// Get takes a Meta object and retrieves the content from the store, returning
// it as an io.ReadSeekCloser.
-func (s *ContentStore) Get(meta *models.LFSMetaObject) (storage.Object, error) {
- f, err := s.Open(meta.RelativePath())
+func (s *ContentStore) Get(pointer Pointer) (storage.Object, error) {
+ f, err := s.Open(pointer.RelativePath())
if err != nil {
- log.Error("Whilst trying to read LFS OID[%s]: Unable to open Error: %v", meta.Oid, err)
+ log.Error("Whilst trying to read LFS OID[%s]: Unable to open Error: %v", pointer.Oid, err)
return nil, err
}
return f, err
}
// Put takes a Meta object and an io.Reader and writes the content to the store.
-func (s *ContentStore) Put(meta *models.LFSMetaObject, r io.Reader) error {
- p := meta.RelativePath()
+func (s *ContentStore) Put(pointer Pointer, r io.Reader) error {
+ p := pointer.RelativePath()
// Wrap the provided reader with an inline hashing and size checker
- wrappedRd := newHashingReader(meta.Size, meta.Oid, r)
+ wrappedRd := newHashingReader(pointer.Size, pointer.Oid, r)
// now pass the wrapped reader to Save - if there is a size mismatch or hash mismatch then
// the errors returned by the newHashingReader should percolate up to here
- written, err := s.Save(p, wrappedRd, meta.Size)
+ written, err := s.Save(p, wrappedRd, pointer.Size)
if err != nil {
- log.Error("Whilst putting LFS OID[%s]: Failed to copy to tmpPath: %s Error: %v", meta.Oid, p, err)
+ log.Error("Whilst putting LFS OID[%s]: Failed to copy to tmpPath: %s Error: %v", pointer.Oid, p, err)
return err
}
// This shouldn't happen but it is sensible to test
- if written != meta.Size {
+ if written != pointer.Size {
if err := s.Delete(p); err != nil {
- log.Error("Cleaning the LFS OID[%s] failed: %v", meta.Oid, err)
+ log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, err)
}
- return errSizeMismatch
+ return ErrSizeMismatch
}
return nil
}
// Exists returns true if the object exists in the content store.
-func (s *ContentStore) Exists(meta *models.LFSMetaObject) (bool, error) {
- _, err := s.ObjectStorage.Stat(meta.RelativePath())
+func (s *ContentStore) Exists(pointer Pointer) (bool, error) {
+ _, err := s.ObjectStorage.Stat(pointer.RelativePath())
if err != nil {
if os.IsNotExist(err) {
return false, nil
@@ -93,19 +100,25 @@ func (s *ContentStore) Exists(meta *models.LFSMetaObject) (bool, error) {
}
// Verify returns true if the object exists in the content store and size is correct.
-func (s *ContentStore) Verify(meta *models.LFSMetaObject) (bool, error) {
- p := meta.RelativePath()
+func (s *ContentStore) Verify(pointer Pointer) (bool, error) {
+ p := pointer.RelativePath()
fi, err := s.ObjectStorage.Stat(p)
- if os.IsNotExist(err) || (err == nil && fi.Size() != meta.Size) {
+ if os.IsNotExist(err) || (err == nil && fi.Size() != pointer.Size) {
return false, nil
} else if err != nil {
- log.Error("Unable stat file: %s for LFS OID[%s] Error: %v", p, meta.Oid, err)
+ log.Error("Unable stat file: %s for LFS OID[%s] Error: %v", p, pointer.Oid, err)
return false, err
}
return true, nil
}
+// ReadMetaObject reads the LFS content addressed by the given pointer and returns a reader
+func ReadMetaObject(pointer Pointer) (io.ReadCloser, error) {
+ contentStore := NewContentStore()
+ return contentStore.Get(pointer)
+}
+
type hashingReader struct {
internal io.Reader
currentSize int64
@@ -127,12 +140,12 @@ func (r *hashingReader) Read(b []byte) (int, error) {
if err != nil && err == io.EOF {
if r.currentSize != r.expectedSize {
- return n, errSizeMismatch
+ return n, ErrSizeMismatch
}
shaStr := hex.EncodeToString(r.hash.Sum(nil))
if shaStr != r.expectedHash {
- return n, errHashMismatch
+ return n, ErrHashMismatch
}
}
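
With ContentStore now keyed by lfs.Pointer instead of *models.LFSMetaObject, storing and verifying a blob could look roughly like the sketch below (not part of this PR; it assumes storage.LFS has already been initialised, as the integration tests arrange through their test environment):

package main

import (
	"bytes"
	"fmt"

	"code.gitea.io/gitea/modules/lfs"
)

func main() {
	content := []byte("dummy1")

	// Derive OID and size from the raw content.
	pointer, err := lfs.GeneratePointer(bytes.NewReader(content))
	if err != nil {
		panic(err)
	}

	store := lfs.NewContentStore() // wraps storage.LFS

	// Put hashes and size-checks the stream while saving it; a mismatch
	// surfaces as ErrHashMismatch or ErrSizeMismatch.
	if err := store.Put(pointer, bytes.NewReader(content)); err != nil {
		panic(err)
	}

	// Verify checks both existence and size of the stored object.
	ok, err := store.Verify(pointer)
	fmt.Println(ok, err)
}
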
diff --git a/modules/lfs/endpoint.go b/modules/lfs/endpoint.go
new file mode 100644
index 0000000000..add16ce9f1
--- /dev/null
+++ b/modules/lfs/endpoint.go
@@ -0,0 +1,106 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+// DetermineEndpoint determines the LFS endpoint: the specified LFS URL is used if present, otherwise the endpoint is derived from the clone URL.
+func DetermineEndpoint(cloneurl, lfsurl string) *url.URL {
+ if len(lfsurl) > 0 {
+ return endpointFromURL(lfsurl)
+ }
+ return endpointFromCloneURL(cloneurl)
+}
+
+func endpointFromCloneURL(rawurl string) *url.URL {
+ ep := endpointFromURL(rawurl)
+ if ep == nil {
+ return ep
+ }
+
+ if strings.HasSuffix(ep.Path, "/") {
+ ep.Path = ep.Path[:len(ep.Path)-1]
+ }
+
+ if ep.Scheme == "file" {
+ return ep
+ }
+
+ if path.Ext(ep.Path) == ".git" {
+ ep.Path += "/info/lfs"
+ } else {
+ ep.Path += ".git/info/lfs"
+ }
+
+ return ep
+}
+
+func endpointFromURL(rawurl string) *url.URL {
+ if strings.HasPrefix(rawurl, "/") {
+ return endpointFromLocalPath(rawurl)
+ }
+
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ log.Error("lfs.endpointFromUrl: %v", err)
+ return nil
+ }
+
+ switch u.Scheme {
+ case "http", "https":
+ return u
+ case "git":
+ u.Scheme = "https"
+ return u
+ case "file":
+ return u
+ default:
+ if _, err := os.Stat(rawurl); err == nil {
+ return endpointFromLocalPath(rawurl)
+ }
+
+ log.Error("lfs.endpointFromUrl: unknown url")
+ return nil
+ }
+}
+
+func endpointFromLocalPath(path string) *url.URL {
+ var slash string
+ if abs, err := filepath.Abs(path); err == nil {
+ if !strings.HasPrefix(abs, "/") {
+ slash = "/"
+ }
+ path = abs
+ }
+
+ var gitpath string
+ if filepath.Base(path) == ".git" {
+ gitpath = path
+ path = filepath.Dir(path)
+ } else {
+ gitpath = filepath.Join(path, ".git")
+ }
+
+ if _, err := os.Stat(gitpath); err == nil {
+ path = gitpath
+ } else if _, err := os.Stat(path); err != nil {
+ return nil
+ }
+
+ path = fmt.Sprintf("file://%s%s", slash, filepath.ToSlash(path))
+
+ u, _ := url.Parse(path)
+
+ return u
+}
diff --git a/modules/lfs/endpoint_test.go b/modules/lfs/endpoint_test.go
new file mode 100644
index 0000000000..a7e8b1bfb7
--- /dev/null
+++ b/modules/lfs/endpoint_test.go
@@ -0,0 +1,75 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func str2url(raw string) *url.URL {
+ u, _ := url.Parse(raw)
+ return u
+}
+
+func TestDetermineEndpoint(t *testing.T) {
+ // Test cases
+ var cases = []struct {
+ cloneurl string
+ lfsurl string
+ expected *url.URL
+ }{
+ // case 0
+ {
+ cloneurl: "",
+ lfsurl: "",
+ expected: nil,
+ },
+ // case 1
+ {
+ cloneurl: "https://git.com/repo",
+ lfsurl: "",
+ expected: str2url("https://git.com/repo.git/info/lfs"),
+ },
+ // case 2
+ {
+ cloneurl: "https://git.com/repo.git",
+ lfsurl: "",
+ expected: str2url("https://git.com/repo.git/info/lfs"),
+ },
+ // case 3
+ {
+ cloneurl: "",
+ lfsurl: "https://gitlfs.com/repo",
+ expected: str2url("https://gitlfs.com/repo"),
+ },
+ // case 4
+ {
+ cloneurl: "https://git.com/repo.git",
+ lfsurl: "https://gitlfs.com/repo",
+ expected: str2url("https://gitlfs.com/repo"),
+ },
+ // case 5
+ {
+ cloneurl: "git://git.com/repo.git",
+ lfsurl: "",
+ expected: str2url("https://git.com/repo.git/info/lfs"),
+ },
+ // case 6
+ {
+ cloneurl: "",
+ lfsurl: "git://gitlfs.com/repo",
+ expected: str2url("https://gitlfs.com/repo"),
+ },
+ }
+
+ for n, c := range cases {
+ ep := DetermineEndpoint(c.cloneurl, c.lfsurl)
+
+ assert.Equal(t, c.expected, ep, "case %d: endpoint should match", n)
+ }
+}
diff --git a/modules/lfs/filesystem_client.go b/modules/lfs/filesystem_client.go
new file mode 100644
index 0000000000..3a51564a82
--- /dev/null
+++ b/modules/lfs/filesystem_client.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "context"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "code.gitea.io/gitea/modules/util"
+)
+
+// FilesystemClient is used to read LFS data from a filesystem path
+type FilesystemClient struct {
+ lfsdir string
+}
+
+func newFilesystemClient(endpoint *url.URL) *FilesystemClient {
+ path, _ := util.FileURLToPath(endpoint)
+
+ lfsdir := filepath.Join(path, "lfs", "objects")
+
+ client := &FilesystemClient{lfsdir}
+
+ return client
+}
+
+func (c *FilesystemClient) objectPath(oid string) string {
+ return filepath.Join(c.lfsdir, oid[0:2], oid[2:4], oid)
+}
+
+// Download reads the specified LFS object from the target repository
+func (c *FilesystemClient) Download(ctx context.Context, oid string, size int64) (io.ReadCloser, error) {
+ objectPath := c.objectPath(oid)
+
+ if _, err := os.Stat(objectPath); os.IsNotExist(err) {
+ return nil, err
+ }
+
+ file, err := os.Open(objectPath)
+ if err != nil {
+ return nil, err
+ }
+
+ return file, nil
+}
diff --git a/modules/lfs/http_client.go b/modules/lfs/http_client.go
new file mode 100644
index 0000000000..fb45defda1
--- /dev/null
+++ b/modules/lfs/http_client.go
@@ -0,0 +1,129 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+// HTTPClient is used to communicate with the LFS server
+// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
+type HTTPClient struct {
+ client *http.Client
+ endpoint string
+ transfers map[string]TransferAdapter
+}
+
+func newHTTPClient(endpoint *url.URL) *HTTPClient {
+ hc := &http.Client{}
+
+ client := &HTTPClient{
+ client: hc,
+ endpoint: strings.TrimSuffix(endpoint.String(), "/"),
+ transfers: make(map[string]TransferAdapter),
+ }
+
+ basic := &BasicTransferAdapter{hc}
+
+ client.transfers[basic.Name()] = basic
+
+ return client
+}
+
+func (c *HTTPClient) transferNames() []string {
+ keys := make([]string, len(c.transfers))
+
+ i := 0
+ for k := range c.transfers {
+ keys[i] = k
+ i++
+ }
+
+ return keys
+}
+
+func (c *HTTPClient) batch(ctx context.Context, operation string, objects []Pointer) (*BatchResponse, error) {
+ url := fmt.Sprintf("%s/objects/batch", c.endpoint)
+
+ request := &BatchRequest{operation, c.transferNames(), nil, objects}
+
+ payload := new(bytes.Buffer)
+ err := json.NewEncoder(payload).Encode(request)
+ if err != nil {
+ return nil, fmt.Errorf("lfs.HTTPClient.batch json.Encode: %w", err)
+ }
+
+ log.Trace("lfs.HTTPClient.batch NewRequestWithContext: %s", url)
+
+ req, err := http.NewRequestWithContext(ctx, "POST", url, payload)
+ if err != nil {
+ return nil, fmt.Errorf("lfs.HTTPClient.batch http.NewRequestWithContext: %w", err)
+ }
+ req.Header.Set("Content-type", MediaType)
+ req.Header.Set("Accept", MediaType)
+
+ res, err := c.client.Do(req)
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return nil, fmt.Errorf("lfs.HTTPClient.batch http.Do: %w", err)
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("lfs.HTTPClient.batch: Unexpected servers response: %s", res.Status)
+ }
+
+ var response BatchResponse
+ err = json.NewDecoder(res.Body).Decode(&response)
+ if err != nil {
+ return nil, fmt.Errorf("lfs.HTTPClient.batch json.Decode: %w", err)
+ }
+
+ if len(response.Transfer) == 0 {
+ response.Transfer = "basic"
+ }
+
+ return &response, nil
+}
+
+// Download reads the specified LFS object from the LFS server
+func (c *HTTPClient) Download(ctx context.Context, oid string, size int64) (io.ReadCloser, error) {
+ var objects []Pointer
+ objects = append(objects, Pointer{oid, size})
+
+ result, err := c.batch(ctx, "download", objects)
+ if err != nil {
+ return nil, err
+ }
+
+ transferAdapter, ok := c.transfers[result.Transfer]
+ if !ok {
+ return nil, fmt.Errorf("lfs.HTTPClient.Download Transferadapter not found: %s", result.Transfer)
+ }
+
+ if len(result.Objects) == 0 {
+ return nil, errors.New("lfs.HTTPClient.Download: No objects in result")
+ }
+
+ content, err := transferAdapter.Download(ctx, result.Objects[0])
+ if err != nil {
+ return nil, err
+ }
+ return content, nil
+}
diff --git a/modules/lfs/http_client_test.go b/modules/lfs/http_client_test.go
new file mode 100644
index 0000000000..043aa0214e
--- /dev/null
+++ b/modules/lfs/http_client_test.go
@@ -0,0 +1,144 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type RoundTripFunc func(req *http.Request) *http.Response
+
+func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+ return f(req), nil
+}
+
+type DummyTransferAdapter struct {
+}
+
+func (a *DummyTransferAdapter) Name() string {
+ return "dummy"
+}
+
+func (a *DummyTransferAdapter) Download(ctx context.Context, r *ObjectResponse) (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBufferString("dummy")), nil
+}
+
+func TestHTTPClientDownload(t *testing.T) {
+ oid := "fb8f7d8435968c4f82a726a92395be4d16f2f63116caf36c8ad35c60831ab041"
+ size := int64(6)
+
+ roundTripHandler := func(req *http.Request) *http.Response {
+ url := req.URL.String()
+ if strings.Contains(url, "status-not-ok") {
+ return &http.Response{StatusCode: http.StatusBadRequest}
+ }
+ if strings.Contains(url, "invalid-json-response") {
+ return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewBufferString("invalid json"))}
+ }
+ if strings.Contains(url, "valid-batch-request-download") {
+ assert.Equal(t, "POST", req.Method)
+ assert.Equal(t, MediaType, req.Header.Get("Content-type"), "case %s: error should match", url)
+ assert.Equal(t, MediaType, req.Header.Get("Accept"), "case %s: error should match", url)
+
+ var batchRequest BatchRequest
+ err := json.NewDecoder(req.Body).Decode(&batchRequest)
+ assert.NoError(t, err)
+
+ assert.Equal(t, "download", batchRequest.Operation)
+ assert.Equal(t, 1, len(batchRequest.Objects))
+ assert.Equal(t, oid, batchRequest.Objects[0].Oid)
+ assert.Equal(t, size, batchRequest.Objects[0].Size)
+
+ batchResponse := &BatchResponse{
+ Transfer: "dummy",
+ Objects: make([]*ObjectResponse, 1),
+ }
+
+ payload := new(bytes.Buffer)
+ json.NewEncoder(payload).Encode(batchResponse)
+
+ return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(payload)}
+ }
+ if strings.Contains(url, "invalid-response-no-objects") {
+ batchResponse := &BatchResponse{Transfer: "dummy"}
+
+ payload := new(bytes.Buffer)
+ json.NewEncoder(payload).Encode(batchResponse)
+
+ return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(payload)}
+ }
+ if strings.Contains(url, "unknown-transfer-adapter") {
+ batchResponse := &BatchResponse{Transfer: "unknown_adapter"}
+
+ payload := new(bytes.Buffer)
+ json.NewEncoder(payload).Encode(batchResponse)
+
+ return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(payload)}
+ }
+
+ t.Errorf("Unknown test case: %s", url)
+
+ return nil
+ }
+
+ hc := &http.Client{Transport: RoundTripFunc(roundTripHandler)}
+ dummy := &DummyTransferAdapter{}
+
+ var cases = []struct {
+ endpoint string
+ expectederror string
+ }{
+ // case 0
+ {
+ endpoint: "https://status-not-ok.io",
+ expectederror: "Unexpected servers response: ",
+ },
+ // case 1
+ {
+ endpoint: "https://invalid-json-response.io",
+ expectederror: "json.Decode: ",
+ },
+ // case 2
+ {
+ endpoint: "https://valid-batch-request-download.io",
+ expectederror: "",
+ },
+ // case 3
+ {
+ endpoint: "https://invalid-response-no-objects.io",
+ expectederror: "No objects in result",
+ },
+ // case 4
+ {
+ endpoint: "https://unknown-transfer-adapter.io",
+ expectederror: "Transferadapter not found: ",
+ },
+ }
+
+ for n, c := range cases {
+ client := &HTTPClient{
+ client: hc,
+ endpoint: c.endpoint,
+ transfers: make(map[string]TransferAdapter),
+ }
+ client.transfers["dummy"] = dummy
+
+ _, err := client.Download(context.Background(), oid, size)
+ if len(c.expectederror) > 0 {
+ assert.True(t, strings.Contains(err.Error(), c.expectederror), "case %d: '%s' should contain '%s'", n, err.Error(), c.expectederror)
+ } else {
+ assert.NoError(t, err, "case %d", n)
+ }
+ }
+}
diff --git a/modules/lfs/pointer.go b/modules/lfs/pointer.go
new file mode 100644
index 0000000000..975b5e7dc6
--- /dev/null
+++ b/modules/lfs/pointer.go
@@ -0,0 +1,123 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "path"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+const (
+ blobSizeCutoff = 1024
+
+ // MetaFileIdentifier is the string appearing at the first line of LFS pointer files.
+ // https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
+ MetaFileIdentifier = "version https://git-lfs.github.com/spec/v1"
+
+ // MetaFileOidPrefix appears in LFS pointer files on a line before the sha256 hash.
+ MetaFileOidPrefix = "oid sha256:"
+)
+
+var (
+ // ErrMissingPrefix occurs if the content lacks the LFS prefix
+ ErrMissingPrefix = errors.New("Content lacks the LFS prefix")
+
+ // ErrInvalidStructure occurs if the content has an invalid structure
+ ErrInvalidStructure = errors.New("Content has an invalid structure")
+
+ // ErrInvalidOIDFormat occurs if the oid has an invalid format
+ ErrInvalidOIDFormat = errors.New("OID has an invalid format")
+)
+
+// ReadPointer tries to read LFS pointer data from the reader
+func ReadPointer(reader io.Reader) (Pointer, error) {
+ buf := make([]byte, blobSizeCutoff)
+ n, err := io.ReadFull(reader, buf)
+ if err != nil && err != io.ErrUnexpectedEOF {
+ return Pointer{}, err
+ }
+ buf = buf[:n]
+
+ return ReadPointerFromBuffer(buf)
+}
+
+var oidPattern = regexp.MustCompile(`^[a-f\d]{64}$`)
+
+// ReadPointerFromBuffer will return a pointer if the provided byte slice is a pointer file or an error otherwise.
+func ReadPointerFromBuffer(buf []byte) (Pointer, error) {
+ var p Pointer
+
+ headString := string(buf)
+ if !strings.HasPrefix(headString, MetaFileIdentifier) {
+ return p, ErrMissingPrefix
+ }
+
+ splitLines := strings.Split(headString, "\n")
+ if len(splitLines) < 3 {
+ return p, ErrInvalidStructure
+ }
+
+ oid := strings.TrimPrefix(splitLines[1], MetaFileOidPrefix)
+ if len(oid) != 64 || !oidPattern.MatchString(oid) {
+ return p, ErrInvalidOIDFormat
+ }
+ size, err := strconv.ParseInt(strings.TrimPrefix(splitLines[2], "size "), 10, 64)
+ if err != nil {
+ return p, err
+ }
+
+ p.Oid = oid
+ p.Size = size
+
+ return p, nil
+}
+
+// IsValid checks if the pointer has a valid structure.
+// It doesn't check if the pointed-to content exists.
+func (p Pointer) IsValid() bool {
+ if len(p.Oid) != 64 {
+ return false
+ }
+ if !oidPattern.MatchString(p.Oid) {
+ return false
+ }
+ if p.Size < 0 {
+ return false
+ }
+ return true
+}
+
+// StringContent returns the string representation of the pointer
+// https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#the-pointer
+func (p Pointer) StringContent() string {
+ return fmt.Sprintf("%s\n%s%s\nsize %d\n", MetaFileIdentifier, MetaFileOidPrefix, p.Oid, p.Size)
+}
+
+// RelativePath returns the relative storage path of the pointer
+func (p Pointer) RelativePath() string {
+ if len(p.Oid) < 5 {
+ return p.Oid
+ }
+
+ return path.Join(p.Oid[0:2], p.Oid[2:4], p.Oid[4:])
+}
+
+// GeneratePointer generates a pointer for arbitrary content
+func GeneratePointer(content io.Reader) (Pointer, error) {
+ h := sha256.New()
+ c, err := io.Copy(h, content)
+ if err != nil {
+ return Pointer{}, err
+ }
+ sum := h.Sum(nil)
+ return Pointer{Oid: hex.EncodeToString(sum), Size: c}, nil
+}
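
Taken together, GeneratePointer, StringContent and ReadPointer replace the removed models.GenerateLFSOid and LFSMetaObject.Pointer helpers. A small round-trip sketch (not part of this PR) showing how they fit together:

package main

import (
	"fmt"
	"strings"

	"code.gitea.io/gitea/modules/lfs"
)

func main() {
	// Hash some content and record its size (values computed here, not taken from the PR).
	p, err := lfs.GeneratePointer(strings.NewReader("Gitea"))
	if err != nil {
		panic(err)
	}

	// StringContent renders the canonical pointer file:
	//   version https://git-lfs.github.com/spec/v1
	//   oid sha256:<64 hex characters>
	//   size 5
	fmt.Print(p.StringContent())

	// Parsing the rendered pointer yields the same Oid and Size.
	parsed, err := lfs.ReadPointer(strings.NewReader(p.StringContent()))
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Oid == p.Oid, parsed.Size == p.Size) // true true

	// RelativePath determines where the content store keeps the object.
	fmt.Println(p.RelativePath()) // 94/cb/57646c...
}
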
diff --git a/modules/lfs/pointer_scanner_gogit.go b/modules/lfs/pointer_scanner_gogit.go
new file mode 100644
index 0000000000..abd882990c
--- /dev/null
+++ b/modules/lfs/pointer_scanner_gogit.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// +build gogit
+
+package lfs
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/modules/git"
+
+ "github.com/go-git/go-git/v5/plumbing/object"
+)
+
+// SearchPointerBlobs scans the whole repository for LFS pointer files
+func SearchPointerBlobs(ctx context.Context, repo *git.Repository, pointerChan chan<- PointerBlob, errChan chan<- error) {
+ gitRepo := repo.GoGitRepo()
+
+ err := func() error {
+ blobs, err := gitRepo.BlobObjects()
+ if err != nil {
+ return fmt.Errorf("lfs.SearchPointerBlobs BlobObjects: %w", err)
+ }
+
+ return blobs.ForEach(func(blob *object.Blob) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if blob.Size > blobSizeCutoff {
+ return nil
+ }
+
+ reader, err := blob.Reader()
+ if err != nil {
+ return fmt.Errorf("lfs.SearchPointerBlobs blob.Reader: %w", err)
+ }
+ defer reader.Close()
+
+ pointer, _ := ReadPointer(reader)
+ if pointer.IsValid() {
+ pointerChan <- PointerBlob{Hash: blob.Hash.String(), Pointer: pointer}
+ }
+
+ return nil
+ })
+ }()
+
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ default:
+ errChan <- err
+ }
+ }
+
+ close(pointerChan)
+ close(errChan)
+}
diff --git a/modules/lfs/pointer_scanner_nogogit.go b/modules/lfs/pointer_scanner_nogogit.go
new file mode 100644
index 0000000000..28d4afba61
--- /dev/null
+++ b/modules/lfs/pointer_scanner_nogogit.go
@@ -0,0 +1,110 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// +build !gogit
+
+package lfs
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "strconv"
+ "sync"
+
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/git/pipeline"
+)
+
+// SearchPointerBlobs scans the whole repository for LFS pointer files
+func SearchPointerBlobs(ctx context.Context, repo *git.Repository, pointerChan chan<- PointerBlob, errChan chan<- error) {
+ basePath := repo.Path
+
+ catFileCheckReader, catFileCheckWriter := io.Pipe()
+ shasToBatchReader, shasToBatchWriter := io.Pipe()
+ catFileBatchReader, catFileBatchWriter := io.Pipe()
+
+ wg := sync.WaitGroup{}
+ wg.Add(4)
+
+ // Create the go-routines in reverse order.
+
+ // 4. Take the output of cat-file --batch and check each file in turn
+ // to see if it is a pointer to a file in the LFS store
+ go createPointerResultsFromCatFileBatch(ctx, catFileBatchReader, &wg, pointerChan)
+
+ // 3. Take the shas of the blobs and batch read them
+ go pipeline.CatFileBatch(shasToBatchReader, catFileBatchWriter, &wg, basePath)
+
+ // 2. From the provided objects restrict to blobs <=1k
+ go pipeline.BlobsLessThan1024FromCatFileBatchCheck(catFileCheckReader, shasToBatchWriter, &wg)
+
+ // 1. Run batch-check on all objects in the repository
+ if git.CheckGitVersionAtLeast("2.6.0") != nil {
+ revListReader, revListWriter := io.Pipe()
+ shasToCheckReader, shasToCheckWriter := io.Pipe()
+ wg.Add(2)
+ go pipeline.CatFileBatchCheck(shasToCheckReader, catFileCheckWriter, &wg, basePath)
+ go pipeline.BlobsFromRevListObjects(revListReader, shasToCheckWriter, &wg)
+ go pipeline.RevListAllObjects(revListWriter, &wg, basePath, errChan)
+ } else {
+ go pipeline.CatFileBatchCheckAllObjects(catFileCheckWriter, &wg, basePath, errChan)
+ }
+ wg.Wait()
+
+ close(pointerChan)
+ close(errChan)
+}
+
+func createPointerResultsFromCatFileBatch(ctx context.Context, catFileBatchReader *io.PipeReader, wg *sync.WaitGroup, pointerChan chan<- PointerBlob) {
+ defer wg.Done()
+ defer catFileBatchReader.Close()
+
+ bufferedReader := bufio.NewReader(catFileBatchReader)
+ buf := make([]byte, 1025)
+
+loop:
+ for {
+ select {
+ case <-ctx.Done():
+ break loop
+ default:
+ }
+
+ // File descriptor line: sha
+ sha, err := bufferedReader.ReadString(' ')
+ if err != nil {
+ _ = catFileBatchReader.CloseWithError(err)
+ break
+ }
+ // Throw away the blob
+ if _, err := bufferedReader.ReadString(' '); err != nil {
+ _ = catFileBatchReader.CloseWithError(err)
+ break
+ }
+ sizeStr, err := bufferedReader.ReadString('\n')
+ if err != nil {
+ _ = catFileBatchReader.CloseWithError(err)
+ break
+ }
+ size, err := strconv.Atoi(sizeStr[:len(sizeStr)-1])
+ if err != nil {
+ _ = catFileBatchReader.CloseWithError(err)
+ break
+ }
+ pointerBuf := buf[:size+1]
+ if _, err := io.ReadFull(bufferedReader, pointerBuf); err != nil {
+ _ = catFileBatchReader.CloseWithError(err)
+ break
+ }
+ pointerBuf = pointerBuf[:size]
+ // Now we need to check if the pointerBuf is an LFS pointer
+ pointer, _ := ReadPointerFromBuffer(pointerBuf)
+ if !pointer.IsValid() {
+ continue
+ }
+
+ pointerChan <- PointerBlob{Hash: sha, Pointer: pointer}
+ }
+}
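
Both scanner variants share the same channel-based contract: the caller supplies the channels and the scanner closes them once the walk finishes. A sketch of the intended consumption pattern (the helper below is hypothetical, not code from this PR):

package example

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/lfs"
)

// scanForPointers is a hypothetical helper showing how a caller drives SearchPointerBlobs.
func scanForPointers(ctx context.Context, repo *git.Repository) error {
	pointerChan := make(chan lfs.PointerBlob)
	errChan := make(chan error, 1)

	// The scanner closes both channels when the walk is finished.
	go lfs.SearchPointerBlobs(ctx, repo, pointerChan, errChan)

	for blob := range pointerChan {
		fmt.Printf("blob %s is a pointer to LFS object %s (%d bytes)\n",
			blob.Hash, blob.Oid, blob.Size)
	}

	// At most one error is sent before errChan is closed.
	if err, ok := <-errChan; ok {
		return err
	}
	return nil
}
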
diff --git a/modules/lfs/pointer_test.go b/modules/lfs/pointer_test.go
new file mode 100644
index 0000000000..0ed6df2c6d
--- /dev/null
+++ b/modules/lfs/pointer_test.go
@@ -0,0 +1,103 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "path"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStringContent(t *testing.T) {
+ p := Pointer{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", Size: 1234}
+ expected := "version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"
+ assert.Equal(t, p.StringContent(), expected)
+}
+
+func TestRelativePath(t *testing.T) {
+ p := Pointer{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393"}
+ expected := path.Join("4d", "7a", "214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
+ assert.Equal(t, p.RelativePath(), expected)
+
+ p2 := Pointer{Oid: "4d7a"}
+ assert.Equal(t, p2.RelativePath(), "4d7a")
+}
+
+func TestIsValid(t *testing.T) {
+ p := Pointer{}
+ assert.False(t, p.IsValid())
+
+ p = Pointer{Oid: "123"}
+ assert.False(t, p.IsValid())
+
+ p = Pointer{Oid: "z4cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc"}
+ assert.False(t, p.IsValid())
+
+ p = Pointer{Oid: "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc"}
+ assert.True(t, p.IsValid())
+
+ p = Pointer{Oid: "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc", Size: -1}
+ assert.False(t, p.IsValid())
+}
+
+func TestGeneratePointer(t *testing.T) {
+ p, err := GeneratePointer(strings.NewReader("Gitea"))
+ assert.NoError(t, err)
+ assert.True(t, p.IsValid())
+ assert.Equal(t, p.Oid, "94cb57646c54a297c9807697e80a30946f79a4b82cb079d2606847825b1812cc")
+ assert.Equal(t, p.Size, int64(5))
+}
+
+func TestReadPointerFromBuffer(t *testing.T) {
+ p, err := ReadPointerFromBuffer([]byte{})
+ assert.ErrorIs(t, err, ErrMissingPrefix)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("test"))
+ assert.ErrorIs(t, err, ErrMissingPrefix)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\n"))
+ assert.ErrorIs(t, err, ErrInvalidStructure)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a\nsize 1234\n"))
+ assert.ErrorIs(t, err, ErrInvalidOIDFormat)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a2146z4ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
+ assert.ErrorIs(t, err, ErrInvalidOIDFormat)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\ntest 1234\n"))
+ assert.Error(t, err)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize test\n"))
+ assert.Error(t, err)
+ assert.False(t, p.IsValid())
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
+ assert.NoError(t, err)
+ assert.True(t, p.IsValid())
+ assert.Equal(t, p.Oid, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
+ assert.Equal(t, p.Size, int64(1234))
+
+ p, err = ReadPointerFromBuffer([]byte("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\ntest"))
+ assert.NoError(t, err)
+ assert.True(t, p.IsValid())
+ assert.Equal(t, p.Oid, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
+ assert.Equal(t, p.Size, int64(1234))
+}
+
+func TestReadPointer(t *testing.T) {
+ p, err := ReadPointer(strings.NewReader("version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 1234\n"))
+ assert.NoError(t, err)
+ assert.True(t, p.IsValid())
+ assert.Equal(t, p.Oid, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393")
+ assert.Equal(t, p.Size, int64(1234))
+}
diff --git a/modules/lfs/pointers.go b/modules/lfs/pointers.go
deleted file mode 100644
index 692c81f583..0000000000
--- a/modules/lfs/pointers.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The Gitea Authors. All rights reserved.
-// Use of this source code is governed by a MIT-style
-// license that can be found in the LICENSE file.
-
-package lfs
-
-import (
- "io"
- "strconv"
- "strings"
-
- "code.gitea.io/gitea/models"
- "code.gitea.io/gitea/modules/base"
- "code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/storage"
-)
-
-// ReadPointerFile will return a partially filled LFSMetaObject if the provided reader is a pointer file
-func ReadPointerFile(reader io.Reader) (*models.LFSMetaObject, *[]byte) {
- if !setting.LFS.StartServer {
- return nil, nil
- }
-
- buf := make([]byte, 1024)
- n, _ := reader.Read(buf)
- buf = buf[:n]
-
- if isTextFile := base.IsTextFile(buf); !isTextFile {
- return nil, nil
- }
-
- return IsPointerFile(&buf), &buf
-}
-
-// IsPointerFile will return a partially filled LFSMetaObject if the provided byte slice is a pointer file
-func IsPointerFile(buf *[]byte) *models.LFSMetaObject {
- if !setting.LFS.StartServer {
- return nil
- }
-
- headString := string(*buf)
- if !strings.HasPrefix(headString, models.LFSMetaFileIdentifier) {
- return nil
- }
-
- splitLines := strings.Split(headString, "\n")
- if len(splitLines) < 3 {
- return nil
- }
-
- oid := strings.TrimPrefix(splitLines[1], models.LFSMetaFileOidPrefix)
- size, err := strconv.ParseInt(strings.TrimPrefix(splitLines[2], "size "), 10, 64)
- if len(oid) != 64 || err != nil {
- return nil
- }
-
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
- meta := &models.LFSMetaObject{Oid: oid, Size: size}
- exist, err := contentStore.Exists(meta)
- if err != nil || !exist {
- return nil
- }
-
- return meta
-}
-
-// ReadMetaObject will read a models.LFSMetaObject and return a reader
-func ReadMetaObject(meta *models.LFSMetaObject) (io.ReadCloser, error) {
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
- return contentStore.Get(meta)
-}
diff --git a/modules/lfs/shared.go b/modules/lfs/shared.go
new file mode 100644
index 0000000000..70b76d7512
--- /dev/null
+++ b/modules/lfs/shared.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "time"
+)
+
+const (
+ // MediaType contains the media type for LFS server requests
+ MediaType = "application/vnd.git-lfs+json"
+)
+
+// BatchRequest contains multiple requests processed in one batch operation.
+// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#requests
+type BatchRequest struct {
+ Operation string `json:"operation"`
+ Transfers []string `json:"transfers,omitempty"`
+ Ref *Reference `json:"ref,omitempty"`
+ Objects []Pointer `json:"objects"`
+}
+
+// Reference contains a git reference.
+// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#ref-property
+type Reference struct {
+ Name string `json:"name"`
+}
+
+// Pointer contains LFS pointer data
+type Pointer struct {
+ Oid string `json:"oid" xorm:"UNIQUE(s) INDEX NOT NULL"`
+ Size int64 `json:"size" xorm:"NOT NULL"`
+}
+
+// BatchResponse contains multiple object metadata structures
+// for use with the batch API.
+// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#successful-responses
+type BatchResponse struct {
+ Transfer string `json:"transfer,omitempty"`
+ Objects []*ObjectResponse `json:"objects"`
+}
+
+// ObjectResponse is object metadata as seen by clients of the LFS server.
+type ObjectResponse struct {
+ Pointer
+ Actions map[string]*Link `json:"actions"`
+ Error *ObjectError `json:"error,omitempty"`
+}
+
+// Link provides a structure used to build a hypermedia representation of an HTTP link.
+type Link struct {
+ Href string `json:"href"`
+ Header map[string]string `json:"header,omitempty"`
+ ExpiresAt time.Time `json:"expires_at,omitempty"`
+}
+
+// ObjectError defines the JSON structure returned to the client in case of an error
+type ObjectError struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+}
+
+// PointerBlob associates a Git blob with a Pointer.
+type PointerBlob struct {
+ Hash string
+ Pointer
+}
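
Note: a small sketch of how a caller might serialize the batch types declared above into the JSON payload the LFS batch endpoint expects; the object OID and size are placeholders, and sending the request (URL, headers) is only hinted at in a comment:

```go
package main

import (
	"encoding/json"
	"fmt"

	"code.gitea.io/gitea/modules/lfs"
)

func main() {
	// Build a download request for a single object, mirroring the
	// BatchRequest/Pointer types declared in modules/lfs/shared.go.
	req := lfs.BatchRequest{
		Operation: "download",
		Transfers: []string{"basic"},
		Objects: []lfs.Pointer{
			{Oid: "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", Size: 1234},
		},
	}

	payload, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// The payload would be POSTed to <endpoint>/objects/batch with the
	// MediaType constant as the Accept and Content-Type header.
	fmt.Println(string(payload))
}
```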
diff --git a/modules/lfs/transferadapter.go b/modules/lfs/transferadapter.go
new file mode 100644
index 0000000000..ea3aff0000
--- /dev/null
+++ b/modules/lfs/transferadapter.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// TransferAdapter represents an adapter for downloading/uploading LFS objects
+type TransferAdapter interface {
+ Name() string
+ Download(ctx context.Context, r *ObjectResponse) (io.ReadCloser, error)
+ //Upload(ctx context.Context, reader io.Reader) error
+}
+
+// BasicTransferAdapter implements the "basic" adapter
+type BasicTransferAdapter struct {
+ client *http.Client
+}
+
+// Name returns the name of the adapter
+func (a *BasicTransferAdapter) Name() string {
+ return "basic"
+}
+
+// Download reads the download location and downloads the data
+func (a *BasicTransferAdapter) Download(ctx context.Context, r *ObjectResponse) (io.ReadCloser, error) {
+ download, ok := r.Actions["download"]
+ if !ok {
+ return nil, errors.New("lfs.BasicTransferAdapter.Download: Action 'download' not found")
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "GET", download.Href, nil)
+ if err != nil {
+ return nil, fmt.Errorf("lfs.BasicTransferAdapter.Download http.NewRequestWithContext: %w", err)
+ }
+ for key, value := range download.Header {
+ req.Header.Set(key, value)
+ }
+
+ res, err := a.client.Do(req)
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return nil, fmt.Errorf("lfs.BasicTransferAdapter.Download http.Do: %w", err)
+ }
+
+ return res.Body, nil
+}
diff --git a/modules/lfs/transferadapter_test.go b/modules/lfs/transferadapter_test.go
new file mode 100644
index 0000000000..0eabd3faee
--- /dev/null
+++ b/modules/lfs/transferadapter_test.go
@@ -0,0 +1,78 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package lfs
+
+import (
+ "bytes"
+ "context"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBasicTransferAdapterName(t *testing.T) {
+ a := &BasicTransferAdapter{}
+
+ assert.Equal(t, "basic", a.Name())
+}
+
+func TestBasicTransferAdapterDownload(t *testing.T) {
+ roundTripHandler := func(req *http.Request) *http.Response {
+ url := req.URL.String()
+ if strings.Contains(url, "valid-download-request") {
+ assert.Equal(t, "GET", req.Method)
+ assert.Equal(t, "test-value", req.Header.Get("test-header"))
+
+ return &http.Response{StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewBufferString("dummy"))}
+ }
+
+ t.Errorf("Unknown test case: %s", url)
+
+ return nil
+ }
+
+ hc := &http.Client{Transport: RoundTripFunc(roundTripHandler)}
+ a := &BasicTransferAdapter{hc}
+
+ var cases = []struct {
+ response *ObjectResponse
+ expectederror string
+ }{
+ // case 0
+ {
+ response: &ObjectResponse{},
+ expectederror: "Action 'download' not found",
+ },
+ // case 1
+ {
+ response: &ObjectResponse{
+ Actions: map[string]*Link{"upload": nil},
+ },
+ expectederror: "Action 'download' not found",
+ },
+ // case 2
+ {
+ response: &ObjectResponse{
+ Actions: map[string]*Link{"download": {
+ Href: "https://valid-download-request.io",
+ Header: map[string]string{"test-header": "test-value"},
+ }},
+ },
+ expectederror: "",
+ },
+ }
+
+ for n, c := range cases {
+ _, err := a.Download(context.Background(), c.response)
+ if len(c.expectederror) > 0 {
+ assert.True(t, strings.Contains(err.Error(), c.expectederror), "case %d: '%s' should contain '%s'", n, err.Error(), c.expectederror)
+ } else {
+ assert.NoError(t, err, "case %d", n)
+ }
+ }
+}
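
Note: `RoundTripFunc` is used by this test but is not part of the diff; it is presumably an existing test helper in the package. A hypothetical definition consistent with how the test uses it:

```go
package lfs

import "net/http"

// RoundTripFunc adapts a plain function to the http.RoundTripper interface so
// tests can stub HTTP traffic without a real server. (Hypothetical sketch: the
// actual helper is assumed to live in a test file not shown in this diff.)
type RoundTripFunc func(req *http.Request) *http.Response

// RoundTrip implements http.RoundTripper by invoking the function itself.
func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req), nil
}
```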
diff --git a/modules/migrations/base/options.go b/modules/migrations/base/options.go
index 168f9848c8..f1d9f81e57 100644
--- a/modules/migrations/base/options.go
+++ b/modules/migrations/base/options.go
@@ -20,6 +20,8 @@ type MigrateOptions struct {
// required: true
RepoName string `json:"repo_name" binding:"Required"`
Mirror bool `json:"mirror"`
+ LFS bool `json:"lfs"`
+ LFSEndpoint string `json:"lfs_endpoint"`
Private bool `json:"private"`
Description string `json:"description"`
OriginalURL string
diff --git a/modules/migrations/gitea_uploader.go b/modules/migrations/gitea_uploader.go
index 02f97c4ff5..bd6084d6a1 100644
--- a/modules/migrations/gitea_uploader.go
+++ b/modules/migrations/gitea_uploader.go
@@ -116,6 +116,8 @@ func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.Migrate
OriginalURL: repo.OriginalURL,
GitServiceType: opts.GitServiceType,
Mirror: repo.IsMirror,
+ LFS: opts.LFS,
+ LFSEndpoint: opts.LFSEndpoint,
CloneAddr: repo.CloneURL,
Private: repo.IsPrivate,
Wiki: opts.Wiki,
diff --git a/modules/migrations/migrate.go b/modules/migrations/migrate.go
index 75fee80a39..2f8889e67b 100644
--- a/modules/migrations/migrate.go
+++ b/modules/migrations/migrate.go
@@ -104,6 +104,12 @@ func MigrateRepository(ctx context.Context, doer *models.User, ownerName string,
if err != nil {
return nil, err
}
+ if opts.LFS && len(opts.LFSEndpoint) > 0 {
+ err := IsMigrateURLAllowed(opts.LFSEndpoint, doer)
+ if err != nil {
+ return nil, err
+ }
+ }
downloader, err := newDownloader(ctx, ownerName, opts)
if err != nil {
return nil, err
diff --git a/modules/repofiles/update.go b/modules/repofiles/update.go
index d25e109b29..ad984c465a 100644
--- a/modules/repofiles/update.go
+++ b/modules/repofiles/update.go
@@ -18,7 +18,6 @@ import (
"code.gitea.io/gitea/modules/log"
repo_module "code.gitea.io/gitea/modules/repository"
"code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/structs"
stdcharset "golang.org/x/net/html/charset"
@@ -70,30 +69,29 @@ func detectEncodingAndBOM(entry *git.TreeEntry, repo *models.Repository) (string
buf = buf[:n]
if setting.LFS.StartServer {
- meta := lfs.IsPointerFile(&buf)
- if meta != nil {
- meta, err = repo.GetLFSMetaObjectByOid(meta.Oid)
+ pointer, _ := lfs.ReadPointerFromBuffer(buf)
+ if pointer.IsValid() {
+ meta, err := repo.GetLFSMetaObjectByOid(pointer.Oid)
if err != nil && err != models.ErrLFSObjectNotExist {
// return default
return "UTF-8", false
}
- }
- if meta != nil {
- dataRc, err := lfs.ReadMetaObject(meta)
- if err != nil {
- // return default
- return "UTF-8", false
+ if meta != nil {
+ dataRc, err := lfs.ReadMetaObject(pointer)
+ if err != nil {
+ // return default
+ return "UTF-8", false
+ }
+ defer dataRc.Close()
+ buf = make([]byte, 1024)
+ n, err = dataRc.Read(buf)
+ if err != nil {
+ // return default
+ return "UTF-8", false
+ }
+ buf = buf[:n]
}
- defer dataRc.Close()
- buf = make([]byte, 1024)
- n, err = dataRc.Read(buf)
- if err != nil {
- // return default
- return "UTF-8", false
- }
- buf = buf[:n]
}
-
}
encoding, err := charset.DetectEncoding(buf)
@@ -387,12 +385,12 @@ func CreateOrUpdateRepoFile(repo *models.Repository, doer *models.User, opts *Up
if filename2attribute2info[treePath] != nil && filename2attribute2info[treePath]["filter"] == "lfs" {
// OK so we are supposed to LFS this data!
- oid, err := models.GenerateLFSOid(strings.NewReader(opts.Content))
+ pointer, err := lfs.GeneratePointer(strings.NewReader(opts.Content))
if err != nil {
return nil, err
}
- lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(opts.Content)), RepositoryID: repo.ID}
- content = lfsMetaObject.Pointer()
+ lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: repo.ID}
+ content = pointer.StringContent()
}
}
// Add the object to the database
@@ -435,13 +433,13 @@ func CreateOrUpdateRepoFile(repo *models.Repository, doer *models.User, opts *Up
if err != nil {
return nil, err
}
- contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
- exist, err := contentStore.Exists(lfsMetaObject)
+ contentStore := lfs.NewContentStore()
+ exist, err := contentStore.Exists(lfsMetaObject.Pointer)
if err != nil {
return nil, err
}
if !exist {
- if err := contentStore.Put(lfsMetaObject, strings.NewReader(opts.Content)); err != nil {
+ if err := contentStore.Put(lfsMetaObject.Pointer, strings.NewReader(opts.Content)); err != nil {
if _, err2 := repo.RemoveLFSMetaObjectByOid(lfsMetaObject.Oid); err2 != nil {
return nil, fmt.Errorf("Error whilst removing failed inserted LFS object %s: %v (Prev Error: %v)", lfsMetaObject.Oid, err2, err)
}
diff --git a/modules/repofiles/upload.go b/modules/repofiles/upload.go
index 8716e1c8f1..e97f55a656 100644
--- a/modules/repofiles/upload.go
+++ b/modules/repofiles/upload.go
@@ -14,7 +14,6 @@ import (
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/storage"
)
// UploadRepoFileOptions contains the uploaded repository file options
@@ -137,7 +136,7 @@ func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRep
// OK now we can insert the data into the store - there's no way to clean up the store
// once it's in there, it's in there.
- contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
+ contentStore := lfs.NewContentStore()
for _, info := range infos {
if err := uploadToLFSContentStore(info, contentStore); err != nil {
return cleanUpAfterFailure(&infos, t, err)
@@ -163,18 +162,14 @@ func copyUploadedLFSFileIntoRepository(info *uploadInfo, filename2attribute2info
if setting.LFS.StartServer && filename2attribute2info[info.upload.Name] != nil && filename2attribute2info[info.upload.Name]["filter"] == "lfs" {
// Handle LFS
// FIXME: Inefficient! this should probably happen in models.Upload
- oid, err := models.GenerateLFSOid(file)
- if err != nil {
- return err
- }
- fileInfo, err := file.Stat()
+ pointer, err := lfs.GeneratePointer(file)
if err != nil {
return err
}
- info.lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: fileInfo.Size(), RepositoryID: t.repo.ID}
+ info.lfsMetaObject = &models.LFSMetaObject{Pointer: pointer, RepositoryID: t.repo.ID}
- if objectHash, err = t.HashObject(strings.NewReader(info.lfsMetaObject.Pointer())); err != nil {
+ if objectHash, err = t.HashObject(strings.NewReader(pointer.StringContent())); err != nil {
return err
}
} else if objectHash, err = t.HashObject(file); err != nil {
@@ -189,7 +184,7 @@ func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) er
if info.lfsMetaObject == nil {
return nil
}
- exist, err := contentStore.Exists(info.lfsMetaObject)
+ exist, err := contentStore.Exists(info.lfsMetaObject.Pointer)
if err != nil {
return err
}
@@ -202,7 +197,7 @@ func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) er
defer file.Close()
// FIXME: Put regenerates the hash and copies the file over.
// I guess this strictly ensures the soundness of the store but this is inefficient.
- if err := contentStore.Put(info.lfsMetaObject, file); err != nil {
+ if err := contentStore.Put(info.lfsMetaObject.Pointer, file); err != nil {
// OK Now we need to cleanup
// Can't clean up the store, once uploaded there they're there.
return err
diff --git a/modules/repository/repo.go b/modules/repository/repo.go
index ede714673a..50eb185daa 100644
--- a/modules/repository/repo.go
+++ b/modules/repository/repo.go
@@ -7,12 +7,14 @@ package repository
import (
"context"
"fmt"
+ "net/url"
"path"
"strings"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
migration "code.gitea.io/gitea/modules/migrations/base"
"code.gitea.io/gitea/modules/setting"
@@ -120,6 +122,13 @@ func MigrateRepositoryGitData(ctx context.Context, u *models.User, repo *models.
log.Error("Failed to synchronize tags to releases for repository: %v", err)
}
}
+
+ if opts.LFS {
+ ep := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
+ if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, ep); err != nil {
+ log.Error("Failed to store missing LFS objects for repository: %v", err)
+ }
+ }
}
if err = repo.UpdateSize(models.DefaultDBContext()); err != nil {
@@ -132,6 +141,10 @@ func MigrateRepositoryGitData(ctx context.Context, u *models.User, repo *models.
Interval: setting.Mirror.DefaultInterval,
EnablePrune: true,
NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
+ LFS: opts.LFS,
+ }
+ if opts.LFS {
+ mirrorModel.LFSEndpoint = opts.LFSEndpoint
}
if opts.MirrorInterval != "" {
@@ -300,3 +313,76 @@ func PushUpdateAddTag(repo *models.Repository, gitRepo *git.Repository, tagName
return models.SaveOrUpdateTag(repo, &rel)
}
+
+// StoreMissingLfsObjectsInRepository downloads missing LFS objects
+func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *models.Repository, gitRepo *git.Repository, endpoint *url.URL) error {
+ client := lfs.NewClient(endpoint)
+ contentStore := lfs.NewContentStore()
+
+ pointerChan := make(chan lfs.PointerBlob)
+ errChan := make(chan error, 1)
+ go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)
+
+ err := func() error {
+ for pointerBlob := range pointerChan {
+ meta, err := models.NewLFSMetaObject(&models.LFSMetaObject{Pointer: pointerBlob.Pointer, RepositoryID: repo.ID})
+ if err != nil {
+ return fmt.Errorf("StoreMissingLfsObjectsInRepository models.NewLFSMetaObject: %w", err)
+ }
+ if meta.Existing {
+ continue
+ }
+
+ log.Trace("StoreMissingLfsObjectsInRepository: LFS OID[%s] not present in repository %s", pointerBlob.Oid, repo.FullName())
+
+ err = func() error {
+ exist, err := contentStore.Exists(pointerBlob.Pointer)
+ if err != nil {
+ return fmt.Errorf("StoreMissingLfsObjectsInRepository contentStore.Exists: %w", err)
+ }
+ if !exist {
+ if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
+ log.Info("LFS OID[%s] download denied because of LFS_MAX_FILE_SIZE=%d < size %d", pointerBlob.Oid, setting.LFS.MaxFileSize, pointerBlob.Size)
+ return nil
+ }
+
+ stream, err := client.Download(ctx, pointerBlob.Oid, pointerBlob.Size)
+ if err != nil {
+ return fmt.Errorf("StoreMissingLfsObjectsInRepository: LFS OID[%s] failed to download: %w", pointerBlob.Oid, err)
+ }
+ defer stream.Close()
+
+ if err := contentStore.Put(pointerBlob.Pointer, stream); err != nil {
+ return fmt.Errorf("StoreMissingLfsObjectsInRepository LFS OID[%s] contentStore.Put: %w", pointerBlob.Oid, err)
+ }
+ } else {
+ log.Trace("StoreMissingLfsObjectsInRepository: LFS OID[%s] already present in content store", pointerBlob.Oid)
+ }
+ return nil
+ }()
+ if err != nil {
+ if _, err2 := repo.RemoveLFSMetaObjectByOid(meta.Oid); err2 != nil {
+ log.Error("StoreMissingLfsObjectsInRepository RemoveLFSMetaObjectByOid[Oid: %s]: %w", meta.Oid, err2)
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+ return err
+ }
+ }
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+
+ err, has := <-errChan
+ if has {
+ return err
+ }
+
+ return nil
+}
diff --git a/modules/structs/repo.go b/modules/structs/repo.go
index c23bd1033f..4fdc1e54cb 100644
--- a/modules/structs/repo.go
+++ b/modules/structs/repo.go
@@ -260,6 +260,8 @@ type MigrateRepoOptions struct {
AuthToken string `json:"auth_token"`
Mirror bool `json:"mirror"`
+ LFS bool `json:"lfs"`
+ LFSEndpoint string `json:"lfs_endpoint"`
Private bool `json:"private"`
Description string `json:"description" binding:"MaxSize(255)"`
Wiki bool `json:"wiki"`
diff --git a/modules/util/path.go b/modules/util/path.go
index aa3d009899..2ac8f4d80a 100644
--- a/modules/util/path.go
+++ b/modules/util/path.go
@@ -6,9 +6,12 @@ package util
import (
"errors"
+ "net/url"
"os"
"path"
"path/filepath"
+ "regexp"
+ "runtime"
"strings"
)
@@ -150,3 +153,23 @@ func StatDir(rootPath string, includeDir ...bool) ([]string, error) {
}
return statDir(rootPath, "", isIncludeDir, false, false)
}
+
+// FileURLToPath extracts the path information from a file://... URL.
+func FileURLToPath(u *url.URL) (string, error) {
+ if u.Scheme != "file" {
+ return "", errors.New("URL scheme is not 'file': " + u.String())
+ }
+
+ path := u.Path
+
+ if runtime.GOOS != "windows" {
+ return path, nil
+ }
+
+ // If it looks like there's a Windows drive letter at the beginning, strip off the leading slash.
+ re := regexp.MustCompile("/[A-Za-z]:/")
+ if re.MatchString(path) {
+ return path[1:], nil
+ }
+ return path, nil
+}
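
Note: a brief usage sketch for the new `FileURLToPath` helper, assuming nothing beyond the function above; the example URL is a placeholder, and the drive-letter handling matches the Windows case in the test that follows:

```go
package main

import (
	"fmt"
	"net/url"

	"code.gitea.io/gitea/modules/util"
)

func main() {
	// On non-Windows systems the path is returned as-is; on Windows the
	// leading slash before a drive letter is stripped.
	u, err := url.Parse("file:///C:/repos/lfs-test.git")
	if err != nil {
		panic(err)
	}
	p, err := util.FileURLToPath(u)
	if err != nil {
		fmt.Println("not a file:// URL:", err)
		return
	}
	fmt.Println(p)
}
```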
diff --git a/modules/util/path_test.go b/modules/util/path_test.go
new file mode 100644
index 0000000000..41104f79fc
--- /dev/null
+++ b/modules/util/path_test.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "net/url"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFileURLToPath(t *testing.T) {
+ var cases = []struct {
+ url string
+ expected string
+ haserror bool
+ windows bool
+ }{
+ // case 0
+ {
+ url: "",
+ haserror: true,
+ },
+ // case 1
+ {
+ url: "http://test.io",
+ haserror: true,
+ },
+ // case 2
+ {
+ url: "file:///path",
+ expected: "/path",
+ },
+ // case 3
+ {
+ url: "file:///C:/path",
+ expected: "C:/path",
+ windows: true,
+ },
+ }
+
+ for n, c := range cases {
+ if c.windows && runtime.GOOS != "windows" {
+ continue
+ }
+ u, _ := url.Parse(c.url)
+ p, err := FileURLToPath(u)
+ if c.haserror {
+ assert.Error(t, err, "case %d: should return error", n)
+ } else {
+ assert.NoError(t, err, "case %d: should not return error", n)
+ assert.Equal(t, c.expected, p, "case %d: should be equal", n)
+ }
+ }
+}
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index c481414afb..a23fdd78d4 100644
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -726,6 +726,10 @@ mirror_address = Clone From URL
mirror_address_desc = Put any required credentials in the Clone Authorization section.
mirror_address_url_invalid = The provided url is invalid. You must escape all components of the url correctly.
mirror_address_protocol_invalid = The provided url is invalid. Only http(s):// or git:// locations can be mirrored from.
+mirror_lfs = Large File Storage (LFS)
+mirror_lfs_desc = Activate mirroring of LFS data.
+mirror_lfs_endpoint = LFS Endpoint
+mirror_lfs_endpoint_desc = Sync will attempt to use the clone URL to determine the LFS server. You can also specify a custom endpoint if the repository LFS data is stored somewhere else.
mirror_last_synced = Last Synchronized
watchers = Watchers
stargazers = Stargazers
@@ -784,6 +788,11 @@ migrate_options = Migration Options
migrate_service = Migration Service
migrate_options_mirror_helper = This repository will be a mirror
migrate_options_mirror_disabled = Your site administrator has disabled new mirrors.
+migrate_options_lfs = Migrate LFS files
+migrate_options_lfs_endpoint.label = LFS Endpoint
+migrate_options_lfs_endpoint.description = Migration will attempt to use your Git remote to determine the LFS server. You can also specify a custom endpoint if the repository LFS data is stored somewhere else.
+migrate_options_lfs_endpoint.description.local = A local server path is supported too.
+migrate_options_lfs_endpoint.placeholder = Leave blank to derive from clone URL
migrate_items = Migration Items
migrate_items_wiki = Wiki
migrate_items_milestones = Milestones
@@ -800,8 +809,8 @@ migrate.permission_denied = You are not allowed to import local repositories.
migrate.permission_denied_blocked = You are not allowed to import from blocked hosts.
migrate.permission_denied_private_ip = You are not allowed to import from private IPs.
migrate.invalid_local_path = "The local path is invalid. It does not exist or is not a directory."
+migrate.invalid_lfs_endpoint = The LFS endpoint is not valid.
migrate.failed = Migration failed: %v
-migrate.lfs_mirror_unsupported = Mirroring LFS objects is not supported - use 'git lfs fetch --all' and 'git lfs push --all' instead.
migrate.migrate_items_options = Access Token is required to migrate additional items
migrated_from = Migrated from %[2]s
migrated_from_fake = Migrated From %[1]s
diff --git a/routers/api/v1/repo/migrate.go b/routers/api/v1/repo/migrate.go
index 1c36e0cc79..edae358338 100644
--- a/routers/api/v1/repo/migrate.go
+++ b/routers/api/v1/repo/migrate.go
@@ -15,6 +15,7 @@ import (
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/convert"
"code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migrations"
"code.gitea.io/gitea/modules/migrations/base"
@@ -101,27 +102,7 @@ func Migrate(ctx *context.APIContext) {
err = migrations.IsMigrateURLAllowed(remoteAddr, ctx.User)
}
if err != nil {
- if models.IsErrInvalidCloneAddr(err) {
- addrErr := err.(*models.ErrInvalidCloneAddr)
- switch {
- case addrErr.IsURLError:
- ctx.Error(http.StatusUnprocessableEntity, "", err)
- case addrErr.IsPermissionDenied:
- if addrErr.LocalPath {
- ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import local repositories.")
- } else if len(addrErr.PrivateNet) == 0 {
- ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from blocked hosts.")
- } else {
- ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from private IPs.")
- }
- case addrErr.IsInvalidPath:
- ctx.Error(http.StatusUnprocessableEntity, "", "Invalid local path, it does not exist or not a directory.")
- default:
- ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", "Unknown error type (ErrInvalidCloneAddr): "+err.Error())
- }
- } else {
- ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", err)
- }
+ handleRemoteAddrError(ctx, err)
return
}
@@ -137,12 +118,29 @@ func Migrate(ctx *context.APIContext) {
return
}
+ form.LFS = form.LFS && setting.LFS.StartServer
+
+ if form.LFS && len(form.LFSEndpoint) > 0 {
+ ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+ if ep == nil {
+ ctx.Error(http.StatusInternalServerError, "", ctx.Tr("repo.migrate.invalid_lfs_endpoint"))
+ return
+ }
+ err = migrations.IsMigrateURLAllowed(ep.String(), ctx.User)
+ if err != nil {
+ handleRemoteAddrError(ctx, err)
+ return
+ }
+ }
+
var opts = migrations.MigrateOptions{
CloneAddr: remoteAddr,
RepoName: form.RepoName,
Description: form.Description,
Private: form.Private || setting.Repository.ForcePrivate,
Mirror: form.Mirror,
+ LFS: form.LFS,
+ LFSEndpoint: form.LFSEndpoint,
AuthUsername: form.AuthUsername,
AuthPassword: form.AuthPassword,
AuthToken: form.AuthToken,
@@ -245,3 +243,27 @@ func handleMigrateError(ctx *context.APIContext, repoOwner *models.User, remoteA
}
}
}
+
+func handleRemoteAddrError(ctx *context.APIContext, err error) {
+ if models.IsErrInvalidCloneAddr(err) {
+ addrErr := err.(*models.ErrInvalidCloneAddr)
+ switch {
+ case addrErr.IsURLError:
+ ctx.Error(http.StatusUnprocessableEntity, "", err)
+ case addrErr.IsPermissionDenied:
+ if addrErr.LocalPath {
+ ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import local repositories.")
+ } else if len(addrErr.PrivateNet) == 0 {
+ ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from blocked hosts.")
+ } else {
+ ctx.Error(http.StatusUnprocessableEntity, "", "You are not allowed to import from private IPs.")
+ }
+ case addrErr.IsInvalidPath:
+ ctx.Error(http.StatusUnprocessableEntity, "", "Invalid local path, it does not exist or is not a directory.")
+ default:
+ ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", "Unknown error type (ErrInvalidCloneAddr): "+err.Error())
+ }
+ } else {
+ ctx.Error(http.StatusInternalServerError, "ParseRemoteAddr", err)
+ }
+}
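
Note: a hedged sketch of how an API client might exercise the new `LFS`/`LFSEndpoint` fields of `MigrateRepoOptions`; host, token, and repository values are placeholders, and additional fields (e.g. `RepoOwnerID`) would normally be set as well:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	api "code.gitea.io/gitea/modules/structs"
)

func main() {
	opts := api.MigrateRepoOptions{
		CloneAddr:   "https://example.com/source/repo.git",
		RepoName:    "repo-with-lfs",
		Mirror:      true,
		LFS:         true,
		LFSEndpoint: "", // empty: the server derives the endpoint from the clone URL
	}

	body, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}

	// Placeholder host and token.
	resp, err := http.Post("https://gitea.example.com/api/v1/repos/migrate?token=TOKEN",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```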
diff --git a/routers/repo/download.go b/routers/repo/download.go
index 50f893690b..63a9ca47d7 100644
--- a/routers/repo/download.go
+++ b/routers/repo/download.go
@@ -96,12 +96,13 @@ func ServeBlobOrLFS(ctx *context.Context, blob *git.Blob) error {
}
}()
- if meta, _ := lfs.ReadPointerFile(dataRc); meta != nil {
- meta, _ = ctx.Repo.Repository.GetLFSMetaObjectByOid(meta.Oid)
+ pointer, _ := lfs.ReadPointer(dataRc)
+ if pointer.IsValid() {
+ meta, _ := ctx.Repo.Repository.GetLFSMetaObjectByOid(pointer.Oid)
if meta == nil {
return ServeBlob(ctx, blob)
}
- lfsDataRc, err := lfs.ReadMetaObject(meta)
+ lfsDataRc, err := lfs.ReadMetaObject(meta.Pointer)
if err != nil {
return err
}
diff --git a/routers/repo/lfs.go b/routers/repo/lfs.go
index 07d36d67ec..457ffb6aba 100644
--- a/routers/repo/lfs.go
+++ b/routers/repo/lfs.go
@@ -5,7 +5,6 @@
package repo
import (
- "bufio"
"bytes"
"fmt"
gotemplate "html/template"
@@ -15,7 +14,6 @@ import (
"path"
"strconv"
"strings"
- "sync"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
@@ -266,7 +264,7 @@ func LFSFileGet(ctx *context.Context) {
return
}
ctx.Data["LFSFile"] = meta
- dataRc, err := lfs.ReadMetaObject(meta)
+ dataRc, err := lfs.ReadMetaObject(meta.Pointer)
if err != nil {
ctx.ServerError("LFSFileGet", err)
return
@@ -385,9 +383,8 @@ func LFSFileFind(ctx *context.Context) {
ctx.Data["PageIsSettingsLFS"] = true
var hash git.SHA1
if len(sha) == 0 {
- meta := models.LFSMetaObject{Oid: oid, Size: size}
- pointer := meta.Pointer()
- hash = git.ComputeBlobHash([]byte(pointer))
+ pointer := lfs.Pointer{Oid: oid, Size: size}
+ hash = git.ComputeBlobHash([]byte(pointer.StringContent()))
sha = hash.String()
} else {
hash = git.MustIDFromString(sha)
@@ -421,158 +418,99 @@ func LFSPointerFiles(ctx *context.Context) {
}
ctx.Data["LFSFilesLink"] = ctx.Repo.RepoLink + "/settings/lfs"
- basePath := ctx.Repo.Repository.RepoPath()
+ err = func() error {
+ pointerChan := make(chan lfs.PointerBlob)
+ errChan := make(chan error, 1)
+ go lfs.SearchPointerBlobs(ctx.Req.Context(), ctx.Repo.GitRepo, pointerChan, errChan)
- pointerChan := make(chan pointerResult)
+ numPointers := 0
+ var numAssociated, numNoExist, numAssociatable int
- catFileCheckReader, catFileCheckWriter := io.Pipe()
- shasToBatchReader, shasToBatchWriter := io.Pipe()
- catFileBatchReader, catFileBatchWriter := io.Pipe()
- errChan := make(chan error, 1)
- wg := sync.WaitGroup{}
- wg.Add(5)
+ type pointerResult struct {
+ SHA string
+ Oid string
+ Size int64
+ InRepo bool
+ Exists bool
+ Accessible bool
+ }
- var numPointers, numAssociated, numNoExist, numAssociatable int
+ results := []pointerResult{}
- go func() {
- defer wg.Done()
- pointers := make([]pointerResult, 0, 50)
- for pointer := range pointerChan {
- pointers = append(pointers, pointer)
- if pointer.InRepo {
+ contentStore := lfs.NewContentStore()
+ repo := ctx.Repo.Repository
+
+ for pointerBlob := range pointerChan {
+ numPointers++
+
+ result := pointerResult{
+ SHA: pointerBlob.Hash,
+ Oid: pointerBlob.Oid,
+ Size: pointerBlob.Size,
+ }
+
+ if _, err := repo.GetLFSMetaObjectByOid(pointerBlob.Oid); err != nil {
+ if err != models.ErrLFSObjectNotExist {
+ return err
+ }
+ } else {
+ result.InRepo = true
+ }
+
+ result.Exists, err = contentStore.Exists(pointerBlob.Pointer)
+ if err != nil {
+ return err
+ }
+
+ if result.Exists {
+ if !result.InRepo {
+ // Can we fix?
+ // OK well that's "simple"
+ // - we need to check whether current user has access to a repo that has access to the file
+ result.Accessible, err = models.LFSObjectAccessible(ctx.User, pointerBlob.Oid)
+ if err != nil {
+ return err
+ }
+ } else {
+ result.Accessible = true
+ }
+ }
+
+ if result.InRepo {
numAssociated++
}
- if !pointer.Exists {
+ if !result.Exists {
numNoExist++
}
- if !pointer.InRepo && pointer.Accessible {
+ if !result.InRepo && result.Accessible {
numAssociatable++
}
+
+ results = append(results, result)
}
- numPointers = len(pointers)
- ctx.Data["Pointers"] = pointers
+
+ err, has := <-errChan
+ if has {
+ return err
+ }
+
+ ctx.Data["Pointers"] = results
ctx.Data["NumPointers"] = numPointers
ctx.Data["NumAssociated"] = numAssociated
ctx.Data["NumAssociatable"] = numAssociatable
ctx.Data["NumNoExist"] = numNoExist
ctx.Data["NumNotAssociated"] = numPointers - numAssociated
+
+ return nil
}()
- go createPointerResultsFromCatFileBatch(catFileBatchReader, &wg, pointerChan, ctx.Repo.Repository, ctx.User)
- go pipeline.CatFileBatch(shasToBatchReader, catFileBatchWriter, &wg, basePath)
- go pipeline.BlobsLessThan1024FromCatFileBatchCheck(catFileCheckReader, shasToBatchWriter, &wg)
- if git.CheckGitVersionAtLeast("2.6.0") != nil {
- revListReader, revListWriter := io.Pipe()
- shasToCheckReader, shasToCheckWriter := io.Pipe()
- wg.Add(2)
- go pipeline.CatFileBatchCheck(shasToCheckReader, catFileCheckWriter, &wg, basePath)
- go pipeline.BlobsFromRevListObjects(revListReader, shasToCheckWriter, &wg)
- go pipeline.RevListAllObjects(revListWriter, &wg, basePath, errChan)
- } else {
- go pipeline.CatFileBatchCheckAllObjects(catFileCheckWriter, &wg, basePath, errChan)
+ if err != nil {
+ ctx.ServerError("LFSPointerFiles", err)
+ return
}
- wg.Wait()
- select {
- case err, has := <-errChan:
- if has {
- ctx.ServerError("LFSPointerFiles", err)
- }
- default:
- }
ctx.HTML(http.StatusOK, tplSettingsLFSPointers)
}
-type pointerResult struct {
- SHA string
- Oid string
- Size int64
- InRepo bool
- Exists bool
- Accessible bool
-}
-
-func createPointerResultsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg *sync.WaitGroup, pointerChan chan<- pointerResult, repo *models.Repository, user *models.User) {
- defer wg.Done()
- defer catFileBatchReader.Close()
- contentStore := lfs.ContentStore{ObjectStorage: storage.LFS}
-
- bufferedReader := bufio.NewReader(catFileBatchReader)
- buf := make([]byte, 1025)
- for {
- // File descriptor line: sha
- sha, err := bufferedReader.ReadString(' ')
- if err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- // Throw away the blob
- if _, err := bufferedReader.ReadString(' '); err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- sizeStr, err := bufferedReader.ReadString('\n')
- if err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- size, err := strconv.Atoi(sizeStr[:len(sizeStr)-1])
- if err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- pointerBuf := buf[:size+1]
- if _, err := io.ReadFull(bufferedReader, pointerBuf); err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- pointerBuf = pointerBuf[:size]
- // Now we need to check if the pointerBuf is an LFS pointer
- pointer := lfs.IsPointerFile(&pointerBuf)
- if pointer == nil {
- continue
- }
-
- result := pointerResult{
- SHA: strings.TrimSpace(sha),
- Oid: pointer.Oid,
- Size: pointer.Size,
- }
-
- // Then we need to check that this pointer is in the db
- if _, err := repo.GetLFSMetaObjectByOid(pointer.Oid); err != nil {
- if err != models.ErrLFSObjectNotExist {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- } else {
- result.InRepo = true
- }
-
- result.Exists, err = contentStore.Exists(pointer)
- if err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
-
- if result.Exists {
- if !result.InRepo {
- // Can we fix?
- // OK well that's "simple"
- // - we need to check whether current user has access to a repo that has access to the file
- result.Accessible, err = models.LFSObjectAccessible(user, result.Oid)
- if err != nil {
- _ = catFileBatchReader.CloseWithError(err)
- break
- }
- } else {
- result.Accessible = true
- }
- }
- pointerChan <- result
- }
- close(pointerChan)
-}
-
// LFSAutoAssociate auto associates accessible lfs files
func LFSAutoAssociate(ctx *context.Context) {
if !setting.LFS.StartServer {
diff --git a/routers/repo/migrate.go b/routers/repo/migrate.go
index 8da37b5ec9..231b9aedf9 100644
--- a/routers/repo/migrate.go
+++ b/routers/repo/migrate.go
@@ -12,6 +12,7 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migrations"
"code.gitea.io/gitea/modules/setting"
@@ -47,6 +48,7 @@ func Migrate(ctx *context.Context) {
ctx.Data["private"] = getRepoPrivate(ctx)
ctx.Data["mirror"] = ctx.Query("mirror") == "1"
+ ctx.Data["lfs"] = ctx.Query("lfs") == "1"
ctx.Data["wiki"] = ctx.Query("wiki") == "1"
ctx.Data["milestones"] = ctx.Query("milestones") == "1"
ctx.Data["labels"] = ctx.Query("labels") == "1"
@@ -114,6 +116,34 @@ func handleMigrateError(ctx *context.Context, owner *models.User, err error, nam
}
}
+func handleMigrateRemoteAddrError(ctx *context.Context, err error, tpl base.TplName, form *forms.MigrateRepoForm) {
+ if models.IsErrInvalidCloneAddr(err) {
+ addrErr := err.(*models.ErrInvalidCloneAddr)
+ switch {
+ case addrErr.IsProtocolInvalid:
+ ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tpl, form)
+ case addrErr.IsURLError:
+ ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, form)
+ case addrErr.IsPermissionDenied:
+ if addrErr.LocalPath {
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tpl, form)
+ } else if len(addrErr.PrivateNet) == 0 {
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tpl, form)
+ } else {
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tpl, form)
+ }
+ case addrErr.IsInvalidPath:
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tpl, form)
+ default:
+ log.Error("Error whilst updating url: %v", err)
+ ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, form)
+ }
+ } else {
+ log.Error("Error whilst updating url: %v", err)
+ ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, form)
+ }
+}
+
// MigratePost response for migrating from external git repository
func MigratePost(ctx *context.Context) {
form := web.GetForm(ctx).(*forms.MigrateRepoForm)
@@ -144,35 +174,28 @@ func MigratePost(ctx *context.Context) {
err = migrations.IsMigrateURLAllowed(remoteAddr, ctx.User)
}
if err != nil {
- if models.IsErrInvalidCloneAddr(err) {
- ctx.Data["Err_CloneAddr"] = true
- addrErr := err.(*models.ErrInvalidCloneAddr)
- switch {
- case addrErr.IsProtocolInvalid:
- ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tpl, &form)
- case addrErr.IsURLError:
- ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, &form)
- case addrErr.IsPermissionDenied:
- if addrErr.LocalPath {
- ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tpl, &form)
- } else if len(addrErr.PrivateNet) == 0 {
- ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tpl, &form)
- } else {
- ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tpl, &form)
- }
- case addrErr.IsInvalidPath:
- ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tpl, &form)
- default:
- log.Error("Error whilst updating url: %v", err)
- ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, &form)
- }
- } else {
- log.Error("Error whilst updating url: %v", err)
- ctx.RenderWithErr(ctx.Tr("form.url_error"), tpl, &form)
- }
+ ctx.Data["Err_CloneAddr"] = true
+ handleMigrateRemoteAddrError(ctx, err, tpl, form)
return
}
+ form.LFS = form.LFS && setting.LFS.StartServer
+
+ if form.LFS && len(form.LFSEndpoint) > 0 {
+ ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+ if ep == nil {
+ ctx.Data["Err_LFSEndpoint"] = true
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_lfs_endpoint"), tpl, &form)
+ return
+ }
+ err = migrations.IsMigrateURLAllowed(ep.String(), ctx.User)
+ if err != nil {
+ ctx.Data["Err_LFSEndpoint"] = true
+ handleMigrateRemoteAddrError(ctx, err, tpl, form)
+ return
+ }
+ }
+
var opts = migrations.MigrateOptions{
OriginalURL: form.CloneAddr,
GitServiceType: serviceType,
@@ -181,6 +204,8 @@ func MigratePost(ctx *context.Context) {
Description: form.Description,
Private: form.Private || setting.Repository.ForcePrivate,
Mirror: form.Mirror && !setting.Repository.DisableMirrors,
+ LFS: form.LFS,
+ LFSEndpoint: form.LFSEndpoint,
AuthUsername: form.AuthUsername,
AuthPassword: form.AuthPassword,
AuthToken: form.AuthToken,
diff --git a/routers/repo/setting.go b/routers/repo/setting.go
index ed6ff6e2b3..533adcbdf6 100644
--- a/routers/repo/setting.go
+++ b/routers/repo/setting.go
@@ -17,6 +17,7 @@ import (
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migrations"
"code.gitea.io/gitea/modules/repository"
@@ -170,30 +171,8 @@ func SettingsPost(ctx *context.Context) {
err = migrations.IsMigrateURLAllowed(address, ctx.User)
}
if err != nil {
- if models.IsErrInvalidCloneAddr(err) {
- ctx.Data["Err_MirrorAddress"] = true
- addrErr := err.(*models.ErrInvalidCloneAddr)
- switch {
- case addrErr.IsProtocolInvalid:
- ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tplSettingsOptions, &form)
- case addrErr.IsURLError:
- ctx.RenderWithErr(ctx.Tr("form.url_error"), tplSettingsOptions, &form)
- case addrErr.IsPermissionDenied:
- if addrErr.LocalPath {
- ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tplSettingsOptions, &form)
- } else if len(addrErr.PrivateNet) == 0 {
- ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tplSettingsOptions, &form)
- } else {
- ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tplSettingsOptions, &form)
- }
- case addrErr.IsInvalidPath:
- ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tplSettingsOptions, &form)
- default:
- ctx.ServerError("Unknown error", err)
- }
- }
ctx.Data["Err_MirrorAddress"] = true
- ctx.RenderWithErr(ctx.Tr("repo.mirror_address_url_invalid"), tplSettingsOptions, &form)
+ handleSettingRemoteAddrError(ctx, err, form)
return
}
@@ -202,6 +181,30 @@ func SettingsPost(ctx *context.Context) {
return
}
+ form.LFS = form.LFS && setting.LFS.StartServer
+
+ if len(form.LFSEndpoint) > 0 {
+ ep := lfs.DetermineEndpoint("", form.LFSEndpoint)
+ if ep == nil {
+ ctx.Data["Err_LFSEndpoint"] = true
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_lfs_endpoint"), tplSettingsOptions, &form)
+ return
+ }
+ err = migrations.IsMigrateURLAllowed(ep.String(), ctx.User)
+ if err != nil {
+ ctx.Data["Err_LFSEndpoint"] = true
+ handleSettingRemoteAddrError(ctx, err, form)
+ return
+ }
+ }
+
+ ctx.Repo.Mirror.LFS = form.LFS
+ ctx.Repo.Mirror.LFSEndpoint = form.LFSEndpoint
+ if err := models.UpdateMirror(ctx.Repo.Mirror); err != nil {
+ ctx.ServerError("UpdateMirror", err)
+ return
+ }
+
ctx.Flash.Success(ctx.Tr("repo.settings.update_settings_success"))
ctx.Redirect(repo.Link() + "/settings")
@@ -615,6 +618,31 @@ func SettingsPost(ctx *context.Context) {
}
}
+func handleSettingRemoteAddrError(ctx *context.Context, err error, form *forms.RepoSettingForm) {
+ if models.IsErrInvalidCloneAddr(err) {
+ addrErr := err.(*models.ErrInvalidCloneAddr)
+ switch {
+ case addrErr.IsProtocolInvalid:
+ ctx.RenderWithErr(ctx.Tr("repo.mirror_address_protocol_invalid"), tplSettingsOptions, form)
+ case addrErr.IsURLError:
+ ctx.RenderWithErr(ctx.Tr("form.url_error"), tplSettingsOptions, form)
+ case addrErr.IsPermissionDenied:
+ if addrErr.LocalPath {
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied"), tplSettingsOptions, form)
+ } else if len(addrErr.PrivateNet) == 0 {
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_blocked"), tplSettingsOptions, form)
+ } else {
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.permission_denied_private_ip"), tplSettingsOptions, form)
+ }
+ case addrErr.IsInvalidPath:
+ ctx.RenderWithErr(ctx.Tr("repo.migrate.invalid_local_path"), tplSettingsOptions, form)
+ default:
+ ctx.ServerError("Unknown error", err)
+ }
+ return
+ }
+ ctx.RenderWithErr(ctx.Tr("repo.mirror_address_url_invalid"), tplSettingsOptions, form)
+}
+
// Collaboration render a repository's collaboration page
func Collaboration(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings")
diff --git a/routers/repo/view.go b/routers/repo/view.go
index 568d9ec6be..a03fd58c8a 100644
--- a/routers/repo/view.go
+++ b/routers/repo/view.go
@@ -274,43 +274,42 @@ func renderDirectory(ctx *context.Context, treeLink string) {
// FIXME: what happens when README file is an image?
if isTextFile && setting.LFS.StartServer {
- meta := lfs.IsPointerFile(&buf)
- if meta != nil {
- meta, err = ctx.Repo.Repository.GetLFSMetaObjectByOid(meta.Oid)
+ pointer, _ := lfs.ReadPointerFromBuffer(buf)
+ if pointer.IsValid() {
+ meta, err := ctx.Repo.Repository.GetLFSMetaObjectByOid(pointer.Oid)
if err != nil && err != models.ErrLFSObjectNotExist {
ctx.ServerError("GetLFSMetaObject", err)
return
}
- }
+ if meta != nil {
+ ctx.Data["IsLFSFile"] = true
+ isLFSFile = true
- if meta != nil {
- ctx.Data["IsLFSFile"] = true
- isLFSFile = true
+ // OK read the lfs object
+ var err error
+ dataRc, err = lfs.ReadMetaObject(pointer)
+ if err != nil {
+ ctx.ServerError("ReadMetaObject", err)
+ return
+ }
+ defer dataRc.Close()
- // OK read the lfs object
- var err error
- dataRc, err = lfs.ReadMetaObject(meta)
- if err != nil {
- ctx.ServerError("ReadMetaObject", err)
- return
+ buf = make([]byte, 1024)
+ n, err = dataRc.Read(buf)
+ if err != nil {
+ ctx.ServerError("Data", err)
+ return
+ }
+ buf = buf[:n]
+
+ isTextFile = base.IsTextFile(buf)
+ ctx.Data["IsTextFile"] = isTextFile
+
+ fileSize = meta.Size
+ ctx.Data["FileSize"] = meta.Size
+ filenameBase64 := base64.RawURLEncoding.EncodeToString([]byte(readmeFile.name))
+ ctx.Data["RawFileLink"] = fmt.Sprintf("%s%s.git/info/lfs/objects/%s/%s", setting.AppURL, ctx.Repo.Repository.FullName(), meta.Oid, filenameBase64)
}
- defer dataRc.Close()
-
- buf = make([]byte, 1024)
- n, err = dataRc.Read(buf)
- if err != nil {
- ctx.ServerError("Data", err)
- return
- }
- buf = buf[:n]
-
- isTextFile = base.IsTextFile(buf)
- ctx.Data["IsTextFile"] = isTextFile
-
- fileSize = meta.Size
- ctx.Data["FileSize"] = meta.Size
- filenameBase64 := base64.RawURLEncoding.EncodeToString([]byte(readmeFile.name))
- ctx.Data["RawFileLink"] = fmt.Sprintf("%s%s.git/info/lfs/objects/%s/%s", setting.AppURL, ctx.Repo.Repository.FullName(), meta.Oid, filenameBase64)
}
}
@@ -400,39 +399,39 @@ func renderFile(ctx *context.Context, entry *git.TreeEntry, treeLink, rawLink st
//Check for LFS meta file
if isTextFile && setting.LFS.StartServer {
- meta := lfs.IsPointerFile(&buf)
- if meta != nil {
- meta, err = ctx.Repo.Repository.GetLFSMetaObjectByOid(meta.Oid)
+ pointer, _ := lfs.ReadPointerFromBuffer(buf)
+ if pointer.IsValid() {
+ meta, err := ctx.Repo.Repository.GetLFSMetaObjectByOid(pointer.Oid)
if err != nil && err != models.ErrLFSObjectNotExist {
ctx.ServerError("GetLFSMetaObject", err)
return
}
- }
- if meta != nil {
- isLFSFile = true
+ if meta != nil {
+ isLFSFile = true
- // OK read the lfs object
- var err error
- dataRc, err = lfs.ReadMetaObject(meta)
- if err != nil {
- ctx.ServerError("ReadMetaObject", err)
- return
+ // OK read the lfs object
+ var err error
+ dataRc, err = lfs.ReadMetaObject(pointer)
+ if err != nil {
+ ctx.ServerError("ReadMetaObject", err)
+ return
+ }
+ defer dataRc.Close()
+
+ buf = make([]byte, 1024)
+ n, err = dataRc.Read(buf)
+ // An EOF error doesn't mean something went wrong, it just means we read to
+ // the end
+ if err != nil && err != io.EOF {
+ ctx.ServerError("Data", err)
+ return
+ }
+ buf = buf[:n]
+
+ isTextFile = base.IsTextFile(buf)
+ fileSize = meta.Size
+ ctx.Data["RawFileLink"] = fmt.Sprintf("%s/media/%s/%s", ctx.Repo.RepoLink, ctx.Repo.BranchNameSubURL(), ctx.Repo.TreePath)
}
- defer dataRc.Close()
-
- buf = make([]byte, 1024)
- n, err = dataRc.Read(buf)
- // Error EOF don't mean there is an error, it just means we read to
- // the end
- if err != nil && err != io.EOF {
- ctx.ServerError("Data", err)
- return
- }
- buf = buf[:n]
-
- isTextFile = base.IsTextFile(buf)
- fileSize = meta.Size
- ctx.Data["RawFileLink"] = fmt.Sprintf("%s/media/%s/%s", ctx.Repo.RepoLink, ctx.Repo.BranchNameSubURL(), ctx.Repo.TreePath)
}
}
diff --git a/routers/routes/web.go b/routers/routes/web.go
index 8131c4fc87..b2a75acd09 100644
--- a/routers/routes/web.go
+++ b/routers/routes/web.go
@@ -16,7 +16,6 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/httpcache"
- "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/metrics"
"code.gitea.io/gitea/modules/public"
@@ -38,6 +37,7 @@ import (
"code.gitea.io/gitea/routers/user"
userSetting "code.gitea.io/gitea/routers/user/setting"
"code.gitea.io/gitea/services/forms"
+ "code.gitea.io/gitea/services/lfs"
"code.gitea.io/gitea/services/mailer"
// to registers all internal adapters
diff --git a/services/forms/repo_form.go b/services/forms/repo_form.go
index d9eb06d194..55d1f6e3bc 100644
--- a/services/forms/repo_form.go
+++ b/services/forms/repo_form.go
@@ -71,6 +71,8 @@ type MigrateRepoForm struct {
// required: true
RepoName string `json:"repo_name" binding:"Required;AlphaDashDot;MaxSize(100)"`
Mirror bool `json:"mirror"`
+ LFS bool `json:"lfs"`
+ LFSEndpoint string `json:"lfs_endpoint"`
Private bool `json:"private"`
Description string `json:"description" binding:"MaxSize(255)"`
Wiki bool `json:"wiki"`
@@ -118,6 +120,8 @@ type RepoSettingForm struct {
MirrorAddress string
MirrorUsername string
MirrorPassword string
+ LFS bool `form:"mirror_lfs"`
+ LFSEndpoint string `form:"mirror_lfs_endpoint"`
Private bool
Template bool
EnablePrune bool
diff --git a/services/gitdiff/gitdiff.go b/services/gitdiff/gitdiff.go
index 18d56c174a..2ca6bd957e 100644
--- a/services/gitdiff/gitdiff.go
+++ b/services/gitdiff/gitdiff.go
@@ -25,6 +25,7 @@ import (
"code.gitea.io/gitea/modules/charset"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/highlight"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/setting"
@@ -1077,12 +1078,12 @@ func parseHunks(curFile *DiffFile, maxLines, maxLineCharacters int, input *bufio
curSection.Lines[len(curSection.Lines)-1].Content = line
// handle LFS
- if line[1:] == models.LFSMetaFileIdentifier {
+ if line[1:] == lfs.MetaFileIdentifier {
curFileLFSPrefix = true
- } else if curFileLFSPrefix && strings.HasPrefix(line[1:], models.LFSMetaFileOidPrefix) {
- oid := strings.TrimPrefix(line[1:], models.LFSMetaFileOidPrefix)
+ } else if curFileLFSPrefix && strings.HasPrefix(line[1:], lfs.MetaFileOidPrefix) {
+ oid := strings.TrimPrefix(line[1:], lfs.MetaFileOidPrefix)
if len(oid) == 64 {
- m := &models.LFSMetaObject{Oid: oid}
+ m := &models.LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}}
count, err := models.Count(m)
if err == nil && count > 0 {
diff --git a/modules/lfs/locks.go b/services/lfs/locks.go
similarity index 96%
rename from modules/lfs/locks.go
rename to services/lfs/locks.go
index eaa8305cb4..6bbe43d36b 100644
--- a/modules/lfs/locks.go
+++ b/services/lfs/locks.go
@@ -12,6 +12,7 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/convert"
+ lfs_module "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
@@ -26,7 +27,7 @@ func checkIsValidRequest(ctx *context.Context) bool {
return false
}
if !MetaMatcher(ctx.Req) {
- log.Info("Attempt access LOCKs without accepting the correct media type: %s", metaMediaType)
+ log.Info("Attempt access LOCKs without accepting the correct media type: %s", lfs_module.MediaType)
writeStatus(ctx, http.StatusBadRequest)
return false
}
@@ -72,9 +73,9 @@ func GetListLockHandler(ctx *context.Context) {
// Status is written in checkIsValidRequest
return
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
- rv := unpack(ctx)
+ rv, _ := unpack(ctx)
repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo)
if err != nil {
@@ -159,7 +160,7 @@ func PostLockHandler(ctx *context.Context) {
// Status is written in checkIsValidRequest
return
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
userName := ctx.Params("username")
repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git")
@@ -228,7 +229,7 @@ func VerifyLockHandler(ctx *context.Context) {
// Status is written in checkIsValidRequest
return
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
userName := ctx.Params("username")
repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git")
@@ -295,7 +296,7 @@ func UnLockHandler(ctx *context.Context) {
// Status is written in checkIsValidRequest
return
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
userName := ctx.Params("username")
repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git")
diff --git a/modules/lfs/server.go b/services/lfs/server.go
similarity index 67%
rename from modules/lfs/server.go
rename to services/lfs/server.go
index f45423b851..cd9a3fd7a1 100644
--- a/modules/lfs/server.go
+++ b/services/lfs/server.go
@@ -13,62 +13,24 @@ import (
"regexp"
"strconv"
"strings"
- "time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
+ lfs_module "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/storage"
"github.com/dgrijalva/jwt-go"
jsoniter "github.com/json-iterator/go"
)
-const (
- metaMediaType = "application/vnd.git-lfs+json"
-)
-
-// RequestVars contain variables from the HTTP request. Variables from routing, json body decoding, and
-// some headers are stored.
-type RequestVars struct {
- Oid string
- Size int64
+// requestContext contains variables from the HTTP request.
+type requestContext struct {
User string
- Password string
Repo string
Authorization string
}
-// BatchVars contains multiple RequestVars processed in one batch operation.
-// https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
-type BatchVars struct {
- Transfers []string `json:"transfers,omitempty"`
- Operation string `json:"operation"`
- Objects []*RequestVars `json:"objects"`
-}
-
-// BatchResponse contains multiple object metadata Representation structures
-// for use with the batch API.
-type BatchResponse struct {
- Transfer string `json:"transfer,omitempty"`
- Objects []*Representation `json:"objects"`
-}
-
-// Representation is object metadata as seen by clients of the lfs server.
-type Representation struct {
- Oid string `json:"oid"`
- Size int64 `json:"size"`
- Actions map[string]*link `json:"actions"`
- Error *ObjectError `json:"error,omitempty"`
-}
-
-// ObjectError defines the JSON structure returned to the client in case of an error
-type ObjectError struct {
- Code int `json:"code"`
- Message string `json:"message"`
-}
-
// Claims is a JWT Token Claims
type Claims struct {
RepoID int64
@@ -78,20 +40,13 @@ type Claims struct {
}
// ObjectLink builds a URL linking to the object.
-func (v *RequestVars) ObjectLink() string {
- return setting.AppURL + path.Join(v.User, v.Repo+".git", "info/lfs/objects", v.Oid)
+func (rc *requestContext) ObjectLink(oid string) string {
+ return setting.AppURL + path.Join(rc.User, rc.Repo+".git", "info/lfs/objects", oid)
}
// VerifyLink builds a URL for verifying the object.
-func (v *RequestVars) VerifyLink() string {
- return setting.AppURL + path.Join(v.User, v.Repo+".git", "info/lfs/verify")
-}
-
-// link provides a structure used to build a hypermedia representation of an HTTP link.
-type link struct {
- Href string `json:"href"`
- Header map[string]string `json:"header,omitempty"`
- ExpiresAt time.Time `json:"expires_at,omitempty"`
+func (rc *requestContext) VerifyLink() string {
+ return setting.AppURL + path.Join(rc.User, rc.Repo+".git", "info/lfs/verify")
}
var oidRegExp = regexp.MustCompile(`^[A-Fa-f0-9]+$`)
@@ -125,28 +80,28 @@ func ObjectOidHandler(ctx *context.Context) {
writeStatus(ctx, 404)
}
-func getAuthenticatedRepoAndMeta(ctx *context.Context, rv *RequestVars, requireWrite bool) (*models.LFSMetaObject, *models.Repository) {
- if !isOidValid(rv.Oid) {
- log.Info("Attempt to access invalid LFS OID[%s] in %s/%s", rv.Oid, rv.User, rv.Repo)
+func getAuthenticatedRepoAndMeta(ctx *context.Context, rc *requestContext, p lfs_module.Pointer, requireWrite bool) (*models.LFSMetaObject, *models.Repository) {
+ if !isOidValid(p.Oid) {
+ log.Info("Attempt to access invalid LFS OID[%s] in %s/%s", p.Oid, rc.User, rc.Repo)
writeStatus(ctx, 404)
return nil, nil
}
- repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo)
+ repository, err := models.GetRepositoryByOwnerAndName(rc.User, rc.Repo)
if err != nil {
- log.Error("Unable to get repository: %s/%s Error: %v", rv.User, rv.Repo, err)
+ log.Error("Unable to get repository: %s/%s Error: %v", rc.User, rc.Repo, err)
writeStatus(ctx, 404)
return nil, nil
}
- if !authenticate(ctx, repository, rv.Authorization, requireWrite) {
+ if !authenticate(ctx, repository, rc.Authorization, requireWrite) {
requireAuth(ctx)
return nil, nil
}
- meta, err := repository.GetLFSMetaObjectByOid(rv.Oid)
+ meta, err := repository.GetLFSMetaObjectByOid(p.Oid)
if err != nil {
- log.Error("Unable to get LFS OID[%s] Error: %v", rv.Oid, err)
+ log.Error("Unable to get LFS OID[%s] Error: %v", p.Oid, err)
writeStatus(ctx, 404)
return nil, nil
}
@@ -156,9 +111,9 @@ func getAuthenticatedRepoAndMeta(ctx *context.Context, rv *RequestVars, requireW
// getContentHandler gets the content from the content store
func getContentHandler(ctx *context.Context) {
- rv := unpack(ctx)
+ rc, p := unpack(ctx)
- meta, _ := getAuthenticatedRepoAndMeta(ctx, rv, false)
+ meta, _ := getAuthenticatedRepoAndMeta(ctx, rc, p, false)
if meta == nil {
// Status already written in getAuthenticatedRepoAndMeta
return
@@ -192,8 +147,8 @@ func getContentHandler(ctx *context.Context) {
}
}
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
- content, err := contentStore.Get(meta)
+ contentStore := lfs_module.NewContentStore()
+ content, err := contentStore.Get(meta.Pointer)
if err != nil {
// Errors are logged in contentStore.Get
writeStatus(ctx, http.StatusNotFound)
@@ -233,20 +188,20 @@ func getContentHandler(ctx *context.Context) {
// getMetaHandler retrieves metadata about the object
func getMetaHandler(ctx *context.Context) {
- rv := unpack(ctx)
+ rc, p := unpack(ctx)
- meta, _ := getAuthenticatedRepoAndMeta(ctx, rv, false)
+ meta, _ := getAuthenticatedRepoAndMeta(ctx, rc, p, false)
if meta == nil {
// Status already written in getAuthenticatedRepoAndMeta
return
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
if ctx.Req.Method == "GET" {
json := jsoniter.ConfigCompatibleWithStandardLibrary
enc := json.NewEncoder(ctx.Resp)
- if err := enc.Encode(Represent(rv, meta, true, false)); err != nil {
+ if err := enc.Encode(represent(rc, meta.Pointer, true, false)); err != nil {
log.Error("Failed to encode representation as json. Error: %v", err)
}
}
@@ -263,51 +218,51 @@ func PostHandler(ctx *context.Context) {
}
if !MetaMatcher(ctx.Req) {
- log.Info("Attempt to POST without accepting the correct media type: %s", metaMediaType)
+ log.Info("Attempt to POST without accepting the correct media type: %s", lfs_module.MediaType)
writeStatus(ctx, 400)
return
}
- rv := unpack(ctx)
+ rc, p := unpack(ctx)
- repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo)
+ repository, err := models.GetRepositoryByOwnerAndName(rc.User, rc.Repo)
if err != nil {
- log.Error("Unable to get repository: %s/%s Error: %v", rv.User, rv.Repo, err)
+ log.Error("Unable to get repository: %s/%s Error: %v", rc.User, rc.Repo, err)
writeStatus(ctx, 404)
return
}
- if !authenticate(ctx, repository, rv.Authorization, true) {
+ if !authenticate(ctx, repository, rc.Authorization, true) {
requireAuth(ctx)
return
}
- if !isOidValid(rv.Oid) {
- log.Info("Invalid LFS OID[%s] attempt to POST in %s/%s", rv.Oid, rv.User, rv.Repo)
+ if !isOidValid(p.Oid) {
+ log.Info("Invalid LFS OID[%s] attempt to POST in %s/%s", p.Oid, rc.User, rc.Repo)
writeStatus(ctx, 404)
return
}
- if setting.LFS.MaxFileSize > 0 && rv.Size > setting.LFS.MaxFileSize {
- log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", rv.Oid, rv.Size, rv.User, rv.Repo, setting.LFS.MaxFileSize)
+ if setting.LFS.MaxFileSize > 0 && p.Size > setting.LFS.MaxFileSize {
+ log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", p.Oid, p.Size, rc.User, rc.Repo, setting.LFS.MaxFileSize)
writeStatus(ctx, 413)
return
}
- meta, err := models.NewLFSMetaObject(&models.LFSMetaObject{Oid: rv.Oid, Size: rv.Size, RepositoryID: repository.ID})
+ meta, err := models.NewLFSMetaObject(&models.LFSMetaObject{Pointer: p, RepositoryID: repository.ID})
if err != nil {
- log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", rv.Oid, rv.Size, rv.User, rv.Repo, err)
+ log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", p.Oid, p.Size, rc.User, rc.Repo, err)
writeStatus(ctx, 404)
return
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
sentStatus := 202
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
- exist, err := contentStore.Exists(meta)
+ contentStore := lfs_module.NewContentStore()
+ exist, err := contentStore.Exists(p)
if err != nil {
- log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", rv.Oid, rv.User, rv.Repo, err)
+ log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", p.Oid, rc.User, rc.Repo, err)
writeStatus(ctx, 500)
return
}
@@ -318,7 +273,7 @@ func PostHandler(ctx *context.Context) {
json := jsoniter.ConfigCompatibleWithStandardLibrary
enc := json.NewEncoder(ctx.Resp)
- if err := enc.Encode(Represent(rv, meta, meta.Existing, true)); err != nil {
+ if err := enc.Encode(represent(rc, meta.Pointer, meta.Existing, true)); err != nil {
log.Error("Failed to encode representation as json. Error: %v", err)
}
logRequest(ctx.Req, sentStatus)
@@ -333,25 +288,31 @@ func BatchHandler(ctx *context.Context) {
}
if !MetaMatcher(ctx.Req) {
- log.Info("Attempt to BATCH without accepting the correct media type: %s", metaMediaType)
+ log.Info("Attempt to BATCH without accepting the correct media type: %s", lfs_module.MediaType)
writeStatus(ctx, 400)
return
}
bv := unpackbatch(ctx)
- var responseObjects []*Representation
+ reqCtx := &requestContext{
+ User: ctx.Params("username"),
+ Repo: strings.TrimSuffix(ctx.Params("reponame"), ".git"),
+ Authorization: ctx.Req.Header.Get("Authorization"),
+ }
+
+ var responseObjects []*lfs_module.ObjectResponse
// Create a response object
for _, object := range bv.Objects {
if !isOidValid(object.Oid) {
- log.Info("Invalid LFS OID[%s] attempt to BATCH in %s/%s", object.Oid, object.User, object.Repo)
+ log.Info("Invalid LFS OID[%s] attempt to BATCH in %s/%s", object.Oid, reqCtx.User, reqCtx.Repo)
continue
}
- repository, err := models.GetRepositoryByOwnerAndName(object.User, object.Repo)
+ repository, err := models.GetRepositoryByOwnerAndName(reqCtx.User, reqCtx.Repo)
if err != nil {
- log.Error("Unable to get repository: %s/%s Error: %v", object.User, object.Repo, err)
+ log.Error("Unable to get repository: %s/%s Error: %v", reqCtx.User, reqCtx.Repo, err)
writeStatus(ctx, 404)
return
}
@@ -361,51 +322,51 @@ func BatchHandler(ctx *context.Context) {
requireWrite = true
}
- if !authenticate(ctx, repository, object.Authorization, requireWrite) {
+ if !authenticate(ctx, repository, reqCtx.Authorization, requireWrite) {
requireAuth(ctx)
return
}
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
+ contentStore := lfs_module.NewContentStore()
meta, err := repository.GetLFSMetaObjectByOid(object.Oid)
if err == nil { // Object is found and exists
- exist, err := contentStore.Exists(meta)
+ exist, err := contentStore.Exists(meta.Pointer)
if err != nil {
- log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, object.User, object.Repo, err)
+ log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, reqCtx.User, reqCtx.Repo, err)
writeStatus(ctx, 500)
return
}
if exist {
- responseObjects = append(responseObjects, Represent(object, meta, true, false))
+ responseObjects = append(responseObjects, represent(reqCtx, meta.Pointer, true, false))
continue
}
}
if requireWrite && setting.LFS.MaxFileSize > 0 && object.Size > setting.LFS.MaxFileSize {
- log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", object.Oid, object.Size, object.User, object.Repo, setting.LFS.MaxFileSize)
+ log.Info("Denied LFS OID[%s] upload of size %d to %s/%s because of LFS_MAX_FILE_SIZE=%d", object.Oid, object.Size, reqCtx.User, reqCtx.Repo, setting.LFS.MaxFileSize)
writeStatus(ctx, 413)
return
}
// Object is not found
- meta, err = models.NewLFSMetaObject(&models.LFSMetaObject{Oid: object.Oid, Size: object.Size, RepositoryID: repository.ID})
+ meta, err = models.NewLFSMetaObject(&models.LFSMetaObject{Pointer: object, RepositoryID: repository.ID})
if err == nil {
- exist, err := contentStore.Exists(meta)
+ exist, err := contentStore.Exists(meta.Pointer)
if err != nil {
- log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, object.User, object.Repo, err)
+ log.Error("Unable to check if LFS OID[%s] exist on %s / %s. Error: %v", object.Oid, reqCtx.User, reqCtx.Repo, err)
writeStatus(ctx, 500)
return
}
- responseObjects = append(responseObjects, Represent(object, meta, meta.Existing, !exist))
+ responseObjects = append(responseObjects, represent(reqCtx, meta.Pointer, meta.Existing, !exist))
} else {
- log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", object.Oid, object.Size, object.User, object.Repo, err)
+ log.Error("Unable to write LFS OID[%s] size %d meta object in %v/%v to database. Error: %v", object.Oid, object.Size, reqCtx.User, reqCtx.Repo, err)
}
}
- ctx.Resp.Header().Set("Content-Type", metaMediaType)
+ ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
- respobj := &BatchResponse{Objects: responseObjects}
+ respobj := &lfs_module.BatchResponse{Objects: responseObjects}
json := jsoniter.ConfigCompatibleWithStandardLibrary
enc := json.NewEncoder(ctx.Resp)
@@ -417,26 +378,26 @@ func BatchHandler(ctx *context.Context) {
// PutHandler receives data from the client and puts it into the content store
func PutHandler(ctx *context.Context) {
- rv := unpack(ctx)
+ rc, p := unpack(ctx)
- meta, repository := getAuthenticatedRepoAndMeta(ctx, rv, true)
+ meta, repository := getAuthenticatedRepoAndMeta(ctx, rc, p, true)
if meta == nil {
// Status already written in getAuthenticatedRepoAndMeta
return
}
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
+ contentStore := lfs_module.NewContentStore()
defer ctx.Req.Body.Close()
- if err := contentStore.Put(meta, ctx.Req.Body); err != nil {
+ if err := contentStore.Put(meta.Pointer, ctx.Req.Body); err != nil {
// Put will log the error itself
ctx.Resp.WriteHeader(500)
- if err == errSizeMismatch || err == errHashMismatch {
+ if err == lfs_module.ErrSizeMismatch || err == lfs_module.ErrHashMismatch {
fmt.Fprintf(ctx.Resp, `{"message":"%s"}`, err)
} else {
fmt.Fprintf(ctx.Resp, `{"message":"Internal Server Error"}`)
}
- if _, err = repository.RemoveLFSMetaObjectByOid(rv.Oid); err != nil {
- log.Error("Whilst removing metaobject for LFS OID[%s] due to preceding error there was another Error: %v", rv.Oid, err)
+ if _, err = repository.RemoveLFSMetaObjectByOid(p.Oid); err != nil {
+ log.Error("Whilst removing metaobject for LFS OID[%s] due to preceding error there was another Error: %v", p.Oid, err)
}
return
}
@@ -453,21 +414,21 @@ func VerifyHandler(ctx *context.Context) {
}
if !MetaMatcher(ctx.Req) {
- log.Info("Attempt to VERIFY without accepting the correct media type: %s", metaMediaType)
+ log.Info("Attempt to VERIFY without accepting the correct media type: %s", lfs_module.MediaType)
writeStatus(ctx, 400)
return
}
- rv := unpack(ctx)
+ rc, p := unpack(ctx)
- meta, _ := getAuthenticatedRepoAndMeta(ctx, rv, true)
+ meta, _ := getAuthenticatedRepoAndMeta(ctx, rc, p, true)
if meta == nil {
// Status already written in getAuthenticatedRepoAndMeta
return
}
- contentStore := &ContentStore{ObjectStorage: storage.LFS}
- ok, err := contentStore.Verify(meta)
+ contentStore := lfs_module.NewContentStore()
+ ok, err := contentStore.Verify(meta.Pointer)
if err != nil {
// Error will be logged in Verify
ctx.Resp.WriteHeader(500)
@@ -482,30 +443,29 @@ func VerifyHandler(ctx *context.Context) {
logRequest(ctx.Req, 200)
}
-// Represent takes a RequestVars and Meta and turns it into a Representation suitable
+// represent takes a requestContext and Pointer and turns it into an ObjectResponse suitable
// for json encoding
-func Represent(rv *RequestVars, meta *models.LFSMetaObject, download, upload bool) *Representation {
- rep := &Representation{
- Oid: meta.Oid,
- Size: meta.Size,
- Actions: make(map[string]*link),
+func represent(rc *requestContext, pointer lfs_module.Pointer, download, upload bool) *lfs_module.ObjectResponse {
+ rep := &lfs_module.ObjectResponse{
+ Pointer: pointer,
+ Actions: make(map[string]*lfs_module.Link),
}
header := make(map[string]string)
- if rv.Authorization == "" {
+ if rc.Authorization == "" {
//https://github.com/github/git-lfs/issues/1088
header["Authorization"] = "Authorization: Basic dummy"
} else {
- header["Authorization"] = rv.Authorization
+ header["Authorization"] = rc.Authorization
}
if download {
- rep.Actions["download"] = &link{Href: rv.ObjectLink(), Header: header}
+ rep.Actions["download"] = &lfs_module.Link{Href: rc.ObjectLink(pointer.Oid), Header: header}
}
if upload {
- rep.Actions["upload"] = &link{Href: rv.ObjectLink(), Header: header}
+ rep.Actions["upload"] = &lfs_module.Link{Href: rc.ObjectLink(pointer.Oid), Header: header}
}
if upload && !download {
@@ -516,56 +476,56 @@ func Represent(rv *RequestVars, meta *models.LFSMetaObject, download, upload boo
}
// This is only needed to workaround https://github.com/git-lfs/git-lfs/issues/3662
- verifyHeader["Accept"] = metaMediaType
+ verifyHeader["Accept"] = lfs_module.MediaType
- rep.Actions["verify"] = &link{Href: rv.VerifyLink(), Header: verifyHeader}
+ rep.Actions["verify"] = &lfs_module.Link{Href: rc.VerifyLink(), Header: verifyHeader}
}
return rep
}
// MetaMatcher provides a mux.MatcherFunc that only allows requests that contain
-// an Accept header with the metaMediaType
+// an Accept header with the lfs_module.MediaType
func MetaMatcher(r *http.Request) bool {
mediaParts := strings.Split(r.Header.Get("Accept"), ";")
mt := mediaParts[0]
- return mt == metaMediaType
+ return mt == lfs_module.MediaType
}
-func unpack(ctx *context.Context) *RequestVars {
+func unpack(ctx *context.Context) (*requestContext, lfs_module.Pointer) {
r := ctx.Req
- rv := &RequestVars{
+ rc := &requestContext{
User: ctx.Params("username"),
Repo: strings.TrimSuffix(ctx.Params("reponame"), ".git"),
- Oid: ctx.Params("oid"),
Authorization: r.Header.Get("Authorization"),
}
+ p := lfs_module.Pointer{Oid: ctx.Params("oid")}
if r.Method == "POST" { // Maybe also check if +json
- var p RequestVars
+ var p2 lfs_module.Pointer
bodyReader := r.Body
defer bodyReader.Close()
json := jsoniter.ConfigCompatibleWithStandardLibrary
dec := json.NewDecoder(bodyReader)
- err := dec.Decode(&p)
+ err := dec.Decode(&p2)
if err != nil {
// The error is logged as a WARN here because this may represent misbehaviour rather than a true error
- log.Warn("Unable to decode POST request vars for LFS OID[%s] in %s/%s: Error: %v", rv.Oid, rv.User, rv.Repo, err)
- return rv
+ log.Warn("Unable to decode POST request vars for LFS OID[%s] in %s/%s: Error: %v", p.Oid, rc.User, rc.Repo, err)
+ return rc, p
}
- rv.Oid = p.Oid
- rv.Size = p.Size
+ p.Oid = p2.Oid
+ p.Size = p2.Size
}
- return rv
+ return rc, p
}
// TODO cheap hack, unify with unpack
-func unpackbatch(ctx *context.Context) *BatchVars {
+func unpackbatch(ctx *context.Context) *lfs_module.BatchRequest {
r := ctx.Req
- var bv BatchVars
+ var bv lfs_module.BatchRequest
bodyReader := r.Body
defer bodyReader.Close()
@@ -578,12 +538,6 @@ func unpackbatch(ctx *context.Context) *BatchVars {
return &bv
}
- for i := 0; i < len(bv.Objects); i++ {
- bv.Objects[i].User = ctx.Params("username")
- bv.Objects[i].Repo = strings.TrimSuffix(ctx.Params("reponame"), ".git")
- bv.Objects[i].Authorization = r.Header.Get("Authorization")
- }
-
return &bv
}
diff --git a/services/mirror/mirror.go b/services/mirror/mirror.go
index e4981b8c00..9e2dde85fc 100644
--- a/services/mirror/mirror.go
+++ b/services/mirror/mirror.go
@@ -16,6 +16,7 @@ import (
"code.gitea.io/gitea/modules/cache"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/notification"
repo_module "code.gitea.io/gitea/modules/repository"
@@ -206,7 +207,7 @@ func parseRemoteUpdateOutput(output string) []*mirrorSyncResult {
}
// runSync returns true if sync finished without error.
-func runSync(m *models.Mirror) ([]*mirrorSyncResult, bool) {
+func runSync(ctx context.Context, m *models.Mirror) ([]*mirrorSyncResult, bool) {
repoPath := m.Repo.RepoPath()
wikiPath := m.Repo.WikiPath()
timeout := time.Duration(setting.Git.Timeout.Mirror) * time.Second
@@ -253,13 +254,21 @@ func runSync(m *models.Mirror) ([]*mirrorSyncResult, bool) {
log.Error("OpenRepository: %v", err)
return nil, false
}
+ defer gitRepo.Close()
log.Trace("SyncMirrors [repo: %-v]: syncing releases with tags...", m.Repo)
if err = repo_module.SyncReleasesWithTags(m.Repo, gitRepo); err != nil {
- gitRepo.Close()
log.Error("Failed to synchronize tags to releases for repository: %v", err)
}
- gitRepo.Close()
+
+ if m.LFS && setting.LFS.StartServer {
+ log.Trace("SyncMirrors [repo: %-v]: syncing LFS objects...", m.Repo)
+ readAddress(m)
+ ep := lfs.DetermineEndpoint(m.Address, m.LFSEndpoint)
+ if err = repo_module.StoreMissingLfsObjectsInRepository(ctx, m.Repo, gitRepo, ep); err != nil {
+ log.Error("Failed to synchronize LFS objects for repository: %v", err)
+ }
+ }
log.Trace("SyncMirrors [repo: %-v]: updating size of repository", m.Repo)
if err := m.Repo.UpdateSize(models.DefaultDBContext()); err != nil {
@@ -378,12 +387,12 @@ func SyncMirrors(ctx context.Context) {
mirrorQueue.Close()
return
case repoID := <-mirrorQueue.Queue():
- syncMirror(repoID)
+ syncMirror(ctx, repoID)
}
}
}
-func syncMirror(repoID string) {
+func syncMirror(ctx context.Context, repoID string) {
log.Trace("SyncMirrors [repo_id: %v]", repoID)
defer func() {
err := recover()
@@ -403,7 +412,7 @@ func syncMirror(repoID string) {
}
log.Trace("SyncMirrors [repo: %-v]: Running Sync", m.Repo)
- results, ok := runSync(m)
+ results, ok := runSync(ctx, m)
if !ok {
return
}
diff --git a/services/mirror/mirror_test.go b/services/mirror/mirror_test.go
index 57628aa68d..20492c784b 100644
--- a/services/mirror/mirror_test.go
+++ b/services/mirror/mirror_test.go
@@ -48,7 +48,9 @@ func TestRelease_MirrorDelete(t *testing.T) {
})
assert.NoError(t, err)
- mirror, err := repository.MigrateRepositoryGitData(context.Background(), user, mirrorRepo, opts)
+ ctx := context.Background()
+
+ mirror, err := repository.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts)
assert.NoError(t, err)
gitRepo, err := git.OpenRepository(repoPath)
@@ -74,7 +76,7 @@ func TestRelease_MirrorDelete(t *testing.T) {
err = mirror.GetMirror()
assert.NoError(t, err)
- _, ok := runSync(mirror.Mirror)
+ _, ok := runSync(ctx, mirror.Mirror)
assert.True(t, ok)
count, err := models.GetReleaseCountByRepoID(mirror.ID, findOptions)
@@ -85,7 +87,7 @@ func TestRelease_MirrorDelete(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, release_service.DeleteReleaseByID(release.ID, user, true))
- _, ok = runSync(mirror.Mirror)
+ _, ok = runSync(ctx, mirror.Mirror)
assert.True(t, ok)
count, err = models.GetReleaseCountByRepoID(mirror.ID, findOptions)
diff --git a/services/pull/lfs.go b/services/pull/lfs.go
index a1981b8253..b902c63619 100644
--- a/services/pull/lfs.go
+++ b/services/pull/lfs.go
@@ -70,6 +70,8 @@ func createLFSMetaObjectsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg
defer wg.Done()
defer catFileBatchReader.Close()
+ contentStore := lfs.NewContentStore()
+
bufferedReader := bufio.NewReader(catFileBatchReader)
buf := make([]byte, 1025)
for {
@@ -101,10 +103,16 @@ func createLFSMetaObjectsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg
}
pointerBuf = pointerBuf[:size]
// Now we need to check if the pointerBuf is an LFS pointer
- pointer := lfs.IsPointerFile(&pointerBuf)
- if pointer == nil {
+ pointer, _ := lfs.ReadPointerFromBuffer(pointerBuf)
+ if !pointer.IsValid() {
continue
}
+
+ exist, _ := contentStore.Exists(pointer)
+ if !exist {
+ continue
+ }
+
// Then we need to check that this pointer is in the db
if _, err := pr.HeadRepo.GetLFSMetaObjectByOid(pointer.Oid); err != nil {
if err == models.ErrLFSObjectNotExist {
@@ -117,8 +125,9 @@ func createLFSMetaObjectsFromCatFileBatch(catFileBatchReader *io.PipeReader, wg
// OK we have a pointer that is associated with the head repo
// and is actually a file in the LFS
// Therefore it should be associated with the base repo
- pointer.RepositoryID = pr.BaseRepoID
- if _, err := models.NewLFSMetaObject(pointer); err != nil {
+ meta := &models.LFSMetaObject{Pointer: pointer}
+ meta.RepositoryID = pr.BaseRepoID
+ if _, err := models.NewLFSMetaObject(meta); err != nil {
_ = catFileBatchReader.CloseWithError(err)
break
}
diff --git a/templates/repo/migrate/git.tmpl b/templates/repo/migrate/git.tmpl
index 233a019435..6525a9b4f5 100644
--- a/templates/repo/migrate/git.tmpl
+++ b/templates/repo/migrate/git.tmpl
@@ -15,7 +15,6 @@
{{.i18n.Tr "repo.migrate.clone_address_desc"}}{{if .ContextUser.CanImportLocal}} {{.i18n.Tr "repo.migrate.clone_local_path"}}{{end}}
- {{if .LFSActive}}{{.i18n.Tr "repo.migrate.lfs_mirror_unsupported"}}{{end}}
{{.i18n.Tr "repo.mirror_lfs_endpoint_desc" "https://github.com/git-lfs/git-lfs/blob/main/docs/api/server-discovery.md#server-discovery" | Str2html}}
+