
Add new [lfs_client].BATCH_SIZE and [server].LFS_MAX_BATCH_SIZE config settings. (#32307)

This contains two backwards-compatible changes:
* in the LFS http_client, the number of LFS OIDs requested per batch is
loaded from [lfs_client].BATCH_SIZE and defaults to the previous
hard-coded value of 20
* in the LFS server/service, the maximum number of LFS OIDs allowed in a
batch API request is loaded from [server].LFS_MAX_BATCH_SIZE and defaults
to 0, which keeps the previous behavior of 'unlimited'

This fixes #32306

---------

Signed-off-by: Royce Remer <royceremer@gmail.com>
Co-authored-by: wxiaoguang <wxiaoguang@gmail.com>
Royce Remer 2024-10-29 22:41:55 -07:00 committed by GitHub
parent 1cd3f69859
commit c60e4dc109
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 48 additions and 7 deletions
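Taken together, the two settings behave as follows: an [lfs_client].BATCH_SIZE below 1 falls back to 20, and a [server].LFS_MAX_BATCH_SIZE of 0 disables the server-side limit. The snippet below is an illustrative sketch only (the helper names are invented, not Gitea APIs); it restates the fallback added to loadLFSFrom and the guard added to BatchHandler further down in this diff.

package main

import "fmt"

// effectiveClientBatchSize mirrors the [lfs_client].BATCH_SIZE fallback:
// anything below 1 (unset or 0) falls back to the previous hard-coded 20.
func effectiveClientBatchSize(configured int) int {
	if configured < 1 {
		return 20
	}
	return configured
}

// serverAcceptsBatch mirrors the [server].LFS_MAX_BATCH_SIZE guard:
// 0 means 'unlimited', otherwise a batch with more pointers is rejected.
func serverAcceptsBatch(numPointers, maxBatchSize int) bool {
	return maxBatchSize == 0 || numPointers <= maxBatchSize
}

func main() {
	fmt.Println(effectiveClientBatchSize(0))  // 20 (default)
	fmt.Println(effectiveClientBatchSize(50)) // 50
	fmt.Println(serverAcceptsBatch(120, 0))   // true  (unlimited)
	fmt.Println(serverAcceptsBatch(120, 100)) // false (would get HTTP 413)
	fmt.Println(serverAcceptsBatch(20, 100))  // true
}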

custom/conf/app.example.ini

@@ -324,6 +324,10 @@ RUN_USER = ; git
 ;; Maximum number of locks returned per page
 ;LFS_LOCKS_PAGING_NUM = 50
 ;;
+;; When clients make lfs batch requests, reject them if there are more pointers than this number
+;; zero means 'unlimited'
+;LFS_MAX_BATCH_SIZE = 0
+;;
 ;; Allow graceful restarts using SIGHUP to fork
 ;ALLOW_GRACEFUL_RESTARTS = true
 ;;
@@ -2638,6 +2642,10 @@ LEVEL = Info
 ;; override the azure blob base path if storage type is azureblob
 ;AZURE_BLOB_BASE_PATH = lfs/
 
+;[lfs_client]
+;; When mirroring an upstream lfs endpoint, limit the number of pointers in each batch request to this number
+;BATCH_SIZE = 20
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; settings for packages, will override storage setting

modules/lfs/http_client.go

@@ -16,10 +16,9 @@ import (
 	"code.gitea.io/gitea/modules/json"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/proxy"
+	"code.gitea.io/gitea/modules/setting"
 )
 
-const httpBatchSize = 20
-
 // HTTPClient is used to communicate with the LFS server
 // https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
 type HTTPClient struct {
@@ -30,7 +29,7 @@ type HTTPClient struct {
 
 // BatchSize returns the preferred size of batchs to process
 func (c *HTTPClient) BatchSize() int {
-	return httpBatchSize
+	return setting.LFSClient.BatchSize
 }
 
 func newHTTPClient(endpoint *url.URL, httpTransport *http.Transport) *HTTPClient {
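Per the new [lfs_client] config comment, the client batch size matters when mirroring an upstream LFS endpoint: the set of pointers to transfer is split into batch API requests of at most BatchSize() OIDs. The chunking sketch below is illustrative only (chunkOIDs and the example values are invented, not the actual mirror code in Gitea).

package main

import "fmt"

// chunkOIDs splits a list of LFS object IDs into batches of at most n entries.
// It mirrors how a configured batch size is applied when more pointers need to
// be transferred than fit into a single batch API request.
func chunkOIDs(oids []string, n int) [][]string {
	if n < 1 {
		n = 1
	}
	var batches [][]string
	for len(oids) > 0 {
		end := n
		if len(oids) < n {
			end = len(oids)
		}
		batches = append(batches, oids[:end])
		oids = oids[end:]
	}
	return batches
}

func main() {
	oids := make([]string, 45) // pretend a mirror has 45 pointers to fetch
	for i := range oids {
		oids[i] = fmt.Sprintf("oid-%02d", i)
	}
	for i, batch := range chunkOIDs(oids, 20) { // 20 is the BATCH_SIZE default
		fmt.Printf("batch %d: %d oids\n", i+1, len(batch))
	}
	// prints: batch 1: 20 oids, batch 2: 20 oids, batch 3: 5 oids
}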

modules/setting/lfs.go

@@ -10,7 +10,10 @@ import (
 	"code.gitea.io/gitea/modules/generate"
 )
 
-// LFS represents the configuration for Git LFS
+// LFS represents the server-side configuration for Git LFS.
+// Ideally these options should be in a section like "[lfs_server]",
+// but they are in "[server]" section due to historical reasons.
+// Could be refactored in the future while keeping backwards compatibility.
 var LFS = struct {
 	StartServer  bool `ini:"LFS_START_SERVER"`
 	AllowPureSSH bool `ini:"LFS_ALLOW_PURE_SSH"`
@@ -18,15 +21,21 @@ var LFS = struct {
 	HTTPAuthExpiry time.Duration `ini:"LFS_HTTP_AUTH_EXPIRY"`
 	MaxFileSize    int64         `ini:"LFS_MAX_FILE_SIZE"`
 	LocksPagingNum int           `ini:"LFS_LOCKS_PAGING_NUM"`
+	MaxBatchSize   int           `ini:"LFS_MAX_BATCH_SIZE"`
 
 	Storage *Storage
 }{}
 
+// LFSClient represents configuration for Gitea's LFS clients, for example: mirroring upstream Git LFS
+var LFSClient = struct {
+	BatchSize int `ini:"BATCH_SIZE"`
+}{}
+
 func loadLFSFrom(rootCfg ConfigProvider) error {
+	mustMapSetting(rootCfg, "lfs_client", &LFSClient)
+
+	mustMapSetting(rootCfg, "server", &LFS)
 	sec := rootCfg.Section("server")
-	if err := sec.MapTo(&LFS); err != nil {
-		return fmt.Errorf("failed to map LFS settings: %v", err)
-	}
 
 	lfsSec, _ := rootCfg.GetSection("lfs")
@@ -53,6 +62,10 @@ func loadLFSFrom(rootCfg ConfigProvider) error {
 		LFS.LocksPagingNum = 50
 	}
 
+	if LFSClient.BatchSize < 1 {
+		LFSClient.BatchSize = 20
+	}
+
 	LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(24 * time.Hour)
 
 	if !LFS.StartServer || !InstallLock {

modules/setting/lfs_test.go

@@ -99,3 +99,19 @@ STORAGE_TYPE = minio
 	assert.EqualValues(t, "gitea", LFS.Storage.MinioConfig.Bucket)
 	assert.EqualValues(t, "lfs/", LFS.Storage.MinioConfig.BasePath)
 }
+
+func Test_LFSClientServerConfigs(t *testing.T) {
+	iniStr := `
+[server]
+LFS_MAX_BATCH_SIZE = 100
+[lfs_client]
+# will default to 20
+BATCH_SIZE = 0
+`
+	cfg, err := NewConfigProviderFromData(iniStr)
+	assert.NoError(t, err)
+	assert.NoError(t, loadLFSFrom(cfg))
+
+	assert.EqualValues(t, 100, LFS.MaxBatchSize)
+	assert.EqualValues(t, 20, LFSClient.BatchSize)
+}

services/lfs/server.go

@@ -179,6 +179,11 @@ func BatchHandler(ctx *context.Context) {
 		return
 	}
 
+	if setting.LFS.MaxBatchSize != 0 && len(br.Objects) > setting.LFS.MaxBatchSize {
+		writeStatus(ctx, http.StatusRequestEntityTooLarge)
+		return
+	}
+
 	contentStore := lfs_module.NewContentStore()
 
 	var responseObjects []*lfs_module.ObjectResponse
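The new server-side guard counts the entries in the objects array of a Git LFS batch request (the API format is documented at the git-lfs link cited in http_client.go above). Below is a self-contained sketch of the same check with simplified, made-up types; Gitea's real BatchRequest struct, writeStatus helper and route wiring differ in detail.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// batchRequest is a simplified stand-in for the Git LFS batch API payload:
// only the fields needed to count pointers are modeled here.
type batchRequest struct {
	Operation string `json:"operation"`
	Objects   []struct {
		Oid  string `json:"oid"`
		Size int64  `json:"size"`
	} `json:"objects"`
}

// maxBatchSize plays the role of setting.LFS.MaxBatchSize; 0 means unlimited.
const maxBatchSize = 100

func batchHandler(w http.ResponseWriter, r *http.Request) {
	var br batchRequest
	if err := json.NewDecoder(r.Body).Decode(&br); err != nil {
		http.Error(w, "invalid batch request", http.StatusBadRequest)
		return
	}
	// Reject over-sized batches, mirroring the new check in BatchHandler.
	if maxBatchSize != 0 && len(br.Objects) > maxBatchSize {
		http.Error(w, "too many objects in batch", http.StatusRequestEntityTooLarge)
		return
	}
	fmt.Fprintf(w, "accepted %d objects for %s\n", len(br.Objects), br.Operation)
}

func main() {
	http.HandleFunc("/objects/batch", batchHandler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}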