From 14c7055494b995476d9d2ec1948784bf36dd9e4d Mon Sep 17 00:00:00 2001
From: ChristopherHX <christopher.homberger@web.de>
Date: Sun, 22 Sep 2024 13:01:09 +0200
Subject: [PATCH 1/9] Fix artifact v4 upload above 8MB (#31664)

Multiple chunks are uploaded with type "block" instead of "appendBlock", and for
bigger uploads they can arrive out of order. The chunk size appears to be 8MB.

This change parses the blockList that is uploaded after all blocks to determine
the final artifact size and to order the blocks correctly before calculating the
sha256 checksum over all of them.
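
For illustration only (not part of this patch): a minimal Go sketch of the
ordering idea, decoding a BlockList payload and assigning contiguous offsets
in blockList order; the block IDs and sizes below are hypothetical.

    package main

    import (
    	"encoding/xml"
    	"fmt"
    )

    // BlockList mirrors the XML payload sent with comp=blockList.
    type BlockList struct {
    	Latest []string `xml:"Latest"`
    }

    func main() {
    	payload := `<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <BlockList>
    	<Latest>block1</Latest>
    	<Latest>block2</Latest>
    </BlockList>`

    	var blist BlockList
    	if err := xml.Unmarshal([]byte(payload), &blist); err != nil {
    		panic(err)
    	}

    	// Hypothetical sizes of the stored block files, keyed by block ID.
    	sizes := map[string]int64{"block1": 8 << 20, "block2": 1 << 20}

    	// Walk the block IDs in blockList order to compute each chunk's
    	// start/end offsets and the final artifact size.
    	var offset int64
    	for _, id := range blist.Latest {
    		size := sizes[id]
    		fmt.Printf("%s: start=%d end=%d\n", id, offset, offset+size-1)
    		offset += size
    	}
    	fmt.Printf("final artifact size: %d\n", offset)
    }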

Fixes #31354

(cherry picked from commit b594cec2bda6f861effedb2e8e0a7ebba191c0e9)

Conflicts:
	routers/api/actions/artifactsv4.go
  conflict because of Refactor AppURL usage (#30885) 67c1a07285008cc00036a87cef966c3bd519a50c
    that was not cherry-picked in Forgejo
    the resolution consists of removing the extra ctx argument
---
 routers/api/actions/artifacts_chunks.go       |  50 +++++-
 routers/api/actions/artifactsv4.go            | 144 +++++++++++++-----
 .../api_actions_artifact_v4_test.go           | 130 ++++++++++++++++
 3 files changed, 285 insertions(+), 39 deletions(-)

diff --git a/routers/api/actions/artifacts_chunks.go b/routers/api/actions/artifacts_chunks.go
index b0c96585cb..cdb56584b8 100644
--- a/routers/api/actions/artifacts_chunks.go
+++ b/routers/api/actions/artifacts_chunks.go
@@ -123,6 +123,54 @@ func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chun
 	return chunksMap, nil
 }
 
+func listChunksByRunIDV4(st storage.ObjectStorage, runID, artifactID int64, blist *BlockList) ([]*chunkFileItem, error) {
+	storageDir := fmt.Sprintf("tmpv4%d", runID)
+	var chunks []*chunkFileItem
+	chunkMap := map[string]*chunkFileItem{}
+	dummy := &chunkFileItem{}
+	for _, name := range blist.Latest {
+		chunkMap[name] = dummy
+	}
+	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+		baseName := filepath.Base(fpath)
+		if !strings.HasPrefix(baseName, "block-") {
+			return nil
+		}
+		// when reading chunks from storage, the path only contains the storage dir and basename,
+		// regardless of the subdirectory setting in the storage config
+		item := chunkFileItem{Path: storageDir + "/" + baseName, ArtifactID: artifactID}
+		var size int64
+		var b64chunkName string
+		if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &item.RunID, &size, &b64chunkName); err != nil {
+			return fmt.Errorf("parse content range error: %v", err)
+		}
+		rchunkName, err := base64.URLEncoding.DecodeString(b64chunkName)
+		if err != nil {
+			return fmt.Errorf("failed to parse chunkName: %v", err)
+		}
+		chunkName := string(rchunkName)
+		item.End = item.Start + size - 1
+		if _, ok := chunkMap[chunkName]; ok {
+			chunkMap[chunkName] = &item
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	for i, name := range blist.Latest {
+		chunk, ok := chunkMap[name]
+		if !ok || chunk.Path == "" {
+			return nil, fmt.Errorf("missing Chunk (%d/%d): %s", i, len(blist.Latest), name)
+		}
+		chunks = append(chunks, chunk)
+		if i > 0 {
+			chunk.Start = chunkMap[blist.Latest[i-1]].End + 1
+			chunk.End += chunk.Start
+		}
+	}
+	return chunks, nil
+}
+
 func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
 	// read all db artifacts by name
 	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
@@ -230,7 +278,7 @@ func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st st
 		rawChecksum := hash.Sum(nil)
 		actualChecksum := hex.EncodeToString(rawChecksum)
 		if !strings.HasSuffix(checksum, actualChecksum) {
-			return fmt.Errorf("update artifact error checksum is invalid")
+			return fmt.Errorf("update artifact error checksum is invalid %v vs %v", checksum, actualChecksum)
 		}
 	}
 
diff --git a/routers/api/actions/artifactsv4.go b/routers/api/actions/artifactsv4.go
index 7b2f9c4360..677e89da2f 100644
--- a/routers/api/actions/artifactsv4.go
+++ b/routers/api/actions/artifactsv4.go
@@ -24,8 +24,15 @@ package actions
 // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
 // 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
 // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
-// 1.4. Unknown xml payload to Blobstorage (unauthenticated request), ignored for now
+// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
+// Files of about 800MB are uploaded in parallel and/or out of order, so this file is needed to ensure the correct order
 // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
+// Request
+// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+// <BlockList>
+// 	<Latest>blockId1</Latest>
+// 	<Latest>blockId2</Latest>
+// </BlockList>
 // 1.5. FinalizeArtifact
 // Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
 // Request
@@ -82,6 +89,7 @@ import (
 	"crypto/hmac"
 	"crypto/sha256"
 	"encoding/base64"
+	"encoding/xml"
 	"fmt"
 	"io"
 	"net/http"
@@ -153,31 +161,34 @@ func ArtifactsV4Routes(prefix string) *web.Route {
 	return m
 }
 
-func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID int64) []byte {
+func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID, artifactID int64) []byte {
 	mac := hmac.New(sha256.New, setting.GetGeneralTokenSigningSecret())
 	mac.Write([]byte(endp))
 	mac.Write([]byte(expires))
 	mac.Write([]byte(artifactName))
 	mac.Write([]byte(fmt.Sprint(taskID)))
+	mac.Write([]byte(fmt.Sprint(artifactID)))
 	return mac.Sum(nil)
 }
 
-func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID int64) string {
+func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID, artifactID int64) string {
 	expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
 	uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") +
-		"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID)
+		"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID, artifactID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID) + "&artifactID=" + fmt.Sprint(artifactID)
 	return uploadURL
 }
 
 func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*actions.ActionTask, string, bool) {
 	rawTaskID := ctx.Req.URL.Query().Get("taskID")
+	rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
 	sig := ctx.Req.URL.Query().Get("sig")
 	expires := ctx.Req.URL.Query().Get("expires")
 	artifactName := ctx.Req.URL.Query().Get("artifactName")
 	dsig, _ := base64.URLEncoding.DecodeString(sig)
 	taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
+	artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
 
-	expecedsig := r.buildSignature(endp, expires, artifactName, taskID)
+	expecedsig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
 	if !hmac.Equal(dsig, expecedsig) {
 		log.Error("Error unauthorized")
 		ctx.Error(http.StatusUnauthorized, "Error unauthorized")
@@ -272,6 +283,8 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
 		return
 	}
 	artifact.ContentEncoding = ArtifactV4ContentEncoding
+	artifact.FileSize = 0
+	artifact.FileCompressedSize = 0
 	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
 		log.Error("Error UpdateArtifactByID: %v", err)
 		ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
@@ -280,7 +293,7 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
 
 	respData := CreateArtifactResponse{
 		Ok:              true,
-		SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID),
+		SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID),
 	}
 	r.sendProtbufBody(ctx, &respData)
 }
@@ -306,38 +319,77 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
 	comp := ctx.Req.URL.Query().Get("comp")
 	switch comp {
 	case "block", "appendBlock":
-		// get artifact by name
-		artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
-		if err != nil {
-			log.Error("Error artifact not found: %v", err)
-			ctx.Error(http.StatusNotFound, "Error artifact not found")
-			return
-		}
+		blockid := ctx.Req.URL.Query().Get("blockid")
+		if blockid == "" {
+			// get artifact by name
+			artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
+			if err != nil {
+				log.Error("Error artifact not found: %v", err)
+				ctx.Error(http.StatusNotFound, "Error artifact not found")
+				return
+			}
 
-		if comp == "block" {
-			artifact.FileSize = 0
-			artifact.FileCompressedSize = 0
+			_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
+			if err != nil {
+				log.Error("Error runner api getting task: task is not running")
+				ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+				return
+			}
+			artifact.FileCompressedSize += ctx.Req.ContentLength
+			artifact.FileSize += ctx.Req.ContentLength
+			if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+				log.Error("Error UpdateArtifactByID: %v", err)
+				ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+				return
+			}
+		} else {
+			_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
+			if err != nil {
+				log.Error("Error runner api getting task: task is not running")
+				ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+				return
+			}
 		}
-
-		_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
+		ctx.JSON(http.StatusCreated, "appended")
+	case "blocklist":
+		rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
+		artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
+		_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
 		if err != nil {
 			log.Error("Error runner api getting task: task is not running")
 			ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
 			return
 		}
-		artifact.FileCompressedSize += ctx.Req.ContentLength
-		artifact.FileSize += ctx.Req.ContentLength
-		if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
-			log.Error("Error UpdateArtifactByID: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
-			return
-		}
-		ctx.JSON(http.StatusCreated, "appended")
-	case "blocklist":
 		ctx.JSON(http.StatusCreated, "created")
 	}
 }
 
+type BlockList struct {
+	Latest []string `xml:"Latest"`
+}
+
+type Latest struct {
+	Value string `xml:",chardata"`
+}
+
+func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
+	blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
+	s, err := r.fs.Open(blockListName)
+	if err != nil {
+		return nil, err
+	}
+
+	xdec := xml.NewDecoder(s)
+	blockList := &BlockList{}
+	err = xdec.Decode(blockList)
+
+	delerr := r.fs.Delete(blockListName)
+	if delerr != nil {
+		log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
+	}
+	return blockList, err
+}
+
 func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
 	var req FinalizeArtifactRequest
 
@@ -356,18 +408,34 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
 		ctx.Error(http.StatusNotFound, "Error artifact not found")
 		return
 	}
-	chunkMap, err := listChunksByRunID(r.fs, runID)
+
+	var chunks []*chunkFileItem
+	blockList, err := r.readBlockList(runID, artifact.ID)
 	if err != nil {
-		log.Error("Error merge chunks: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
-		return
-	}
-	chunks, ok := chunkMap[artifact.ID]
-	if !ok {
-		log.Error("Error merge chunks")
-		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
-		return
+		log.Warn("Failed to read BlockList, fallback to old behavior: %v", err)
+		chunkMap, err := listChunksByRunID(r.fs, runID)
+		if err != nil {
+			log.Error("Error merge chunks: %v", err)
+			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			return
+		}
+		chunks, ok = chunkMap[artifact.ID]
+		if !ok {
+			log.Error("Error merge chunks")
+			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			return
+		}
+	} else {
+		chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
+		if err != nil {
+			log.Error("Error merge chunks: %v", err)
+			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			return
+		}
+		artifact.FileSize = chunks[len(chunks)-1].End + 1
+		artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1
 	}
+
 	checksum := ""
 	if req.Hash != nil {
 		checksum = req.Hash.Value
@@ -468,7 +536,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
 		}
 	}
 	if respData.SignedUrl == "" {
-		respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID)
+		respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID)
 	}
 	r.sendProtbufBody(ctx, &respData)
 }
diff --git a/tests/integration/api_actions_artifact_v4_test.go b/tests/integration/api_actions_artifact_v4_test.go
index b2c25a2e70..96668b1ddf 100644
--- a/tests/integration/api_actions_artifact_v4_test.go
+++ b/tests/integration/api_actions_artifact_v4_test.go
@@ -7,12 +7,14 @@ import (
 	"bytes"
 	"crypto/sha256"
 	"encoding/hex"
+	"encoding/xml"
 	"io"
 	"net/http"
 	"strings"
 	"testing"
 	"time"
 
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/routers/api/actions"
 	actions_service "code.gitea.io/gitea/services/actions"
 	"code.gitea.io/gitea/tests"
@@ -175,6 +177,134 @@ func TestActionsArtifactV4UploadSingleFileWithRetentionDays(t *testing.T) {
 	assert.True(t, finalizeResp.Ok)
 }
 
+func TestActionsArtifactV4UploadSingleFileWithPotentialHarmfulBlockID(t *testing.T) {
+	defer tests.PrepareTestEnv(t)()
+
+	token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
+	assert.NoError(t, err)
+
+	// acquire artifact upload url
+	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
+		Version:                 4,
+		Name:                    "artifactWithPotentialHarmfulBlockID",
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).AddTokenAuth(token)
+	resp := MakeRequest(t, req, http.StatusOK)
+	var uploadResp actions.CreateArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &uploadResp)
+	assert.True(t, uploadResp.Ok)
+	assert.Contains(t, uploadResp.SignedUploadUrl, "/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact")
+
+	// get upload urls
+	idx := strings.Index(uploadResp.SignedUploadUrl, "/twirp/")
+	url := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=%2f..%2fmyfile"
+	blockListURL := uploadResp.SignedUploadUrl[idx:] + "&comp=blocklist"
+
+	// upload artifact chunk
+	body := strings.Repeat("A", 1024)
+	req = NewRequestWithBody(t, "PUT", url, strings.NewReader(body))
+	MakeRequest(t, req, http.StatusCreated)
+
+	// verify that the exploit didn't work
+	_, err = storage.Actions.Stat("myfile")
+	assert.Error(t, err)
+
+	// upload artifact blockList
+	blockList := &actions.BlockList{
+		Latest: []string{
+			"/../myfile",
+		},
+	}
+	rawBlockList, err := xml.Marshal(blockList)
+	assert.NoError(t, err)
+	req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
+	MakeRequest(t, req, http.StatusCreated)
+
+	t.Logf("Create artifact confirm")
+
+	sha := sha256.Sum256([]byte(body))
+
+	// confirm artifact upload
+	req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact", toProtoJSON(&actions.FinalizeArtifactRequest{
+		Name:                    "artifactWithPotentialHarmfulBlockID",
+		Size:                    1024,
+		Hash:                    wrapperspb.String("sha256:" + hex.EncodeToString(sha[:])),
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).
+		AddTokenAuth(token)
+	resp = MakeRequest(t, req, http.StatusOK)
+	var finalizeResp actions.FinalizeArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &finalizeResp)
+	assert.True(t, finalizeResp.Ok)
+}
+
+func TestActionsArtifactV4UploadSingleFileWithChunksOutOfOrder(t *testing.T) {
+	defer tests.PrepareTestEnv(t)()
+
+	token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
+	assert.NoError(t, err)
+
+	// acquire artifact upload url
+	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
+		Version:                 4,
+		Name:                    "artifactWithChunksOutOfOrder",
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).AddTokenAuth(token)
+	resp := MakeRequest(t, req, http.StatusOK)
+	var uploadResp actions.CreateArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &uploadResp)
+	assert.True(t, uploadResp.Ok)
+	assert.Contains(t, uploadResp.SignedUploadUrl, "/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact")
+
+	// get upload urls
+	idx := strings.Index(uploadResp.SignedUploadUrl, "/twirp/")
+	block1URL := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=block1"
+	block2URL := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=block2"
+	blockListURL := uploadResp.SignedUploadUrl[idx:] + "&comp=blocklist"
+
+	// upload artifact chunks
+	bodyb := strings.Repeat("B", 1024)
+	req = NewRequestWithBody(t, "PUT", block2URL, strings.NewReader(bodyb))
+	MakeRequest(t, req, http.StatusCreated)
+
+	bodya := strings.Repeat("A", 1024)
+	req = NewRequestWithBody(t, "PUT", block1URL, strings.NewReader(bodya))
+	MakeRequest(t, req, http.StatusCreated)
+
+	// upload artifact blockList
+	blockList := &actions.BlockList{
+		Latest: []string{
+			"block1",
+			"block2",
+		},
+	}
+	rawBlockList, err := xml.Marshal(blockList)
+	assert.NoError(t, err)
+	req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
+	MakeRequest(t, req, http.StatusCreated)
+
+	t.Logf("Create artifact confirm")
+
+	sha := sha256.Sum256([]byte(bodya + bodyb))
+
+	// confirm artifact upload
+	req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact", toProtoJSON(&actions.FinalizeArtifactRequest{
+		Name:                    "artifactWithChunksOutOfOrder",
+		Size:                    2048,
+		Hash:                    wrapperspb.String("sha256:" + hex.EncodeToString(sha[:])),
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).
+		AddTokenAuth(token)
+	resp = MakeRequest(t, req, http.StatusOK)
+	var finalizeResp actions.FinalizeArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &finalizeResp)
+	assert.True(t, finalizeResp.Ok)
+}
+
 func TestActionsArtifactV4DownloadSingle(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()
 

From b2483b2ae031bbfd8829595fbff19cd323d80e7f Mon Sep 17 00:00:00 2001
From: Earl Warren <contact@earl-warren.org>
Date: Sun, 29 Sep 2024 09:58:47 +0200
Subject: [PATCH 2/9] Fix artifact v4 upload above 8MB (#31664) (fix lint
 errors)

---
 tests/integration/api_actions_artifact_v4_test.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/integration/api_actions_artifact_v4_test.go b/tests/integration/api_actions_artifact_v4_test.go
index 96668b1ddf..f55250f6c1 100644
--- a/tests/integration/api_actions_artifact_v4_test.go
+++ b/tests/integration/api_actions_artifact_v4_test.go
@@ -181,7 +181,7 @@ func TestActionsArtifactV4UploadSingleFileWithPotentialHarmfulBlockID(t *testing
 	defer tests.PrepareTestEnv(t)()
 
 	token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// acquire artifact upload url
 	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
@@ -208,7 +208,7 @@ func TestActionsArtifactV4UploadSingleFileWithPotentialHarmfulBlockID(t *testing
 
 	// verify that the exploit didn't work
 	_, err = storage.Actions.Stat("myfile")
-	assert.Error(t, err)
+	require.Error(t, err)
 
 	// upload artifact blockList
 	blockList := &actions.BlockList{
@@ -217,7 +217,7 @@ func TestActionsArtifactV4UploadSingleFileWithPotentialHarmfulBlockID(t *testing
 		},
 	}
 	rawBlockList, err := xml.Marshal(blockList)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
 	MakeRequest(t, req, http.StatusCreated)
 
@@ -244,7 +244,7 @@ func TestActionsArtifactV4UploadSingleFileWithChunksOutOfOrder(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()
 
 	token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// acquire artifact upload url
 	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
@@ -282,7 +282,7 @@ func TestActionsArtifactV4UploadSingleFileWithChunksOutOfOrder(t *testing.T) {
 		},
 	}
 	rawBlockList, err := xml.Marshal(blockList)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
 	MakeRequest(t, req, http.StatusCreated)
 

From b28a070a528b2df5472018d296e4e9d9e128b3bc Mon Sep 17 00:00:00 2001
From: cloudchamb3r <jizon0123@protonmail.com>
Date: Tue, 24 Sep 2024 02:09:57 +0900
Subject: [PATCH 3/9] Fix Bug in Issue/pulls list (#32081)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

fix #32080

## After
### for opened issues
<img width="1199" alt="Screenshot 2024-09-19 at 6 29 31 PM"
src="https://github.com/user-attachments/assets/86cf48ad-5e4b-4dcb-8abe-4d7fd74e0aec">

### for closed issues
<img width="1208" alt="Screenshot 2024-09-19 at 6 29 37 PM"
src="https://github.com/user-attachments/assets/a16bc545-bfcf-49a4-be52-3e7334910482">

### for all issues
<img width="1340" alt="Screenshot 2024-09-20 at 12 07 12 PM"
src="https://github.com/user-attachments/assets/b2309c8f-e59d-44e9-ae3b-bf54e1196169">

(cherry picked from commit e1f0598c8f5af5ac95f5e13b74fbab99506762db)
---
 routers/web/repo/issue.go                | 1 +
 templates/repo/issue/filter_actions.tmpl | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/routers/web/repo/issue.go b/routers/web/repo/issue.go
index 01fd1e2725..5d13ccc97c 100644
--- a/routers/web/repo/issue.go
+++ b/routers/web/repo/issue.go
@@ -476,6 +476,7 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption opt
 	ctx.Data["PosterID"] = posterID
 	ctx.Data["IsFuzzy"] = isFuzzy
 	ctx.Data["Keyword"] = keyword
+	ctx.Data["IsShowClosed"] = isShowClosed
 	switch {
 	case isShowClosed.Value():
 		ctx.Data["State"] = "closed"
diff --git a/templates/repo/issue/filter_actions.tmpl b/templates/repo/issue/filter_actions.tmpl
index a341448bcc..58b1ef8ecd 100644
--- a/templates/repo/issue/filter_actions.tmpl
+++ b/templates/repo/issue/filter_actions.tmpl
@@ -1,9 +1,9 @@
 <div class="ui secondary filter menu">
 	{{if not .Repository.IsArchived}}
 		<!-- Action Button -->
-		{{if .IsShowClosed}}
+		{{if and .IsShowClosed.Has .IsShowClosed.Value}}
 			<button class="ui primary basic button issue-action" data-action="open" data-url="{{$.RepoLink}}/issues/status">{{ctx.Locale.Tr "repo.issues.action_open"}}</button>
-		{{else}}
+		{{else if and .IsShowClosed.Has (not .IsShowClosed.Value)}}
 			<button class="ui red basic button issue-action" data-action="close" data-url="{{$.RepoLink}}/issues/status">{{ctx.Locale.Tr "repo.issues.action_close"}}</button>
 		{{end}}
 		{{if $.IsRepoAdmin}}

From d8ae7d9e96978c3e428ab8dc945cbd5ba601451c Mon Sep 17 00:00:00 2001
From: Lunny Xiao <xiaolunwen@gmail.com>
Date: Tue, 24 Sep 2024 09:30:05 +0800
Subject: [PATCH 4/9] Fix panic when cloning with wrong ssh format. (#32076)

(cherry picked from commit 3f2d8f873035b614b4cdb447d8e16f5af82cefe8)
---
 cmd/serv.go | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/cmd/serv.go b/cmd/serv.go
index 0e3006b36b..db67e36fa3 100644
--- a/cmd/serv.go
+++ b/cmd/serv.go
@@ -147,6 +147,12 @@ func runServ(c *cli.Context) error {
 		return nil
 	}
 
+	defer func() {
+		if err := recover(); err != nil {
+			_ = fail(ctx, "Internal Server Error", "Panic: %v\n%s", err, log.Stack(2))
+		}
+	}()
+
 	keys := strings.Split(c.Args().First(), "-")
 	if len(keys) != 2 || keys[0] != "key" {
 		return fail(ctx, "Key ID format error", "Invalid key argument: %s", c.Args().First())
@@ -193,10 +199,7 @@ func runServ(c *cli.Context) error {
 	}
 
 	verb := words[0]
-	repoPath := words[1]
-	if repoPath[0] == '/' {
-		repoPath = repoPath[1:]
-	}
+	repoPath := strings.TrimPrefix(words[1], "/")
 
 	var lfsVerb string
 	if verb == lfsAuthenticateVerb {

From e1e7299bd9a07c568a857ea346f446ffa67f8809 Mon Sep 17 00:00:00 2001
From: Kemal Zebari <60799661+kemzeb@users.noreply.github.com>
Date: Mon, 23 Sep 2024 20:38:08 -0700
Subject: [PATCH 5/9] Truncate commit message during Discord webhook push
 events (#31970)

Resolves #31668.

(cherry picked from commit aadbe0488f454b9f7f5a56765f4530f9d1e2c6ec)
---
 services/webhook/discord.go      | 11 +++++++++--
 services/webhook/discord_test.go | 14 ++++++++++++++
 services/webhook/general_test.go | 10 +++++++++-
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/services/webhook/discord.go b/services/webhook/discord.go
index b342b45690..af1dd79927 100644
--- a/services/webhook/discord.go
+++ b/services/webhook/discord.go
@@ -12,6 +12,7 @@ import (
 	"net/url"
 	"strconv"
 	"strings"
+	"unicode/utf8"
 
 	webhook_model "code.gitea.io/gitea/models/webhook"
 	"code.gitea.io/gitea/modules/git"
@@ -179,8 +180,14 @@ func (d discordConvertor) Push(p *api.PushPayload) (DiscordPayload, error) {
 	var text string
 	// for each commit, generate attachment text
 	for i, commit := range p.Commits {
-		text += fmt.Sprintf("[%s](%s) %s - %s", commit.ID[:7], commit.URL,
-			strings.TrimRight(commit.Message, "\r\n"), commit.Author.Name)
+		// limit the commit message display to just the summary, otherwise it would be hard to read
+		message := strings.TrimRight(strings.SplitN(commit.Message, "\n", 1)[0], "\r")
+
+		// a limit of 50 is set because GitHub does the same
+		if utf8.RuneCountInString(message) > 50 {
+			message = fmt.Sprintf("%.47s...", message)
+		}
+		text += fmt.Sprintf("[%s](%s) %s - %s", commit.ID[:7], commit.URL, message, commit.Author.Name)
 		// add linebreak to each commit but the last
 		if i < len(p.Commits)-1 {
 			text += "\n"
diff --git a/services/webhook/discord_test.go b/services/webhook/discord_test.go
index 73be143f46..cc5ec8a3e0 100644
--- a/services/webhook/discord_test.go
+++ b/services/webhook/discord_test.go
@@ -80,6 +80,20 @@ func TestDiscordPayload(t *testing.T) {
 		assert.Equal(t, p.Sender.AvatarURL, pl.Embeds[0].Author.IconURL)
 	})
 
+	t.Run("PushWithLongCommitMessage", func(t *testing.T) {
+		p := pushTestMultilineCommitMessagePayload()
+
+		pl, err := dc.Push(p)
+		require.NoError(t, err)
+
+		assert.Len(t, pl.Embeds, 1)
+		assert.Equal(t, "[test/repo:test] 2 new commits", pl.Embeds[0].Title)
+		assert.Equal(t, "[2020558](http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778) This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好... - user1\n[2020558](http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778) This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好... - user1", pl.Embeds[0].Description)
+		assert.Equal(t, p.Sender.UserName, pl.Embeds[0].Author.Name)
+		assert.Equal(t, setting.AppURL+p.Sender.UserName, pl.Embeds[0].Author.URL)
+		assert.Equal(t, p.Sender.AvatarURL, pl.Embeds[0].Author.IconURL)
+	})
+
 	t.Run("Issue", func(t *testing.T) {
 		p := issueTestPayload()
 
diff --git a/services/webhook/general_test.go b/services/webhook/general_test.go
index 5b9bfdc1b2..2d991f8300 100644
--- a/services/webhook/general_test.go
+++ b/services/webhook/general_test.go
@@ -64,9 +64,17 @@ func forkTestPayload() *api.ForkPayload {
 }
 
 func pushTestPayload() *api.PushPayload {
+	return pushTestPayloadWithCommitMessage("commit message")
+}
+
+func pushTestMultilineCommitMessagePayload() *api.PushPayload {
+	return pushTestPayloadWithCommitMessage("This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好 ⚠️⚠️️\n\nThis is the message body.")
+}
+
+func pushTestPayloadWithCommitMessage(message string) *api.PushPayload {
 	commit := &api.PayloadCommit{
 		ID:      "2020558fe2e34debb818a514715839cabd25e778",
-		Message: "commit message",
+		Message: message,
 		URL:     "http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778",
 		Author: &api.PayloadUser{
 			Name:     "user1",

From cb88d558378b66154c4334d73230ba0e8946d3b0 Mon Sep 17 00:00:00 2001
From: Lunny Xiao <xiaolunwen@gmail.com>
Date: Tue, 24 Sep 2024 15:12:06 +0800
Subject: [PATCH 6/9] Include collaboration repositories on dashboard
 source/forks/mirrors list (#31946)

Fix #13489

In the original implementation, only `All` displays your owned and
collaborated repositories; other filters like `Source` and `Mirrors`
only display your owned repositories.

This PR removes that limitation. Now, except for `collaborations`, all
filters display both your owned and collaborated repositories.

(cherry picked from commit 4947bec8360c152daca23e120eae1732d3848492)
---
 web_src/js/components/DashboardRepoList.vue | 1 -
 1 file changed, 1 deletion(-)

diff --git a/web_src/js/components/DashboardRepoList.vue b/web_src/js/components/DashboardRepoList.vue
index 69a8ced47d..e5874133b1 100644
--- a/web_src/js/components/DashboardRepoList.vue
+++ b/web_src/js/components/DashboardRepoList.vue
@@ -78,7 +78,6 @@ const sfc = {
     searchURL() {
       return `${this.subUrl}/repo/search?sort=updated&order=desc&uid=${this.uid}&team_id=${this.teamId}&q=${this.searchQuery
       }&page=${this.page}&limit=${this.searchLimit}&mode=${this.repoTypes[this.reposFilter].searchMode
-      }${this.reposFilter !== 'all' ? '&exclusive=1' : ''
       }${this.archivedFilter === 'archived' ? '&archived=true' : ''}${this.archivedFilter === 'unarchived' ? '&archived=false' : ''
       }${this.privateFilter === 'private' ? '&is_private=true' : ''}${this.privateFilter === 'public' ? '&is_private=false' : ''
       }`;

From 0a0a3cea1b54d9cd7c95faf9318f6c3cdf1469a9 Mon Sep 17 00:00:00 2001
From: Lunny Xiao <xiaolunwen@gmail.com>
Date: Tue, 24 Sep 2024 15:42:08 +0800
Subject: [PATCH 7/9] Fix bug when deleting a migrated branch (#32075)

After migrating a repository with pull requests, the branch record is missing,
and once the pull request is merged the branch cannot be deleted.

(cherry picked from commit 5a8568459d22e57cac506465463660526ca6a08f)

Conflicts:
	services/repository/branch.go
  conflict because of [GITEA] Fix typo in formatting error e71b5a038ecfd80630c98752472c2719b0e9659f
---
 services/repository/branch.go | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/services/repository/branch.go b/services/repository/branch.go
index 27e50e5ced..f0e7120926 100644
--- a/services/repository/branch.go
+++ b/services/repository/branch.go
@@ -430,13 +430,12 @@ func DeleteBranch(ctx context.Context, doer *user_model.User, repo *repo_model.R
 	}
 
 	rawBranch, err := git_model.GetBranch(ctx, repo.ID, branchName)
-	if err != nil {
+	if err != nil && !git_model.IsErrBranchNotExist(err) {
 		return fmt.Errorf("GetBranch: %v", err)
 	}
 
-	if rawBranch.IsDeleted {
-		return nil
-	}
+	// database branch record not exist or it's a deleted branch
+	notExist := git_model.IsErrBranchNotExist(err) || rawBranch.IsDeleted
 
 	commit, err := gitRepo.GetBranchCommit(branchName)
 	if err != nil {
@@ -444,8 +443,10 @@ func DeleteBranch(ctx context.Context, doer *user_model.User, repo *repo_model.R
 	}
 
 	if err := db.WithTx(ctx, func(ctx context.Context) error {
-		if err := git_model.AddDeletedBranch(ctx, repo.ID, branchName, doer.ID); err != nil {
-			return err
+		if !notExist {
+			if err := git_model.AddDeletedBranch(ctx, repo.ID, branchName, doer.ID); err != nil {
+				return err
+			}
 		}
 
 		return gitRepo.DeleteBranch(branchName, git.DeleteBranchOptions{

From c400f26e6cf17358750d680d91e828827f91b179 Mon Sep 17 00:00:00 2001
From: yp05327 <576951401@qq.com>
Date: Wed, 25 Sep 2024 03:34:08 +0900
Subject: [PATCH 8/9] Fix wrong status of `Set up Job` when first step is
 skipped (#32120)

Fix #32089

(cherry picked from commit 6fa962f409c84477a7a4cf35b4a38a4a93fc3224)
---
 modules/actions/task_state.go      | 51 ++++++++++++++++++------------
 modules/actions/task_state_test.go | 19 +++++++++++
 2 files changed, 49 insertions(+), 21 deletions(-)

diff --git a/modules/actions/task_state.go b/modules/actions/task_state.go
index 31a74be3fd..1f36e021a5 100644
--- a/modules/actions/task_state.go
+++ b/modules/actions/task_state.go
@@ -18,8 +18,32 @@ func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
 		return fullStepsOfEmptySteps(task)
 	}
 
-	firstStep := task.Steps[0]
+	// firstStep is the first step that has run or running, not include preStep.
+	// For example,
+	// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): firstStep is step1.
+	// 2. preStep(Success) -> step1(Skipped) -> step2(Success) -> postStep(Success): firstStep is step2.
+	// 3. preStep(Success) -> step1(Running) -> step2(Waiting) -> postStep(Waiting): firstStep is step1.
+	// 4. preStep(Success) -> step1(Skipped) -> step2(Skipped) -> postStep(Skipped): firstStep is nil.
+	// 5. preStep(Success) -> step1(Cancelled) -> step2(Cancelled) -> postStep(Cancelled): firstStep is nil.
+	var firstStep *actions_model.ActionTaskStep
+	// lastHasRunStep is the last step that has run.
+	// For example,
+	// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): lastHasRunStep is step1.
+	// 2. preStep(Success) -> step1(Success) -> step2(Success) -> step3(Success) -> postStep(Success): lastHasRunStep is step3.
+	// 3. preStep(Success) -> step1(Success) -> step2(Failure) -> step3 -> postStep(Waiting): lastHasRunStep is step2.
+	// So its Stopped is the Started of postStep when there are no more steps to run.
+	var lastHasRunStep *actions_model.ActionTaskStep
+
 	var logIndex int64
+	for _, step := range task.Steps {
+		if firstStep == nil && (step.Status.HasRun() || step.Status.IsRunning()) {
+			firstStep = step
+		}
+		if step.Status.HasRun() {
+			lastHasRunStep = step
+		}
+		logIndex += step.LogLength
+	}
 
 	preStep := &actions_model.ActionTaskStep{
 		Name:      preStepName,
@@ -28,32 +52,17 @@ func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
 		Status:    actions_model.StatusRunning,
 	}
 
-	if firstStep.Status.HasRun() || firstStep.Status.IsRunning() {
+	// No step has run or is running, so preStep is equal to the task
+	if firstStep == nil {
+		preStep.Stopped = task.Stopped
+		preStep.Status = task.Status
+	} else {
 		preStep.LogLength = firstStep.LogIndex
 		preStep.Stopped = firstStep.Started
 		preStep.Status = actions_model.StatusSuccess
-	} else if task.Status.IsDone() {
-		preStep.Stopped = task.Stopped
-		preStep.Status = actions_model.StatusFailure
-		if task.Status.IsSkipped() {
-			preStep.Status = actions_model.StatusSkipped
-		}
 	}
 	logIndex += preStep.LogLength
 
-	// lastHasRunStep is the last step that has run.
-	// For example,
-	// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): lastHasRunStep is step1.
-	// 2. preStep(Success) -> step1(Success) -> step2(Success) -> step3(Success) -> postStep(Success): lastHasRunStep is step3.
-	// 3. preStep(Success) -> step1(Success) -> step2(Failure) -> step3 -> postStep(Waiting): lastHasRunStep is step2.
-	// So its Stopped is the Started of postStep when there are no more steps to run.
-	var lastHasRunStep *actions_model.ActionTaskStep
-	for _, step := range task.Steps {
-		if step.Status.HasRun() {
-			lastHasRunStep = step
-		}
-		logIndex += step.LogLength
-	}
 	if lastHasRunStep == nil {
 		lastHasRunStep = preStep
 	}
diff --git a/modules/actions/task_state_test.go b/modules/actions/task_state_test.go
index 28213d781b..ff0fd57195 100644
--- a/modules/actions/task_state_test.go
+++ b/modules/actions/task_state_test.go
@@ -137,6 +137,25 @@ func TestFullSteps(t *testing.T) {
 				{Name: postStepName, Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
 			},
 		},
+		{
+			name: "first step is skipped",
+			task: &actions_model.ActionTask{
+				Steps: []*actions_model.ActionTaskStep{
+					{Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
+					{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
+				},
+				Status:    actions_model.StatusSuccess,
+				Started:   10000,
+				Stopped:   10100,
+				LogLength: 100,
+			},
+			want: []*actions_model.ActionTaskStep{
+				{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 10, Started: 10000, Stopped: 10010},
+				{Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
+				{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
+				{Name: postStepName, Status: actions_model.StatusSuccess, LogIndex: 90, LogLength: 10, Started: 10090, Stopped: 10100},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {

From 3003195ad7d58df6d18d4216c2ecf9201cbd15eb Mon Sep 17 00:00:00 2001
From: Earl Warren <contact@earl-warren.org>
Date: Sun, 29 Sep 2024 11:58:17 +0200
Subject: [PATCH 9/9] chore(release-notes): weekly cherry-pick week
 2024-40-v9.0

---
 release-notes/5418.md | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 release-notes/5418.md

diff --git a/release-notes/5418.md b/release-notes/5418.md
new file mode 100644
index 0000000000..729f4a4f88
--- /dev/null
+++ b/release-notes/5418.md
@@ -0,0 +1,2 @@
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/0a0a3cea1b54d9cd7c95faf9318f6c3cdf1469a9) After migrating a repository that contains merged pull requests, the branch is missing and cannot be deleted.
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/14c7055494b995476d9d2ec1948784bf36dd9e4d) Forgejo Actions artifact v4 upload above 8MB.