Commit 18de83b2a3
## Changes

- Adds the following high-level access scopes, each with `read` and `write` levels:
  - `activitypub`
  - `admin` (hidden if user is not a site admin)
  - `misc`
  - `notification`
  - `organization`
  - `package`
  - `issue`
  - `repository`
  - `user`
- Adds new middleware function `tokenRequiresScopes()` in addition to `reqToken()`
  - `tokenRequiresScopes()` is used for each high-level api section
    - _if_ a scoped token is present, checks that the required scope is included based on the section and HTTP method
  - `reqToken()` is used for individual routes
    - checks that required authentication is present (but does not check scope levels, as this will already have been handled by `tokenRequiresScopes()`)
- Adds migration to convert old scoped access tokens to the new set of scopes
- Updates the user interface for scope selection

### User interface example

<img width="903" alt="Screen Shot 2023-05-31 at 1 56 55 PM" src="https://github.com/go-gitea/gitea/assets/23248839/654766ec-2143-4f59-9037-3b51600e32f3">
<img width="917" alt="Screen Shot 2023-05-31 at 1 56 43 PM" src="https://github.com/go-gitea/gitea/assets/23248839/1ad64081-012c-4a73-b393-66b30352654c">

## tokenRequiresScopes Design Decision

- `tokenRequiresScopes()` was added to more reliably cover api routes. For an incoming request, this function uses the given scope category (say `AccessTokenScopeCategoryOrganization`) and the HTTP method (say `DELETE`) and verifies that any scoped tokens in use include `delete:organization`.
- `reqToken()` is used to enforce auth for individual routes that require it. If a scoped token is not present for a request, `tokenRequiresScopes()` will not return an error.

## TODO

- [x] Alphabetize scope categories
- [x] Change 'public repos only' to a radio button (private vs public). Also expand this to organizations
- [x] Disable token creation if no scopes are selected. Alternatively, show a warning
- [x] `reqToken()` is missing from many `POST/DELETE` routes in the api. `tokenRequiresScopes()` only checks that a given token has the correct scope; `reqToken()` must be used to check that a token (or some other auth) is present.
  - _This should be addressed in this PR_
- [x] The migration should be reviewed very carefully in order to minimize access changes to existing user tokens.
  - _This should be addressed in this PR_
- [x] Link the api to the swagger documentation, clarify what read/write/delete levels correspond to
- [x] Review cases where more than one scope is needed, as this directly deviates from the api definition.
  - _This should be addressed in this PR_
  - For example:
    ```go
    m.Group("/users/{username}/orgs", func() {
        m.Get("", reqToken(), org.ListUserOrgs)
        m.Get("/{org}/permissions", reqToken(), org.GetUserOrgsPermissions)
    }, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser, auth_model.AccessTokenScopeCategoryOrganization), context_service.UserAssignmentAPI())
    ```

## Future improvements

- [ ] Add required scopes to the swagger documentation
- [ ] Redesign `reqToken()` to be opt-out rather than opt-in
- [ ] Subdivide scopes like `repository`
- [ ] Once a token is created, if it has no scopes, we should display text instead of an empty bullet point
- [ ] If the 'public repos only' option is selected, should read categories be selected by default?

Closes #24501
Closes #24799

Co-authored-by: Jonathan Tran <jon@allspice.io>
Co-authored-by: Kyle D <kdumontnu@gmail.com>
Co-authored-by: silverwind <me@silverwind.io>
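For illustration only, the check described above boils down to "the HTTP method selects the level, the scope category selects the name, and the token must carry the resulting scope". The sketch below is **not** the Gitea/Forgejo middleware: the helper names (`requiredScope`, `hasScope`), the method-to-level mapping (safe methods need `read`, everything else `write`), and the rule that `write:<category>` satisfies a `read:<category>` requirement are all assumptions made for this standalone example.

```go
package main

import (
    "fmt"
    "net/http"
    "strings"
)

// requiredScope is a hypothetical helper, not part of Gitea/Forgejo:
// safe HTTP methods map to the read level, everything else to write.
func requiredScope(method, category string) string {
    level := "write"
    switch method {
    case http.MethodGet, http.MethodHead, http.MethodOptions:
        level = "read"
    }
    return level + ":" + category
}

// hasScope reports whether a comma-separated scope string satisfies the
// required scope, assuming write:<category> also covers read:<category>.
func hasScope(tokenScopes, required string) bool {
    writeEquivalent := strings.Replace(required, "read:", "write:", 1)
    for _, s := range strings.Split(tokenScopes, ",") {
        s = strings.TrimSpace(s)
        if s == required || s == writeEquivalent {
            return true
        }
    }
    return false
}

func main() {
    token := "write:issue,read:organization"
    fmt.Println(hasScope(token, requiredScope(http.MethodGet, "organization")))    // true:  read:organization is present
    fmt.Println(hasScope(token, requiredScope(http.MethodDelete, "organization"))) // false: write:organization is missing
}
```

In the actual routes, `tokenRequiresScopes()` receives the scope categories for a whole api group (as in the `m.Group` example above), while `reqToken()` only enforces that some authentication is present on the individual route.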
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package integration

import (
    "context"
    "errors"
    "fmt"
    "net/url"
    "os"
    "path/filepath"
    "reflect"
    "strings"
    "testing"

    auth_model "code.gitea.io/gitea/models/auth"
    repo_model "code.gitea.io/gitea/models/repo"
    "code.gitea.io/gitea/models/unittest"
    user_model "code.gitea.io/gitea/models/user"
    base "code.gitea.io/gitea/modules/migration"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/structs"
    "code.gitea.io/gitea/modules/util"
    "code.gitea.io/gitea/services/migrations"

    "github.com/stretchr/testify/assert"
    "gopkg.in/yaml.v3"
)

func TestDumpRestore(t *testing.T) {
    onGiteaRun(t, func(t *testing.T, u *url.URL) {
        AllowLocalNetworks := setting.Migrations.AllowLocalNetworks
        setting.Migrations.AllowLocalNetworks = true
        AppVer := setting.AppVer
        // The Gitea SDK (go-sdk) needs to parse AppVer from the server response, so we must set it to a valid version string.
        setting.AppVer = "1.16.0"
        defer func() {
            setting.Migrations.AllowLocalNetworks = AllowLocalNetworks
            setting.AppVer = AppVer
        }()

        assert.NoError(t, migrations.Init())

        reponame := "repo1"

        basePath, err := os.MkdirTemp("", reponame)
        assert.NoError(t, err)
        defer util.RemoveAll(basePath)

        repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: reponame})
        repoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID})
        session := loginUser(t, repoOwner.Name)
        token := getTokenForLoggedInUser(t, session, auth_model.AccessTokenScopeWriteIssue, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeReadMisc)

        //
        // Phase 1: dump repo1 from the Gitea instance to the filesystem
        //

        ctx := context.Background()
        opts := migrations.MigrateOptions{
            GitServiceType: structs.GiteaService,
            Issues:         true,
            PullRequests:   true,
            Labels:         true,
            Milestones:     true,
            Comments:       true,
            AuthToken:      token,
            CloneAddr:      repo.CloneLink().HTTPS,
            RepoName:       reponame,
        }
        err = migrations.DumpRepository(ctx, basePath, repoOwner.Name, opts)
        assert.NoError(t, err)

        //
        // Verify desired side effects of the dump
        //
        d := filepath.Join(basePath, repo.OwnerName, repo.Name)
        for _, f := range []string{"repo.yml", "topic.yml", "label.yml", "milestone.yml", "issue.yml"} {
            assert.FileExists(t, filepath.Join(d, f))
        }

        //
        // Phase 2: restore from the filesystem to the Gitea instance in restoredrepo
        //

        newreponame := "restored"
        err = migrations.RestoreRepository(ctx, d, repo.OwnerName, newreponame, []string{
            "labels", "issues", "comments", "milestones", "pull_requests",
        }, false)
        assert.NoError(t, err)

        newrepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: newreponame})

        //
        // Phase 3: dump restored from the Gitea instance to the filesystem
        //
        opts.RepoName = newreponame
        opts.CloneAddr = newrepo.CloneLink().HTTPS
        err = migrations.DumpRepository(ctx, basePath, repoOwner.Name, opts)
        assert.NoError(t, err)

        //
        // Verify the dump of restored is the same as the dump of repo1
        //
        comparator := &compareDump{
            t:        t,
            basePath: basePath,
        }
        comparator.assertEquals(repo, newrepo)
    })
}

// compareDump loads the YAML files written by two dumps (before and after the
// restore) and asserts, via reflection, that they are equivalent field by field.
type compareDump struct {
    t          *testing.T
    basePath   string
    repoBefore *repo_model.Repository
    dirBefore  string
    repoAfter  *repo_model.Repository
    dirAfter   string
}

// compareField describes how a single struct field is compared:
// ignored, transformed before comparison, expected to differ, or compared recursively.
type compareField struct {
    before    interface{}
    after     interface{}
    ignore    bool
    transform func(string) string
    nested    *compareFields
}

type compareFields map[string]compareField

func (c *compareDump) replaceRepoName(original string) string {
    return strings.ReplaceAll(original, c.repoBefore.Name, c.repoAfter.Name)
}

func (c *compareDump) assertEquals(repoBefore, repoAfter *repo_model.Repository) {
    c.repoBefore = repoBefore
    c.dirBefore = filepath.Join(c.basePath, repoBefore.OwnerName, repoBefore.Name)
    c.repoAfter = repoAfter
    c.dirAfter = filepath.Join(c.basePath, repoAfter.OwnerName, repoAfter.Name)

    //
    // base.Repository
    //
    _ = c.assertEqual("repo.yml", base.Repository{}, compareFields{
        "Name": {
            before: c.repoBefore.Name,
            after:  c.repoAfter.Name,
        },
        "CloneURL":    {transform: c.replaceRepoName},
        "OriginalURL": {transform: c.replaceRepoName},
    })

    //
    // base.Label
    //
    labels, ok := c.assertEqual("label.yml", []base.Label{}, compareFields{}).([]*base.Label)
    assert.True(c.t, ok)
    assert.GreaterOrEqual(c.t, len(labels), 1)

    //
    // base.Milestone
    //
    milestones, ok := c.assertEqual("milestone.yml", []base.Milestone{}, compareFields{
        "Updated": {ignore: true}, // the database updates that field independently
    }).([]*base.Milestone)
    assert.True(c.t, ok)
    assert.GreaterOrEqual(c.t, len(milestones), 1)

    //
    // base.Issue and the associated comments
    //
    issues, ok := c.assertEqual("issue.yml", []base.Issue{}, compareFields{
        "Assignees": {ignore: true}, // not implemented yet
    }).([]*base.Issue)
    assert.True(c.t, ok)
    assert.GreaterOrEqual(c.t, len(issues), 1)
    for _, issue := range issues {
        filename := filepath.Join("comments", fmt.Sprintf("%d.yml", issue.Number))
        comments, ok := c.assertEqual(filename, []base.Comment{}, compareFields{
            "Index": {ignore: true},
        }).([]*base.Comment)
        assert.True(c.t, ok)
        for _, comment := range comments {
            assert.EqualValues(c.t, issue.Number, comment.IssueIndex)
        }
    }

    //
    // base.PullRequest and the associated comments
    //
    comparePullRequestBranch := &compareFields{
        "RepoName": {
            before: c.repoBefore.Name,
            after:  c.repoAfter.Name,
        },
        "CloneURL": {transform: c.replaceRepoName},
    }
    prs, ok := c.assertEqual("pull_request.yml", []base.PullRequest{}, compareFields{
        "Assignees": {ignore: true}, // not implemented yet
        "Head":      {nested: comparePullRequestBranch},
        "Base":      {nested: comparePullRequestBranch},
        "Labels":    {ignore: true}, // because org labels are not handled properly
    }).([]*base.PullRequest)
    assert.True(c.t, ok)
    assert.GreaterOrEqual(c.t, len(prs), 1)
    for _, pr := range prs {
        filename := filepath.Join("comments", fmt.Sprintf("%d.yml", pr.Number))
        comments, ok := c.assertEqual(filename, []base.Comment{}, compareFields{}).([]*base.Comment)
        assert.True(c.t, ok)
        for _, comment := range comments {
            assert.EqualValues(c.t, pr.Number, comment.IssueIndex)
        }
    }
}

func (c *compareDump) assertLoadYAMLFiles(beforeFilename, afterFilename string, before, after interface{}) {
    _, beforeErr := os.Stat(beforeFilename)
    _, afterErr := os.Stat(afterFilename)
    assert.EqualValues(c.t, errors.Is(beforeErr, os.ErrNotExist), errors.Is(afterErr, os.ErrNotExist))
    if errors.Is(beforeErr, os.ErrNotExist) {
        return
    }

    beforeBytes, err := os.ReadFile(beforeFilename)
    assert.NoError(c.t, err)
    assert.NoError(c.t, yaml.Unmarshal(beforeBytes, before))
    afterBytes, err := os.ReadFile(afterFilename)
    assert.NoError(c.t, err)
    assert.NoError(c.t, yaml.Unmarshal(afterBytes, after))
}

func (c *compareDump) assertLoadFiles(beforeFilename, afterFilename string, t reflect.Type) (before, after reflect.Value) {
    var beforePtr, afterPtr reflect.Value
    if t.Kind() == reflect.Slice {
        //
        // Given []Something{} create afterPtr, beforePtr []*Something{}
        //
        sliceType := reflect.SliceOf(reflect.PtrTo(t.Elem()))
        beforeSlice := reflect.MakeSlice(sliceType, 0, 10)
        beforePtr = reflect.New(beforeSlice.Type())
        beforePtr.Elem().Set(beforeSlice)
        afterSlice := reflect.MakeSlice(sliceType, 0, 10)
        afterPtr = reflect.New(afterSlice.Type())
        afterPtr.Elem().Set(afterSlice)
    } else {
        //
        // Given Something{} create afterPtr, beforePtr *Something{}
        //
        beforePtr = reflect.New(t)
        afterPtr = reflect.New(t)
    }
    c.assertLoadYAMLFiles(beforeFilename, afterFilename, beforePtr.Interface(), afterPtr.Interface())
    return beforePtr.Elem(), afterPtr.Elem()
}

// assertEqual loads filename from both dump directories into values of the given
// kind and compares them, honoring the per-field rules in fields.
func (c *compareDump) assertEqual(filename string, kind interface{}, fields compareFields) (i interface{}) {
    beforeFilename := filepath.Join(c.dirBefore, filename)
    afterFilename := filepath.Join(c.dirAfter, filename)

    typeOf := reflect.TypeOf(kind)
    before, after := c.assertLoadFiles(beforeFilename, afterFilename, typeOf)
    if typeOf.Kind() == reflect.Slice {
        i = c.assertEqualSlices(before, after, fields)
    } else {
        i = c.assertEqualValues(before, after, fields)
    }
    return i
}

func (c *compareDump) assertEqualSlices(before, after reflect.Value, fields compareFields) interface{} {
    assert.EqualValues(c.t, before.Len(), after.Len())
    if before.Len() == after.Len() {
        for i := 0; i < before.Len(); i++ {
            _ = c.assertEqualValues(
                reflect.Indirect(before.Index(i).Elem()),
                reflect.Indirect(after.Index(i).Elem()),
                fields)
        }
    }
    return after.Interface()
}

func (c *compareDump) assertEqualValues(before, after reflect.Value, fields compareFields) interface{} {
    for _, field := range reflect.VisibleFields(before.Type()) {
        bf := before.FieldByName(field.Name)
        bi := bf.Interface()
        af := after.FieldByName(field.Name)
        ai := af.Interface()
        if compare, ok := fields[field.Name]; ok {
            if compare.ignore {
                //
                // Ignore
                //
                continue
            }
            if compare.transform != nil {
                //
                // Transform these strings before comparing them
                //
                bs, ok := bi.(string)
                assert.True(c.t, ok)
                as, ok := ai.(string)
                assert.True(c.t, ok)
                assert.EqualValues(c.t, compare.transform(bs), compare.transform(as))
                continue
            }
            if compare.before != nil && compare.after != nil {
                //
                // The fields are expected to have different values
                //
                assert.EqualValues(c.t, compare.before, bi)
                assert.EqualValues(c.t, compare.after, ai)
                continue
            }
            if compare.nested != nil {
                //
                // The fields are a struct, recurse
                //
                c.assertEqualValues(bf, af, *compare.nested)
                continue
            }
        }
        assert.EqualValues(c.t, bi, ai)
    }
    return after.Interface()
}