2021-07-13 15:28:07 +02:00
// Copyright 2021 The Gitea Authors. All rights reserved.
2022-11-27 19:20:29 +01:00
// SPDX-License-Identifier: MIT
2021-07-13 15:28:07 +02:00
2021-12-10 09:14:24 +01:00
package asymkey
2021-07-13 15:28:07 +02:00
import (
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 14:37:34 +01:00
"context"
2021-07-13 15:28:07 +02:00
"fmt"
"hash"
"strings"
2021-09-24 13:32:56 +02:00
"code.gitea.io/gitea/models/db"
2021-12-10 02:27:50 +01:00
repo_model "code.gitea.io/gitea/models/repo"
2021-11-11 08:03:30 +01:00
user_model "code.gitea.io/gitea/models/user"
2021-07-13 15:28:07 +02:00
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"github.com/keybase/go-crypto/openpgp/packet"
)
// __________________ ________ ____ __.
// / _____/\______ \/ _____/ | |/ _|____ ___.__.
// / \ ___ | ___/ \ ___ | <_/ __ < | |
// \ \_\ \| | \ \_\ \ | | \ ___/\___ |
// \______ /|____| \______ / |____|__ \___ > ____|
// \/ \/ \/ \/\/
// _________ .__ __
// \_ ___ \ ____ _____ _____ |__|/ |_
// / \ \/ / _ \ / \ / \| \ __\
// \ \___( <_> ) Y Y \ Y Y \ || |
// \______ /\____/|__|_| /__|_| /__||__|
// \/ \/ \/
// ____ ____ .__ _____.__ __ .__
// \ \ / /___________|__|/ ____\__| ____ _____ _/ |_|__| ____ ____
// \ Y // __ \_ __ \ \ __\| |/ ___\\__ \\ __\ |/ _ \ / \
// \ /\ ___/| | \/ || | | \ \___ / __ \| | | ( <_> ) | \
// \___/ \___ >__| |__||__| |__|\___ >____ /__| |__|\____/|___| /
// \/ \/ \/ \/
// This file provides functions relating to commit signature verification.
// CommitVerification represents a commit validation of signature
type CommitVerification struct {
Verified bool
Warning bool
Reason string
2021-11-24 10:49:20 +01:00
SigningUser * user_model . User
CommittingUser * user_model . User
2021-07-13 15:28:07 +02:00
SigningEmail string
SigningKey * GPGKey
2021-12-19 06:37:18 +01:00
SigningSSHKey * PublicKey
2021-07-13 15:28:07 +02:00
TrustStatus string
}
// SignCommit represents a commit with validation of signature.
type SignCommit struct {
Verification * CommitVerification
2021-11-24 10:49:20 +01:00
* user_model . UserCommit
2021-07-13 15:28:07 +02:00
}
const (
	// BadSignature is the reason used when the signature's KeyID exists in the db
	// but none of the keys with that ID verify the signature — a suspicious failure.
	BadSignature = "gpg.error.probable_bad_signature"

	// BadDefaultSignature is the reason used when the signature's KeyID matches the
	// default key yet the default key does not verify it — a suspicious failure.
	BadDefaultSignature = "gpg.error.probable_bad_default_signature"

	// NoKeyFound is the reason used when no key could be found to verify the signature.
	NoKeyFound = "gpg.error.no_gpg_keys_found"
)
// ParseCommitsWithSignature checks if signaute of commits are corresponding to users gpg keys.
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 14:37:34 +01:00
func ParseCommitsWithSignature ( ctx context . Context , oldCommits [ ] * user_model . UserCommit , repoTrustModel repo_model . TrustModelType , isOwnerMemberCollaborator func ( * user_model . User ) ( bool , error ) ) [ ] * SignCommit {
2021-08-09 20:08:51 +02:00
newCommits := make ( [ ] * SignCommit , 0 , len ( oldCommits ) )
2021-07-13 15:28:07 +02:00
keyMap := map [ string ] bool { }
2021-08-09 20:08:51 +02:00
for _ , c := range oldCommits {
signCommit := & SignCommit {
UserCommit : c ,
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 14:37:34 +01:00
Verification : ParseCommitWithSignature ( ctx , c . Commit ) ,
2021-07-13 15:28:07 +02:00
}
2022-02-02 11:10:06 +01:00
_ = CalculateTrustStatus ( signCommit . Verification , repoTrustModel , isOwnerMemberCollaborator , & keyMap )
2021-07-13 15:28:07 +02:00
2021-08-09 20:08:51 +02:00
newCommits = append ( newCommits , signCommit )
2021-07-13 15:28:07 +02:00
}
return newCommits
}
// ParseCommitWithSignature check if signature is good against keystore.
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 14:37:34 +01:00
func ParseCommitWithSignature ( ctx context . Context , c * git . Commit ) * CommitVerification {
2021-11-24 10:49:20 +01:00
var committer * user_model . User
2021-07-13 15:28:07 +02:00
if c . Committer != nil {
var err error
// Find Committer account
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 14:37:34 +01:00
committer , err = user_model . GetUserByEmail ( ctx , c . Committer . Email ) // This finds the user by primary email or activated email so commit will not be valid if email is not
if err != nil { // Skipping not user for committer
2021-11-24 10:49:20 +01:00
committer = & user_model . User {
2021-07-13 15:28:07 +02:00
Name : c . Committer . Name ,
Email : c . Committer . Email ,
}
// We can expect this to often be an ErrUserNotExist. in the case
// it is not, however, it is important to log it.
2021-11-24 10:49:20 +01:00
if ! user_model . IsErrUserNotExist ( err ) {
2021-07-13 15:28:07 +02:00
log . Error ( "GetUserByEmail: %v" , err )
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.no_committer_account" ,
}
}
}
}
// If no signature just report the committer
if c . Signature == nil {
return & CommitVerification {
CommittingUser : committer ,
Verified : false , // Default value
Reason : "gpg.error.not_signed_commit" , // Default value
}
}
2021-12-19 06:37:18 +01:00
// If this a SSH signature handle it differently
if strings . HasPrefix ( c . Signature . Signature , "-----BEGIN SSH SIGNATURE-----" ) {
2023-09-14 19:09:32 +02:00
return ParseCommitWithSSHSignature ( ctx , c , committer )
2021-12-19 06:37:18 +01:00
}
2021-07-13 15:28:07 +02:00
// Parsing signature
sig , err := extractSignature ( c . Signature . Signature )
if err != nil { // Skipping failed to extract sign
log . Error ( "SignatureRead err: %v" , err )
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.extract_sign" ,
}
}
keyID := ""
if sig . IssuerKeyId != nil && ( * sig . IssuerKeyId ) != 0 {
keyID = fmt . Sprintf ( "%X" , * sig . IssuerKeyId )
}
if keyID == "" && sig . IssuerFingerprint != nil && len ( sig . IssuerFingerprint ) > 0 {
keyID = fmt . Sprintf ( "%X" , sig . IssuerFingerprint [ 12 : 20 ] )
}
defaultReason := NoKeyFound
// First check if the sig has a keyID and if so just look at that
if commitVerification := hashAndVerifyForKeyID (
2023-09-14 19:09:32 +02:00
ctx ,
2021-07-13 15:28:07 +02:00
sig ,
c . Signature . Payload ,
committer ,
keyID ,
setting . AppName ,
"" ) ; commitVerification != nil {
if commitVerification . Reason == BadSignature {
defaultReason = BadSignature
} else {
return commitVerification
}
}
// Now try to associate the signature with the committer, if present
if committer . ID != 0 {
2023-09-14 19:09:32 +02:00
keys , err := ListGPGKeys ( ctx , committer . ID , db . ListOptions { } )
2021-07-13 15:28:07 +02:00
if err != nil { // Skipping failed to get gpg keys of user
log . Error ( "ListGPGKeys: %v" , err )
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.failed_retrieval_gpg_keys" ,
}
}
2023-09-14 19:09:32 +02:00
committerEmailAddresses , _ := user_model . GetEmailAddresses ( ctx , committer . ID )
2021-07-13 15:28:07 +02:00
activated := false
for _ , e := range committerEmailAddresses {
if e . IsActivated && strings . EqualFold ( e . Email , c . Committer . Email ) {
activated = true
break
}
}
for _ , k := range keys {
// Pre-check (& optimization) that emails attached to key can be attached to the committer email and can validate
canValidate := false
email := ""
if k . Verified && activated {
canValidate = true
email = c . Committer . Email
}
if ! canValidate {
for _ , e := range k . Emails {
if e . IsActivated && strings . EqualFold ( e . Email , c . Committer . Email ) {
canValidate = true
email = e . Email
break
}
}
}
if ! canValidate {
continue // Skip this key
}
commitVerification := hashAndVerifyWithSubKeysCommitVerification ( sig , c . Signature . Payload , k , committer , committer , email )
if commitVerification != nil {
return commitVerification
}
}
}
if setting . Repository . Signing . SigningKey != "" && setting . Repository . Signing . SigningKey != "default" && setting . Repository . Signing . SigningKey != "none" {
// OK we should try the default key
gpgSettings := git . GPGSettings {
Sign : true ,
KeyID : setting . Repository . Signing . SigningKey ,
Name : setting . Repository . Signing . SigningName ,
Email : setting . Repository . Signing . SigningEmail ,
}
if err := gpgSettings . LoadPublicKeyContent ( ) ; err != nil {
log . Error ( "Error getting default signing key: %s %v" , gpgSettings . KeyID , err )
2023-09-14 19:09:32 +02:00
} else if commitVerification := verifyWithGPGSettings ( ctx , & gpgSettings , sig , c . Signature . Payload , committer , keyID ) ; commitVerification != nil {
2021-07-13 15:28:07 +02:00
if commitVerification . Reason == BadSignature {
defaultReason = BadSignature
} else {
return commitVerification
}
}
}
defaultGPGSettings , err := c . GetRepositoryDefaultPublicGPGKey ( false )
if err != nil {
log . Error ( "Error getting default public gpg key: %v" , err )
} else if defaultGPGSettings == nil {
log . Warn ( "Unable to get defaultGPGSettings for unattached commit: %s" , c . ID . String ( ) )
} else if defaultGPGSettings . Sign {
2023-09-14 19:09:32 +02:00
if commitVerification := verifyWithGPGSettings ( ctx , defaultGPGSettings , sig , c . Signature . Payload , committer , keyID ) ; commitVerification != nil {
2021-07-13 15:28:07 +02:00
if commitVerification . Reason == BadSignature {
defaultReason = BadSignature
} else {
return commitVerification
}
}
}
return & CommitVerification { // Default at this stage
CommittingUser : committer ,
Verified : false ,
Warning : defaultReason != NoKeyFound ,
Reason : defaultReason ,
SigningKey : & GPGKey {
KeyID : keyID ,
} ,
}
}
2023-09-14 19:09:32 +02:00
func verifyWithGPGSettings ( ctx context . Context , gpgSettings * git . GPGSettings , sig * packet . Signature , payload string , committer * user_model . User , keyID string ) * CommitVerification {
2021-07-13 15:28:07 +02:00
// First try to find the key in the db
2023-09-14 19:09:32 +02:00
if commitVerification := hashAndVerifyForKeyID ( ctx , sig , payload , committer , gpgSettings . KeyID , gpgSettings . Name , gpgSettings . Email ) ; commitVerification != nil {
2021-07-13 15:28:07 +02:00
return commitVerification
}
// Otherwise we have to parse the key
ekeys , err := checkArmoredGPGKeyString ( gpgSettings . PublicKeyContent )
if err != nil {
log . Error ( "Unable to get default signing key: %v" , err )
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.generate_hash" ,
}
}
for _ , ekey := range ekeys {
pubkey := ekey . PrimaryKey
content , err := base64EncPubKey ( pubkey )
if err != nil {
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.generate_hash" ,
}
}
k := & GPGKey {
Content : content ,
CanSign : pubkey . CanSign ( ) ,
KeyID : pubkey . KeyIdString ( ) ,
}
for _ , subKey := range ekey . Subkeys {
content , err := base64EncPubKey ( subKey . PublicKey )
if err != nil {
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.generate_hash" ,
}
}
k . SubsKey = append ( k . SubsKey , & GPGKey {
Content : content ,
CanSign : subKey . PublicKey . CanSign ( ) ,
KeyID : subKey . PublicKey . KeyIdString ( ) ,
} )
}
2021-11-24 10:49:20 +01:00
if commitVerification := hashAndVerifyWithSubKeysCommitVerification ( sig , payload , k , committer , & user_model . User {
2021-07-13 15:28:07 +02:00
Name : gpgSettings . Name ,
Email : gpgSettings . Email ,
} , gpgSettings . Email ) ; commitVerification != nil {
return commitVerification
}
if keyID == k . KeyID {
// This is a bad situation ... We have a key id that matches our default key but the signature doesn't match.
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Warning : true ,
Reason : BadSignature ,
}
}
}
return nil
}
// verifySign checks signature s over digest h using key k. It returns an error
// when the key cannot sign, cannot be decoded, or does not match the signature.
func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
	// A key without signing capability can never produce a valid signature.
	if !k.CanSign {
		return fmt.Errorf("key can not sign")
	}
	pub, err := base64DecPubKey(k.Content)
	if err != nil {
		return err
	}
	return pub.VerifySignature(h, s)
}
// hashAndVerify hashes payload with the digest algorithm recorded in sig and
// checks it against key k. It returns (k, nil) on a match, (nil, nil) when the
// key does not match, and (nil, err) only when the hash could not be generated.
func hashAndVerify(sig *packet.Signature, payload string, k *GPGKey) (*GPGKey, error) {
	digest, err := populateHash(sig.Hash, []byte(payload))
	if err != nil { // Cannot even build the digest — propagate.
		log.Error("PopulateHash: %v", err)
		return nil, err
	}
	// Verification failures are swallowed on purpose: a non-matching key is
	// simply "not verified", not an error to bubble up.
	if verifySign(sig, digest, k) != nil {
		return nil, nil
	}
	return k, nil
}
// hashAndVerifyWithSubKeys tries to verify sig over payload with k itself and,
// failing that, with each of k's subkeys in turn. Semantics match hashAndVerify.
func hashAndVerifyWithSubKeys(sig *packet.Signature, payload string, k *GPGKey) (*GPGKey, error) {
	candidates := append([]*GPGKey{k}, k.SubsKey...)
	for _, candidate := range candidates {
		verified, err := hashAndVerify(sig, payload, candidate)
		if err != nil || verified != nil {
			return verified, err
		}
	}
	return nil, nil
}
2021-11-24 10:49:20 +01:00
func hashAndVerifyWithSubKeysCommitVerification ( sig * packet . Signature , payload string , k * GPGKey , committer , signer * user_model . User , email string ) * CommitVerification {
2021-07-13 15:28:07 +02:00
key , err := hashAndVerifyWithSubKeys ( sig , payload , k )
if err != nil { // Skipping failed to generate hash
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.generate_hash" ,
}
}
if key != nil {
return & CommitVerification { // Everything is ok
CommittingUser : committer ,
Verified : true ,
Reason : fmt . Sprintf ( "%s / %s" , signer . Name , key . KeyID ) ,
SigningUser : signer ,
SigningKey : key ,
SigningEmail : email ,
}
}
return nil
}
2023-09-14 19:09:32 +02:00
func hashAndVerifyForKeyID ( ctx context . Context , sig * packet . Signature , payload string , committer * user_model . User , keyID , name , email string ) * CommitVerification {
2021-07-13 15:28:07 +02:00
if keyID == "" {
return nil
}
keys , err := GetGPGKeysByKeyID ( keyID )
if err != nil {
log . Error ( "GetGPGKeysByKeyID: %v" , err )
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.failed_retrieval_gpg_keys" ,
}
}
if len ( keys ) == 0 {
return nil
}
for _ , key := range keys {
var primaryKeys [ ] * GPGKey
if key . PrimaryKeyID != "" {
primaryKeys , err = GetGPGKeysByKeyID ( key . PrimaryKeyID )
if err != nil {
log . Error ( "GetGPGKeysByKeyID: %v" , err )
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.failed_retrieval_gpg_keys" ,
}
}
}
2023-09-14 19:09:32 +02:00
activated , email := checkKeyEmails ( ctx , email , append ( [ ] * GPGKey { key } , primaryKeys ... ) ... )
2021-07-13 15:28:07 +02:00
if ! activated {
continue
}
2021-11-24 10:49:20 +01:00
signer := & user_model . User {
2021-07-13 15:28:07 +02:00
Name : name ,
Email : email ,
}
if key . OwnerID != 0 {
2023-09-14 19:09:32 +02:00
owner , err := user_model . GetUserByID ( ctx , key . OwnerID )
2021-07-13 15:28:07 +02:00
if err == nil {
signer = owner
2021-11-24 10:49:20 +01:00
} else if ! user_model . IsErrUserNotExist ( err ) {
log . Error ( "Failed to user_model.GetUserByID: %d for key ID: %d (%s) %v" , key . OwnerID , key . ID , key . KeyID , err )
2021-07-13 15:28:07 +02:00
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Reason : "gpg.error.no_committer_account" ,
}
}
}
commitVerification := hashAndVerifyWithSubKeysCommitVerification ( sig , payload , key , committer , signer , email )
if commitVerification != nil {
return commitVerification
}
}
// This is a bad situation ... We have a key id that is in our database but the signature doesn't match.
return & CommitVerification {
CommittingUser : committer ,
Verified : false ,
Warning : true ,
Reason : BadSignature ,
}
}
// CalculateTrustStatus will calculate the TrustStatus for a commit verification within a repository
2021-12-10 09:14:24 +01:00
// There are several trust models in Gitea
2023-07-07 07:31:56 +02:00
func CalculateTrustStatus ( verification * CommitVerification , repoTrustModel repo_model . TrustModelType , isOwnerMemberCollaborator func ( * user_model . User ) ( bool , error ) , keyMap * map [ string ] bool ) error {
2021-07-13 15:28:07 +02:00
if ! verification . Verified {
2023-07-07 07:31:56 +02:00
return nil
2021-07-13 15:28:07 +02:00
}
// In the Committer trust model a signature is trusted if it matches the committer
// - it doesn't matter if they're a collaborator, the owner, Gitea or Github
// NB: This model is commit verification only
2021-12-10 09:14:24 +01:00
if repoTrustModel == repo_model . CommitterTrustModel {
2021-07-13 15:28:07 +02:00
// default to "unmatched"
verification . TrustStatus = "unmatched"
// We can only verify against users in our database but the default key will match
// against by email if it is not in the db.
if ( verification . SigningUser . ID != 0 &&
verification . CommittingUser . ID == verification . SigningUser . ID ) ||
( verification . SigningUser . ID == 0 && verification . CommittingUser . ID == 0 &&
verification . SigningUser . Email == verification . CommittingUser . Email ) {
verification . TrustStatus = "trusted"
}
2023-07-07 07:31:56 +02:00
return nil
2021-07-13 15:28:07 +02:00
}
// Now we drop to the more nuanced trust models...
verification . TrustStatus = "trusted"
if verification . SigningUser . ID == 0 {
// This commit is signed by the default key - but this key is not assigned to a user in the DB.
2021-12-10 02:27:50 +01:00
// However in the repo_model.CollaboratorCommitterTrustModel we cannot mark this as trusted
2021-07-13 15:28:07 +02:00
// unless the default key matches the email of a non-user.
2021-12-10 09:14:24 +01:00
if repoTrustModel == repo_model . CollaboratorCommitterTrustModel && ( verification . CommittingUser . ID != 0 ||
2021-07-13 15:28:07 +02:00
verification . SigningUser . Email != verification . CommittingUser . Email ) {
verification . TrustStatus = "untrusted"
}
2023-07-07 07:31:56 +02:00
return nil
2021-07-13 15:28:07 +02:00
}
2021-12-19 06:37:18 +01:00
// Check we actually have a GPG SigningKey
2023-07-07 07:31:56 +02:00
var err error
2021-12-19 06:37:18 +01:00
if verification . SigningKey != nil {
var isMember bool
if keyMap != nil {
var has bool
isMember , has = ( * keyMap ) [ verification . SigningKey . KeyID ]
if ! has {
2022-02-02 11:10:06 +01:00
isMember , err = isOwnerMemberCollaborator ( verification . SigningUser )
2021-12-19 06:37:18 +01:00
( * keyMap ) [ verification . SigningKey . KeyID ] = isMember
}
} else {
2022-02-02 11:10:06 +01:00
isMember , err = isOwnerMemberCollaborator ( verification . SigningUser )
2021-07-13 15:28:07 +02:00
}
2021-12-19 06:37:18 +01:00
if ! isMember {
verification . TrustStatus = "untrusted"
if verification . CommittingUser . ID != verification . SigningUser . ID {
// The committing user and the signing user are not the same
// This should be marked as questionable unless the signing user is a collaborator/team member etc.
verification . TrustStatus = "unmatched"
}
} else if repoTrustModel == repo_model . CollaboratorCommitterTrustModel && verification . CommittingUser . ID != verification . SigningUser . ID {
// The committing user and the signing user are not the same and our trustmodel states that they must match
2021-07-13 15:28:07 +02:00
verification . TrustStatus = "unmatched"
}
}
2022-06-20 12:02:49 +02:00
return err
2021-07-13 15:28:07 +02:00
}