Run gopls modernize on codebase (#34751)
Recent modernize fixes: https://github.com/golang/tools/commits/master/gopls/internal/analysis/modernize
parent 71e4740946
commit 1f35435b81
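For reference, gopls modernize mechanically rewrites pre-modern Go idioms into their newer standard-library equivalents. A minimal illustrative sketch of the patterns this commit touches (not code from the diff itself):

    package main

    import (
        "fmt"
        "slices"
        "strings"
    )

    func main() {
        // three-clause count loop -> range over int (Go 1.22)
        for i := range 3 {
            fmt.Println(i)
        }
        // hand-written membership loop -> slices.Contains (Go 1.21)
        fmt.Println(slices.Contains([]string{"a", "b"}, "b")) // true
        // clamp-with-if -> min/max builtins (Go 1.21)
        fmt.Println(min(5, 3)) // 3
        // strings.Split + range over slice -> strings.SplitSeq iterator (Go 1.24)
        for part := range strings.SplitSeq("x,y,z", ",") {
            fmt.Println(part)
        }
    }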
@@ -156,8 +156,8 @@ func runCert(_ context.Context, c *cli.Command) error {
         BasicConstraintsValid: true,
     }
 
-    hosts := strings.Split(c.String("host"), ",")
-    for _, h := range hosts {
+    hosts := strings.SplitSeq(c.String("host"), ",")
+    for h := range hosts {
         if ip := net.ParseIP(h); ip != nil {
             template.IPAddresses = append(template.IPAddresses, ip)
         } else {
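For context (an illustrative sketch, not part of the commit): strings.SplitSeq, added in Go 1.24, returns an iterator (iter.Seq[string]) rather than a []string, so the rewritten loop ranges over a single value and no intermediate slice is allocated.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Yields each substring lazily; no slice is built.
        for h := range strings.SplitSeq("gitea.example.com,10.0.0.1", ",") { // hypothetical host list
            fmt.Println(h)
        }
    }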
@@ -137,8 +137,8 @@ func runDumpRepository(ctx context.Context, cmd *cli.Command) error {
         opts.PullRequests = true
         opts.ReleaseAssets = true
     } else {
-        units := strings.Split(cmd.String("units"), ",")
-        for _, unit := range units {
+        units := strings.SplitSeq(cmd.String("units"), ",")
+        for unit := range units {
             switch strings.ToLower(strings.TrimSpace(unit)) {
             case "":
                 continue
@@ -480,7 +480,7 @@ func hookPrintResult(output, isCreate bool, branch, url string) {
 func pushOptions() map[string]string {
     opts := make(map[string]string)
     if pushCount, err := strconv.Atoi(os.Getenv(private.GitPushOptionCount)); err == nil {
-        for idx := 0; idx < pushCount; idx++ {
+        for idx := range pushCount {
             opt := os.Getenv(fmt.Sprintf("GIT_PUSH_OPTION_%d", idx))
             kv := strings.SplitN(opt, "=", 2)
             if len(kv) == 2 {
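The rewritten loop relies on Go 1.22 range-over-int: for idx := range pushCount iterates idx = 0 .. pushCount-1, exactly like the three-clause loop it replaces. A sketch with a hypothetical count:

    package main

    import "fmt"

    func main() {
        pushCount := 3 // hypothetical value, as if read from GIT_PUSH_OPTION_COUNT
        for idx := range pushCount {
            fmt.Printf("GIT_PUSH_OPTION_%d\n", idx)
        }
    }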
@@ -732,7 +732,7 @@ func readPktLine(ctx context.Context, in *bufio.Reader, requestType pktLineType)
 
     // read prefix
     lengthBytes := make([]byte, 4)
-    for i := 0; i < 4; i++ {
+    for i := range 4 {
         lengthBytes[i], err = in.ReadByte()
         if err != nil {
             return nil, fail(ctx, "Protocol: stdin error", "Pkt-Line: read stdin failed : %v", err)
@@ -337,8 +337,8 @@ func determineRemote(ctx context.Context, forkUser string) (string, string, erro
         fmt.Fprintf(os.Stderr, "Unable to list git remotes:\n%s\n", string(out))
         return "", "", fmt.Errorf("unable to determine forked remote: %w", err)
     }
-    lines := strings.Split(string(out), "\n")
-    for _, line := range lines {
+    lines := strings.SplitSeq(string(out), "\n")
+    for line := range lines {
         fields := strings.Split(line, "\t")
         name, remote := fields[0], fields[1]
         // only look at pushers
@@ -356,12 +356,12 @@ func determineRemote(ctx context.Context, forkUser string) (string, string, erro
         if !strings.Contains(remote, forkUser) {
             continue
         }
-        if strings.HasPrefix(remote, "git@github.com:") {
-            forkUser = strings.TrimPrefix(remote, "git@github.com:")
-        } else if strings.HasPrefix(remote, "https://github.com/") {
-            forkUser = strings.TrimPrefix(remote, "https://github.com/")
-        } else if strings.HasPrefix(remote, "https://www.github.com/") {
-            forkUser = strings.TrimPrefix(remote, "https://www.github.com/")
+        if after, ok := strings.CutPrefix(remote, "git@github.com:"); ok {
+            forkUser = after
+        } else if after, ok := strings.CutPrefix(remote, "https://github.com/"); ok {
+            forkUser = after
+        } else if after, ok := strings.CutPrefix(remote, "https://www.github.com/"); ok {
+            forkUser = after
         } else if forkUser == "" {
             return "", "", fmt.Errorf("unable to extract forkUser from remote %s: %s", name, remote)
         }
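strings.CutPrefix (Go 1.20) merges the HasPrefix test and the TrimPrefix call into a single operation that also reports whether the prefix was present, so the string is inspected once. A sketch with a made-up remote URL:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        remote := "git@github.com:someuser/repo.git" // hypothetical remote
        if after, ok := strings.CutPrefix(remote, "git@github.com:"); ok {
            fmt.Println(after) // someuser/repo.git
        }
    }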
@@ -4,6 +4,8 @@
 package actions
 
 import (
+    "slices"
+
     "code.gitea.io/gitea/modules/translation"
 
     runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
@@ -88,12 +90,7 @@ func (s Status) IsBlocked() bool {
 
 // In returns whether s is one of the given statuses
 func (s Status) In(statuses ...Status) bool {
-    for _, v := range statuses {
-        if s == v {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(statuses, s)
 }
 
 func (s Status) AsResult() runnerv1.Result {
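slices.Contains (Go 1.21) is generic over any comparable element type, which is why the hand-rolled loops over Status values and strings alike collapse to one call. A sketch assuming an integer-backed Status type with made-up constants:

    package main

    import (
        "fmt"
        "slices"
    )

    type Status int

    const (
        StatusSuccess Status = iota // hypothetical values for illustration
        StatusFailure
    )

    func (s Status) In(statuses ...Status) bool {
        return slices.Contains(statuses, s)
    }

    func main() {
        fmt.Println(StatusFailure.In(StatusSuccess, StatusFailure)) // true
    }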
@@ -9,6 +9,7 @@ import (
     "fmt"
     "net/url"
     "path"
+    "slices"
     "strconv"
     "strings"
     "time"
@@ -125,12 +126,7 @@ func (at ActionType) String() string {
 }
 
 func (at ActionType) InActions(actions ...string) bool {
-    for _, action := range actions {
-        if action == at.String() {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(actions, at.String())
 }
 
 // Action represents user operation type and other information to
@@ -208,10 +208,7 @@ func (nl NotificationList) LoadRepos(ctx context.Context) (repo_model.Repository
     repos := make(map[int64]*repo_model.Repository, len(repoIDs))
     left := len(repoIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", repoIDs[:limit]).
             Rows(new(repo_model.Repository))
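The min builtin (Go 1.21) replaces the clamp-with-if pattern that recurs throughout these chunked loaders: the chunk size is left, capped at db.DefaultMaxInSize. Sketch with a hypothetical cap:

    package main

    import "fmt"

    const defaultMaxInSize = 1000 // hypothetical stand-in for db.DefaultMaxInSize

    func main() {
        for _, left := range []int{2500, 400} {
            limit := min(left, defaultMaxInSize) // old form: if left < limit { limit = left }
            fmt.Println(limit)                   // 1000, then 400
        }
    }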
@@ -282,10 +279,7 @@ func (nl NotificationList) LoadIssues(ctx context.Context) ([]int, error) {
     issues := make(map[int64]*issues_model.Issue, len(issueIDs))
     left := len(issueIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", issueIDs[:limit]).
             Rows(new(issues_model.Issue))
@@ -377,10 +371,7 @@ func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) {
     users := make(map[int64]*user_model.User, len(userIDs))
     left := len(userIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", userIDs[:limit]).
             Rows(new(user_model.User))
@@ -428,10 +419,7 @@ func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) {
     comments := make(map[int64]*issues_model.Comment, len(commentIDs))
     left := len(commentIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", commentIDs[:limit]).
             Rows(new(issues_model.Comment))
@@ -139,10 +139,7 @@ func GetActivityStatsTopAuthors(ctx context.Context, repo *repo_model.Repository
         return v[i].Commits > v[j].Commits
     })
 
-    cnt := count
-    if cnt > len(v) {
-        cnt = len(v)
-    }
+    cnt := min(count, len(v))
 
     return v[:cnt], nil
 }
@@ -213,12 +213,7 @@ func GetRequiredScopes(level AccessTokenScopeLevel, scopeCategories ...AccessTok
 
 // ContainsCategory checks if a list of categories contains a specific category
 func ContainsCategory(categories []AccessTokenScopeCategory, category AccessTokenScopeCategory) bool {
-    for _, c := range categories {
-        if c == category {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(categories, category)
 }
 
 // GetScopeLevelFromAccessMode converts permission access mode to scope level
@@ -12,6 +12,7 @@ import (
     "fmt"
     "net"
     "net/url"
+    "slices"
     "strings"
 
     "code.gitea.io/gitea/models/db"
@@ -511,12 +512,7 @@ func (grant *OAuth2Grant) IncreaseCounter(ctx context.Context) error {
 
 // ScopeContains returns true if the grant scope contains the specified scope
 func (grant *OAuth2Grant) ScopeContains(scope string) bool {
-    for _, currentScope := range strings.Split(grant.Scope, " ") {
-        if scope == currentScope {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(strings.Split(grant.Scope, " "), scope)
 }
 
 // SetNonce updates the current nonce value of a grant
@@ -67,7 +67,7 @@ func contextSafetyCheck(e Engine) {
     _ = e.SQL("SELECT 1").Iterate(&m{}, func(int, any) error {
         callers := make([]uintptr, 32)
         callerNum := runtime.Callers(1, callers)
-        for i := 0; i < callerNum; i++ {
+        for i := range callerNum {
             if funcName := runtime.FuncForPC(callers[i]).Name(); funcName == "xorm.io/xorm.(*Session).Iterate" {
                 contextSafetyDeniedFuncPCs = append(contextSafetyDeniedFuncPCs, callers[i])
             }
@@ -82,7 +82,7 @@ func contextSafetyCheck(e Engine) {
     // it should be very fast: xxxx ns/op
     callers := make([]uintptr, 32)
     callerNum := runtime.Callers(3, callers) // skip 3: runtime.Callers, contextSafetyCheck, GetEngine
-    for i := 0; i < callerNum; i++ {
+    for i := range callerNum {
         if slices.Contains(contextSafetyDeniedFuncPCs, callers[i]) {
             panic(errors.New("using database context in an iterator would cause corrupted results"))
         }
@@ -5,6 +5,7 @@ package db
 
 import (
     "fmt"
+    "slices"
     "strings"
     "unicode/utf8"
 
@@ -80,10 +81,8 @@ func IsUsableName(reservedNames, reservedPatterns []string, name string) error {
         return util.NewInvalidArgumentErrorf("name is empty")
     }
 
-    for i := range reservedNames {
-        if name == reservedNames[i] {
-            return ErrNameReserved{name}
-        }
+    if slices.Contains(reservedNames, name) {
+        return ErrNameReserved{name}
     }
 
     for _, pat := range reservedPatterns {
@@ -46,10 +46,7 @@ func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err er
     blobPos := int(offset % f.blockSize)
     blobOffset := offset - int64(blobPos)
     blobRemaining := int(f.blockSize) - blobPos
-    needRead := len(p)
-    if needRead > blobRemaining {
-        needRead = blobRemaining
-    }
+    needRead := min(len(p), blobRemaining)
     if blobOffset+int64(blobPos)+int64(needRead) > fileMeta.FileSize {
         needRead = int(fileMeta.FileSize - blobOffset - int64(blobPos))
     }
@@ -66,14 +63,8 @@ func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err er
         blobData = nil
     }
 
-    canCopy := len(blobData) - blobPos
-    if canCopy <= 0 {
-        canCopy = 0
-    }
-    realRead := needRead
-    if realRead > canCopy {
-        realRead = canCopy
-    }
+    canCopy := max(len(blobData)-blobPos, 0)
+    realRead := min(needRead, canCopy)
     if realRead > 0 {
         copy(p[:realRead], fileData.BlobData[blobPos:blobPos+realRead])
     }
@@ -113,10 +104,7 @@ func (f *file) Write(p []byte) (n int, err error) {
     blobPos := int(f.offset % f.blockSize)
     blobOffset := f.offset - int64(blobPos)
     blobRemaining := int(f.blockSize) - blobPos
-    needWrite := len(p)
-    if needWrite > blobRemaining {
-        needWrite = blobRemaining
-    }
+    needWrite := min(len(p), blobRemaining)
     buf := make([]byte, f.blockSize)
     readBytes, err := f.readAt(fileMeta, blobOffset, buf)
     if err != nil && !errors.Is(err, io.EOF) {
@@ -246,7 +246,7 @@ func (protectBranch *ProtectedBranch) GetUnprotectedFilePatterns() []glob.Glob {
 
 func getFilePatterns(filePatterns string) []glob.Glob {
     extarr := make([]glob.Glob, 0, 10)
-    for _, expr := range strings.Split(strings.ToLower(filePatterns), ";") {
+    for expr := range strings.SplitSeq(strings.ToLower(filePatterns), ";") {
         expr = strings.TrimSpace(expr)
         if expr != "" {
             if g, err := glob.Compile(expr, '.', '/'); err != nil {
@@ -9,6 +9,7 @@ import (
     "context"
     "fmt"
     "html/template"
+    "slices"
     "strconv"
     "unicode/utf8"
 
@@ -196,12 +197,7 @@ func (t CommentType) HasMailReplySupport() bool {
 }
 
 func (t CommentType) CountedAsConversation() bool {
-    for _, ct := range ConversationCountedCommentType() {
-        if t == ct {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(ConversationCountedCommentType(), t)
 }
 
 // ConversationCountedCommentType returns the comment types that are counted as a conversation
@@ -614,7 +610,7 @@ func UpdateCommentAttachments(ctx context.Context, c *Comment, uuids []string) e
     if err != nil {
         return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
     }
-    for i := 0; i < len(attachments); i++ {
+    for i := range attachments {
         attachments[i].IssueID = c.IssueID
         attachments[i].CommentID = c.ID
         if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
@@ -57,10 +57,7 @@ func (comments CommentList) loadLabels(ctx context.Context) error {
     commentLabels := make(map[int64]*Label, len(labelIDs))
     left := len(labelIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", labelIDs[:limit]).
             Rows(new(Label))
@@ -107,10 +104,7 @@ func (comments CommentList) loadMilestones(ctx context.Context) error {
     milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
     left := len(milestoneIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         err := db.GetEngine(ctx).
             In("id", milestoneIDs[:limit]).
             Find(&milestoneMaps)
@@ -146,10 +140,7 @@ func (comments CommentList) loadOldMilestones(ctx context.Context) error {
     milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
     left := len(milestoneIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         err := db.GetEngine(ctx).
             In("id", milestoneIDs[:limit]).
             Find(&milestoneMaps)
@@ -184,10 +175,7 @@ func (comments CommentList) loadAssignees(ctx context.Context) error {
     assignees := make(map[int64]*user_model.User, len(assigneeIDs))
     left := len(assigneeIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", assigneeIDs[:limit]).
             Rows(new(user_model.User))
@@ -256,10 +244,7 @@ func (comments CommentList) LoadIssues(ctx context.Context) error {
     issues := make(map[int64]*Issue, len(issueIDs))
     left := len(issueIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("id", issueIDs[:limit]).
             Rows(new(Issue))
@@ -313,10 +298,7 @@ func (comments CommentList) loadDependentIssues(ctx context.Context) error {
     issues := make(map[int64]*Issue, len(issueIDs))
     left := len(issueIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := e.
             In("id", issueIDs[:limit]).
             Rows(new(Issue))
@@ -392,10 +374,7 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) {
     commentsIDs := comments.getAttachmentCommentIDs()
     left := len(commentsIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("comment_id", commentsIDs[:limit]).
             Rows(new(repo_model.Attachment))
@@ -42,10 +42,7 @@ func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.Reposi
     repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs))
     left := len(repoIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         err := db.GetEngine(ctx).
             In("id", repoIDs[:limit]).
             Find(&repoMaps)
@@ -116,10 +113,7 @@ func (issues IssueList) LoadLabels(ctx context.Context) error {
     issueIDs := issues.getIssueIDs()
     left := len(issueIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).Table("label").
             Join("LEFT", "issue_label", "issue_label.label_id = label.id").
             In("issue_label.issue_id", issueIDs[:limit]).
@@ -171,10 +165,7 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error {
     milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
     left := len(milestoneIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         err := db.GetEngine(ctx).
             In("id", milestoneIDs[:limit]).
             Find(&milestoneMaps)
@@ -203,10 +194,7 @@ func (issues IssueList) LoadProjects(ctx context.Context) error {
     }
 
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
 
         projects := make([]*projectWithIssueID, 0, limit)
         err := db.GetEngine(ctx).
@@ -245,10 +233,7 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error {
     issueIDs := issues.getIssueIDs()
     left := len(issueIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).Table("issue_assignees").
             Join("INNER", "`user`", "`user`.id = `issue_assignees`.assignee_id").
             In("`issue_assignees`.issue_id", issueIDs[:limit]).OrderBy(user_model.GetOrderByName()).
@@ -306,10 +291,7 @@ func (issues IssueList) LoadPullRequests(ctx context.Context) error {
     pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs))
     left := len(issuesIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("issue_id", issuesIDs[:limit]).
             Rows(new(PullRequest))
@@ -354,10 +336,7 @@ func (issues IssueList) LoadAttachments(ctx context.Context) (err error) {
     issuesIDs := issues.getIssueIDs()
     left := len(issuesIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).
             In("issue_id", issuesIDs[:limit]).
             Rows(new(repo_model.Attachment))
@@ -399,10 +378,7 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er
     issuesIDs := issues.getIssueIDs()
     left := len(issuesIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         rows, err := db.GetEngine(ctx).Table("comment").
             Join("INNER", "issue", "issue.id = comment.issue_id").
             In("issue.id", issuesIDs[:limit]).
@@ -466,10 +442,7 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) {
 
     left := len(ids)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
 
         // select issue_id, sum(time) from tracked_time where issue_id in (<issue ids in current page>) group by issue_id
         rows, err := db.GetEngine(ctx).Table("tracked_time").
@@ -73,8 +73,8 @@ func (o *IssuesOptions) Copy(edit ...func(options *IssuesOptions)) *IssuesOption
 // sortType string
 func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
     // Since this sortType is dynamically created, it has to be treated specially.
-    if strings.HasPrefix(sortType, ScopeSortPrefix) {
-        scope := strings.TrimPrefix(sortType, ScopeSortPrefix)
+    if after, ok := strings.CutPrefix(sortType, ScopeSortPrefix); ok {
+        scope := after
         sess.Join("LEFT", "issue_label", "issue.id = issue_label.issue_id")
         // "exclusive_order=0" means "no order is set", so exclude it from the JOIN criteria and then "LEFT JOIN" result is also null
         sess.Join("LEFT", "label", "label.id = issue_label.label_id AND label.exclusive_order <> 0 AND label.name LIKE ?", scope+"/%")
@@ -94,10 +94,7 @@ func GetIssueStats(ctx context.Context, opts *IssuesOptions) (*IssueStats, error
     // ids in a temporary table and join from them.
     accum := &IssueStats{}
     for i := 0; i < len(opts.IssueIDs); {
-        chunk := i + MaxQueryParameters
-        if chunk > len(opts.IssueIDs) {
-            chunk = len(opts.IssueIDs)
-        }
+        chunk := min(i+MaxQueryParameters, len(opts.IssueIDs))
         stats, err := getIssueStatsChunk(ctx, opts, opts.IssueIDs[i:chunk])
         if err != nil {
             return nil, err
@@ -5,6 +5,7 @@ package issues_test
 
 import (
     "fmt"
+    "slices"
     "sort"
     "sync"
     "testing"
@@ -270,7 +271,7 @@ func TestIssue_ResolveMentions(t *testing.T) {
         for i, user := range resolved {
             ids[i] = user.ID
         }
-        sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
+        slices.Sort(ids)
         assert.Equal(t, expected, ids)
     }
 
@@ -292,7 +293,7 @@ func TestResourceIndex(t *testing.T) {
     assert.NoError(t, unittest.PrepareTestDatabase())
 
     var wg sync.WaitGroup
-    for i := 0; i < 100; i++ {
+    for i := range 100 {
         wg.Add(1)
         go func(i int) {
             testInsertIssue(t, fmt.Sprintf("issue %d", i+1), "my issue", 0)
@@ -314,7 +315,7 @@ func TestCorrectIssueStats(t *testing.T) {
     issueAmount := issues_model.MaxQueryParameters + 10
 
     var wg sync.WaitGroup
-    for i := 0; i < issueAmount; i++ {
+    for i := range issueAmount {
         wg.Add(1)
         go func(i int) {
             testInsertIssue(t, fmt.Sprintf("Issue %d", i+1), "Bugs are nasty", 0)
|
@ -304,7 +304,7 @@ func UpdateIssueAttachments(ctx context.Context, issueID int64, uuids []string)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
|
return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
|
||||||
}
|
}
|
||||||
for i := 0; i < len(attachments); i++ {
|
for i := range attachments {
|
||||||
attachments[i].IssueID = issueID
|
attachments[i].IssueID = issueID
|
||||||
if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
|
if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
|
||||||
return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
|
return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
|
||||||
|
@@ -22,7 +22,7 @@ type ReviewList []*Review
 // LoadReviewers loads reviewers
 func (reviews ReviewList) LoadReviewers(ctx context.Context) error {
     reviewerIDs := make([]int64, len(reviews))
-    for i := 0; i < len(reviews); i++ {
+    for i := range reviews {
         reviewerIDs[i] = reviews[i].ReviewerID
     }
     reviewers, err := user_model.GetPossibleUserByIDs(ctx, reviewerIDs)
@@ -350,10 +350,7 @@ func GetIssueTotalTrackedTime(ctx context.Context, opts *IssuesOptions, isClosed
     // we get the statistics in smaller chunks and get accumulates
     var accum int64
     for i := 0; i < len(opts.IssueIDs); {
-        chunk := i + MaxQueryParameters
-        if chunk > len(opts.IssueIDs) {
-            chunk = len(opts.IssueIDs)
-        }
+        chunk := min(i+MaxQueryParameters, len(opts.IssueIDs))
         time, err := getIssueTotalTrackedTimeChunk(ctx, opts, isClosed, opts.IssueIDs[i:chunk])
         if err != nil {
             return 0, err
@@ -518,7 +518,7 @@ func ModifyColumn(x *xorm.Engine, tableName string, col *schemas.Column) error {
 
 func removeAllWithRetry(dir string) error {
     var err error
-    for i := 0; i < 20; i++ {
+    for range 20 {
         err = os.RemoveAll(dir)
         if err == nil {
             break
@@ -5,6 +5,7 @@ package v1_11 //nolint
 
 import (
     "fmt"
+    "slices"
 
     "xorm.io/xorm"
 )
@@ -344,10 +345,8 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
         }
         return AccessModeWrite <= perm.UnitsMode[UnitTypeCode], nil
     }
-    for _, id := range protectedBranch.ApprovalsWhitelistUserIDs {
-        if id == reviewer.ID {
-            return true, nil
-        }
+    if slices.Contains(protectedBranch.ApprovalsWhitelistUserIDs, reviewer.ID) {
+        return true, nil
     }
 
     // isUserInTeams
@@ -146,7 +146,7 @@ func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error)
         return "", fmt.Errorf("io.ReadAll: %w", err)
     }
 
-    newAvatar := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data)))))
+    newAvatar := fmt.Sprintf("%x", md5.Sum(fmt.Appendf(nil, "%d-%x", userID, md5.Sum(data))))
     if newAvatar == oldAvatar {
         return newAvatar, nil
     }
@@ -329,7 +329,7 @@ func ConvertScopedAccessTokens(x *xorm.Engine) error {
     for _, token := range tokens {
         var scopes []string
         allNewScopesMap := make(map[AccessTokenScope]bool)
-        for _, oldScope := range strings.Split(token.Scope, ",") {
+        for oldScope := range strings.SplitSeq(token.Scope, ",") {
             if newScopes, exists := accessTokenScopeMap[OldAccessTokenScope(oldScope)]; exists {
                 for _, newScope := range newScopes {
                     allNewScopesMap[newScope] = true
@@ -110,7 +110,7 @@ func Test_NewColumn(t *testing.T) {
     assert.NoError(t, err)
     assert.Len(t, columns, 3)
 
-    for i := 0; i < maxProjectColumns-3; i++ {
+    for i := range maxProjectColumns - 3 {
         err := NewColumn(db.DefaultContext, &Column{
             Title:     fmt.Sprintf("column-%d", i+4),
             ProjectID: project1.ID,
@@ -6,6 +6,7 @@ package pull
 import (
     "context"
     "fmt"
+    "maps"
 
     "code.gitea.io/gitea/models/db"
     "code.gitea.io/gitea/modules/log"
@@ -100,9 +101,7 @@ func mergeFiles(oldFiles, newFiles map[string]ViewedState) map[string]ViewedStat
         return oldFiles
     }
 
-    for file, viewed := range newFiles {
-        oldFiles[file] = viewed
-    }
+    maps.Copy(oldFiles, newFiles)
     return oldFiles
 }
 
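maps.Copy (Go 1.21) writes every key/value pair of the source map into the destination, overwriting existing keys, which is exactly what the removed loop did. A simplified sketch (bool stands in for ViewedState):

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        oldFiles := map[string]bool{"a.go": false, "b.go": true}
        newFiles := map[string]bool{"a.go": true}
        maps.Copy(oldFiles, newFiles) // destination first, source second
        fmt.Println(oldFiles)         // map[a.go:true b.go:true]
    }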
@@ -449,7 +449,7 @@ func SearchRepositoryCondition(opts SearchRepoOptions) builder.Cond {
     if opts.Keyword != "" {
         // separate keyword
         subQueryCond := builder.NewCond()
-        for _, v := range strings.Split(opts.Keyword, ",") {
+        for v := range strings.SplitSeq(opts.Keyword, ",") {
             if opts.TopicOnly {
                 subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)})
             } else {
@@ -464,7 +464,7 @@ func SearchRepositoryCondition(opts SearchRepoOptions) builder.Cond {
         keywordCond := builder.In("id", subQuery)
         if !opts.TopicOnly {
             likes := builder.NewCond()
-            for _, v := range strings.Split(opts.Keyword, ",") {
+            for v := range strings.SplitSeq(opts.Keyword, ",") {
                 likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)})
 
                 // If the string looks like "org/repo", match against that pattern too
@@ -185,10 +185,8 @@ func (cfg *ActionsConfig) IsWorkflowDisabled(file string) bool {
 }
 
 func (cfg *ActionsConfig) DisableWorkflow(file string) {
-    for _, workflow := range cfg.DisabledWorkflows {
-        if file == workflow {
-            return
-        }
+    if slices.Contains(cfg.DisabledWorkflows, file) {
+        return
     }
 
     cfg.DisabledWorkflows = append(cfg.DisabledWorkflows, file)
@@ -124,7 +124,7 @@ func DeleteUploads(ctx context.Context, uploads ...*Upload) (err error) {
     defer committer.Close()
 
     ids := make([]int64, len(uploads))
-    for i := 0; i < len(uploads); i++ {
+    for i := range uploads {
         ids[i] = uploads[i].ID
     }
     if err = db.DeleteByIDs[Upload](ctx, ids...); err != nil {
@@ -6,6 +6,7 @@ package unit
 import (
     "errors"
     "fmt"
+    "slices"
     "strings"
     "sync/atomic"
 
@@ -204,22 +205,12 @@ func LoadUnitConfig() error {
 
 // UnitGlobalDisabled checks if unit type is global disabled
 func (u Type) UnitGlobalDisabled() bool {
-    for _, ud := range DisabledRepoUnitsGet() {
-        if u == ud {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(DisabledRepoUnitsGet(), u)
 }
 
 // CanBeDefault checks if the unit type can be a default repo unit
 func (u *Type) CanBeDefault() bool {
-    for _, nadU := range NotAllowedDefaultRepoUnits {
-        if *u == nadU {
-            return false
-        }
-    }
-    return true
+    return !slices.Contains(NotAllowedDefaultRepoUnits, *u)
 }
 
 // Unit is a section of one repository
@@ -4,6 +4,7 @@
 package user_test
 
 import (
+    "slices"
     "testing"
 
     "code.gitea.io/gitea/models/db"
@@ -100,12 +101,7 @@ func TestListEmails(t *testing.T) {
     assert.Greater(t, count, int64(5))
 
     contains := func(match func(s *user_model.SearchEmailResult) bool) bool {
-        for _, v := range emails {
-            if match(v) {
-                return true
-            }
-        }
-        return false
+        return slices.ContainsFunc(emails, match)
     }
 
     assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 18 }))
@@ -17,10 +17,7 @@ func GetUsersMapByIDs(ctx context.Context, userIDs []int64) (map[int64]*User, er
 
     left := len(userIDs)
     for left > 0 {
-        limit := db.DefaultMaxInSize
-        if left < limit {
-            limit = left
-        }
+        limit := min(left, db.DefaultMaxInSize)
         err := db.GetEngine(ctx).
             In("id", userIDs[:limit]).
             Find(&userMaps)
@@ -204,9 +204,9 @@ func TestHashPasswordDeterministic(t *testing.T) {
     b := make([]byte, 16)
     u := &user_model.User{}
     algos := hash.RecommendedHashAlgorithms
-    for j := 0; j < len(algos); j++ {
+    for j := range algos {
         u.PasswdHashAlgo = algos[j]
-        for i := 0; i < 50; i++ {
+        for range 50 {
             // generate a random password
             rand.Read(b)
             pass := string(b)
@@ -240,7 +240,7 @@ func CreateWebhooks(ctx context.Context, ws []*Webhook) error {
     if len(ws) == 0 {
         return nil
     }
-    for i := 0; i < len(ws); i++ {
+    for i := range ws {
         ws[i].Type = strings.TrimSpace(ws[i].Type)
     }
     return db.Insert(ctx, ws)
@@ -6,6 +6,7 @@ package actions
 import (
     "bytes"
     "io"
+    "slices"
     "strings"
 
     "code.gitea.io/gitea/modules/git"
@@ -566,11 +567,8 @@ func matchPullRequestReviewEvent(prPayload *api.PullRequestPayload, evt *jobpars
 
             matched := false
             for _, val := range vals {
-                for _, action := range actions {
-                    if glob.MustCompile(val, '/').Match(action) {
-                        matched = true
-                        break
-                    }
+                if slices.ContainsFunc(actions, glob.MustCompile(val, '/').Match) {
+                    matched = true
                 }
                 if matched {
                     break
@@ -615,11 +613,8 @@ func matchPullRequestReviewCommentEvent(prPayload *api.PullRequestPayload, evt *
 
             matched := false
             for _, val := range vals {
-                for _, action := range actions {
-                    if glob.MustCompile(val, '/').Match(action) {
-                        matched = true
-                        break
-                    }
+                if slices.ContainsFunc(actions, glob.MustCompile(val, '/').Match) {
+                    matched = true
                 }
                 if matched {
                     break
@@ -101,7 +101,7 @@ func Generate(n int) (string, error) {
     buffer := make([]byte, n)
     maxInt := big.NewInt(int64(len(validChars)))
     for {
-        for j := 0; j < n; j++ {
+        for j := range n {
             rnd, err := rand.Int(rand.Reader, maxInt)
             if err != nil {
                 return "", err
@@ -50,7 +50,7 @@ func TestComplexity_Generate(t *testing.T) {
 
     test := func(t *testing.T, modes []string) {
         testComplextity(modes)
-        for i := 0; i < maxCount; i++ {
+        for range maxCount {
             pwd, err := Generate(pwdLen)
             assert.NoError(t, err)
             assert.Len(t, pwd, pwdLen)
@@ -101,7 +101,7 @@ func (c *Client) CheckPassword(pw string, padding bool) (int, error) {
     }
     defer resp.Body.Close()
 
-    for _, pair := range strings.Split(string(body), "\n") {
+    for pair := range strings.SplitSeq(string(body), "\n") {
         parts := strings.Split(pair, ":")
         if len(parts) != 2 {
             continue
@@ -24,8 +24,8 @@ func drawBlock(img *image.Paletted, x, y, size, angle int, points []int) {
         rotate(points, m, m, angle)
     }
 
-    for i := 0; i < size; i++ {
-        for j := 0; j < size; j++ {
+    for i := range size {
+        for j := range size {
             if pointInPolygon(i, j, points) {
                 img.SetColorIndex(x+i, y+j, 1)
             }
@@ -134,7 +134,7 @@ func drawBlocks(p *image.Paletted, size int, c, b1, b2 blockFunc, b1Angle, b2Ang
 
     // then we make it left-right mirror, so we didn't draw 3/6/9 before
     for x := 0; x < size/2; x++ {
-        for y := 0; y < size; y++ {
+        for y := range size {
             p.SetColorIndex(size-x, y, p.ColorIndexAt(x, y))
         }
     }
modules/cache/cache.go (vendored): 2 changes
@@ -24,7 +24,7 @@ func Init() error {
     if err != nil {
         return err
     }
-    for i := 0; i < 10; i++ {
+    for range 10 {
         if err = c.Ping(); err == nil {
             break
         }
@@ -164,7 +164,7 @@ func DetectEncoding(content []byte) (string, error) {
         }
         times := 1024 / len(content)
         detectContent = make([]byte, 0, times*len(content))
-        for i := 0; i < times; i++ {
+        for range times {
             detectContent = append(detectContent, content...)
         }
     } else {
@@ -242,7 +242,7 @@ func stringMustEndWith(t *testing.T, expected, value string) {
 func TestToUTF8WithFallbackReader(t *testing.T) {
     resetDefaultCharsetsOrder()
 
-    for testLen := 0; testLen < 2048; testLen++ {
+    for testLen := range 2048 {
         pattern := " test { () }\n"
         input := ""
         for len(input) < testLen {
@@ -277,8 +277,8 @@ func NewSearchCommitsOptions(searchString string, forAllRefs bool) SearchCommits
     var keywords, authors, committers []string
     var after, before string
 
-    fields := strings.Fields(searchString)
-    for _, k := range fields {
+    fields := strings.FieldsSeq(searchString)
+    for k := range fields {
         switch {
         case strings.HasPrefix(k, "author:"):
             authors = append(authors, strings.TrimPrefix(k, "author:"))
@@ -7,6 +7,7 @@ package git
 
 import (
     "context"
+    "maps"
     "path"
     "sort"
 
@@ -38,9 +39,7 @@ func (tes Entries) GetCommitsInfo(ctx context.Context, commit *Commit, treePath
                 return nil, nil, err
             }
 
-            for pth, found := range commits {
-                revs[pth] = found
-            }
+            maps.Copy(revs, commits)
         }
     } else {
         sort.Strings(entryPaths)
@@ -154,7 +154,7 @@ func TestCutDiffAroundLine(t *testing.T) {
 }
 
 func BenchmarkCutDiffAroundLine(b *testing.B) {
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         CutDiffAroundLine(strings.NewReader(exampleDiff), 3, true, 3)
     }
 }
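testing.B.Loop (Go 1.24) replaces the classic for n := 0; n < b.N; n++ benchmark loop; the framework controls the iteration count and keeps the body from being optimized away. A sketch with a hypothetical workload:

    package bench_test

    import (
        "strings"
        "testing"
    )

    func BenchmarkFields(b *testing.B) {
        for b.Loop() {
            strings.Fields("a b c") // hypothetical workload
        }
    }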
@@ -76,7 +76,7 @@ func (f Format) Parser(r io.Reader) *Parser {
 // would turn into "%0a%00".
 func (f Format) hexEscaped(delim []byte) string {
     escaped := ""
-    for i := 0; i < len(delim); i++ {
+    for i := range delim {
         escaped += "%" + hex.EncodeToString([]byte{delim[i]})
     }
     return escaped
@@ -8,6 +8,7 @@ import (
     "errors"
     "os"
     "path/filepath"
+    "slices"
     "strings"
 
     "code.gitea.io/gitea/modules/util"
@@ -25,12 +26,7 @@ var ErrNotValidHook = errors.New("not a valid Git hook")
 
 // IsValidHookName returns true if given name is a valid Git hook.
 func IsValidHookName(name string) bool {
-    for _, hn := range hookNames {
-        if hn == name {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(hookNames, name)
 }
 
 // Hook represents a Git hook.
@@ -13,7 +13,7 @@ import (
 )
 
 func getCacheKey(repoPath, commitID, entryPath string) string {
-    hashBytes := sha256.Sum256([]byte(fmt.Sprintf("%s:%s:%s", repoPath, commitID, entryPath)))
+    hashBytes := sha256.Sum256(fmt.Appendf(nil, "%s:%s:%s", repoPath, commitID, entryPath))
     return fmt.Sprintf("last_commit:%x", hashBytes)
 }
 
@@ -346,10 +346,7 @@ func WalkGitLog(ctx context.Context, repo *Repository, head *Commit, treepath st
 
     results := make([]string, len(paths))
     remaining := len(paths)
-    nextRestart := (len(paths) * 3) / 4
-    if nextRestart > 70 {
-        nextRestart = 70
-    }
+    nextRestart := min((len(paths)*3)/4, 70)
     lastEmptyParent := head.ID.String()
     commitSinceLastEmptyParent := uint64(0)
     commitSinceNextRestart := uint64(0)
@@ -109,8 +109,8 @@ func (ref RefName) IsFor() bool {
 }
 
 func (ref RefName) nameWithoutPrefix(prefix string) string {
-    if strings.HasPrefix(string(ref), prefix) {
-        return strings.TrimPrefix(string(ref), prefix)
+    if after, ok := strings.CutPrefix(string(ref), prefix); ok {
+        return after
     }
     return ""
 }
@@ -44,9 +44,9 @@ func (repo *Repository) parsePrettyFormatLogToList(logs []byte) ([]*Commit, erro
         return commits, nil
     }

-    parts := bytes.Split(logs, []byte{'\n'})
+    parts := bytes.SplitSeq(logs, []byte{'\n'})

-    for _, commitID := range parts {
+    for commitID := range parts {
         commit, err := repo.GetCommit(string(commitID))
         if err != nil {
             return nil, err
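`bytes.SplitSeq` and `strings.SplitSeq` (Go 1.24+) return an iterator instead of a materialized slice, so ranging over the pieces avoids allocating the backing `[]string`/`[][]byte`; note the loop header changes from `for _, x := range` to `for x := range`. A minimal sketch:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // SplitSeq yields each piece lazily; no slice of all pieces is built.
    for line := range strings.SplitSeq("a\nb\nc", "\n") {
        fmt.Println(line)
    }
}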
@@ -547,11 +547,11 @@ func (repo *Repository) GetCommitBranchStart(env []string, branch, endCommitID s
         return "", runErr
     }

-    parts := bytes.Split(bytes.TrimSpace(stdout), []byte{'\n'})
+    parts := bytes.SplitSeq(bytes.TrimSpace(stdout), []byte{'\n'})

     // check the commits one by one until we find a commit contained by another branch
     // and we think this commit is the divergence point
-    for _, commitID := range parts {
+    for commitID := range parts {
         branches, err := repo.getBranches(env, string(commitID), 2)
         if err != nil {
             return "", err

@@ -86,7 +86,7 @@ func (repo *Repository) LsFiles(filenames ...string) ([]string, error) {
         return nil, err
     }
     filelist := make([]string, 0, len(filenames))
-    for _, line := range bytes.Split(res, []byte{'\000'}) {
+    for line := range bytes.SplitSeq(res, []byte{'\000'}) {
         filelist = append(filelist, string(line))
     }

@@ -39,8 +39,8 @@ func (repo *Repository) GetTagNameBySHA(sha string) (string, error) {
         return "", err
     }

-    tagRefs := strings.Split(stdout, "\n")
-    for _, tagRef := range tagRefs {
+    tagRefs := strings.SplitSeq(stdout, "\n")
+    for tagRef := range tagRefs {
         if len(strings.TrimSpace(tagRef)) > 0 {
             fields := strings.Fields(tagRef)
             if strings.HasPrefix(fields[0], sha) && strings.HasPrefix(fields[1], TagPrefix) {

@@ -62,7 +62,7 @@ func (repo *Repository) GetTagID(name string) (string, error) {
         return "", err
     }
     // Make sure exact match is used: "v1" != "release/v1"
-    for _, line := range strings.Split(stdout, "\n") {
+    for line := range strings.SplitSeq(stdout, "\n") {
         fields := strings.Fields(line)
         if len(fields) == 2 && fields[1] == "refs/tags/"+name {
             return fields[0], nil

@@ -56,7 +56,7 @@ func (repo *Repository) LsTree(ref string, filenames ...string) ([]string, error
         return nil, err
     }
     filelist := make([]string, 0, len(filenames))
-    for _, line := range bytes.Split(res, []byte{'\000'}) {
+    for line := range bytes.SplitSeq(res, []byte{'\000'}) {
         filelist = append(filelist, string(line))
     }

@@ -78,7 +78,7 @@ func (te *TreeEntry) FollowLinks(optLimit ...int) (*TreeEntry, error) {
     }
     limit := util.OptionalArg(optLimit, 10)
     entry := te
-    for i := 0; i < limit; i++ {
+    for range limit {
         if !entry.IsLink() {
             break
         }
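When the loop index is unused, Go 1.22's range-over-int form `for range limit` replaces the counter loop outright, as in the `FollowLinks` hunk above. A sketch:

package main

import "fmt"

func main() {
    attempts := 0
    // Runs the body exactly 10 times; no loop variable is declared.
    for range 10 {
        attempts++
    }
    fmt.Println(attempts) // 10
}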
@@ -19,7 +19,7 @@ func TestSubTree_Issue29101(t *testing.T) {
     assert.NoError(t, err)

     // old code could produce a different error if called multiple times
-    for i := 0; i < 10; i++ {
+    for range 10 {
         _, err = commit.SubTree("file1.txt")
         assert.Error(t, err)
         assert.True(t, IsErrNotExist(err))

@@ -70,7 +70,7 @@ func testLockAndDo(t *testing.T) {
     count := 0
     wg := sync.WaitGroup{}
     wg.Add(concurrency)
-    for i := 0; i < concurrency; i++ {
+    for range concurrency {
         go func() {
             defer wg.Done()
             err := LockAndDo(ctx, "test", func(ctx context.Context) error {

@@ -6,6 +6,7 @@ package hostmatcher
 import (
     "net"
     "path/filepath"
+    "slices"
     "strings"
 )

@@ -38,7 +39,7 @@ func isBuiltin(s string) bool {
 // ParseHostMatchList parses the host list HostMatchList
 func ParseHostMatchList(settingKeyHint, hostList string) *HostMatchList {
     hl := &HostMatchList{SettingKeyHint: settingKeyHint, SettingValue: hostList}
-    for _, s := range strings.Split(hostList, ",") {
+    for s := range strings.SplitSeq(hostList, ",") {
         s = strings.ToLower(strings.TrimSpace(s))
         if s == "" {
             continue

@@ -61,7 +62,7 @@ func ParseSimpleMatchList(settingKeyHint, matchList string) *HostMatchList {
         SettingKeyHint: settingKeyHint,
         SettingValue:   matchList,
     }
-    for _, s := range strings.Split(matchList, ",") {
+    for s := range strings.SplitSeq(matchList, ",") {
         s = strings.ToLower(strings.TrimSpace(s))
         if s == "" {
             continue

@@ -98,10 +99,8 @@ func (hl *HostMatchList) checkPattern(host string) bool {
 }

 func (hl *HostMatchList) checkIP(ip net.IP) bool {
-    for _, pattern := range hl.patterns {
-        if pattern == "*" {
-            return true
-        }
+    if slices.Contains(hl.patterns, "*") {
+        return true
     }
     for _, builtin := range hl.builtins {
         switch builtin {

@@ -79,7 +79,7 @@ func HandleGenericETagCache(req *http.Request, w http.ResponseWriter, etag strin
 func checkIfNoneMatchIsValid(req *http.Request, etag string) bool {
     ifNoneMatch := req.Header.Get("If-None-Match")
     if len(ifNoneMatch) > 0 {
-        for _, item := range strings.Split(ifNoneMatch, ",") {
+        for item := range strings.SplitSeq(ifNoneMatch, ",") {
             item = strings.TrimPrefix(strings.TrimSpace(item), "W/") // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag#directives
             if item == etag {
                 return true

@@ -51,7 +51,7 @@ func generatePathTokens(input analysis.TokenStream, reversed bool) analysis.Toke
         slices.Reverse(input)
     }

-    for i := 0; i < len(input); i++ {
+    for i := range input {
         var sb strings.Builder
         sb.Write(input[0].Term)

@@ -129,8 +129,8 @@ func nonGenesisChanges(ctx context.Context, repo *repo_model.Repository, revisio
         changes.Updates = append(changes.Updates, updates...)
         return nil
     }
-    lines := strings.Split(stdout, "\n")
-    for _, line := range lines {
+    lines := strings.SplitSeq(stdout, "\n")
+    for line := range lines {
         line = strings.TrimSpace(line)
         if len(line) == 0 {
             continue

@@ -77,7 +77,7 @@ func HighlightSearchResultCode(filename, language string, lineNums []int, code s

     // The lineNums outputted by highlight.Code might not match the original lineNums, because "highlight" removes the last `\n`
     lines := make([]*ResultLine, min(len(highlightedLines), len(lineNums)))
-    for i := 0; i < len(lines); i++ {
+    for i := range lines {
         lines[i] = &ResultLine{
             Num:              lineNums[i],
             FormattedContent: template.HTML(highlightedLines[i]),

@@ -8,6 +8,7 @@ import (
     "fmt"
     "net/url"
     "regexp"
+    "slices"
     "strconv"
     "strings"

@@ -447,12 +448,7 @@ func (o *valuedOption) IsChecked() bool {
     case api.IssueFormFieldTypeDropdown:
         checks := strings.Split(o.field.Get("form-field-"+o.field.ID), ",")
         idx := strconv.Itoa(o.index)
-        for _, v := range checks {
-            if v == idx {
-                return true
-            }
-        }
-        return false
+        return slices.Contains(checks, idx)
     case api.IssueFormFieldTypeCheckboxes:
         return o.field.Get(fmt.Sprintf("form-field-%s-%d", o.field.ID, o.index)) == "on"
     }

@@ -72,7 +72,7 @@ func parseYamlFormat(fileName string, data []byte) ([]*Label, error) {
 func parseLegacyFormat(fileName string, data []byte) ([]*Label, error) {
     lines := strings.Split(string(data), "\n")
     list := make([]*Label, 0, len(lines))
-    for i := 0; i < len(lines); i++ {
+    for i := range lines {
         line := strings.TrimSpace(lines[i])
         if len(line) == 0 {
             continue

@@ -108,7 +108,7 @@ func LoadTemplateDescription(fileName string) (string, error) {
         return "", err
     }

-    for i := 0; i < len(list); i++ {
+    for i := range list {
         if i > 0 {
             buf.WriteString(", ")
         }

@@ -212,7 +212,7 @@ func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, ms
             }
         }
         if hasColorValue {
-            msg = []byte(fmt.Sprintf(msgFormat, msgArgs...))
+            msg = fmt.Appendf(nil, msgFormat, msgArgs...)
         }
     }
     // try to re-use the pre-formatted simple text message

@@ -243,8 +243,8 @@ func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, ms
     buf = append(buf, msg...)

     if event.Stacktrace != "" && mode.StacktraceLevel <= event.Level {
-        lines := bytes.Split([]byte(event.Stacktrace), []byte("\n"))
-        for _, line := range lines {
+        lines := bytes.SplitSeq([]byte(event.Stacktrace), []byte("\n"))
+        for line := range lines {
             buf = append(buf, "\n\t"...)
             buf = append(buf, line...)
         }

@@ -123,7 +123,7 @@ func FlagsFromString(from string, def ...uint32) Flags {
         return Flags{defined: true, flags: def[0]}
     }
     flags := uint32(0)
-    for _, flag := range strings.Split(strings.ToLower(from), ",") {
+    for flag := range strings.SplitSeq(strings.ToLower(from), ",") {
         flags |= flagFromString[strings.TrimSpace(flag)]
     }
     return Flags{defined: true, flags: flags}

@@ -32,11 +32,11 @@ func TestLevelMarshalUnmarshalJSON(t *testing.T) {
     assert.NoError(t, err)
     assert.Equal(t, INFO, testLevel.Level)

-    err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 2)), &testLevel)
+    err = json.Unmarshal(fmt.Appendf(nil, `{"level":%d}`, 2), &testLevel)
     assert.NoError(t, err)
     assert.Equal(t, INFO, testLevel.Level)

-    err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 10012)), &testLevel)
+    err = json.Unmarshal(fmt.Appendf(nil, `{"level":%d}`, 10012), &testLevel)
     assert.NoError(t, err)
     assert.Equal(t, INFO, testLevel.Level)

@@ -51,5 +51,5 @@ func TestLevelMarshalUnmarshalJSON(t *testing.T) {
 }

 func makeTestLevelBytes(level string) []byte {
-    return []byte(fmt.Sprintf(`{"level":"%s"}`, level))
+    return fmt.Appendf(nil, `{"level":"%s"}`, level)
 }

@@ -8,6 +8,7 @@ import (
     "fmt"
     "io"
     "regexp"
+    "slices"
     "strings"
     "sync"

@@ -109,13 +110,7 @@ func CustomLinkURLSchemes(schemes []string) {
         if !validScheme.MatchString(s) {
             continue
         }
-        without := false
-        for _, sna := range xurls.SchemesNoAuthority {
-            if s == sna {
-                without = true
-                break
-            }
-        }
+        without := slices.Contains(xurls.SchemesNoAuthority, s)
         if without {
             s += ":"
         } else {

@@ -62,7 +62,7 @@ func anyHashPatternExtract(s string) (ret anyHashPatternResult, ok bool) {
         // if url ends in '.', it's very likely that it is not part of the actual url but used to finish a sentence.
         ret.PosEnd--
         ret.FullURL = ret.FullURL[:len(ret.FullURL)-1]
-        for i := 0; i < len(m); i++ {
+        for i := range m {
             m[i] = min(m[i], ret.PosEnd)
         }
     }

@@ -31,8 +31,8 @@ func shortLinkProcessor(ctx *RenderContext, node *html.Node) {
     // It makes page handling terrible, but we prefer GitHub syntax
     // And fall back to MediaWiki only when it is obvious from the look
     // Of text and link contents
-    sl := strings.Split(content, "|")
-    for _, v := range sl {
+    sl := strings.SplitSeq(content, "|")
+    for v := range sl {
         if equalPos := strings.IndexByte(v, '='); equalPos == -1 {
             // There is no equal in this argument; this is a mandatory arg
             if props["name"] == "" {

@@ -182,10 +182,7 @@ func render(ctx *markup.RenderContext, input io.Reader, output io.Writer) error
     rc := &RenderConfig{Meta: markup.RenderMetaAsDetails}
     buf, _ = ExtractMetadataBytes(buf, rc)

-    metaLength := bufWithMetadataLength - len(buf)
-    if metaLength < 0 {
-        metaLength = 0
-    }
+    metaLength := max(bufWithMetadataLength-len(buf), 0)
     rc.metaLength = metaLength

     pc.Set(renderConfigKey, rc)
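The `metaLength` hunk is the mirror image of the earlier `min` rewrite: the built-in `max` (Go 1.21+) clamps a difference at zero without the explicit `if metaLength < 0` branch. A sketch with assumed lengths:

package main

import "fmt"

func main() {
    bufWithMetadataLength, bufLength := 10, 25
    // Negative differences are clamped to 0 in a single expression.
    metaLength := max(bufWithMetadataLength-bufLength, 0)
    fmt.Println(metaLength) // 0
}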
@@ -252,7 +252,7 @@ This PR has been generated by [Renovate Bot](https://github.com/renovatebot/reno
             return username == "r-lyeh"
         },
     })
-    for i := 0; i < len(sameCases); i++ {
+    for i := range sameCases {
         line, err := markdown.RenderString(markup.NewTestRenderContext(localMetas), sameCases[i])
         assert.NoError(t, err)
         assert.Equal(t, testAnswers[i], string(line))

@@ -42,7 +42,7 @@ func (r *BlockRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {

 func (r *BlockRenderer) writeLines(w util.BufWriter, source []byte, n gast.Node) {
     l := n.Lines().Len()
-    for i := 0; i < l; i++ {
+    for i := range l {
         line := n.Lines().At(i)
         _, _ = w.Write(util.EscapeHTML(line.Value(source)))
     }

@@ -60,7 +60,7 @@ func TestExtractMetadata(t *testing.T) {
 func TestExtractMetadataBytes(t *testing.T) {
     t.Run("ValidFrontAndBody", func(t *testing.T) {
         var meta IssueTemplate
-        body, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s\n%s", sepTest, frontTest, sepTest, bodyTest)), &meta)
+        body, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s\n%s", sepTest, frontTest, sepTest, bodyTest), &meta)
         assert.NoError(t, err)
         assert.Equal(t, bodyTest, string(body))
         assert.Equal(t, metaTest, meta)

@@ -69,19 +69,19 @@ func TestExtractMetadataBytes(t *testing.T) {

     t.Run("NoFirstSeparator", func(t *testing.T) {
         var meta IssueTemplate
-        _, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s", frontTest, sepTest, bodyTest)), &meta)
+        _, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s", frontTest, sepTest, bodyTest), &meta)
         assert.Error(t, err)
     })

     t.Run("NoLastSeparator", func(t *testing.T) {
         var meta IssueTemplate
-        _, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s", sepTest, frontTest, bodyTest)), &meta)
+        _, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s", sepTest, frontTest, bodyTest), &meta)
         assert.Error(t, err)
     })

     t.Run("NoBody", func(t *testing.T) {
         var meta IssueTemplate
-        body, err := ExtractMetadataBytes([]byte(fmt.Sprintf("%s\n%s\n%s", sepTest, frontTest, sepTest)), &meta)
+        body, err := ExtractMetadataBytes(fmt.Appendf(nil, "%s\n%s\n%s", sepTest, frontTest, sepTest), &meta)
         assert.NoError(t, err)
         assert.Empty(t, string(body))
         assert.Equal(t, metaTest, meta)

@@ -16,7 +16,7 @@ import (
 func (g *ASTTransformer) transformHeading(_ *markup.RenderContext, v *ast.Heading, reader text.Reader, tocList *[]Header) {
     for _, attr := range v.Attributes() {
         if _, ok := attr.Value.([]byte); !ok {
-            v.SetAttribute(attr.Name, []byte(fmt.Sprintf("%v", attr.Value)))
+            v.SetAttribute(attr.Name, fmt.Appendf(nil, "%v", attr.Value))
         }
     }
     txt := v.Text(reader.Source()) //nolint:staticcheck

@@ -58,7 +58,7 @@ type PackageMetadata struct {
     Time           map[string]time.Time `json:"time,omitempty"`
     Homepage       string               `json:"homepage,omitempty"`
     Keywords       []string             `json:"keywords,omitempty"`
-    Repository     Repository           `json:"repository,omitempty"`
+    Repository     Repository           `json:"repository"`
     Author         User                 `json:"author"`
     ReadmeFilename string               `json:"readmeFilename,omitempty"`
     Users          map[string]bool      `json:"users,omitempty"`

@@ -75,7 +75,7 @@ type PackageMetadataVersion struct {
     Author             User              `json:"author"`
     Homepage           string            `json:"homepage,omitempty"`
     License            string            `json:"license,omitempty"`
-    Repository         Repository        `json:"repository,omitempty"`
+    Repository         Repository        `json:"repository"`
     Keywords           []string          `json:"keywords,omitempty"`
     Dependencies       map[string]string `json:"dependencies,omitempty"`
     BundleDependencies []string          `json:"bundleDependencies,omitempty"`

@@ -23,5 +23,5 @@ type Metadata struct {
     OptionalDependencies map[string]string `json:"optional_dependencies,omitempty"`
     Bin                  map[string]string `json:"bin,omitempty"`
     Readme               string            `json:"readme,omitempty"`
-    Repository           Repository        `json:"repository,omitempty"`
+    Repository           Repository        `json:"repository"`
 }
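The tag changes in these metadata structs look cosmetic but follow from how encoding/json works: `omitempty` never omits a non-pointer struct field, because a struct value is never "empty" in the JSON sense, so the tag was dead weight. A sketch demonstrating this with hypothetical types (not the npm metadata structs themselves):

package main

import (
    "encoding/json"
    "fmt"
)

type Repository struct {
    Type string `json:"type,omitempty"`
    URL  string `json:"url,omitempty"`
}

type Metadata struct {
    WithTag    Repository `json:"with_tag,omitempty"` // omitempty has no effect here
    WithoutTag Repository `json:"without_tag"`
}

func main() {
    out, _ := json.Marshal(Metadata{})
    // Both struct fields are emitted either way.
    fmt.Println(string(out)) // {"with_tag":{},"without_tag":{}}
}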
@@ -250,7 +250,7 @@ func (e *MarshalEncoder) marshalArray(arr reflect.Value) error {
         return err
     }

-    for i := 0; i < length; i++ {
+    for i := range length {
         if err := e.marshal(arr.Index(i).Interface()); err != nil {
             return err
         }

@@ -47,7 +47,7 @@ type Metadata struct {
     Keywords      []string             `json:"keywords,omitempty"`
     RepositoryURL string               `json:"repository_url,omitempty"`
     License       string               `json:"license,omitempty"`
-    Author        Person               `json:"author,omitempty"`
+    Author        Person               `json:"author"`
     Manifests     map[string]*Manifest `json:"manifests,omitempty"`
 }

@@ -44,7 +44,7 @@ func FileHandlerFunc() http.HandlerFunc {
 func parseAcceptEncoding(val string) container.Set[string] {
     parts := strings.Split(val, ";")
     types := make(container.Set[string])
-    for _, v := range strings.Split(parts[0], ",") {
+    for v := range strings.SplitSeq(parts[0], ",") {
         types.Add(strings.TrimSpace(v))
     }
     return types

@@ -83,7 +83,7 @@ func prepareLevelDB(cfg *BaseConfig) (conn string, db *leveldb.DB, err error) {
         }
         conn = cfg.ConnStr
     }
-    for i := 0; i < 10; i++ {
+    for range 10 {
         if db, err = nosql.GetManager().GetLevelDB(conn); err == nil {
             break
         }

@@ -29,7 +29,7 @@ func newBaseRedisGeneric(cfg *BaseConfig, unique bool) (baseQueue, error) {
     client := nosql.GetManager().GetRedisClient(cfg.ConnStr)

     var err error
-    for i := 0; i < 10; i++ {
+    for range 10 {
         err = client.Ping(graceful.GetManager().ShutdownContext()).Err()
         if err == nil {
             break

@@ -87,7 +87,7 @@ func testQueueBasic(t *testing.T, newFn func(cfg *BaseConfig) (baseQueue, error)

     // test blocking push if queue is full
     for i := 0; i < cfg.Length; i++ {
-        err = q.PushItem(ctx, []byte(fmt.Sprintf("item-%d", i)))
+        err = q.PushItem(ctx, fmt.Appendf(nil, "item-%d", i))
         assert.NoError(t, err)
     }
     ctxTimed, cancel = context.WithTimeout(ctx, 10*time.Millisecond)

@@ -6,6 +6,7 @@ package queue
 import (
     "context"
     "errors"
+    "maps"
     "sync"
     "time"

@@ -70,9 +71,7 @@ func (m *Manager) ManagedQueues() map[int64]ManagedWorkerPoolQueue {
     defer m.mu.Unlock()

     queues := make(map[int64]ManagedWorkerPoolQueue, len(m.Queues))
-    for k, v := range m.Queues {
-        queues[k] = v
-    }
+    maps.Copy(queues, m.Queues)
     return queues
 }

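`maps.Copy` (Go 1.21+) replaces the manual key/value copy loop above: it inserts every key/value pair of the source map into the destination, overwriting existing keys. A sketch:

package main

import (
    "fmt"
    "maps"
)

func main() {
    src := map[int64]string{1: "a", 2: "b"}
    dst := make(map[int64]string, len(src))
    // Copies all entries of src into dst in one call.
    maps.Copy(dst, src)
    fmt.Println(len(dst)) // 2
}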
@@ -77,17 +77,17 @@ func TestWorkerPoolQueueUnhandled(t *testing.T) {

    runCount := 2 // we can run these tests even hundreds times to see its stability
     t.Run("1/1", func(t *testing.T) {
-        for i := 0; i < runCount; i++ {
+        for range runCount {
             test(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1})
         }
     })
     t.Run("3/1", func(t *testing.T) {
-        for i := 0; i < runCount; i++ {
+        for range runCount {
             test(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1})
         }
     })
     t.Run("4/5", func(t *testing.T) {
-        for i := 0; i < runCount; i++ {
+        for range runCount {
             test(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5})
         }
     })

@@ -96,17 +96,17 @@ func TestWorkerPoolQueueUnhandled(t *testing.T) {
 func TestWorkerPoolQueuePersistence(t *testing.T) {
     runCount := 2 // we can run these tests even hundreds times to see its stability
     t.Run("1/1", func(t *testing.T) {
-        for i := 0; i < runCount; i++ {
+        for range runCount {
             testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1, Length: 100})
         }
     })
     t.Run("3/1", func(t *testing.T) {
-        for i := 0; i < runCount; i++ {
+        for range runCount {
             testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1, Length: 100})
         }
     })
     t.Run("4/5", func(t *testing.T) {
-        for i := 0; i < runCount; i++ {
+        for range runCount {
             testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5, Length: 100})
         }
     })

@@ -141,7 +141,7 @@ func testWorkerPoolQueuePersistence(t *testing.T, queueSetting setting.QueueSett

     q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
     stop := runWorkerPoolQueue(q)
-    for i := 0; i < testCount; i++ {
+    for i := range testCount {
         _ = q.Push("task-" + strconv.Itoa(i))
     }
     close(startWhenAllReady)

@@ -186,7 +186,7 @@ func TestWorkerPoolQueueActiveWorkers(t *testing.T) {

     q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 1, Length: 100}, handler, false)
     stop := runWorkerPoolQueue(q)
-    for i := 0; i < 5; i++ {
+    for i := range 5 {
         assert.NoError(t, q.Push(i))
     }

@@ -202,7 +202,7 @@ func TestWorkerPoolQueueActiveWorkers(t *testing.T) {

     q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 3, Length: 100}, handler, false)
     stop = runWorkerPoolQueue(q)
-    for i := 0; i < 15; i++ {
+    for i := range 15 {
         assert.NoError(t, q.Push(i))
     }

@@ -274,7 +274,7 @@ func TestWorkerPoolQueueWorkerIdleReset(t *testing.T) {
     }
     q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 2, Length: 100}, handler, false)
     stop := runWorkerPoolQueue(q)
-    for i := 0; i < 100; i++ {
+    for i := range 100 {
         assert.NoError(t, q.Push(i))
     }
     time.Sleep(500 * time.Millisecond)

@@ -125,7 +125,7 @@ func InitializeLabels(ctx context.Context, id int64, labelTemplate string, isOrg
     }

     labels := make([]*issues_model.Label, len(list))
-    for i := 0; i < len(list); i++ {
+    for i := range list {
         labels[i] = &issues_model.Label{
             Name:      list[i].Name,
             Exclusive: list[i].Exclusive,

@@ -6,6 +6,7 @@ package reqctx
 import (
     "context"
     "io"
+    "maps"
     "sync"

     "code.gitea.io/gitea/modules/process"

@@ -22,9 +23,7 @@ func (ds ContextData) GetData() ContextData {
 }

 func (ds ContextData) MergeFrom(other ContextData) ContextData {
-    for k, v := range other {
-        ds[k] = v
-    }
+    maps.Copy(ds, other)
     return ds
 }

@@ -96,7 +96,7 @@ func loadIndexerFrom(rootCfg ConfigProvider) {
 // IndexerGlobFromString parses a comma separated list of patterns and returns a glob.Glob slice suited for repo indexing
 func IndexerGlobFromString(globstr string) []*GlobMatcher {
     extarr := make([]*GlobMatcher, 0, 10)
-    for _, expr := range strings.Split(strings.ToLower(globstr), ",") {
+    for expr := range strings.SplitSeq(strings.ToLower(globstr), ",") {
         expr = strings.TrimSpace(expr)
         if expr != "" {
             if g, err := GlobMatcherCompile(expr, '.', '/'); err != nil {

@@ -227,8 +227,8 @@ func initLoggerByName(manager *log.LoggerManager, rootCfg ConfigProvider, logger
     }

     var eventWriters []log.EventWriter
-    modes := strings.Split(modeVal, ",")
-    for _, modeName := range modes {
+    modes := strings.SplitSeq(modeVal, ",")
+    for modeName := range modes {
         modeName = strings.TrimSpace(modeName)
         if modeName == "" {
             continue

@@ -149,8 +149,8 @@ func loadMarkupFrom(rootCfg ConfigProvider) {
 func newMarkupSanitizer(name string, sec ConfigSection) {
     rule, ok := createMarkupSanitizerRule(name, sec)
     if ok {
-        if strings.HasPrefix(name, "sanitizer.") {
-            names := strings.SplitN(strings.TrimPrefix(name, "sanitizer."), ".", 2)
+        if after, ok0 := strings.CutPrefix(name, "sanitizer."); ok0 {
+            names := strings.SplitN(after, ".", 2)
             name = names[0]
         }
         for _, renderer := range ExternalMarkupRenderers {

@@ -48,11 +48,7 @@ func loadMirrorFrom(rootCfg ConfigProvider) {
         Mirror.MinInterval = 1 * time.Minute
     }
     if Mirror.DefaultInterval < Mirror.MinInterval {
-        if time.Hour*8 < Mirror.MinInterval {
-            Mirror.DefaultInterval = Mirror.MinInterval
-        } else {
-            Mirror.DefaultInterval = time.Hour * 8
-        }
+        Mirror.DefaultInterval = max(time.Hour*8, Mirror.MinInterval)
         log.Warn("Mirror.DefaultInterval is less than Mirror.MinInterval, set to %s", Mirror.DefaultInterval.String())
     }
 }

@@ -7,6 +7,7 @@ import (
     "errors"
     "fmt"
     "path/filepath"
+    "slices"
     "strings"
 )

@@ -30,12 +31,7 @@ var storageTypes = []StorageType{

 // IsValidStorageType returns true if the given storage type is valid
 func IsValidStorageType(storageType StorageType) bool {
-    for _, t := range storageTypes {
-        if t == storageType {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(storageTypes, storageType)
 }

 // MinioStorageConfig represents the configuration for a minio storage

@@ -203,7 +203,7 @@ func (l *IssueTemplateStringSlice) UnmarshalYAML(value *yaml.Node) error {
         if err != nil {
             return err
         }
-        for _, v := range strings.Split(str, ",") {
+        for v := range strings.SplitSeq(str, ",") {
             if v = strings.TrimSpace(v); v == "" {
                 continue
             }

@@ -113,7 +113,7 @@ type Repository struct {
     // enum: sha1,sha256
     ObjectFormatName string `json:"object_format_name"`
     // swagger:strfmt date-time
-    MirrorUpdated time.Time `json:"mirror_updated,omitempty"`
+    MirrorUpdated time.Time `json:"mirror_updated"`
     RepoTransfer  *RepoTransfer `json:"repo_transfer"`
     Topics        []string      `json:"topics"`
     Licenses      []string      `json:"licenses"`

@@ -57,7 +57,7 @@ type ActionWorkflow struct {
     HTMLURL  string `json:"html_url"`
     BadgeURL string `json:"badge_url"`
     // swagger:strfmt date-time
-    DeletedAt time.Time `json:"deleted_at,omitempty"`
+    DeletedAt time.Time `json:"deleted_at"`
 }

 // ActionWorkflowResponse returns a ActionWorkflow

@@ -104,9 +104,9 @@ type ActionWorkflowStep struct {
     Status     string `json:"status"`
     Conclusion string `json:"conclusion,omitempty"`
     // swagger:strfmt date-time
-    StartedAt time.Time `json:"started_at,omitempty"`
+    StartedAt time.Time `json:"started_at"`
     // swagger:strfmt date-time
-    CompletedAt time.Time `json:"completed_at,omitempty"`
+    CompletedAt time.Time `json:"completed_at"`
 }

 // ActionWorkflowJob represents a WorkflowJob

@@ -129,9 +129,9 @@ type ActionWorkflowJob struct {
     // swagger:strfmt date-time
     CreatedAt time.Time `json:"created_at"`
     // swagger:strfmt date-time
-    StartedAt time.Time `json:"started_at,omitempty"`
+    StartedAt time.Time `json:"started_at"`
     // swagger:strfmt date-time
-    CompletedAt time.Time `json:"completed_at,omitempty"`
+    CompletedAt time.Time `json:"completed_at"`
 }

 // ActionRunnerLabel represents a Runner Label