Mirror of https://github.com/go-gitea/gitea.git (synced 2025-09-16 15:39:53 +00:00)
Commit 33ad554800, parent b209531959, committed by Lunny Xiao
11  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go  (generated, vendored)
@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 
 	var message bool
 	var pgpsig bool
@@ -233,6 +235,11 @@ func (b *Commit) Encode(o plumbing.EncodedObject) error {
 	return b.encode(o, true)
 }
 
+// EncodeWithoutSignature exports a Commit into a plumbing.EncodedObject without the signature (corresponds to the payload of the PGP signature).
+func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+	return b.encode(o, false)
+}
+
 func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
 	o.SetType(plumbing.CommitObject)
 	w, err := o.Writer()
@@ -347,7 +354,7 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
 
 	encoded := &plumbing.MemoryObject{}
 	// Encode commit components, excluding signature and get a reader object.
-	if err := c.encode(encoded, false); err != nil {
+	if err := c.EncodeWithoutSignature(encoded); err != nil {
 		return nil, err
 	}
 	er, err := encoded.Reader()
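EncodeWithoutSignature exposes the exact payload that a commit's PGP signature covers. A minimal sketch of verifying a signature by hand with the new method (illustrative only: the verifyCommit helper and the keyRing argument are not part of this change, and Commit.Verify already does the same work internally):

package example

import (
	"strings"

	"golang.org/x/crypto/openpgp"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// verifyCommit re-encodes the commit without its gpgsig header and checks
// the detached signature against that payload.
func verifyCommit(c *object.Commit, keyRing string) (*openpgp.Entity, error) {
	payload := &plumbing.MemoryObject{}
	if err := c.EncodeWithoutSignature(payload); err != nil {
		return nil, err
	}

	payloadReader, err := payload.Reader()
	if err != nil {
		return nil, err
	}

	keys, err := openpgp.ReadArmoredKeyRing(strings.NewReader(keyRing))
	if err != nil {
		return nil, err
	}

	return openpgp.CheckArmoredDetachedSignature(keys, payloadReader, strings.NewReader(c.PGPSignature))
}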
176  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go  (generated, vendored, new file)
@@ -0,0 +1,176 @@
package object

import (
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// NewFilterCommitIter returns a CommitIter that walks the commit history,
// starting at the passed commit and visiting its parents in breadth-first order.
// The commits returned by the CommitIter will validate the passed CommitFilter.
// The history won't be traversed beyond a commit if isLimit is true for it.
// Each commit will be visited only once.
// If the commit history can not be traversed, or the Close() method is called,
// the CommitIter won't return more commits.
// If no isValid is passed, all ancestors of the from commit will be valid.
// If no isLimit is passed, all ancestors of all commits will be visited.
func NewFilterCommitIter(
	from *Commit,
	isValid *CommitFilter,
	isLimit *CommitFilter,
) CommitIter {
	var validFilter CommitFilter
	if isValid == nil {
		validFilter = func(_ *Commit) bool {
			return true
		}
	} else {
		validFilter = *isValid
	}

	var limitFilter CommitFilter
	if isLimit == nil {
		limitFilter = func(_ *Commit) bool {
			return false
		}
	} else {
		limitFilter = *isLimit
	}

	return &filterCommitIter{
		isValid: validFilter,
		isLimit: limitFilter,
		visited: map[plumbing.Hash]struct{}{},
		queue:   []*Commit{from},
	}
}

// CommitFilter returns a boolean for the passed Commit
type CommitFilter func(*Commit) bool

// filterCommitIter implements CommitIter
type filterCommitIter struct {
	isValid CommitFilter
	isLimit CommitFilter
	visited map[plumbing.Hash]struct{}
	queue   []*Commit
	lastErr error
}

// Next returns the next commit of the CommitIter.
// It will return io.EOF if there are no more commits to visit,
// or an error if the history could not be traversed.
func (w *filterCommitIter) Next() (*Commit, error) {
	var commit *Commit
	var err error
	for {
		commit, err = w.popNewFromQueue()
		if err != nil {
			return nil, w.close(err)
		}

		w.visited[commit.Hash] = struct{}{}

		if !w.isLimit(commit) {
			err = w.addToQueue(commit.s, commit.ParentHashes...)
			if err != nil {
				return nil, w.close(err)
			}
		}

		if w.isValid(commit) {
			return commit, nil
		}
	}
}

// ForEach runs the passed callback over each Commit returned by the CommitIter
// until the callback returns an error or there are no more commits to traverse.
func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
	for {
		commit, err := w.Next()
		if err == io.EOF {
			break
		}

		if err != nil {
			return err
		}

		if err := cb(commit); err == storer.ErrStop {
			break
		} else if err != nil {
			return err
		}
	}

	return nil
}

// Error returns the error that caused the CommitIter to stop returning commits
func (w *filterCommitIter) Error() error {
	return w.lastErr
}

// Close closes the CommitIter
func (w *filterCommitIter) Close() {
	w.visited = map[plumbing.Hash]struct{}{}
	w.queue = []*Commit{}
	w.isLimit = nil
	w.isValid = nil
}

// close closes the CommitIter with an error
func (w *filterCommitIter) close(err error) error {
	w.Close()
	w.lastErr = err
	return err
}

// popNewFromQueue returns the first new commit from the internal FIFO queue,
// or an io.EOF error if the queue is empty
func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
	var first *Commit
	for {
		if len(w.queue) == 0 {
			if w.lastErr != nil {
				return nil, w.lastErr
			}

			return nil, io.EOF
		}

		first = w.queue[0]
		w.queue = w.queue[1:]
		if _, ok := w.visited[first.Hash]; ok {
			continue
		}

		return first, nil
	}
}

// addToQueue adds the passed commits to the internal FIFO queue if they weren't seen
// or returns an error if the passed hashes could not be used to get valid commits
func (w *filterCommitIter) addToQueue(
	store storer.EncodedObjectStorer,
	hashes ...plumbing.Hash,
) error {
	for _, hash := range hashes {
		if _, ok := w.visited[hash]; ok {
			continue
		}

		commit, err := GetCommit(store, hash)
		if err != nil {
			return err
		}

		w.queue = append(w.queue, commit)
	}

	return nil
}
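As an illustration of the new API (not part of the vendored file), here is a small sketch that walks only merge commits reachable from a given head commit; the walkMerges helper name is an assumption for the example:

package example

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// walkMerges prints every merge commit reachable from head, using the
// CommitFilter and NewFilterCommitIter API introduced by this file.
func walkMerges(head *object.Commit) error {
	var isMerge object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1 // keep only merge commits
	}

	// nil isLimit: walk the full ancestry; pass a filter to stop earlier.
	iter := object.NewFilterCommitIter(head, &isMerge, nil)
	return iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
		return nil
	})
}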
12  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go  (generated, vendored, new file)
@@ -0,0 +1,12 @@
package object

import (
	"bufio"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewReader(nil)
	},
}
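This pool is what the Decode methods in commit.go, tag.go and tree.go now draw from instead of allocating a fresh bufio.Reader per decoded object. A rough sketch of the borrow/reset/return pattern, with an illustrative readAllPooled helper that is not part of the change:

package example

import (
	"bufio"
	"io"
	"io/ioutil"
	"sync"
)

// Same shape as the pool introduced in common.go.
var bufPool = sync.Pool{
	New: func() interface{} { return bufio.NewReader(nil) },
}

// readAllPooled shows the Get/Reset/Put pattern the Decode methods use:
// borrow a reader, point it at the new source, and hand it back when done.
func readAllPooled(src io.Reader) ([]byte, error) {
	r := bufPool.Get().(*bufio.Reader)
	defer bufPool.Put(r) // return the reader so later calls can reuse it
	r.Reset(src)         // drop any buffered state from the previous owner
	return ioutil.ReadAll(r)
}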
210  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go  (generated, vendored, new file)
@@ -0,0 +1,210 @@
package object

import (
	"fmt"
	"sort"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// errIsReachable is returned when the first commit is an ancestor of the second
var errIsReachable = fmt.Errorf("first is reachable from second")

// MergeBase mimics the behavior of `git merge-base actual other`, returning the
// best common ancestors between the actual commit and the passed one.
// A best common ancestor can not be reached from any other common ancestor.
func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
	// use sortedByCommitDateDesc strategy
	sorted := sortByCommitDateDesc(c, other)
	newer := sorted[0]
	older := sorted[1]

	newerHistory, err := ancestorsIndex(older, newer)
	if err == errIsReachable {
		return []*Commit{older}, nil
	}

	if err != nil {
		return nil, err
	}

	var res []*Commit
	inNewerHistory := isInIndexCommitFilter(newerHistory)
	resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
	err = resIter.ForEach(func(commit *Commit) error {
		res = append(res, commit)
		return nil
	})

	return Independents(res)
}

// IsAncestor returns true if the actual commit is an ancestor of the passed one.
// It returns an error if the history is not traversable.
// It mimics the behavior of `git merge --is-ancestor actual other`.
func (c *Commit) IsAncestor(other *Commit) (bool, error) {
	found := false
	iter := NewCommitPreorderIter(other, nil, nil)
	err := iter.ForEach(func(comm *Commit) error {
		if comm.Hash != c.Hash {
			return nil
		}

		found = true
		return storer.ErrStop
	})

	return found, err
}

// ancestorsIndex returns a map with the ancestors of the starting commit if the
// excluded one is not one of them. It returns errIsReachable if the excluded commit
// is an ancestor of the starting one, or another error if the history is not traversable.
func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
	if excluded.Hash.String() == starting.Hash.String() {
		return nil, errIsReachable
	}

	startingHistory := map[plumbing.Hash]struct{}{}
	startingIter := NewCommitIterBSF(starting, nil, nil)
	err := startingIter.ForEach(func(commit *Commit) error {
		if commit.Hash == excluded.Hash {
			return errIsReachable
		}

		startingHistory[commit.Hash] = struct{}{}
		return nil
	})

	if err != nil {
		return nil, err
	}

	return startingHistory, nil
}

// Independents returns a subset of the passed commits, that are not reachable from the others
// It mimics the behavior of `git merge-base --independent commit...`.
func Independents(commits []*Commit) ([]*Commit, error) {
	// use sortedByCommitDateDesc strategy
	candidates := sortByCommitDateDesc(commits...)
	candidates = removeDuplicated(candidates)

	seen := map[plumbing.Hash]struct{}{}
	var isLimit CommitFilter = func(commit *Commit) bool {
		_, ok := seen[commit.Hash]
		return ok
	}

	if len(candidates) < 2 {
		return candidates, nil
	}

	pos := 0
	for {
		from := candidates[pos]
		others := remove(candidates, from)
		fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
		err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
			for _, other := range others {
				if fromAncestor.Hash == other.Hash {
					candidates = remove(candidates, other)
					others = remove(others, other)
				}
			}

			if len(candidates) == 1 {
				return storer.ErrStop
			}

			seen[fromAncestor.Hash] = struct{}{}
			return nil
		})

		if err != nil {
			return nil, err
		}

		nextPos := indexOf(candidates, from) + 1
		if nextPos >= len(candidates) {
			break
		}

		pos = nextPos
	}

	return candidates, nil
}

// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
//
// This strategy tries to reduce the time needed to walk the history from one
// commit to reach the others. It is assumed that ancestors tend to be committed
// before their descendants; that way `Independents(A^, A)` will be processed as
// `Independents(A, A^)`, so starting from `A`, `A^` will be reached much sooner
// than by walking from `A^` to the initial commit and then from `A` to `A^`.
func sortByCommitDateDesc(commits ...*Commit) []*Commit {
	sorted := make([]*Commit, len(commits))
	copy(sorted, commits)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].Committer.When.After(sorted[j].Committer.When)
	})

	return sorted
}

// indexOf returns the first position where target was found in the passed commits
func indexOf(commits []*Commit, target *Commit) int {
	for i, commit := range commits {
		if target.Hash == commit.Hash {
			return i
		}
	}

	return -1
}

// remove returns the passed commits excluding the commit toDelete
func remove(commits []*Commit, toDelete *Commit) []*Commit {
	res := make([]*Commit, len(commits))
	j := 0
	for _, commit := range commits {
		if commit.Hash == toDelete.Hash {
			continue
		}

		res[j] = commit
		j++
	}

	return res[:j]
}

// removeDuplicated removes duplicated commits from the passed slice of commits
func removeDuplicated(commits []*Commit) []*Commit {
	seen := make(map[plumbing.Hash]struct{}, len(commits))
	res := make([]*Commit, len(commits))
	j := 0
	for _, commit := range commits {
		if _, ok := seen[commit.Hash]; ok {
			continue
		}

		seen[commit.Hash] = struct{}{}
		res[j] = commit
		j++
	}

	return res[:j]
}

// isInIndexCommitFilter returns a commitFilter that returns true
// if the commit is in the passed index.
func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
	return func(c *Commit) bool {
		_, ok := index[c.Hash]
		return ok
	}
}
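To see how the pieces fit together (illustrative only; the mergeBaseOf helper and its arguments are assumptions, not part of the diff), resolving two commits and asking for their merge base could look like this:

package example

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// mergeBaseOf resolves two commits and asks for their best common
// ancestors, the equivalent of `git merge-base hashA hashB`.
func mergeBaseOf(repo *git.Repository, hashA, hashB plumbing.Hash) ([]*object.Commit, error) {
	a, err := repo.CommitObject(hashA)
	if err != nil {
		return nil, err
	}

	b, err := repo.CommitObject(hashB)
	if err != nil {
		return nil, err
	}

	// Usually a single commit; several are possible with criss-cross merges.
	return a.MergeBase(b)
}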
4  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go  (generated, vendored)
@@ -321,6 +321,10 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
 
 		for _, chunk := range fp.Chunks() {
 			s := chunk.Content()
+			if len(s) == 0 {
+				continue
+			}
+
 			switch chunk.Type() {
 			case fdiff.Add:
 				cs.Addition += strings.Count(s, "\n")
13  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go  (generated, vendored)
@@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 	for {
 		var line []byte
 		line, err = r.ReadBytes('\n')
@@ -141,7 +143,7 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
 		if pgpsig {
 			if bytes.Contains(l, []byte(endpgp)) {
 				t.PGPSignature += endpgp + "\n"
-				pgpsig = false
+				break
 			} else {
 				t.PGPSignature += string(l) + "\n"
 			}
@@ -169,6 +171,11 @@ func (t *Tag) Encode(o plumbing.EncodedObject) error {
 	return t.encode(o, true)
 }
 
+// EncodeWithoutSignature exports a Tag into a plumbing.EncodedObject without the signature (corresponds to the payload of the PGP signature).
+func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+	return t.encode(o, false)
+}
+
 func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
 	o.SetType(plumbing.TagObject)
 	w, err := o.Writer()
@@ -289,7 +296,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
 
 	encoded := &plumbing.MemoryObject{}
 	// Encode tag components, excluding signature and get a reader object.
-	if err := t.encode(encoded, false); err != nil {
+	if err := t.EncodeWithoutSignature(encoded); err != nil {
 		return nil, err
 	}
 	er, err := encoded.Reader()
23  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go  (generated, vendored)
@@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 	for {
 		str, err := r.ReadString(' ')
 		if err != nil {
@@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWa
 // underlying repository will be skipped automatically. It is possible that this
 // may change in future versions.
 func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
-	var obj Object
+	var obj *Tree
 	for {
 		current := len(w.stack) - 1
 		if current < 0 {
@@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			// Finished with the current tree, move back up to the parent
 			w.stack = w.stack[:current]
 			w.base, _ = path.Split(w.base)
-			w.base = path.Clean(w.base) // Remove trailing slash
+			w.base = strings.TrimSuffix(w.base, "/")
 			continue
 		}
 
@@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 		obj, err = GetTree(w.s, entry.Hash)
 	}
 
-	name = path.Join(w.base, entry.Name)
+	name = simpleJoin(w.base, entry.Name)
 
 	if err != nil {
 		err = io.EOF
@@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 		return
 	}
 
-	if t, ok := obj.(*Tree); ok {
-		w.stack = append(w.stack, &treeEntryIter{t, 0})
-		w.base = path.Join(w.base, entry.Name)
+	if obj != nil {
+		w.stack = append(w.stack, &treeEntryIter{obj, 0})
+		w.base = simpleJoin(w.base, entry.Name)
 	}
 
 	return
@@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
 		return cb(t)
 	})
 }
+
+func simpleJoin(parent, child string) string {
+	if len(parent) > 0 {
+		return parent + "/" + child
+	}
+	return child
+}
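The simpleJoin helper replaces path.Join on the tree walker's hot path: it only concatenates and skips the Clean pass that path.Join performs on every call, which is presumably the motivation for the change. A small self-contained comparison (illustrative only):

package main

import (
	"fmt"
	"path"
)

// simpleJoin is copied from the diff above: a plain concatenation that
// assumes the inputs are already clean, as TreeWalker guarantees for w.base.
func simpleJoin(parent, child string) string {
	if len(parent) > 0 {
		return parent + "/" + child
	}
	return child
}

func main() {
	fmt.Println(simpleJoin("a/b", "c.go")) // "a/b/c.go"
	fmt.Println(path.Join("a/b", "c.go"))  // "a/b/c.go", but cleans and allocates

	// With an empty parent, simpleJoin avoids producing a leading slash.
	fmt.Println(simpleJoin("", "c.go")) // "c.go"
}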