pin to v1.0.0 of github action parser
vendor/gopkg.in/src-d/go-billy.v4/.gitignore | 4 (generated, vendored)
@@ -1 +1,5 @@
 /coverage.txt
+/vendor
+Gopkg.lock
+Gopkg.toml
+go.sum
vendor/gopkg.in/src-d/go-billy.v4/util/util.go | 39 (generated, vendored)
@@ -168,6 +168,45 @@ func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) {
     return
 }
 
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) {
+    // This implementation is based on stdlib ioutil.TempDir
+
+    if dir == "" {
+        dir = os.TempDir()
+    }
+
+    nconflict := 0
+    for i := 0; i < 10000; i++ {
+        try := filepath.Join(dir, prefix+nextSuffix())
+        err = fs.MkdirAll(try, 0700)
+        if os.IsExist(err) {
+            if nconflict++; nconflict > 10 {
+                randmu.Lock()
+                rand = reseed()
+                randmu.Unlock()
+            }
+            continue
+        }
+        if os.IsNotExist(err) {
+            if _, err := os.Stat(dir); os.IsNotExist(err) {
+                return "", err
+            }
+        }
+        if err == nil {
+            name = try
+        }
+        break
+    }
+    return
+}
+
 type underlying interface {
     Underlying() billy.Basic
 }
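The newly vendored util.TempDir mirrors the standard library's ioutil.TempDir but works against any billy.Dir implementation. A minimal sketch of exercising it, assuming an in-memory filesystem from go-billy's memfs package; the "example-" prefix is an arbitrary placeholder:

```go
package main

import (
    "fmt"

    "gopkg.in/src-d/go-billy.v4/memfs"
    "gopkg.in/src-d/go-billy.v4/util"
)

func main() {
    // An in-memory filesystem keeps the sketch self-contained.
    fs := memfs.New()

    // With dir == "" TempDir falls back to os.TempDir(), then creates the
    // directory inside the billy filesystem rather than on disk.
    dir, err := util.TempDir(fs, "", "example-")
    if err != nil {
        panic(err)
    }
    fmt.Println("created temporary directory:", dir)
}
```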
vendor/gopkg.in/src-d/go-git.v4/options.go | 5 (generated, vendored)
@@ -335,6 +335,11 @@ type LogOptions struct {
     // Show only those commits in which the specified file was inserted/updated.
     // It is equivalent to running `git log -- <file-name>`.
     FileName *string
+
+    // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
+    // It is equivalent to running `git log --all`.
+    // If set on true, the From option will be ignored.
+    All bool
 }
 
 var (
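The new All field turns a Log call into the equivalent of `git log --all`, and combines with FileName for `git log --all -- <path>`. A hedged usage sketch; the repository path and file name below are placeholders, not part of this change:

```go
package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
    repo, err := git.PlainOpen(".") // placeholder path
    if err != nil {
        panic(err)
    }

    // Roughly `git log --all -- go.mod`: walk commits reachable from every
    // ref (plus HEAD) and keep only those touching the named file.
    fileName := "go.mod" // placeholder file
    iter, err := repo.Log(&git.LogOptions{All: true, FileName: &fileName})
    if err != nil {
        panic(err)
    }
    defer iter.Close()

    err = iter.ForEach(func(c *object.Commit) error {
        fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
        return nil
    })
    if err != nil {
        panic(err)
    }
}
```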
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go | 8 (generated, vendored)
@@ -51,7 +51,13 @@ func WritePackfileToObjectStorage(
     }
 
     defer ioutil.CheckClose(w, &err)
-    _, err = io.Copy(w, packfile)
+
+    var n int64
+    n, err = io.Copy(w, packfile)
+    if err == nil && n == 0 {
+        return ErrEmptyPackfile
+    }
 
     return err
 }
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go | 2 (generated, vendored)
@@ -48,7 +48,7 @@ func NewFSObject(
 // Reader implements the plumbing.EncodedObject interface.
 func (o *FSObject) Reader() (io.ReadCloser, error) {
     obj, ok := o.cache.Get(o.hash)
-    if ok {
+    if ok && obj != o {
         reader, err := obj.Reader()
         if err != nil {
             return nil, err
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go | 51 (generated, vendored)
@@ -21,6 +21,16 @@ var (
     ErrZLib = NewError("zlib reading error")
 )
 
+// When reading small objects from packfile it is beneficial to do so at
+// once to exploit the buffered I/O. In many cases the objects are so small
+// that they were already loaded to memory when the object header was
+// loaded from the packfile. Wrapping in FSObject would cause this buffered
+// data to be thrown away and then re-read later, with the additional
+// seeking causing reloads from disk. Objects smaller than this threshold
+// are now always read into memory and stored in cache instead of being
+// wrapped in FSObject.
+const smallObjectThreshold = 16 * 1024
+
 // Packfile allows retrieving information from inside a packfile.
 type Packfile struct {
     idxfile.Index
@@ -79,15 +89,7 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
         }
     }
 
-    if _, err := p.s.SeekFromStart(o); err != nil {
-        if err == io.EOF || isInvalid(err) {
-            return nil, plumbing.ErrObjectNotFound
-        }
-
-        return nil, err
-    }
-
-    return p.nextObject()
+    return p.objectAtOffset(o)
 }
 
 // GetSizeByOffset retrieves the size of the encoded object from the
@@ -108,6 +110,12 @@ func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
     return h.Length, nil
 }
 
+func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
+    h, err := p.s.SeekObjectHeader(offset)
+    p.s.pendingObject = nil
+    return h, err
+}
+
 func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
     h, err := p.s.NextObjectHeader()
     p.s.pendingObject = nil
@@ -154,11 +162,7 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
     if baseType, ok := p.offsetToType[offset]; ok {
         typ = baseType
     } else {
-        if _, err = p.s.SeekFromStart(offset); err != nil {
-            return
-        }
-
-        h, err = p.nextObjectHeader()
+        h, err = p.objectHeaderAtOffset(offset)
         if err != nil {
             return
         }
@@ -175,8 +179,8 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
     return
 }
 
-func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
-    h, err := p.nextObjectHeader()
+func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+    h, err := p.objectHeaderAtOffset(offset)
     if err != nil {
         if err == io.EOF || isInvalid(err) {
             return nil, plumbing.ErrObjectNotFound
@@ -190,6 +194,13 @@ func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
         return p.getNextObject(h)
     }
 
+    // If the object is not a delta and it's small enough then read it
+    // completely into memory now since it is already read from disk
+    // into buffer anyway.
+    if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+        return p.getNextObject(h)
+    }
+
     hash, err := p.FindHash(h.Offset)
     if err != nil {
         return nil, err
@@ -233,11 +244,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
         }
     }
 
-    if _, err := p.s.SeekFromStart(offset); err != nil {
-        return nil, err
-    }
-
-    h, err := p.nextObjectHeader()
+    h, err := p.objectHeaderAtOffset(offset)
     if err != nil {
         return nil, err
     }
@@ -329,8 +336,6 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
         if err != nil {
             return err
         }
-
-        p.cachePut(base)
     }
 
     obj.SetType(base.Type())
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go | 6 (generated, vendored)
@@ -398,11 +398,7 @@ func (p *Parser) readData(o *objectInfo) ([]byte, error) {
         return data, nil
     }
 
-    if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
-        return nil, err
-    }
-
-    if _, err := p.scanner.NextObjectHeader(); err != nil {
+    if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
         return nil, err
     }
 
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go | 46 (generated, vendored)
@@ -138,14 +138,52 @@ func (s *Scanner) readCount() (uint32, error) {
     return binary.ReadUint32(s.r)
 }
 
+// SeekObjectHeader seeks to specified offset and returns the ObjectHeader
+// for the next object in the reader
+func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
+    // if seeking we assume that you are not interested in the header
+    if s.version == 0 {
+        s.version = VersionSupported
+    }
+
+    if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
+        return nil, err
+    }
+
+    h, err := s.nextObjectHeader()
+    if err != nil {
+        return nil, err
+    }
+
+    h.Offset = offset
+    return h, nil
+}
+
 // NextObjectHeader returns the ObjectHeader for the next object in the reader
 func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
     defer s.Flush()
 
     if err := s.doPending(); err != nil {
         return nil, err
     }
 
+    offset, err := s.r.Seek(0, io.SeekCurrent)
+    if err != nil {
+        return nil, err
+    }
+
+    h, err := s.nextObjectHeader()
+    if err != nil {
+        return nil, err
+    }
+
+    h.Offset = offset
+    return h, nil
+}
+
+// nextObjectHeader returns the ObjectHeader for the next object in the reader
+// without the Offset field
+func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
+    defer s.Flush()
+
     s.crc.Reset()
 
     h := &ObjectHeader{}
@@ -308,7 +346,7 @@ var byteSlicePool = sync.Pool{
 // SeekFromStart sets a new offset from start, returns the old position before
 // the change.
 func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
-    // if seeking we assume that you are not interested on the header
+    // if seeking we assume that you are not interested in the header
     if s.version == 0 {
         s.version = VersionSupported
     }
@@ -385,7 +423,7 @@ type bufferedSeeker struct {
 }
 
 func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
-    if whence == io.SeekCurrent {
+    if whence == io.SeekCurrent && offset == 0 {
         current, err := r.r.Seek(offset, whence)
         if err != nil {
             return current, err
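SeekObjectHeader replaces the SeekFromStart-then-NextObjectHeader sequence used elsewhere in this change set. A rough sketch of reading the first object header of a pack; the pack path is a placeholder, and offset 12 simply skips the fixed packfile header ("PACK", version, object count):

```go
package main

import (
    "fmt"
    "os"

    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
    // Placeholder path: point it at any *.pack under .git/objects/pack.
    f, err := os.Open(".git/objects/pack/example.pack")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    s := packfile.NewScanner(f)

    // The first object header follows the 12-byte packfile header.
    h, err := s.SeekObjectHeader(12)
    if err != nil {
        panic(err)
    }
    fmt.Printf("first object: type=%s length=%d offset=%d\n", h.Type, h.Length, h.Offset)
}
```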
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go | 132 (generated, vendored)
@@ -1,10 +1,12 @@
 package object
 
 import (
+    "container/list"
     "io"
 
     "gopkg.in/src-d/go-git.v4/plumbing"
     "gopkg.in/src-d/go-git.v4/plumbing/storer"
+    "gopkg.in/src-d/go-git.v4/storage"
 )
 
 type commitPreIterator struct {
@@ -181,3 +183,133 @@ func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
 }
 
 func (w *commitPostIterator) Close() {}
+
+// commitAllIterator stands for commit iterator for all refs.
+type commitAllIterator struct {
+    // currCommit points to the current commit.
+    currCommit *list.Element
+}
+
+// NewCommitAllIter returns a new commit iterator for all refs.
+// repoStorer is a repo Storer used to get commits and references.
+// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order
+func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
+    commitsPath := list.New()
+    commitsLookup := make(map[plumbing.Hash]*list.Element)
+    head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
+    if err != nil {
+        return nil, err
+    }
+
+    // add all references along with the HEAD
+    if err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup); err != nil {
+        return nil, err
+    }
+    refIter, err := repoStorer.IterReferences()
+    if err != nil {
+        return nil, err
+    }
+    defer refIter.Close()
+    err = refIter.ForEach(
+        func(ref *plumbing.Reference) error {
+            return addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup)
+        },
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    return &commitAllIterator{commitsPath.Front()}, nil
+}
+
+func addReference(
+    repoStorer storage.Storer,
+    commitIterFunc func(*Commit) CommitIter,
+    ref *plumbing.Reference,
+    commitsPath *list.List,
+    commitsLookup map[plumbing.Hash]*list.Element) error {
+
+    _, exists := commitsLookup[ref.Hash()]
+    if exists {
+        // we already have it - skip the reference.
+        return nil
+    }
+
+    refCommit, _ := GetCommit(repoStorer, ref.Hash())
+    if refCommit == nil {
+        // if it's not a commit - skip it.
+        return nil
+    }
+
+    var (
+        refCommits []*Commit
+        parent     *list.Element
+    )
+    // collect all ref commits to add
+    commitIter := commitIterFunc(refCommit)
+    for c, e := commitIter.Next(); e == nil; {
+        parent, exists = commitsLookup[c.Hash]
+        if exists {
+            break
+        }
+        refCommits = append(refCommits, c)
+        c, e = commitIter.Next()
+    }
+    commitIter.Close()
+
+    if parent == nil {
+        // common parent - not found
+        // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet)
+        for _, c := range refCommits {
+            parent = commitsPath.PushBack(c)
+            commitsLookup[c.Hash] = parent
+        }
+    } else {
+        // add ref's commits to the path in reverse order (from the latest)
+        for i := len(refCommits) - 1; i >= 0; i-- {
+            c := refCommits[i]
+            // insert before found common parent
+            parent = commitsPath.InsertBefore(c, parent)
+            commitsLookup[c.Hash] = parent
+        }
+    }
+
+    return nil
+}
+
+func (it *commitAllIterator) Next() (*Commit, error) {
+    if it.currCommit == nil {
+        return nil, io.EOF
+    }
+
+    c := it.currCommit.Value.(*Commit)
+    it.currCommit = it.currCommit.Next()
+
+    return c, nil
+}
+
+func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
+    for {
+        c, err := it.Next()
+        if err == io.EOF {
+            break
+        }
+        if err != nil {
+            return err
+        }
+
+        err = cb(c)
+        if err == storer.ErrStop {
+            break
+        }
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func (it *commitAllIterator) Close() {
+    it.currCommit = nil
+}
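NewCommitAllIter is what Repository.Log uses internally when LogOptions.All is set, but it can also be driven directly. A sketch under the assumption of an existing repository opened from a placeholder path, using pre-order iteration per ref:

```go
package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
    repo, err := git.PlainOpen(".") // placeholder path
    if err != nil {
        panic(err)
    }

    // Walk every ref (plus HEAD); each ref's commits are produced by the
    // supplied iterator constructor, here a plain pre-order walk.
    iter, err := object.NewCommitAllIter(repo.Storer, func(c *object.Commit) object.CommitIter {
        return object.NewCommitPreorderIter(c, nil, nil)
    })
    if err != nil {
        panic(err)
    }
    defer iter.Close()

    _ = iter.ForEach(func(c *object.Commit) error {
        fmt.Println(c.Hash)
        return nil
    })
}
```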
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go | 50 (generated, vendored)
@@ -1,23 +1,30 @@
 package object
 
 import (
-    "gopkg.in/src-d/go-git.v4/plumbing/storer"
     "io"
 
     "gopkg.in/src-d/go-git.v4/plumbing"
+
+    "gopkg.in/src-d/go-git.v4/plumbing/storer"
 )
 
 type commitFileIter struct {
     fileName      string
     sourceIter    CommitIter
     currentCommit *Commit
+    checkParent   bool
 }
 
 // NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
 // successive trees returned from the commit iterator from the argument. The purpose of this is
 // to find the commits that explain how the files that match the path came to be.
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter) CommitIter {
+// If checkParent is true then the function double checks if potential parent (next commit in a path)
+// is one of the parents in the tree (it's used by `git log --all`).
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
     iterator := new(commitFileIter)
     iterator.sourceIter = commitIter
     iterator.fileName = fileName
+    iterator.checkParent = checkParent
     return iterator
 }
@@ -71,20 +78,14 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
             return nil, diffErr
         }
 
-        foundChangeForFile := false
-        for _, change := range changes {
-            if change.name() == c.fileName {
-                foundChangeForFile = true
-                break
-            }
-        }
+        found := c.hasFileChange(changes, parentCommit)
 
         // Storing the current-commit in-case a change is found, and
        // Updating the current-commit for the next-iteration
         prevCommit := c.currentCommit
         c.currentCommit = parentCommit
 
-        if foundChangeForFile == true {
+        if found {
             return prevCommit, nil
         }
@@ -95,6 +96,35 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
     }
 }
 
+func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
+    for _, change := range changes {
+        if change.name() != c.fileName {
+            continue
+        }
+
+        // filename matches, now check if source iterator contains all commits (from all refs)
+        if c.checkParent {
+            if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
+                return true
+            }
+            continue
+        }
+
+        return true
+    }
+
+    return false
+}
+
+func isParentHash(hash plumbing.Hash, commit *Commit) bool {
+    for _, h := range commit.ParentHashes {
+        if h == hash {
+            return true
+        }
+    }
+    return false
+}
+
 func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
     for {
         commit, nextErr := c.Next()
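NewCommitFileIterFromIter gained a checkParent flag so the file filter stays correct when the source iterator spans all refs. A small sketch composing it by hand; the repository path and file name are placeholders, and passing false keeps the previous single-ref behaviour:

```go
package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
    repo, err := git.PlainOpen(".") // placeholder path
    if err != nil {
        panic(err)
    }

    ref, err := repo.Head()
    if err != nil {
        panic(err)
    }
    head, err := repo.CommitObject(ref.Hash())
    if err != nil {
        panic(err)
    }

    // Filter a plain pre-order walk down to commits touching one file.
    // checkParent=false matches the behaviour before this change.
    source := object.NewCommitPreorderIter(head, nil, nil)
    iter := object.NewCommitFileIterFromIter("README.md", source, false)
    defer iter.Close()

    _ = iter.ForEach(func(c *object.Commit) error {
        fmt.Println(c.Hash)
        return nil
    })
}
```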
vendor/gopkg.in/src-d/go-git.v4/repository.go | 78 (generated, vendored)
@@ -41,6 +41,8 @@ var (
     ErrTagExists = errors.New("tag already exists")
     // ErrTagNotFound an error stating the specified tag does not exist
     ErrTagNotFound = errors.New("tag not found")
+    // ErrFetching is returned when the packfile could not be downloaded
+    ErrFetching = errors.New("unable to fetch packfile")
 
     ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch")
     ErrRepositoryNotExists = errors.New("repository does not exist")
@@ -858,6 +860,8 @@ func (r *Repository) fetchAndUpdateReferences(
     remoteRefs, err := remote.fetch(ctx, o)
     if err == NoErrAlreadyUpToDate {
         objsUpdated = false
+    } else if err == packfile.ErrEmptyPackfile {
+        return nil, ErrFetching
     } else if err != nil {
         return nil, err
     }
@@ -1023,8 +1027,36 @@ func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error {
 
 // Log returns the commit history from the given LogOptions.
 func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
-    h := o.From
-    if o.From == plumbing.ZeroHash {
+    fn := commitIterFunc(o.Order)
+    if fn == nil {
+        return nil, fmt.Errorf("invalid Order=%v", o.Order)
+    }
+
+    var (
+        it  object.CommitIter
+        err error
+    )
+    if o.All {
+        it, err = r.logAll(fn)
+    } else {
+        it, err = r.log(o.From, fn)
+    }
+
+    if err != nil {
+        return nil, err
+    }
+
+    if o.FileName != nil {
+        // for `git log --all` also check parent (if the next commit comes from the real parent)
+        it = r.logWithFile(*o.FileName, it, o.All)
+    }
+
+    return it, nil
+}
+
+func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
+    h := from
+    if from == plumbing.ZeroHash {
         head, err := r.Head()
         if err != nil {
             return nil, err
@@ -1037,27 +1069,41 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
     if err != nil {
         return nil, err
     }
+    return commitIterFunc(commit), nil
+}
 
-    var commitIter object.CommitIter
-    switch o.Order {
+func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
+    return object.NewCommitAllIter(r.Storer, commitIterFunc)
+}
+
+func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter {
+    return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent)
+}
+
+func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
+    switch order {
     case LogOrderDefault:
-        commitIter = object.NewCommitPreorderIter(commit, nil, nil)
+        return func(c *object.Commit) object.CommitIter {
+            return object.NewCommitPreorderIter(c, nil, nil)
+        }
     case LogOrderDFS:
-        commitIter = object.NewCommitPreorderIter(commit, nil, nil)
+        return func(c *object.Commit) object.CommitIter {
+            return object.NewCommitPreorderIter(c, nil, nil)
+        }
     case LogOrderDFSPost:
-        commitIter = object.NewCommitPostorderIter(commit, nil)
+        return func(c *object.Commit) object.CommitIter {
+            return object.NewCommitPostorderIter(c, nil)
+        }
     case LogOrderBSF:
-        commitIter = object.NewCommitIterBSF(commit, nil, nil)
+        return func(c *object.Commit) object.CommitIter {
+            return object.NewCommitIterBSF(c, nil, nil)
+        }
     case LogOrderCommitterTime:
-        commitIter = object.NewCommitIterCTime(commit, nil, nil)
-    default:
-        return nil, fmt.Errorf("invalid Order=%v", o.Order)
+        return func(c *object.Commit) object.CommitIter {
+            return object.NewCommitIterCTime(c, nil, nil)
+        }
     }
-
-    if o.FileName == nil {
-        return commitIter, nil
-    }
-    return object.NewCommitFileIterFromIter(*o.FileName, commitIter), nil
+    return nil
 }
 
 // Tags returns all the tag References in a repository.
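After the refactor, Log resolves the requested order through commitIterFunc and then delegates to log or logAll. A sketch of requesting committer-time ordering; the repository path is a placeholder, and an unrecognised Order still surfaces as an "invalid Order" error:

```go
package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
    repo, err := git.PlainOpen(".") // placeholder path
    if err != nil {
        panic(err)
    }

    // Order is mapped to an iterator constructor by commitIterFunc;
    // LogOrderCommitterTime walks commits sorted by committer timestamp.
    iter, err := repo.Log(&git.LogOptions{Order: git.LogOrderCommitterTime})
    if err != nil {
        panic(err)
    }
    defer iter.Close()

    _ = iter.ForEach(func(c *object.Commit) error {
        fmt.Println(c.Committer.When.Format("2006-01-02"), c.Hash)
        return nil
    })
}
```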
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go | 51 (generated, vendored)
@@ -1,15 +1,24 @@
-// +build !norwfs
-
 package dotgit
 
 import (
+    "fmt"
     "os"
 
     "gopkg.in/src-d/go-git.v4/plumbing"
     "gopkg.in/src-d/go-git.v4/utils/ioutil"
+
+    "gopkg.in/src-d/go-billy.v4"
 )
 
 func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
+    if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
+        return d.setRefRwfs(fileName, content, old)
+    }
+
+    return d.setRefNorwfs(fileName, content, old)
+}
+
+func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
     // If we are not checking an old ref, just truncate the file.
     mode := os.O_RDWR | os.O_CREATE
     if old == nil {
@@ -41,3 +50,41 @@ func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err
     _, err = f.Write([]byte(content))
     return err
 }
+
+// There are some filesystems that don't support opening files in RDWD mode.
+// In these filesystems the standard SetRef function can not be used as it
+// reads the reference file to check that it's not modified before updating it.
+//
+// This version of the function writes the reference without extra checks
+// making it compatible with these simple filesystems. This is usually not
+// a problem as they should be accessed by only one process at a time.
+func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error {
+    _, err := d.fs.Stat(fileName)
+    if err == nil && old != nil {
+        fRead, err := d.fs.Open(fileName)
+        if err != nil {
+            return err
+        }
+
+        ref, err := d.readReferenceFrom(fRead, old.Name().String())
+        fRead.Close()
+
+        if err != nil {
+            return err
+        }
+
+        if ref.Hash() != old.Hash() {
+            return fmt.Errorf("reference has changed concurrently")
+        }
+    }
+
+    f, err := d.fs.Create(fileName)
+    if err != nil {
+        return err
+    }
+
+    defer f.Close()
+
+    _, err = f.Write([]byte(content))
+    return err
+}
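setRef now branches at runtime on the filesystem's capabilities instead of relying on the norwfs build tag. The same check is available to go-billy callers; a sketch, with memfs standing in for whatever filesystem the storage is backed by:

```go
package main

import (
    "fmt"

    "gopkg.in/src-d/go-billy.v4"
    "gopkg.in/src-d/go-billy.v4/memfs"
)

func main() {
    fs := memfs.New() // stand-in filesystem for the sketch

    // dotgit.setRef picks its strategy with this exact check: filesystems
    // that cannot open files read-write fall back to the no-rwfs path.
    if billy.CapabilityCheck(fs, billy.ReadAndWriteCapability) {
        fmt.Println("filesystem supports read-write opens; checked setRef path is used")
    } else {
        fmt.Println("falling back to the unchecked (norwfs) setRef path")
    }
}
```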
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref_norwfs.go | 47 (generated, vendored, deleted)
@@ -1,47 +0,0 @@
-// +build norwfs
-
-package dotgit
-
-import (
-    "fmt"
-
-    "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// There are some filesystems that don't support opening files in RDWD mode.
-// In these filesystems the standard SetRef function can not be used as i
-// reads the reference file to check that it's not modified before updating it.
-//
-// This version of the function writes the reference without extra checks
-// making it compatible with these simple filesystems. This is usually not
-// a problem as they should be accessed by only one process at a time.
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error {
-    _, err := d.fs.Stat(fileName)
-    if err == nil && old != nil {
-        fRead, err := d.fs.Open(fileName)
-        if err != nil {
-            return err
-        }
-
-        ref, err := d.readReferenceFrom(fRead, old.Name().String())
-        fRead.Close()
-
-        if err != nil {
-            return err
-        }
-
-        if ref.Hash() != old.Hash() {
-            return fmt.Errorf("reference has changed concurrently")
-        }
-    }
-
-    f, err := d.fs.Create(fileName)
-    if err != nil {
-        return err
-    }
-
-    defer f.Close()
-
-    _, err = f.Write([]byte(content))
-    return err
-}
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go | 60 (generated, vendored)
@@ -20,24 +20,25 @@ import (
 type ObjectStorage struct {
     options Options
 
-    // deltaBaseCache is an object cache uses to cache delta's bases when
-    deltaBaseCache cache.Object
+    // objectCache is an object cache uses to cache delta's bases and also recently
+    // loaded loose objects
+    objectCache cache.Object
 
     dir   *dotgit.DotGit
     index map[plumbing.Hash]idxfile.Index
 }
 
 // NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
-func NewObjectStorage(dir *dotgit.DotGit, cache cache.Object) *ObjectStorage {
-    return NewObjectStorageWithOptions(dir, cache, Options{})
+func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
+    return NewObjectStorageWithOptions(dir, objectCache, Options{})
 }
 
 // NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
-func NewObjectStorageWithOptions(dir *dotgit.DotGit, cache cache.Object, ops Options) *ObjectStorage {
+func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
     return &ObjectStorage{
-        options:        ops,
-        deltaBaseCache: cache,
-        dir:            dir,
+        options:     ops,
+        objectCache: objectCache,
+        dir:         dir,
     }
 }
@@ -206,7 +207,7 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
     idx := s.index[pack]
     hash, err := idx.FindHash(offset)
     if err == nil {
-        obj, ok := s.deltaBaseCache.Get(hash)
+        obj, ok := s.objectCache.Get(hash)
         if ok {
             return obj.Size(), nil
         }
@@ -215,8 +216,8 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
     }
 
     var p *packfile.Packfile
-    if s.deltaBaseCache != nil {
-        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+    if s.objectCache != nil {
+        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
     } else {
         p = packfile.NewPackfile(idx, s.dir.Fs(), f)
     }
@@ -241,9 +242,19 @@ func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
 // EncodedObject returns the object with the given hash, by searching for it in
 // the packfile and the git object directories.
 func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
-    obj, err := s.getFromUnpacked(h)
-    if err == plumbing.ErrObjectNotFound {
+    var obj plumbing.EncodedObject
+    var err error
+
+    if s.index != nil {
         obj, err = s.getFromPackfile(h, false)
+        if err == plumbing.ErrObjectNotFound {
+            obj, err = s.getFromUnpacked(h)
+        }
+    } else {
+        obj, err = s.getFromUnpacked(h)
+        if err == plumbing.ErrObjectNotFound {
+            obj, err = s.getFromPackfile(h, false)
+        }
     }
 
     // If the error is still object not found, check if it's a shared object
@@ -254,7 +265,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p
     // Create a new object storage with the DotGit(s) and check for the
     // required hash object. Skip when not found.
     for _, dg := range dotgits {
-        o := NewObjectStorage(dg, s.deltaBaseCache)
+        o := NewObjectStorage(dg, s.objectCache)
         enobj, enerr := o.EncodedObject(t, h)
         if enerr != nil {
             continue
@@ -304,9 +315,12 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
 
         return nil, err
     }
 
     defer ioutil.CheckClose(f, &err)
 
+    if cacheObj, found := s.objectCache.Get(h); found {
+        return cacheObj, nil
+    }
+
     obj = s.NewEncodedObject()
     r, err := objfile.NewReader(f)
     if err != nil {
@@ -327,6 +341,8 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
         return nil, err
     }
 
+    s.objectCache.Put(obj)
+
     _, err = io.Copy(w, r)
     return obj, err
 }
@@ -369,7 +385,7 @@ func (s *ObjectStorage) decodeObjectAt(
 ) (plumbing.EncodedObject, error) {
     hash, err := idx.FindHash(offset)
     if err == nil {
-        obj, ok := s.deltaBaseCache.Get(hash)
+        obj, ok := s.objectCache.Get(hash)
         if ok {
             return obj, nil
         }
@@ -380,8 +396,8 @@ func (s *ObjectStorage) decodeObjectAt(
     }
 
     var p *packfile.Packfile
-    if s.deltaBaseCache != nil {
-        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+    if s.objectCache != nil {
+        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
     } else {
         p = packfile.NewPackfile(idx, s.dir.Fs(), f)
     }
@@ -400,11 +416,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
     }
 
     p := packfile.NewScanner(f)
-    if _, err := p.SeekFromStart(offset); err != nil {
-        return nil, err
-    }
-
-    header, err := p.NextObjectHeader()
+    header, err := p.SeekObjectHeader(offset)
     if err != nil {
         return nil, err
     }
@@ -495,7 +507,7 @@ func (s *ObjectStorage) buildPackfileIters(
             }
             return newPackfileIter(
                 s.dir.Fs(), pack, t, seen, s.index[h],
-                s.deltaBaseCache, s.options.KeepDescriptors,
+                s.objectCache, s.options.KeepDescriptors,
             )
         },
     }, nil
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go | 6 (generated, vendored)
@@ -51,11 +51,7 @@ func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options)
         fs:  fs,
         dir: dir,
 
-        ObjectStorage: ObjectStorage{
-            options:        ops,
-            deltaBaseCache: cache,
-            dir:            dir,
-        },
+        ObjectStorage:    *NewObjectStorageWithOptions(dir, cache, ops),
         ReferenceStorage: ReferenceStorage{dir: dir},
         IndexStorage:     IndexStorage{dir: dir},
         ShallowStorage:   ShallowStorage{dir: dir},