Vendor Update Go Libs (#13166)

* update github.com/alecthomas/chroma v0.8.0 -> v0.8.1

* github.com/blevesearch/bleve v1.0.10 -> v1.0.12

* editorconfig-core-go v2.1.1 -> v2.3.7

* github.com/gliderlabs/ssh v0.2.2 -> v0.3.1

* migrate editorconfig.ParseBytes to Parse (see the sketch below the commit metadata)

* github.com/shurcooL/vfsgen -> 0d455de96546

* github.com/go-git/go-git/v5 v5.1.0 -> v5.2.0

* github.com/google/uuid v1.1.1 -> v1.1.2

* github.com/huandu/xstrings v1.3.0 -> v1.3.2

* github.com/klauspost/compress v1.10.11 -> v1.11.1

* github.com/markbates/goth v1.61.2 -> v1.65.0

* github.com/mattn/go-sqlite3 v1.14.0 -> v1.14.4

* github.com/mholt/archiver v3.3.0 -> v3.3.2

* github.com/microcosm-cc/bluemonday 4f7140c49acb -> v1.0.4

* github.com/minio/minio-go v7.0.4 -> v7.0.5

* github.com/olivere/elastic v7.0.9 -> v7.0.20

* github.com/urfave/cli v1.20.0 -> v1.22.4

* github.com/prometheus/client_golang v1.1.0 -> v1.8.0

* github.com/xanzy/go-gitlab v0.37.0 -> v0.38.1

* mvdan.cc/xurls v2.1.0 -> v2.2.0

Co-authored-by: Lauris BH <lauris@nix.lv>
Authored by 6543 on 2020-10-16 07:06:27 +02:00; committed by GitHub
parent 91f2afdb54
commit 12a1f914f4
656 changed files with 52967 additions and 25229 deletions
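
The editorconfig migration listed above amounts to feeding the raw bytes through an io.Reader. A minimal sketch, assuming the editorconfig-core-go v2 API (Parse accepting an io.Reader, ParseBytes being deprecated); the sample content and the printed field are illustrative only:

package main

import (
	"bytes"
	"fmt"

	"github.com/editorconfig/editorconfig-core-go/v2"
)

func main() {
	data := []byte("[*]\nindent_style = tab\n")

	// Before the migration: def, err := editorconfig.ParseBytes(data)
	def, err := editorconfig.Parse(bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(def.Definitions)) // 1
}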

View file

@ -8,7 +8,7 @@ require (
github.com/gliderlabs/ssh v0.2.2
github.com/go-git/gcfg v1.5.0
github.com/go-git/go-billy/v5 v5.0.0
github.com/go-git/go-git-fixtures/v4 v4.0.1
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12
github.com/google/go-cmp v0.3.0
github.com/imdario/mergo v0.3.9
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99

View file

@ -20,6 +20,8 @@ github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agR
github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc=
github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=

View file

@ -28,7 +28,7 @@ func (e *ErrInvalidRevision) Error() string {
type Revisioner interface {
}
// Ref represents a reference name : HEAD, master
// Ref represents a reference name : HEAD, master, <hash>
type Ref string
// TildePath represents ~, ~{n}
@ -297,7 +297,7 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
if t != cbrace {
return nil, &ErrInvalidRevision{fmt.Sprintf(`missing "}" in @{-n} structure`)}
return nil, &ErrInvalidRevision{s: `missing "}" in @{-n} structure`}
}
return AtCheckout{n}, nil
@ -419,7 +419,7 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
case re == "" && tok == emark && nextTok == minus:
negate = true
case re == "" && tok == emark:
return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" others than those defined are reserved`)}
return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`}
case re == "" && tok == slash:
p.unscan()
case tok != slash && start:
@ -490,7 +490,7 @@ func (p *Parser) parseColonSlash() (Revisioner, error) {
case re == "" && tok == emark && nextTok == minus:
negate = true
case re == "" && tok == emark:
return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" others than those defined are reserved`)}
return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`}
case tok == eof:
p.unscan()
reg, err := regexp.Compile(re)

View file

@ -2,6 +2,7 @@ package git
import (
"errors"
"fmt"
"regexp"
"strings"
"time"
@ -373,6 +374,30 @@ var (
ErrMissingAuthor = errors.New("author field is required")
)
// AddOptions describes how an add operation should be performed
type AddOptions struct {
// All equivalent to `git add -A`, update the index not only where the
// working tree has a file matching `Path` but also where the index already
// has an entry. This adds, modifies, and removes index entries to match the
// working tree. If no `Path` nor `Glob` is given when `All` option is
// used, all files in the entire working tree are updated.
All bool
// Path is the exact filepath to the file or directory to be added.
Path string
// Glob adds all paths, matching pattern, to the index. If pattern matches a
// directory path, all directory contents are added to the index recursively.
Glob string
}
// Validate validates the fields and sets the default values.
func (o *AddOptions) Validate(r *Repository) error {
if o.Path != "" && o.Glob != "" {
return fmt.Errorf("fields Path and Glob are mutual exclusive")
}
return nil
}
// CommitOptions describes how a commit operation should be performed.
type CommitOptions struct {
// All automatically stage files that have been modified and deleted, but
@ -464,7 +489,8 @@ var (
// CreateTagOptions describes how a tag object should be created.
type CreateTagOptions struct {
// Tagger defines the signature of the tag creator.
// Tagger defines the signature of the tag creator. If Tagger is empty the
// Name and Email are read from the config, and time.Now is used as When.
Tagger *object.Signature
// Message defines the annotation of the tag. It is canonicalized during
// validation into the format expected by git - no leading whitespace and
@ -478,7 +504,9 @@ type CreateTagOptions struct {
// Validate validates the fields and sets the default values.
func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
if o.Tagger == nil {
return ErrMissingTagger
if err := o.loadConfigTagger(r); err != nil {
return err
}
}
if o.Message == "" {
@ -491,6 +519,35 @@ func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
return nil
}
func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
cfg, err := r.ConfigScoped(config.SystemScope)
if err != nil {
return err
}
if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
o.Tagger = &object.Signature{
Name: cfg.Author.Name,
Email: cfg.Author.Email,
When: time.Now(),
}
}
if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" {
o.Tagger = &object.Signature{
Name: cfg.User.Name,
Email: cfg.User.Email,
When: time.Now(),
}
}
if o.Tagger == nil {
return ErrMissingTagger
}
return nil
}
// ListOptions describes how a remote list should be performed.
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
@ -545,6 +602,9 @@ type PlainOpenOptions struct {
// DetectDotGit defines whether parent directories should be
// walked until a .git directory or file is found.
DetectDotGit bool
// Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt).
// NOTE: This option will only work with the filesystem storage.
EnableDotGitCommonDir bool
}
// Validate validates the fields and sets the default values.
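
The AddOptions type introduced above is consumed by the new Worktree.AddWithOptions (shown in the worktree diff further below). A minimal sketch, assuming a repository at a hypothetical path, that stages everything like `git add -A`:

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// All behaves like `git add -A`; Path and Glob are mutually exclusive.
	if err := wt.AddWithOptions(&git.AddOptions{All: true}); err != nil {
		panic(err)
	}
	fmt.Println("working tree staged")
}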

View file

@ -118,7 +118,7 @@ func isSetSymLink(m os.FileMode) bool {
func (m FileMode) Bytes() []byte {
ret := make([]byte, 4)
binary.LittleEndian.PutUint32(ret, uint32(m))
return ret[:]
return ret
}
// IsMalformed returns if the FileMode should not appear in a git packfile,

View file

@ -44,6 +44,46 @@ func (c *Config) Section(name string) *Section {
return s
}
// HasSection checks if the Config has a section with the specified name.
func (c *Config) HasSection(name string) bool {
for _, s := range c.Sections {
if s.IsName(name) {
return true
}
}
return false
}
// RemoveSection removes a section from a config file.
func (c *Config) RemoveSection(name string) *Config {
result := Sections{}
for _, s := range c.Sections {
if !s.IsName(name) {
result = append(result, s)
}
}
c.Sections = result
return c
}
// RemoveSubsection removes a subsection from a config file.
func (c *Config) RemoveSubsection(section string, subsection string) *Config {
for _, s := range c.Sections {
if s.IsName(section) {
result := Subsections{}
for _, ss := range s.Subsections {
if !ss.IsName(subsection) {
result = append(result, ss)
}
}
s.Subsections = result
}
}
return c
}
// AddOption adds an option to a given section and subsection. Use the
// NoSubsection constant for the subsection argument if no subsection is wanted.
func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
@ -67,33 +107,3 @@ func (c *Config) SetOption(section string, subsection string, key string, value
return c
}
// RemoveSection removes a section from a config file.
func (c *Config) RemoveSection(name string) *Config {
result := Sections{}
for _, s := range c.Sections {
if !s.IsName(name) {
result = append(result, s)
}
}
c.Sections = result
return c
}
// RemoveSubsection remove s a subsection from a config file.
func (c *Config) RemoveSubsection(section string, subsection string) *Config {
for _, s := range c.Sections {
if s.IsName(section) {
result := Subsections{}
for _, ss := range s.Subsections {
if !ss.IsName(subsection) {
result = append(result, ss)
}
}
s.Subsections = result
}
}
return c
}
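
A short sketch of the HasSection and RemoveSection helpers added above, using the plumbing config format package directly (the sample option is made up):

package main

import (
	"fmt"

	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := format.New()
	cfg.AddOption("core", format.NoSubsection, "bare", "false")

	fmt.Println(cfg.HasSection("core")) // true

	cfg.RemoveSection("core")
	fmt.Println(cfg.HasSection("core")) // false
}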

View file

@ -19,7 +19,7 @@ type Options []*Option
// IsKey returns true if the given key matches
// this option's key in a case-insensitive comparison.
func (o *Option) IsKey(key string) bool {
return strings.ToLower(o.Key) == strings.ToLower(key)
return strings.EqualFold(o.Key, key)
}
func (opts Options) GoString() string {
@ -54,6 +54,16 @@ func (opts Options) Get(key string) string {
return ""
}
// Has checks if an Option exists with the given key.
func (opts Options) Has(key string) bool {
for _, o := range opts {
if o.IsKey(key) {
return true
}
}
return false
}
// GetAll returns all possible values for the same key.
func (opts Options) GetAll(key string) []string {
result := []string{}

View file

@ -61,32 +61,7 @@ func (s Subsections) GoString() string {
// IsName checks if the name provided is equals to the Section name, case insensitive.
func (s *Section) IsName(name string) bool {
return strings.ToLower(s.Name) == strings.ToLower(name)
}
// Option return the value for the specified key. Empty string is returned if
// key does not exists.
func (s *Section) Option(key string) string {
return s.Options.Get(key)
}
// AddOption adds a new Option to the Section. The updated Section is returned.
func (s *Section) AddOption(key string, value string) *Section {
s.Options = s.Options.withAddedOption(key, value)
return s
}
// SetOption adds a new Option to the Section. If the option already exists, is replaced.
// The updated Section is returned.
func (s *Section) SetOption(key string, value string) *Section {
s.Options = s.Options.withSettedOption(key, value)
return s
}
// Remove an option with the specified key. The updated Section is returned.
func (s *Section) RemoveOption(key string) *Section {
s.Options = s.Options.withoutOption(key)
return s
return strings.EqualFold(s.Name, name)
}
// Subsection returns a Subsection from the specified Section. If the
@ -115,6 +90,55 @@ func (s *Section) HasSubsection(name string) bool {
return false
}
// RemoveSubsection removes a subsection from a Section.
func (s *Section) RemoveSubsection(name string) *Section {
result := Subsections{}
for _, s := range s.Subsections {
if !s.IsName(name) {
result = append(result, s)
}
}
s.Subsections = result
return s
}
// Option return the value for the specified key. Empty string is returned if
// key does not exists.
func (s *Section) Option(key string) string {
return s.Options.Get(key)
}
// OptionAll returns all possible values for an option with the specified key.
// If the option does not exist, an empty slice will be returned.
func (s *Section) OptionAll(key string) []string {
return s.Options.GetAll(key)
}
// HasOption checks if the Section has an Option with the given key.
func (s *Section) HasOption(key string) bool {
return s.Options.Has(key)
}
// AddOption adds a new Option to the Section. The updated Section is returned.
func (s *Section) AddOption(key string, value string) *Section {
s.Options = s.Options.withAddedOption(key, value)
return s
}
// SetOption adds a new Option to the Section. If the option already exists, is replaced.
// The updated Section is returned.
func (s *Section) SetOption(key string, value string) *Section {
s.Options = s.Options.withSettedOption(key, value)
return s
}
// Remove an option with the specified key. The updated Section is returned.
func (s *Section) RemoveOption(key string) *Section {
s.Options = s.Options.withoutOption(key)
return s
}
// IsName checks if the name of the subsection is exactly the specified name.
func (s *Subsection) IsName(name string) bool {
return s.Name == name
@ -126,6 +150,17 @@ func (s *Subsection) Option(key string) string {
return s.Options.Get(key)
}
// OptionAll returns all possible values for an option with the specified key.
// If the option does not exist, an empty slice will be returned.
func (s *Subsection) OptionAll(key string) []string {
return s.Options.GetAll(key)
}
// HasOption checks if the Subsection has an Option with the given key.
func (s *Subsection) HasOption(key string) bool {
return s.Options.Has(key)
}
// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
func (s *Subsection) AddOption(key string, value string) *Subsection {
s.Options = s.Options.withAddedOption(key, value)

View file

@ -1,6 +1,7 @@
package gitignore
import (
"bufio"
"bytes"
"io/ioutil"
"os"
@ -15,7 +16,6 @@ import (
const (
commentPrefix = "#"
coreSection = "core"
eol = "\n"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
@ -29,11 +29,11 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [
if err == nil {
defer f.Close()
if data, err := ioutil.ReadAll(f); err == nil {
for _, s := range strings.Split(string(data), eol) {
if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 {
ps = append(ps, ParsePattern(s, path))
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
s := scanner.Text()
if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 {
ps = append(ps, ParsePattern(s, path))
}
}
} else if !os.IsNotExist(err) {

View file

@ -188,7 +188,7 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
_, err := io.ReadFull(d.r, name[:])
_, err := io.ReadFull(d.r, name)
return string(name), err
}
@ -390,7 +390,9 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
e.Trees = i
_, err = io.ReadFull(d.r, e.Hash[:])
if err != nil {
return nil, err
}
return e, nil
}

View file

@ -49,7 +49,6 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
return err
}
target.SetSize(int64(dst.Len()))
b := byteSlicePool.Get().([]byte)
@ -113,7 +112,7 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
invalidOffsetSize(offset, sz, srcSz) {
break
}
dst.Write(src[offset:offset+sz])
dst.Write(src[offset : offset+sz])
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself

View file

@ -3,7 +3,6 @@ package plumbing
import (
"bytes"
"io"
"io/ioutil"
)
// MemoryObject on memory Object implementation
@ -39,9 +38,11 @@ func (o *MemoryObject) Size() int64 { return o.sz }
// afterwards
func (o *MemoryObject) SetSize(s int64) { o.sz = s }
// Reader returns a ObjectReader used to read the object's content.
// Reader returns an io.ReadCloser used to read the object's content.
//
// For a MemoryObject, this reader is seekable.
func (o *MemoryObject) Reader() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil
return nopCloser{bytes.NewReader(o.cont)}, nil
}
// Writer returns a ObjectWriter used to write the object's content.
@ -59,3 +60,13 @@ func (o *MemoryObject) Write(p []byte) (n int, err error) {
// Close releases any resources consumed by the object when it is acting as a
// ObjectWriter.
func (o *MemoryObject) Close() error { return nil }
// nopCloser exposes the extra methods of bytes.Reader while nopping Close().
//
// This allows clients to attempt seeking in a cached Blob's Reader.
type nopCloser struct {
*bytes.Reader
}
// Close does nothing.
func (nc nopCloser) Close() error { return nil }
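
Because the reader is now backed by a *bytes.Reader, callers can attempt to seek in a cached object's content. A hedged sketch of that behaviour:

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	obj.Write([]byte("hello world"))

	rc, _ := obj.Reader()
	defer rc.Close()

	// The returned ReadCloser now also satisfies io.Seeker.
	if s, ok := rc.(io.Seeker); ok {
		s.Seek(6, io.SeekStart) // skip "hello "
	}
	rest, _ := ioutil.ReadAll(rc)
	fmt.Println(string(rest)) // world
}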

View file

@ -75,7 +75,7 @@ func (c *Change) Files() (from, to *File, err error) {
func (c *Change) String() string {
action, err := c.Action()
if err != nil {
return fmt.Sprintf("malformed change")
return "malformed change"
}
return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name())

View file

@ -243,16 +243,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
// Encode transforms a Commit into a plumbing.EncodedObject.
func (b *Commit) Encode(o plumbing.EncodedObject) error {
return b.encode(o, true)
func (c *Commit) Encode(o plumbing.EncodedObject) error {
return c.encode(o, true)
}
// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature).
func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
return b.encode(o, false)
func (c *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
return c.encode(o, false)
}
func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.CommitObject)
w, err := o.Writer()
if err != nil {
@ -261,11 +261,11 @@ func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
defer ioutil.CheckClose(w, &err)
if _, err = fmt.Fprintf(w, "tree %s\n", b.TreeHash.String()); err != nil {
if _, err = fmt.Fprintf(w, "tree %s\n", c.TreeHash.String()); err != nil {
return err
}
for _, parent := range b.ParentHashes {
for _, parent := range c.ParentHashes {
if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil {
return err
}
@ -275,7 +275,7 @@ func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
if err = b.Author.Encode(w); err != nil {
if err = c.Author.Encode(w); err != nil {
return err
}
@ -283,11 +283,11 @@ func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
if err = b.Committer.Encode(w); err != nil {
if err = c.Committer.Encode(w); err != nil {
return err
}
if b.PGPSignature != "" && includeSig {
if c.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
}
@ -296,14 +296,14 @@ func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
// newline. Use join for this so it's clear that a newline should not be
// added after this section, as it will be added when the message is
// printed.
signature := strings.TrimSuffix(b.PGPSignature, "\n")
signature := strings.TrimSuffix(c.PGPSignature, "\n")
lines := strings.Split(signature, "\n")
if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
return err
}
}
if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil {
if _, err = fmt.Fprintf(w, "\n\n%s", c.Message); err != nil {
return err
}

View file

@ -173,4 +173,3 @@ func (w *filterCommitIter) addToQueue(
return nil
}

View file

@ -4,7 +4,6 @@ import (
"io"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
)
@ -29,7 +28,7 @@ func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIt
return iterator
}
// this function is kept for compatibilty, can be replaced with NewCommitPathIterFromIter
// NewCommitFileIterFromIter is kept for compatibility, can be replaced with NewCommitPathIterFromIter
func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
return NewCommitPathIterFromIter(
func(path string) bool {

View file

@ -3,10 +3,10 @@ package commitgraph
import (
"io"
"github.com/emirpasic/gods/trees/binaryheap"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/emirpasic/gods/trees/binaryheap"
)
type commitNodeIteratorByCTime struct {

View file

@ -121,12 +121,12 @@ type Patch struct {
filePatches []fdiff.FilePatch
}
func (t *Patch) FilePatches() []fdiff.FilePatch {
return t.filePatches
func (p *Patch) FilePatches() []fdiff.FilePatch {
return p.filePatches
}
func (t *Patch) Message() string {
return t.message
func (p *Patch) Message() string {
return p.message
}
func (p *Patch) Encode(w io.Writer) error {
@ -198,12 +198,12 @@ func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
return
}
func (t *textFilePatch) IsBinary() bool {
return len(t.chunks) == 0
func (tf *textFilePatch) IsBinary() bool {
return len(tf.chunks) == 0
}
func (t *textFilePatch) Chunks() []fdiff.Chunk {
return t.chunks
func (tf *textFilePatch) Chunks() []fdiff.Chunk {
return tf.chunks
}
// textChunk is an implementation of fdiff.Chunk interface

View file

@ -536,7 +536,7 @@ var errIndexFull = errors.New("index is full")
// between two files.
// To save space in memory, this index uses a space efficient encoding which
// will not exceed 1MiB per instance. The index starts out at a smaller size
// (closer to 2KiB), but may grow as more distinct blocks withing the scanned
// (closer to 2KiB), but may grow as more distinct blocks within the scanned
// file are discovered.
// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java
type similarityIndex struct {
@ -709,7 +709,7 @@ func (i *similarityIndex) common(dst *similarityIndex) uint64 {
}
func (i *similarityIndex) add(key int, cnt uint64) error {
key = int(uint32(key)*0x9e370001 >> 1)
key = int(uint32(key) * 0x9e370001 >> 1)
j := i.slot(key)
for {
@ -769,7 +769,7 @@ func (i *similarityIndex) slot(key int) int {
// We use 31 - hashBits because the upper bit was already forced
// to be 0 and we want the remaining high bits to be used as the
// table slot.
return int(uint32(key) >> uint(31 - i.hashBits))
return int(uint32(key) >> uint(31-i.hashBits))
}
func shouldGrowAt(hashBits int) int {

View file

@ -86,10 +86,7 @@ func (l *List) Get(capability Capability) []string {
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
if _, ok := l.m[capability]; ok {
delete(l.m, capability)
}
delete(l.m, capability)
return l.Add(capability, values...)
}

View file

@ -109,42 +109,42 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
// - is a DepthReference is given capability.DeepenNot MUST be present
// - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k
// - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed
func (r *UploadRequest) Validate() error {
if len(r.Wants) == 0 {
func (req *UploadRequest) Validate() error {
if len(req.Wants) == 0 {
return fmt.Errorf("want can't be empty")
}
if err := r.validateRequiredCapabilities(); err != nil {
if err := req.validateRequiredCapabilities(); err != nil {
return err
}
if err := r.validateConflictCapabilities(); err != nil {
if err := req.validateConflictCapabilities(); err != nil {
return err
}
return nil
}
func (r *UploadRequest) validateRequiredCapabilities() error {
func (req *UploadRequest) validateRequiredCapabilities() error {
msg := "missing capability %s"
if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) {
if len(req.Shallows) != 0 && !req.Capabilities.Supports(capability.Shallow) {
return fmt.Errorf(msg, capability.Shallow)
}
switch r.Depth.(type) {
switch req.Depth.(type) {
case DepthCommits:
if r.Depth != DepthCommits(0) {
if !r.Capabilities.Supports(capability.Shallow) {
if req.Depth != DepthCommits(0) {
if !req.Capabilities.Supports(capability.Shallow) {
return fmt.Errorf(msg, capability.Shallow)
}
}
case DepthSince:
if !r.Capabilities.Supports(capability.DeepenSince) {
if !req.Capabilities.Supports(capability.DeepenSince) {
return fmt.Errorf(msg, capability.DeepenSince)
}
case DepthReference:
if !r.Capabilities.Supports(capability.DeepenNot) {
if !req.Capabilities.Supports(capability.DeepenNot) {
return fmt.Errorf(msg, capability.DeepenNot)
}
}
@ -152,15 +152,15 @@ func (r *UploadRequest) validateRequiredCapabilities() error {
return nil
}
func (r *UploadRequest) validateConflictCapabilities() error {
func (req *UploadRequest) validateConflictCapabilities() error {
msg := "capabilities %s and %s are mutually exclusive"
if r.Capabilities.Supports(capability.Sideband) &&
r.Capabilities.Supports(capability.Sideband64k) {
if req.Capabilities.Supports(capability.Sideband) &&
req.Capabilities.Supports(capability.Sideband64k) {
return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
}
if r.Capabilities.Supports(capability.MultiACK) &&
r.Capabilities.Supports(capability.MultiACKDetailed) {
if req.Capabilities.Supports(capability.MultiACK) &&
req.Capabilities.Supports(capability.MultiACKDetailed) {
return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
}

View file

@ -14,9 +14,9 @@ import (
// Decode reads the next upload-request form its input and
// stores it in the UploadRequest.
func (u *UploadRequest) Decode(r io.Reader) error {
func (req *UploadRequest) Decode(r io.Reader) error {
d := newUlReqDecoder(r)
return d.Decode(u)
return d.Decode(req)
}
type ulReqDecoder struct {

View file

@ -15,9 +15,9 @@ import (
// All the payloads will end with a newline character. Wants and
// shallows are sorted alphabetically. A depth of 0 means no depth
// request is sent.
func (u *UploadRequest) Encode(w io.Writer) error {
func (req *UploadRequest) Encode(w io.Writer) error {
e := newUlReqEncoder(w)
return e.Encode(u)
return e.Encode(req)
}
type ulReqEncoder struct {

View file

@ -68,12 +68,12 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU
return r
}
func (r *ReferenceUpdateRequest) validate() error {
if len(r.Commands) == 0 {
func (req *ReferenceUpdateRequest) validate() error {
if len(req.Commands) == 0 {
return ErrEmptyCommands
}
for _, c := range r.Commands {
for _, c := range req.Commands {
if err := c.validate(); err != nil {
return err
}

View file

@ -14,33 +14,33 @@ var (
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (r *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := r.validate(); err != nil {
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := req.validate(); err != nil {
return err
}
e := pktline.NewEncoder(w)
if err := r.encodeShallow(e, r.Shallow); err != nil {
if err := req.encodeShallow(e, req.Shallow); err != nil {
return err
}
if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil {
if err := req.encodeCommands(e, req.Commands, req.Capabilities); err != nil {
return err
}
if r.Packfile != nil {
if _, err := io.Copy(w, r.Packfile); err != nil {
if req.Packfile != nil {
if _, err := io.Copy(w, req.Packfile); err != nil {
return err
}
return r.Packfile.Close()
return req.Packfile.Close()
}
return nil
}
func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
func (req *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
h *plumbing.Hash) error {
if h == nil {
@ -51,7 +51,7 @@ func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
return e.Encodef("%s%s", shallow, objId)
}
func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
func (req *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
cmds []*Command, cap *capability.List) error {
if err := e.Encodef("%s\x00%s",

View file

@ -32,6 +32,19 @@ var (
ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec")
)
type NoMatchingRefSpecError struct {
refSpec config.RefSpec
}
func (e NoMatchingRefSpecError) Error() string {
return fmt.Sprintf("couldn't find remote ref %q", e.refSpec.Src())
}
func (e NoMatchingRefSpecError) Is(target error) bool {
_, ok := target.(NoMatchingRefSpecError)
return ok
}
const (
// This describes the maximum number of commits to walk when
// computing the haves to send to a server, for each ref in the
@ -126,7 +139,7 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
if o.Force {
for i := 0; i < len(o.RefSpecs); i++ {
rs := &o.RefSpecs[i]
if !rs.IsForceUpdate() {
if !rs.IsForceUpdate() && !rs.IsDelete() {
o.RefSpecs[i] = config.RefSpec("+" + rs.String())
}
}
@ -218,9 +231,9 @@ func (r *Remote) newReferenceUpdateRequest(
if o.Progress != nil {
req.Progress = o.Progress
if ar.Capabilities.Supports(capability.Sideband64k) {
req.Capabilities.Set(capability.Sideband64k)
_ = req.Capabilities.Set(capability.Sideband64k)
} else if ar.Capabilities.Supports(capability.Sideband) {
req.Capabilities.Set(capability.Sideband)
_ = req.Capabilities.Set(capability.Sideband)
}
}
@ -498,10 +511,8 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok {
return nil
}
} else {
if rs.Dst("") != ref.Name() {
return nil
}
} else if rs.Dst("") != ref.Name() {
return nil
}
cmd := &packp.Command{
@ -753,7 +764,7 @@ func doCalculateRefs(
})
if !matched && !s.IsWildcard() {
return fmt.Errorf("couldn't find remote ref %q", s.Src())
return NoMatchingRefSpecError{refSpec: s}
}
return err
@ -1037,21 +1048,22 @@ func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
}
var resultRefs []*plumbing.Reference
refs.ForEach(func(ref *plumbing.Reference) error {
err = refs.ForEach(func(ref *plumbing.Reference) error {
resultRefs = append(resultRefs, ref)
return nil
})
if err != nil {
return nil, err
}
return resultRefs, nil
}
func objectsToPush(commands []*packp.Command) []plumbing.Hash {
var objects []plumbing.Hash
objects := make([]plumbing.Hash, 0, len(commands))
for _, cmd := range commands {
if cmd.New == plumbing.ZeroHash {
continue
}
objects = append(objects, cmd.New)
}
return objects
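
The new NoMatchingRefSpecError implements Is, so a missing remote ref can be detected with errors.Is instead of matching the message string. A sketch, assuming a repository with a configured origin remote at a hypothetical path:

package main

import (
	"errors"
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	err = repo.Fetch(&git.FetchOptions{
		RefSpecs: []config.RefSpec{
			"refs/heads/does-not-exist:refs/remotes/origin/does-not-exist",
		},
	})
	if errors.Is(err, git.NoMatchingRefSpecError{}) {
		fmt.Println("remote ref not found")
	}
}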

View file

@ -3,6 +3,7 @@ package git
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"io"
@ -13,6 +14,8 @@ import (
"strings"
"time"
"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/internal/revision"
"github.com/go-git/go-git/v5/plumbing"
@ -47,6 +50,7 @@ var (
ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch")
ErrRepositoryNotExists = errors.New("repository does not exist")
ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist")
ErrRepositoryAlreadyExists = errors.New("repository already exists")
ErrRemoteNotFound = errors.New("remote not found")
ErrRemoteExists = errors.New("remote already exists")
@ -89,7 +93,7 @@ func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
}
if worktree == nil {
r.setIsBare(true)
_ = r.setIsBare(true)
return r, nil
}
@ -253,7 +257,19 @@ func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error)
return nil, err
}
s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
var repositoryFs billy.Filesystem
if o.EnableDotGitCommonDir {
dotGitCommon, err := dotGitCommonDirectory(dot)
if err != nil {
return nil, err
}
repositoryFs = dotgit.NewRepositoryFilesystem(dot, dotGitCommon)
} else {
repositoryFs = dot
}
s := filesystem.NewStorage(repositoryFs, cache.NewObjectLRUDefault())
return Open(s, wt)
}
@ -262,6 +278,14 @@ func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem,
if path, err = filepath.Abs(path); err != nil {
return nil, nil, err
}
pathinfo, err := os.Stat(path)
if !os.IsNotExist(err) {
if !pathinfo.IsDir() && detect {
path = filepath.Dir(path)
}
}
var fs billy.Filesystem
var fi os.FileInfo
for {
@ -328,6 +352,38 @@ func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Files
return osfs.New(fs.Join(path, gitdir)), nil
}
func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err error) {
f, err := fs.Open("commondir")
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
b, err := stdioutil.ReadAll(f)
if err != nil {
return nil, err
}
if len(b) > 0 {
path := strings.TrimSpace(string(b))
if filepath.IsAbs(path) {
commonDir = osfs.New(path)
} else {
commonDir = osfs.New(filepath.Join(fs.Root(), path))
}
if _, err := commonDir.Stat(""); err != nil {
if os.IsNotExist(err) {
return nil, ErrRepositoryIncomplete
}
return nil, err
}
}
return commonDir, nil
}
// PlainClone a repository into the path with the given options, isBare defines
// if the new repository will be bare or normal. If the path is not empty
// ErrRepositoryAlreadyExists is returned.
@ -361,7 +417,7 @@ func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOp
err = r.clone(ctx, o)
if err != nil && err != ErrRepositoryAlreadyExists {
if cleanup {
cleanUpDir(path, cleanupParent)
_ = cleanUpDir(path, cleanupParent)
}
}
@ -1379,7 +1435,7 @@ func (r *Repository) Worktree() (*Worktree, error) {
// resolve to a commit hash, not a tree or annotated tag.
//
// Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch,
// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug})
// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}), hash (prefix and full)
func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) {
p := revision.NewParserFromString(string(rev))
@ -1392,17 +1448,13 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
var commit *object.Commit
for _, item := range items {
switch item.(type) {
switch item := item.(type) {
case revision.Ref:
revisionRef := item.(revision.Ref)
revisionRef := item
var tryHashes []plumbing.Hash
maybeHash := plumbing.NewHash(string(revisionRef))
if !maybeHash.IsZero() {
tryHashes = append(tryHashes, maybeHash)
}
tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...)
for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
@ -1447,7 +1499,7 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
}
case revision.CaretPath:
depth := item.(revision.CaretPath).Depth
depth := item.Depth
if depth == 0 {
break
@ -1475,7 +1527,7 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
commit = c
case revision.TildePath:
for i := 0; i < item.(revision.TildePath).Depth; i++ {
for i := 0; i < item.Depth; i++ {
c, err := commit.Parents().Next()
if err != nil {
@ -1487,8 +1539,8 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
case revision.CaretReg:
history := object.NewCommitPreorderIter(commit, nil, nil)
re := item.(revision.CaretReg).Regexp
negate := item.(revision.CaretReg).Negate
re := item.Regexp
negate := item.Negate
var c *object.Commit
@ -1520,6 +1572,49 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
return &commit.Hash, nil
}
// resolveHashPrefix returns a list of potential hashes that the given string
// is a prefix of. It quietly swallows errors, returning nil.
func (r *Repository) resolveHashPrefix(hashStr string) []plumbing.Hash {
// Handle complete and partial hashes.
// plumbing.NewHash forces args into a full 20 byte hash, which isn't suitable
// for partial hashes since they will become zero-filled.
if hashStr == "" {
return nil
}
if len(hashStr) == len(plumbing.ZeroHash)*2 {
// Only a full hash is possible.
hexb, err := hex.DecodeString(hashStr)
if err != nil {
return nil
}
var h plumbing.Hash
copy(h[:], hexb)
return []plumbing.Hash{h}
}
// Partial hash.
// hex.DecodeString only decodes to complete bytes, so only works with pairs of hex digits.
evenHex := hashStr[:len(hashStr)&^1]
hexb, err := hex.DecodeString(evenHex)
if err != nil {
return nil
}
candidates := expandPartialHash(r.Storer, hexb)
if len(evenHex) == len(hashStr) {
// The prefix was an exact number of bytes.
return candidates
}
// Do another prefix check to ensure the dangling nybble is correct.
var hashes []plumbing.Hash
for _, h := range candidates {
if strings.HasPrefix(h.String(), hashStr) {
hashes = append(hashes, h)
}
}
return hashes
}
type RepackConfig struct {
// UseRefDeltas configures whether packfile encoder will use reference deltas.
// By default OFSDeltaObject is used.
@ -1612,3 +1707,31 @@ func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, er
return h, err
}
func expandPartialHash(st storer.EncodedObjectStorer, prefix []byte) (hashes []plumbing.Hash) {
// The fast version is implemented by storage/filesystem.ObjectStorage.
type fastIter interface {
HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error)
}
if fi, ok := st.(fastIter); ok {
h, err := fi.HashesWithPrefix(prefix)
if err != nil {
return nil
}
return h
}
// Slow path.
iter, err := st.IterEncodedObjects(plumbing.AnyObject)
if err != nil {
return nil
}
iter.ForEach(func(obj plumbing.EncodedObject) error {
h := obj.Hash()
if bytes.HasPrefix(h[:], prefix) {
hashes = append(hashes, h)
}
return nil
})
return
}
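
Two of the additions above can be exercised together: EnableDotGitCommonDir for opening a linked worktree, and hash-prefix resolution in ResolveRevision. A sketch with hypothetical paths and a hypothetical abbreviated hash:

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// A linked worktree created with `git worktree add`; its .git file points
	// at <main>/.git/worktrees/<name>, which in turn holds a commondir file.
	repo, err := git.PlainOpenWithOptions("/path/to/linked-worktree", &git.PlainOpenOptions{
		EnableDotGitCommonDir: true,
	})
	if err != nil {
		panic(err)
	}

	// ResolveRevision now also accepts full and abbreviated object hashes.
	hash, err := repo.ResolveRevision(plumbing.Revision("12a1f91"))
	if err != nil {
		panic(err)
	}
	fmt.Println(hash)
}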

View file

@ -3,12 +3,14 @@ package dotgit
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"time"
@ -30,6 +32,12 @@ const (
objectsPath = "objects"
packPath = "pack"
refsPath = "refs"
branchesPath = "branches"
hooksPath = "hooks"
infoPath = "info"
remotesPath = "remotes"
logsPath = "logs"
worktreesPath = "worktrees"
tmpPackedRefsPrefix = "._packed-refs"
@ -82,7 +90,7 @@ type DotGit struct {
incomingChecked bool
incomingDirName string
objectList []plumbing.Hash
objectList []plumbing.Hash // sorted
objectMap map[plumbing.Hash]struct{}
packList []plumbing.Hash
packMap map[plumbing.Hash]struct{}
@ -330,6 +338,53 @@ func (d *DotGit) NewObject() (*ObjectWriter, error) {
return newObjectWriter(d.fs)
}
// ObjectsWithPrefix returns the hashes of objects that have the given prefix.
func (d *DotGit) ObjectsWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
// Handle edge cases.
if len(prefix) < 1 {
return d.Objects()
} else if len(prefix) > len(plumbing.ZeroHash) {
return nil, nil
}
if d.options.ExclusiveAccess {
err := d.genObjectList()
if err != nil {
return nil, err
}
// Rely on d.objectList being sorted.
// Figure out the half-open interval defined by the prefix.
first := sort.Search(len(d.objectList), func(i int) bool {
// Same as plumbing.HashSlice.Less.
return bytes.Compare(d.objectList[i][:], prefix) >= 0
})
lim := len(d.objectList)
if limPrefix, overflow := incBytes(prefix); !overflow {
lim = sort.Search(len(d.objectList), func(i int) bool {
// Same as plumbing.HashSlice.Less.
return bytes.Compare(d.objectList[i][:], limPrefix) >= 0
})
}
return d.objectList[first:lim], nil
}
// This is the slow path.
var objects []plumbing.Hash
var n int
err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
n++
if bytes.HasPrefix(hash[:], prefix) {
objects = append(objects, hash)
}
return nil
})
if err != nil {
return nil, err
}
return objects, nil
}
// Objects returns a slice with the hashes of objects found under the
// .git/objects/ directory.
func (d *DotGit) Objects() ([]plumbing.Hash, error) {
@ -421,12 +476,17 @@ func (d *DotGit) genObjectList() error {
}
d.objectMap = make(map[plumbing.Hash]struct{})
return d.forEachObjectHash(func(h plumbing.Hash) error {
populate := func(h plumbing.Hash) error {
d.objectList = append(d.objectList, h)
d.objectMap[h] = struct{}{}
return nil
})
}
if err := d.forEachObjectHash(populate); err != nil {
return err
}
plumbing.HashesSort(d.objectList)
return nil
}
func (d *DotGit) hasObject(h plumbing.Hash) error {
@ -1109,3 +1169,20 @@ func isNum(b byte) bool {
func isHexAlpha(b byte) bool {
return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F'
}
// incBytes increments a byte slice, which involves incrementing the
// right-most byte, and following carry leftward.
// It makes a copy so that the provided slice's underlying array is not modified.
// If the overall operation overflows (e.g. incBytes(0xff, 0xff)), the second return parameter indicates that.
func incBytes(in []byte) (out []byte, overflow bool) {
out = make([]byte, len(in))
copy(out, in)
for i := len(out) - 1; i >= 0; i-- {
out[i]++
if out[i] != 0 {
return // Didn't overflow.
}
}
overflow = true
return
}
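
ObjectsWithPrefix can also be called directly on the dotgit layer; note it only walks loose objects under .git/objects. A sketch with a hypothetical path and the prefix bytes for "12a1":

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
)

func main() {
	fs := osfs.New("/path/to/repo/.git") // hypothetical path
	dg := dotgit.New(fs)

	// Loose objects whose hash starts with 0x12 0xa1 ("12a1...").
	hashes, err := dg.ObjectsWithPrefix([]byte{0x12, 0xa1})
	if err != nil {
		panic(err)
	}
	for _, h := range hashes {
		fmt.Println(h)
	}
}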

View file

@ -0,0 +1,111 @@
package dotgit
import (
"os"
"path/filepath"
"strings"
"github.com/go-git/go-billy/v5"
)
// RepositoryFilesystem is a billy.Filesystem compatible object wrapper
// which handles dot-git filesystem operations and supports commondir according to git scm layout:
// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt
type RepositoryFilesystem struct {
dotGitFs billy.Filesystem
commonDotGitFs billy.Filesystem
}
func NewRepositoryFilesystem(dotGitFs, commonDotGitFs billy.Filesystem) *RepositoryFilesystem {
return &RepositoryFilesystem{
dotGitFs: dotGitFs,
commonDotGitFs: commonDotGitFs,
}
}
func (fs *RepositoryFilesystem) mapToRepositoryFsByPath(path string) billy.Filesystem {
// Nothing to decide if commondir not defined
if fs.commonDotGitFs == nil {
return fs.dotGitFs
}
cleanPath := filepath.Clean(path)
// Check exceptions for commondir (https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt)
switch cleanPath {
case fs.dotGitFs.Join(logsPath, "HEAD"):
return fs.dotGitFs
case fs.dotGitFs.Join(refsPath, "bisect"), fs.dotGitFs.Join(refsPath, "rewritten"), fs.dotGitFs.Join(refsPath, "worktree"):
return fs.dotGitFs
}
// Determine dot-git root by first path element.
// There are some elements which should always use commondir when commondir defined.
// Usual dot-git root will be used for the rest of files.
switch strings.Split(cleanPath, string(filepath.Separator))[0] {
case objectsPath, refsPath, packedRefsPath, configPath, branchesPath, hooksPath, infoPath, remotesPath, logsPath, shallowPath, worktreesPath:
return fs.commonDotGitFs
default:
return fs.dotGitFs
}
}
func (fs *RepositoryFilesystem) Create(filename string) (billy.File, error) {
return fs.mapToRepositoryFsByPath(filename).Create(filename)
}
func (fs *RepositoryFilesystem) Open(filename string) (billy.File, error) {
return fs.mapToRepositoryFsByPath(filename).Open(filename)
}
func (fs *RepositoryFilesystem) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
return fs.mapToRepositoryFsByPath(filename).OpenFile(filename, flag, perm)
}
func (fs *RepositoryFilesystem) Stat(filename string) (os.FileInfo, error) {
return fs.mapToRepositoryFsByPath(filename).Stat(filename)
}
func (fs *RepositoryFilesystem) Rename(oldpath, newpath string) error {
return fs.mapToRepositoryFsByPath(oldpath).Rename(oldpath, newpath)
}
func (fs *RepositoryFilesystem) Remove(filename string) error {
return fs.mapToRepositoryFsByPath(filename).Remove(filename)
}
func (fs *RepositoryFilesystem) Join(elem ...string) string {
return fs.dotGitFs.Join(elem...)
}
func (fs *RepositoryFilesystem) TempFile(dir, prefix string) (billy.File, error) {
return fs.mapToRepositoryFsByPath(dir).TempFile(dir, prefix)
}
func (fs *RepositoryFilesystem) ReadDir(path string) ([]os.FileInfo, error) {
return fs.mapToRepositoryFsByPath(path).ReadDir(path)
}
func (fs *RepositoryFilesystem) MkdirAll(filename string, perm os.FileMode) error {
return fs.mapToRepositoryFsByPath(filename).MkdirAll(filename, perm)
}
func (fs *RepositoryFilesystem) Lstat(filename string) (os.FileInfo, error) {
return fs.mapToRepositoryFsByPath(filename).Lstat(filename)
}
func (fs *RepositoryFilesystem) Symlink(target, link string) error {
return fs.mapToRepositoryFsByPath(target).Symlink(target, link)
}
func (fs *RepositoryFilesystem) Readlink(link string) (string, error) {
return fs.mapToRepositoryFsByPath(link).Readlink(link)
}
func (fs *RepositoryFilesystem) Chroot(path string) (billy.Filesystem, error) {
return fs.mapToRepositoryFsByPath(path).Chroot(path)
}
func (fs *RepositoryFilesystem) Root() string {
return fs.dotGitFs.Root()
}
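
NewRepositoryFilesystem wires a worktree's private dot-git directory to the shared commondir and routes each path to the right filesystem. A brief sketch with hypothetical paths:

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
)

func main() {
	wtDotGit := osfs.New("/path/to/repo/.git/worktrees/wt1") // hypothetical
	commonDotGit := osfs.New("/path/to/repo/.git")           // hypothetical

	fs := dotgit.NewRepositoryFilesystem(wtDotGit, commonDotGit)

	// HEAD stays in the worktree's own dot-git dir; objects/, config and most
	// of refs/ are looked up in the common dir, per the switch above.
	fmt.Println(fs.Root())
}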

View file

@ -1,6 +1,7 @@
package filesystem
import (
"bytes"
"io"
"os"
"time"
@ -518,6 +519,36 @@ func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, pl
return plumbing.ZeroHash, plumbing.ZeroHash, -1
}
func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
hashes, err := s.dir.ObjectsWithPrefix(prefix)
if err != nil {
return nil, err
}
// TODO: This could be faster with some idxfile changes,
// or diving into the packfile.
for _, index := range s.index {
ei, err := index.Entries()
if err != nil {
return nil, err
}
for {
e, err := ei.Next()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
if bytes.HasPrefix(e.Hash[:], prefix) {
hashes = append(hashes, e.Hash)
}
}
ei.Close()
}
return hashes, nil
}
// IterEncodedObjects returns an iterator for all the objects in the packfile
// with the given type.
func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {

View file

@ -195,10 +195,10 @@ func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) er
var errNotSupported = fmt.Errorf("Not supported")
func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
return time.Time{}, errNotSupported
}
func (s *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
return errNotSupported
}

View file

@ -29,7 +29,7 @@ func Do(src, dst string) (diffs []diffmatchpatch.Diff) {
// a bulk delete+insert and the half-baked suboptimal result is returned at once.
// The underlying algorithm is Meyers, its complexity is O(N*d) where N is
// min(lines(src), lines(dst)) and d is the size of the diff.
func DoWithTimeout (src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) {
func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) {
dmp := diffmatchpatch.New()
dmp.DiffTimeout = timeout
wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst)

View file

@ -91,8 +91,7 @@ func (n *node) calculateChildren() error {
if os.IsNotExist(err) {
return nil
}
return nil
return err
}
for _, file := range files {

View file

@ -93,7 +93,12 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
head, err := w.r.Head()
if err == nil {
if !updated && head.Hash() == ref.Hash() {
headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash())
if err != nil {
return err
}
if !updated && headAheadOfRef {
return NoErrAlreadyUpToDate
}

View file

@ -6,7 +6,6 @@ import (
"sort"
"strings"
"golang.org/x/crypto/openpgp"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/format/index"
@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/storage"
"github.com/go-git/go-billy/v5"
"golang.org/x/crypto/openpgp"
)
// Commit stores the current contents of the index in a new commit along with
@ -58,17 +58,23 @@ func (w *Worktree) autoAddModifiedAndDeleted() error {
return err
}
idx, err := w.r.Storer.Index()
if err != nil {
return err
}
for path, fs := range s {
if fs.Worktree != Modified && fs.Worktree != Deleted {
continue
}
if _, err := w.Add(path); err != nil {
if _, _, err := w.doAddFile(idx, s, path, nil); err != nil {
return err
}
}
return nil
return w.r.Storer.SetIndex(idx)
}
func (w *Worktree) updateHEAD(commit plumbing.Hash) error {

View file

@ -7,6 +7,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/plumbing"
@ -264,7 +265,77 @@ func diffTreeIsEquals(a, b noder.Hasher) bool {
// the worktree to the index. If any of the files is already staged in the index
// no error is returned. When path is a file, the blob.Hash is returned.
func (w *Worktree) Add(path string) (plumbing.Hash, error) {
// TODO(mcuadros): remove plumbing.Hash from signature at v5.
// TODO(mcuadros): deprecate in favor of AddWithOption in v6.
return w.doAdd(path, make([]gitignore.Pattern, 0))
}
func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) {
files, err := w.Filesystem.ReadDir(directory)
if err != nil {
return false, err
}
if len(ignorePattern) > 0 {
m := gitignore.NewMatcher(ignorePattern)
matchPath := strings.Split(directory, string(os.PathSeparator))
if m.Match(matchPath, true) {
// ignore
return false, nil
}
}
for _, file := range files {
name := path.Join(directory, file.Name())
var a bool
if file.IsDir() {
if file.Name() == GitDirName {
// ignore special git directory
continue
}
a, err = w.doAddDirectory(idx, s, name, ignorePattern)
} else {
a, _, err = w.doAddFile(idx, s, name, ignorePattern)
}
if err != nil {
return
}
if !added && a {
added = true
}
}
return
}
// AddWithOptions file contents to the index, updates the index using the
// current content found in the working tree, to prepare the content staged for
// the next commit.
//
// It typically adds the current content of existing paths as a whole, but with
// some options it can also be used to add content with only part of the changes
// made to the working tree files applied, or remove paths that do not exist in
// the working tree anymore.
func (w *Worktree) AddWithOptions(opts *AddOptions) error {
if err := opts.Validate(w.r); err != nil {
return err
}
if opts.All {
_, err := w.doAdd(".", w.Excludes)
return err
}
if opts.Glob != "" {
return w.AddGlob(opts.Glob)
}
_, err := w.Add(opts.Path)
return err
}
func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) {
s, err := w.Status()
if err != nil {
return plumbing.ZeroHash, err
@ -280,9 +351,9 @@ func (w *Worktree) Add(path string) (plumbing.Hash, error) {
fi, err := w.Filesystem.Lstat(path)
if err != nil || !fi.IsDir() {
added, h, err = w.doAddFile(idx, s, path)
added, h, err = w.doAddFile(idx, s, path, ignorePattern)
} else {
added, err = w.doAddDirectory(idx, s, path)
added, err = w.doAddDirectory(idx, s, path, ignorePattern)
}
if err != nil {
@ -296,42 +367,11 @@ func (w *Worktree) Add(path string) (plumbing.Hash, error) {
return h, w.r.Storer.SetIndex(idx)
}
func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string) (added bool, err error) {
files, err := w.Filesystem.ReadDir(directory)
if err != nil {
return false, err
}
for _, file := range files {
name := path.Join(directory, file.Name())
var a bool
if file.IsDir() {
if file.Name() == GitDirName {
// ignore special git directory
continue
}
a, err = w.doAddDirectory(idx, s, name)
} else {
a, _, err = w.doAddFile(idx, s, name)
}
if err != nil {
return
}
if !added && a {
added = true
}
}
return
}
// AddGlob adds all paths, matching pattern, to the index. If pattern matches a
// directory path, all directory contents are added to the index recursively. No
// error is returned if all matching paths are already staged in index.
func (w *Worktree) AddGlob(pattern string) error {
// TODO(mcuadros): deprecate in favor of AddWithOption in v6.
files, err := util.Glob(w.Filesystem, pattern)
if err != nil {
return err
@ -360,9 +400,9 @@ func (w *Worktree) AddGlob(pattern string) error {
var added bool
if fi.IsDir() {
added, err = w.doAddDirectory(idx, s, file)
added, err = w.doAddDirectory(idx, s, file, make([]gitignore.Pattern, 0))
} else {
added, _, err = w.doAddFile(idx, s, file)
added, _, err = w.doAddFile(idx, s, file, make([]gitignore.Pattern, 0))
}
if err != nil {
@ -383,10 +423,18 @@ func (w *Worktree) AddGlob(pattern string) error {
// doAddFile create a new blob from path and update the index, added is true if
// the file added is different from the index.
func (w *Worktree) doAddFile(idx *index.Index, s Status, path string) (added bool, h plumbing.Hash, err error) {
func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) {
if s.File(path).Worktree == Unmodified {
return false, h, nil
}
if len(ignorePattern) > 0 {
m := gitignore.NewMatcher(ignorePattern)
matchPath := strings.Split(path, string(os.PathSeparator))
if m.Match(matchPath, true) {
// ignore
return false, h, nil
}
}
h, err = w.copyFileToStorage(path)
if err != nil {