Update vendor github.com/cavaliercoder/grab

Author: Daniele Rondina
Date:   2020-11-19 00:56:59 +01:00
parent f9a7113ab9
commit 287098f101
14 changed files with 391 additions and 140 deletions

go.mod

@@ -7,7 +7,7 @@ require (
 	github.com/Sabayon/pkgs-checker v0.7.2
 	github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
 	github.com/briandowns/spinner v1.7.0
-	github.com/cavaliercoder/grab v2.0.0+incompatible
+	github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
 	github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
 	github.com/docker/docker v17.12.0-ce-rc1.0.20200417035958-130b0bc6032c+incompatible
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect

go.sum

@@ -97,6 +97,8 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec h1:4XvMn0XuV7qxCH22gbnR79r+xTUaLOSA0GW/egpO3SQ=
+github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec/go.mod h1:NbXoa59CCAGqtRm7kRrcZIk2dTCJMRVF8QI3BOD7isY=
 github.com/cavaliercoder/grab v2.0.0+incompatible h1:wZHbBQx56+Yxjx2TCGDcenhh3cJn7cCLMfkEPmySTSE=
 github.com/cavaliercoder/grab v2.0.0+incompatible/go.mod h1:tTBkfNqSBfuMmMBFaO2phgyhdYhiZQ/+iXCZDzcDsMI=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=

vendor/github.com/cavaliercoder/grab/.gitignore (new, vendored)

@@ -0,0 +1,3 @@
# ignore IDE project files
*.iml
.idea/

vendor/github.com/cavaliercoder/grab/.travis.yml (vendored)

@@ -1,6 +1,7 @@
 language: go
 go:
+  - tip
   - 1.10.x
   - 1.9.x
   - 1.8.x

vendor/github.com/cavaliercoder/grab/README.md (vendored)

@@ -70,7 +70,7 @@ Loop:
 		case <-t.C:
 			fmt.Printf(" transferred %v / %v bytes (%.2f%%)\n",
 				resp.BytesComplete(),
-				resp.Size,
+				resp.Size(),
 				100*resp.Progress())
 		case <-resp.Done:

vendor/github.com/cavaliercoder/grab/bps/bps.go (new, vendored)

@@ -0,0 +1,54 @@
/*
Package bps provides gauges for calculating the Bytes Per Second transfer rate
of data streams.
*/
package bps
import (
"context"
"time"
)
// Gauge is the common interface for all BPS gauges in this package. Given a
// set of samples over time, each gauge type can be used to measure the Bytes
// Per Second transfer rate of a data stream.
//
// All samples must monotonically increase in timestamp and value. Each sample
// should represent the total number of bytes sent in a stream, rather than
// accounting for the number sent since the last sample.
//
// To ensure a gauge can report progress as quickly as possible, take an initial
// sample when your stream first starts.
//
// All gauge implementations are safe for concurrent use.
type Gauge interface {
// Sample adds a new sample of the progress of the monitored stream.
Sample(t time.Time, n int64)
// BPS returns the calculated Bytes Per Second rate of the monitored stream.
BPS() float64
}
// SampleFunc is used by Watch to take periodic samples of a monitored stream.
type SampleFunc func() (n int64)
// Watch will periodically call the given SampleFunc to sample the progress of
// a monitored stream and update the given gauge. SampleFunc should return the
// total number of bytes transferred by the stream since it started.
//
// Watch is a blocking call and should typically be called in a new goroutine.
// To prevent the goroutine from leaking, make sure to cancel the given context
// once the stream is completed or canceled.
func Watch(ctx context.Context, g Gauge, f SampleFunc, interval time.Duration) {
g.Sample(time.Now(), f())
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case now := <-t.C:
g.Sample(now, f())
}
}
}
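
For reference, the new Watch/Gauge API can be driven on its own. A minimal sketch (not part of this commit) that feeds the gauge from a fake byte counter; the NewSMA constructor comes from sma.go below:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/cavaliercoder/grab/bps"
)

func main() {
	var total int64 // total bytes "transferred" so far, as a SampleFunc would report

	g := bps.NewSMA(6) // five second window sampled once per second (see sma.go)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Watch blocks, so it runs in its own goroutine and samples the counter
	// once per second until the context is cancelled.
	go bps.Watch(ctx, g, func() int64 { return atomic.LoadInt64(&total) }, time.Second)

	// Simulate a stream moving roughly 1 MiB per second.
	for i := 0; i < 6; i++ {
		atomic.AddInt64(&total, 1<<20)
		time.Sleep(time.Second)
	}
	fmt.Printf("~%.0f bytes/sec\n", g.BPS())
}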

vendor/github.com/cavaliercoder/grab/bps/sma.go (new, vendored)

@@ -0,0 +1,81 @@
package bps
import (
"sync"
"time"
)
// NewSMA returns a gauge that uses a Simple Moving Average with the given
// number of samples to measure the bytes per second of a byte stream.
//
// BPS is computed using the timestamp of the most recent and oldest sample in
// the sample buffer. When a new sample is added, the oldest sample is dropped
// if the sample count exceeds maxSamples.
//
// The gauge does not account for any latency in arrival time of new samples or
// the desired window size. Any variance in the arrival of samples will result
// in a BPS measurement that is correct for the submitted samples, but over a
// varying time window.
//
// maxSamples should be equal to 1 + (window size / sampling interval) where
// window size is the number of seconds over which the moving average is
// smoothed and sampling interval is the number of seconds between each sample.
//
// For example, if you want a five second window, sampling once per second,
// maxSamples should be 1 + 5/1 = 6.
func NewSMA(maxSamples int) Gauge {
if maxSamples < 2 {
panic("sample count must be greater than 1")
}
return &sma{
maxSamples: uint64(maxSamples),
samples: make([]int64, maxSamples),
timestamps: make([]time.Time, maxSamples),
}
}
type sma struct {
mu sync.Mutex
index uint64
maxSamples uint64
sampleCount uint64
samples []int64
timestamps []time.Time
}
func (c *sma) Sample(t time.Time, n int64) {
c.mu.Lock()
defer c.mu.Unlock()
c.timestamps[c.index] = t
c.samples[c.index] = n
c.index = (c.index + 1) % c.maxSamples
// prevent integer overflow in sampleCount. Values greater or equal to
// maxSamples have the same semantic meaning.
c.sampleCount++
if c.sampleCount > c.maxSamples {
c.sampleCount = c.maxSamples
}
}
func (c *sma) BPS() float64 {
c.mu.Lock()
defer c.mu.Unlock()
// we need two samples to start
if c.sampleCount < 2 {
return 0
}
// First sample is always the oldest until ring buffer first overflows
oldest := c.index
if c.sampleCount < c.maxSamples {
oldest = 0
}
newest := (c.index + c.maxSamples - 1) % c.maxSamples
seconds := c.timestamps[newest].Sub(c.timestamps[oldest]).Seconds()
bytes := float64(c.samples[newest] - c.samples[oldest])
return bytes / seconds
}
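
The maxSamples formula in the NewSMA comment can be checked by hand: a rate over N intervals needs N+1 boundary samples, so a five second window sampled once per second needs 1 + 5/1 = 6 samples. A small sketch (not part of the commit) feeding the gauge directly with made-up timestamps and byte counts:

package main

import (
	"fmt"
	"time"

	"github.com/cavaliercoder/grab/bps"
)

func main() {
	g := bps.NewSMA(6) // 1 + (5s window / 1s sampling interval)

	start := time.Now()
	for i := 0; i <= 5; i++ {
		// sample taken i seconds after start, with i MiB transferred in total
		g.Sample(start.Add(time.Duration(i)*time.Second), int64(i)<<20)
	}
	// newest and oldest samples span 5 seconds and 5 MiB, so ~1 MiB/s
	fmt.Printf("%.0f bytes/sec\n", g.BPS())
}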

vendor/github.com/cavaliercoder/grab/client.go (vendored)

@@ -4,20 +4,33 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"io"
 	"net/http"
 	"os"
 	"path/filepath"
 	"sync"
+	"sync/atomic"
 	"time"
 )
+
+// HTTPClient provides an interface allowing us to perform HTTP requests.
+type HTTPClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// truncater is a private interface allowing different response
+// Writers to be truncated
+type truncater interface {
+	Truncate(size int64) error
+}
+
 // A Client is a file download client.
 //
 // Clients are safe for concurrent use by multiple goroutines.
 type Client struct {
 	// HTTPClient specifies the http.Client which will be used for communicating
 	// with the remote server during the file transfer.
-	HTTPClient *http.Client
+	HTTPClient HTTPClient
 	// UserAgent specifies the User-Agent string which will be set in the
 	// headers of all requests made by this client.
@@ -64,6 +77,7 @@ var DefaultClient = NewClient()
 func (c *Client) Do(req *Request) *Response {
 	// cancel will be called on all code-paths via closeResponse
 	ctx, cancel := context.WithCancel(req.Context())
+	req = req.WithContext(ctx)
 	resp := &Response{
 		Request: req,
 		Start: time.Now(),
@@ -189,7 +203,7 @@ func (c *Client) run(resp *Response, f stateFunc) {
 //
 // If an error occurs, the next stateFunc is closeResponse.
 func (c *Client) statFileInfo(resp *Response) stateFunc {
-	if resp.Filename == "" {
+	if resp.Request.NoStore || resp.Filename == "" {
 		return c.headRequest
 	}
 	fi, err := os.Stat(resp.Filename)
@@ -225,31 +239,39 @@ func (c *Client) validateLocal(resp *Response) stateFunc {
 		return c.closeResponse
 	}
-	// determine expected file size
-	size := resp.Request.Size
-	if size == 0 && resp.HTTPResponse != nil {
-		size = resp.HTTPResponse.ContentLength
+	// determine target file size
+	expectedSize := resp.Request.Size
+	if expectedSize == 0 && resp.HTTPResponse != nil {
+		expectedSize = resp.HTTPResponse.ContentLength
 	}
-	if size == 0 {
+
+	if expectedSize == 0 {
+		// size is either actually 0 or unknown
+		// if unknown, we ask the remote server
+		// if known to be 0, we proceed with a GET
 		return c.headRequest
 	}
-	if size == resp.fi.Size() {
+	if expectedSize == resp.fi.Size() {
+		// local file matches remote file size - wrap it up
 		resp.DidResume = true
 		resp.bytesResumed = resp.fi.Size()
 		return c.checksumFile
 	}
 	if resp.Request.NoResume {
+		// local file should be overwritten
 		return c.getRequest
 	}
-	if size < resp.fi.Size() {
+	if expectedSize >= 0 && expectedSize < resp.fi.Size() {
+		// remote size is known, is smaller than local size and we want to resume
 		resp.err = ErrBadLength
 		return c.closeResponse
 	}
 	if resp.CanResume {
+		// set resume range on GET request
 		resp.Request.HTTPRequest.Header.Set(
 			"Range",
 			fmt.Sprintf("bytes=%d-", resp.fi.Size()))
@@ -265,19 +287,24 @@ func (c *Client) checksumFile(resp *Response) stateFunc {
 		return c.closeResponse
 	}
 	if resp.Filename == "" {
-		panic("filename not set")
+		panic("grab: developer error: filename not set")
 	}
+	if resp.Size() < 0 {
+		panic("grab: developer error: size unknown")
+	}
 	req := resp.Request
-	// compare checksum
+	// compute checksum
 	var sum []byte
-	sum, resp.err = checksum(req.Context(), resp.Filename, req.hash)
+	sum, resp.err = resp.checksumUnsafe()
 	if resp.err != nil {
 		return c.closeResponse
 	}
+
+	// compare checksum
 	if !bytes.Equal(sum, req.checksum) {
 		resp.err = ErrBadChecksum
-		if req.deleteOnError {
+		if !resp.Request.NoStore && req.deleteOnError {
 			if err := os.Remove(resp.Filename); err != nil {
 				// err should be os.PathError and include file path
 				resp.err = fmt.Errorf(
@@ -326,6 +353,14 @@ func (c *Client) headRequest(resp *Response) stateFunc {
 		return c.getRequest
 	}
+
+	// In case of redirects during HEAD, record the final URL and use it
+	// instead of the original URL when sending future requests.
+	// This way we avoid sending potentially unsupported requests to
+	// the original URL, e.g. "Range", since it was the final URL
+	// that advertised its support.
+	resp.Request.HTTPRequest.URL = resp.HTTPResponse.Request.URL
+	resp.Request.HTTPRequest.Host = resp.HTTPResponse.Request.Host
 	return c.readResponse
 }
@@ -335,6 +370,8 @@ func (c *Client) getRequest(resp *Response) stateFunc {
 		return c.closeResponse
 	}
+
+	// TODO: check Content-Range
 	// check status code
 	if !resp.Request.IgnoreBadStatusCodes {
 		if resp.HTTPResponse.StatusCode < 200 || resp.HTTPResponse.StatusCode > 299 {
@@ -348,13 +385,15 @@ func (c *Client) getRequest(resp *Response) stateFunc {
 func (c *Client) readResponse(resp *Response) stateFunc {
 	if resp.HTTPResponse == nil {
-		panic("Response.HTTPResponse is not ready")
+		panic("grab: developer error: Response.HTTPResponse is nil")
 	}
 	// check expected size
-	resp.Size = resp.bytesResumed + resp.HTTPResponse.ContentLength
-	if resp.HTTPResponse.ContentLength > 0 && resp.Request.Size > 0 {
-		if resp.Request.Size != resp.Size {
+	resp.sizeUnsafe = resp.HTTPResponse.ContentLength
+	if resp.sizeUnsafe >= 0 {
+		// remote size is known
+		resp.sizeUnsafe += resp.bytesResumed
+		if resp.Request.Size > 0 && resp.Request.Size != resp.sizeUnsafe {
 			resp.err = ErrBadLength
 			return c.closeResponse
 		}
@@ -371,7 +410,7 @@ func (c *Client) readResponse(resp *Response) stateFunc {
 		resp.Filename = filepath.Join(resp.Request.Filename, filename)
 	}
-	if resp.requestMethod() == "HEAD" {
+	if !resp.Request.NoStore && resp.requestMethod() == "HEAD" {
 		if resp.HTTPResponse.Header.Get("Accept-Ranges") == "bytes" {
 			resp.CanResume = true
 		}
@@ -385,39 +424,45 @@ func (c *Client) readResponse(resp *Response) stateFunc {
 //
 // Requires that Response.Filename and resp.DidResume are already be set.
 func (c *Client) openWriter(resp *Response) stateFunc {
-	if !resp.Request.NoCreateDirectories {
+	if !resp.Request.NoStore && !resp.Request.NoCreateDirectories {
 		resp.err = mkdirp(resp.Filename)
 		if resp.err != nil {
 			return c.closeResponse
 		}
 	}
-	// compute write flags
-	flag := os.O_CREATE | os.O_WRONLY
-	if resp.fi != nil {
-		if resp.DidResume {
-			flag = os.O_APPEND | os.O_WRONLY
-		} else {
-			flag = os.O_TRUNC | os.O_WRONLY
-		}
-	}
-	// open file
-	f, err := os.OpenFile(resp.Filename, flag, 0644)
-	if err != nil {
-		resp.err = err
-		return c.closeResponse
-	}
-	resp.writer = f
-	// seek to start or end
-	whence := os.SEEK_SET
-	if resp.bytesResumed > 0 {
-		whence = os.SEEK_END
-	}
-	_, resp.err = f.Seek(0, whence)
-	if resp.err != nil {
-		return c.closeResponse
-	}
+	if resp.Request.NoStore {
+		resp.writer = &resp.storeBuffer
+	} else {
+		// compute write flags
+		flag := os.O_CREATE | os.O_WRONLY
+		if resp.fi != nil {
+			if resp.DidResume {
+				flag = os.O_APPEND | os.O_WRONLY
+			} else {
+				// truncate later in copyFile, if not cancelled
+				// by BeforeCopy hook
+				flag = os.O_WRONLY
+			}
+		}
+		// open file
+		f, err := os.OpenFile(resp.Filename, flag, 0666)
+		if err != nil {
+			resp.err = err
+			return c.closeResponse
+		}
+		resp.writer = f
+		// seek to start or end
+		whence := os.SEEK_SET
+		if resp.bytesResumed > 0 {
+			whence = os.SEEK_END
+		}
+		_, resp.err = f.Seek(0, whence)
+		if resp.err != nil {
+			return c.closeResponse
+		}
+	}
 	// init transfer
@@ -450,24 +495,42 @@ func (c *Client) copyFile(resp *Response) stateFunc {
 		}
 	}
+	var bytesCopied int64
 	if resp.transfer == nil {
-		panic("developer error: Response.transfer is not initialized")
+		panic("grab: developer error: Response.transfer is nil")
 	}
-	go resp.watchBps()
-	_, resp.err = resp.transfer.copy()
+
+	// We waited to truncate the file in openWriter() to make sure
+	// the BeforeCopy didn't cancel the copy. If this was an existing
+	// file that is not going to be resumed, truncate the contents.
+	if t, ok := resp.writer.(truncater); ok && resp.fi != nil && !resp.DidResume {
+		t.Truncate(0)
+	}
+
+	bytesCopied, resp.err = resp.transfer.copy()
 	if resp.err != nil {
 		return c.closeResponse
 	}
 	closeWriter(resp)
-	// set timestamp
-	if !resp.Request.IgnoreRemoteTime {
+	// set file timestamp
+	if !resp.Request.NoStore && !resp.Request.IgnoreRemoteTime {
 		resp.err = setLastModified(resp.HTTPResponse, resp.Filename)
 		if resp.err != nil {
 			return c.closeResponse
 		}
 	}
+
+	// update transfer size if previously unknown
+	if resp.Size() < 0 {
+		discoveredSize := resp.bytesResumed + bytesCopied
+		atomic.StoreInt64(&resp.sizeUnsafe, discoveredSize)
+		if resp.Request.Size > 0 && resp.Request.Size != discoveredSize {
+			resp.err = ErrBadLength
+			return c.closeResponse
+		}
+	}
+
 	// run AfterCopy hook
 	if f := resp.Request.AfterCopy; f != nil {
 		resp.err = f(resp)
@@ -480,16 +543,16 @@ func (c *Client) copyFile(resp *Response) stateFunc {
 }
 func closeWriter(resp *Response) {
-	if resp.writer != nil {
-		resp.writer.Close()
-		resp.writer = nil
+	if closer, ok := resp.writer.(io.Closer); ok {
+		closer.Close()
 	}
+	resp.writer = nil
 }
 // close finalizes the Response
 func (c *Client) closeResponse(resp *Response) stateFunc {
 	if resp.IsComplete() {
-		panic("response already closed")
+		panic("grab: developer error: response already closed")
 	}
 	resp.fi = nil
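
Since Client.HTTPClient is now an interface rather than a concrete *http.Client, any type with a matching Do method can be plugged in by downstream code. A sketch (not part of the commit; the wrapper type and header name are made up for illustration):

package main

import (
	"fmt"
	"net/http"

	"github.com/cavaliercoder/grab"
)

// headerClient is a hypothetical wrapper that satisfies the new HTTPClient
// interface by delegating to a real *http.Client after decorating the request.
type headerClient struct {
	inner *http.Client
}

func (h *headerClient) Do(req *http.Request) (*http.Response, error) {
	req.Header.Set("X-Example-Token", "not-a-real-token")
	return h.inner.Do(req)
}

func main() {
	client := grab.NewClient()
	client.HTTPClient = &headerClient{inner: &http.Client{}}

	req, err := grab.NewRequest("/tmp/archive.tar.gz", "https://example.com/archive.tar.gz")
	if err != nil {
		panic(err)
	}
	resp := client.Do(req)
	if err := resp.Err(); err != nil {
		fmt.Println("download failed:", err)
		return
	}
	fmt.Println("saved to", resp.Filename)
}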

vendor/github.com/cavaliercoder/grab/go.mod (new, vendored)

@@ -0,0 +1,3 @@
module github.com/cavaliercoder/grab
go 1.14

vendor/github.com/cavaliercoder/grab/request.go (vendored)

@@ -51,6 +51,11 @@ type Request struct {
 	// completed in full, it will not be restarted.
 	NoResume bool
+
+	// NoStore specifies that grab should not write to the local file system.
+	// Instead, the download will be stored in memory and accessible only via
+	// Response.Open or Response.Bytes.
+	NoStore bool
 	// NoCreateDirectories specifies that any missing directories in the given
 	// Filename path should not be created automatically, if they do not already
 	// exist.
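
The new NoStore option pairs with the Response.Open and Response.Bytes accessors added in response.go below. A short sketch of an in-memory download, assuming the API in this vendored snapshot (URL and destination are placeholders):

package main

import (
	"fmt"

	"github.com/cavaliercoder/grab"
)

func main() {
	// "." is only a placeholder destination: with NoStore enabled nothing is
	// written to disk; the body is kept in the Response's internal buffer.
	req, err := grab.NewRequest(".", "https://example.com/small-file.json")
	if err != nil {
		panic(err)
	}
	req.NoStore = true

	resp := grab.DefaultClient.Do(req)
	body, err := resp.Bytes() // blocks until the transfer finishes
	if err != nil {
		panic(err)
	}
	fmt.Printf("downloaded %d bytes into memory\n", len(body))
}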

vendor/github.com/cavaliercoder/grab/response.go (vendored)

@@ -1,11 +1,13 @@
 package grab
 import (
+	"bytes"
 	"context"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"os"
-	"sync"
+	"sync/atomic"
 	"time"
 )
@@ -31,7 +33,7 @@ type Response struct {
 	Filename string
 	// Size specifies the total expected size of the file transfer.
-	Size int64
+	sizeUnsafe int64
 	// Start specifies the time at which the file transfer started.
 	Start time.Time
@@ -70,7 +72,11 @@ type Response struct {
 	// writer is the file handle used to write the downloaded file to local
 	// storage
-	writer io.WriteCloser
+	writer io.Writer
+
+	// storeBuffer receives the contents of the transfer if Request.NoStore is
+	// enabled.
+	storeBuffer bytes.Buffer
 	// bytesCompleted specifies the number of bytes which were already
 	// transferred before this transfer began.
@@ -80,11 +86,6 @@ type Response struct {
 	// file, tracking progress and allowing for cancelation.
 	transfer *transfer
-	// bytesPerSecond specifies the number of bytes that have been transferred in
-	// the last 1-second window.
-	bytesPerSecond float64
-	bytesPerSecondMu sync.Mutex
-
 	// bufferSize specifies the size in bytes of the transfer buffer.
 	bufferSize int
@@ -125,6 +126,13 @@ func (c *Response) Err() error {
 	return c.err
 }
+
+// Size returns the size of the file transfer. If the remote server does not
+// specify the total size and the transfer is incomplete, the return value is
+// -1.
+func (c *Response) Size() int64 {
+	return atomic.LoadInt64(&c.sizeUnsafe)
+}
 // BytesComplete returns the total number of bytes which have been copied to
 // the destination, including any bytes that were resumed from a previous
 // download.
@@ -132,25 +140,24 @@ func (c *Response) BytesComplete() int64 {
 	return c.bytesResumed + c.transfer.N()
 }
-// BytesPerSecond returns the number of bytes transferred in the last second. If
-// the download is already complete, the average bytes/sec for the life of the
-// download is returned.
+// BytesPerSecond returns the number of bytes per second transferred using a
+// simple moving average of the last five seconds. If the download is already
+// complete, the average bytes/sec for the life of the download is returned.
 func (c *Response) BytesPerSecond() float64 {
 	if c.IsComplete() {
 		return float64(c.transfer.N()) / c.Duration().Seconds()
 	}
-	c.bytesPerSecondMu.Lock()
-	defer c.bytesPerSecondMu.Unlock()
-	return c.bytesPerSecond
+	return c.transfer.BPS()
 }
 // Progress returns the ratio of total bytes that have been downloaded. Multiply
 // the returned value by 100 to return the percentage completed.
 func (c *Response) Progress() float64 {
-	if c.Size == 0 {
+	size := c.Size()
+	if size <= 0 {
 		return 0
 	}
-	return float64(c.BytesComplete()) / float64(c.Size)
+	return float64(c.BytesComplete()) / float64(size)
 }
 // Duration returns the duration of a file transfer. If the transfer is in
@@ -173,40 +180,53 @@ func (c *Response) ETA() time.Time {
 		return c.End
 	}
 	bt := c.BytesComplete()
-	bps := c.BytesPerSecond()
+	bps := c.transfer.BPS()
 	if bps == 0 {
 		return time.Time{}
 	}
-	secs := float64(c.Size-bt) / bps
+	secs := float64(c.Size()-bt) / bps
 	return time.Now().Add(time.Duration(secs) * time.Second)
 }
-// watchBps watches the progress of a transfer and maintains statistics.
-func (c *Response) watchBps() {
-	var prev int64
-	then := c.Start
-
-	t := time.NewTicker(time.Second)
-	defer t.Stop()
-
-	for {
-		select {
-		case <-c.Done:
-			return
-
-		case now := <-t.C:
-			d := now.Sub(then)
-			then = now
-
-			cur := c.transfer.N()
-			bs := cur - prev
-			prev = cur
-
-			c.bytesPerSecondMu.Lock()
-			c.bytesPerSecond = float64(bs) / d.Seconds()
-			c.bytesPerSecondMu.Unlock()
-		}
-	}
-}
+// Open blocks the calling goroutine until the underlying file transfer is
+// completed and then opens the transferred file for reading. If Request.NoStore
+// was enabled, the reader will read from memory.
+//
+// If an error occurred during the transfer, it will be returned.
+//
+// It is the callers responsibility to close the returned file handle.
+func (c *Response) Open() (io.ReadCloser, error) {
+	if err := c.Err(); err != nil {
+		return nil, err
+	}
+	return c.openUnsafe()
+}
+
+func (c *Response) openUnsafe() (io.ReadCloser, error) {
+	if c.Request.NoStore {
+		return ioutil.NopCloser(bytes.NewReader(c.storeBuffer.Bytes())), nil
+	}
+	return os.Open(c.Filename)
+}
+
+// Bytes blocks the calling goroutine until the underlying file transfer is
+// completed and then reads all bytes from the completed tranafer. If
+// Request.NoStore was enabled, the bytes will be read from memory.
+//
+// If an error occurred during the transfer, it will be returned.
+func (c *Response) Bytes() ([]byte, error) {
+	if err := c.Err(); err != nil {
+		return nil, err
+	}
+	if c.Request.NoStore {
+		return c.storeBuffer.Bytes(), nil
+	}
+	f, err := c.Open()
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return ioutil.ReadAll(f)
+}
 func (c *Response) requestMethod() string {
@@ -216,6 +236,20 @@ func (c *Response) requestMethod() string {
 	return c.HTTPResponse.Request.Method
 }
+
+func (c *Response) checksumUnsafe() ([]byte, error) {
+	f, err := c.openUnsafe()
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	t := newTransfer(c.Request.Context(), nil, c.Request.hash, f, nil)
+	if _, err = t.copy(); err != nil {
+		return nil, err
+	}
+	sum := c.Request.hash.Sum(nil)
+	return sum, nil
+}
 func (c *Response) closeResponseBody() error {
 	if c.HTTPResponse == nil || c.HTTPResponse.Body == nil {
 		return nil
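
The Size, BytesPerSecond, Progress and ETA accessors are now safe to poll from another goroutine while the transfer runs. A sketch of a progress loop against the updated API (URL and destination are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/cavaliercoder/grab"
)

func main() {
	req, err := grab.NewRequest("/tmp/ubuntu.iso", "https://example.com/ubuntu.iso")
	if err != nil {
		panic(err)
	}
	resp := grab.DefaultClient.Do(req)

	t := time.NewTicker(500 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			// BytesPerSecond is now backed by the SMA gauge; Size() returns -1
			// until the total is known, which Progress() treats as 0%.
			fmt.Printf("%.1f%% at %.0f B/s, ETA %s\n",
				100*resp.Progress(), resp.BytesPerSecond(),
				resp.ETA().Format(time.Kitchen))
		case <-resp.Done:
			if err := resp.Err(); err != nil {
				fmt.Println("error:", err)
			}
			return
		}
	}
}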

vendor/github.com/cavaliercoder/grab/transfer.go (vendored)

@@ -4,30 +4,42 @@ import (
 	"context"
 	"io"
 	"sync/atomic"
+	"time"
+
+	"github.com/cavaliercoder/grab/bps"
 )
 type transfer struct {
 	n int64 // must be 64bit aligned on 386
 	ctx context.Context
-	lim RateLimiter
-	w io.Writer
-	r io.Reader
-	b []byte
+	gauge bps.Gauge
+	lim RateLimiter
+	w io.Writer
+	r io.Reader
+	b []byte
 }
 func newTransfer(ctx context.Context, lim RateLimiter, dst io.Writer, src io.Reader, buf []byte) *transfer {
 	return &transfer{
 		ctx: ctx,
-		lim: lim,
-		w: dst,
-		r: src,
-		b: buf,
+		gauge: bps.NewSMA(6), // five second moving average sampling every second
+		lim: lim,
+		w: dst,
+		r: src,
+		b: buf,
 	}
 }
 // copy behaves similarly to io.CopyBuffer except that it checks for cancelation
-// of the given context.Context and reports progress in a thread-safe manner.
+// of the given context.Context, reports progress in a thread-safe manner and
+// tracks the transfer rate.
 func (c *transfer) copy() (written int64, err error) {
+	// maintain a bps gauge in another goroutine
+	ctx, cancel := context.WithCancel(c.ctx)
+	defer cancel()
+	go bps.Watch(ctx, c.gauge, c.N, time.Second)
+
+	// start the transfer
 	if c.b == nil {
 		c.b = make([]byte, 32*1024)
 	}
@@ -39,12 +51,6 @@ func (c *transfer) copy() (written int64, err error) {
 		default:
 			// keep working
 		}
-		if c.lim != nil {
-			err = c.lim.WaitN(c.ctx, len(c.b))
-			if err != nil {
-				return
-			}
-		}
 		nr, er := c.r.Read(c.b)
 		if nr > 0 {
 			nw, ew := c.w.Write(c.b[0:nr])
@@ -60,6 +66,13 @@ func (c *transfer) copy() (written int64, err error) {
 				err = io.ErrShortWrite
 				break
 			}
+
+			// wait for rate limiter
+			if c.lim != nil {
+				err = c.lim.WaitN(c.ctx, nr)
+				if err != nil {
+					return
+				}
+			}
 		}
 		if er != nil {
 			if er != io.EOF {
@@ -79,3 +92,12 @@ func (c *transfer) N() (n int64) {
 	n = atomic.LoadInt64(&c.n)
 	return
 }
+
+// BPS returns the current bytes per second transfer rate using a simple moving
+// average.
+func (c *transfer) BPS() (bps float64) {
+	if c == nil || c.gauge == nil {
+		return 0
+	}
+	return c.gauge.BPS()
+}
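
Note that the copy loop now waits on the rate limiter after each write, passing the byte count actually written (nr) instead of the full buffer length. grab's RateLimiter interface only requires WaitN(ctx, n) error, which golang.org/x/time/rate.Limiter already provides. The sketch below assumes the Request.RateLimiter hook exposed by upstream grab for attaching a limiter (that field is not part of this diff):

package main

import (
	"fmt"

	"github.com/cavaliercoder/grab"
	"golang.org/x/time/rate"
)

func main() {
	req, err := grab.NewRequest("/tmp/big.bin", "https://example.com/big.bin")
	if err != nil {
		panic(err)
	}

	// Throttle to roughly 512 KiB/s; the burst must cover the 32 KiB copy
	// buffer since WaitN is called with up to that many bytes per iteration.
	req.RateLimiter = rate.NewLimiter(rate.Limit(512*1024), 512*1024)

	resp := grab.DefaultClient.Do(req)
	if err := resp.Err(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("done:", resp.Filename)
}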

vendor/github.com/cavaliercoder/grab/util.go (vendored)

@@ -1,9 +1,7 @@
 package grab
 import (
-	"context"
 	"fmt"
-	"hash"
 	"mime"
 	"net/http"
 	"os"
@@ -36,22 +34,26 @@ func mkdirp(path string) error {
 		if !os.IsNotExist(err) {
 			return fmt.Errorf("error checking destination directory: %v", err)
 		}
-		if err := os.MkdirAll(dir, 0755); err != nil {
+		if err := os.MkdirAll(dir, 0777); err != nil {
 			return fmt.Errorf("error creating destination directory: %v", err)
 		}
 	} else if !fi.IsDir() {
-		panic("destination path is not directory")
+		panic("grab: developer error: destination path is not directory")
 	}
 	return nil
 }
 // guessFilename returns a filename for the given http.Response. If none can be
 // determined ErrNoFilename is returned.
+//
+// TODO: NoStore operations should not require a filename
 func guessFilename(resp *http.Response) (string, error) {
 	filename := resp.Request.URL.Path
 	if cd := resp.Header.Get("Content-Disposition"); cd != "" {
 		if _, params, err := mime.ParseMediaType(cd); err == nil {
-			filename = params["filename"]
+			if val, ok := params["filename"]; ok {
+				filename = val
+			} // else filename directive is missing.. fallback to URL.Path
 		}
 	}
@@ -67,23 +69,3 @@ func guessFilename(resp *http.Response) (string, error) {
 	return filename, nil
 }
-
-// checksum returns a hash of the given file, using the given hash algorithm.
-func checksum(ctx context.Context, filename string, h hash.Hash) (b []byte, err error) {
-	var f *os.File
-	f, err = os.Open(filename)
-	if err != nil {
-		return
-	}
-	defer func() {
-		err = f.Close()
-	}()
-
-	t := newTransfer(ctx, nil, h, f, nil)
-	if _, err = t.copy(); err != nil {
-		return
-	}
-
-	b = h.Sum(nil)
-	return
-}
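
The guessFilename change matters when a server sends Content-Disposition without a filename directive: indexing the parameter map directly yields an empty name, whereas the patched code keeps the URL-derived fallback. A small standalone check of the stdlib behaviour it relies on:

package main

import (
	"fmt"
	"mime"
)

func main() {
	// With a filename directive the parameter is used...
	_, params, _ := mime.ParseMediaType(`attachment; filename="report.pdf"`)
	fmt.Println(params["filename"]) // "report.pdf"

	// ...but a bare "attachment" parses fine with no filename parameter.
	// Indexing the map directly would silently yield "", which is why
	// guessFilename now checks ok before overwriting the URL-derived name.
	_, params, _ = mime.ParseMediaType(`attachment`)
	val, ok := params["filename"]
	fmt.Printf("%q %v\n", val, ok) // "" false
}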

vendor/modules.txt (vendored)

@@ -44,8 +44,9 @@ github.com/asdine/storm/internal
 github.com/asdine/storm/q
 # github.com/briandowns/spinner v1.7.0
 github.com/briandowns/spinner
-# github.com/cavaliercoder/grab v2.0.0+incompatible
+# github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
 github.com/cavaliercoder/grab
+github.com/cavaliercoder/grab/bps
 # github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9
 github.com/chuckpreslar/emission
 # github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0