vendor: add sourcegraph/s3cache and dependencies

Adds:
 - github.com/kr/http/transport
 - github.com/sqs/s3
 - github.com/sqs/s3/s3util
 - sourcegraph.com/sourcegraph/s3cache
Will Norris 2015-12-07 20:04:55 -08:00
parent ec96fcbc90
commit 11370ac826
19 changed files with 1228 additions and 0 deletions

vendor/github.com/sqs/s3/s3util/Readme (generated, vendored, new file, +4 lines)

@@ -0,0 +1,4 @@
Package s3util provides streaming transfers to and from Amazon S3.
Full documentation:
http://godoc.org/github.com/kr/s3/s3util

vendor/github.com/sqs/s3/s3util/config.go (generated, vendored, new file, +28 lines)

@@ -0,0 +1,28 @@
// Package s3util provides streaming transfers to and from Amazon S3.
//
// To use it, open or create an S3 object, read or write data,
// and close the object.
//
// You must assign valid credentials to DefaultConfig.Keys before using
// DefaultConfig. Be sure to close an io.WriteCloser returned by this package,
// to flush buffers and complete the multipart upload process.
package s3util
// TODO(kr): parse error responses; return structured data
import (
"net/http"
"github.com/sqs/s3"
)
var DefaultConfig = &Config{
Service: s3.DefaultService,
Keys: new(s3.Keys),
}
type Config struct {
*s3.Service
*s3.Keys
*http.Client // if nil, uses http.DefaultClient
}
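For context (not part of the diff), a minimal setup sketch: the package doc above says callers must assign credentials to DefaultConfig.Keys before using the helpers that follow. The environment variable names below are just the conventional AWS ones; the package does not read them on its own.

package main

import (
	"os"

	"github.com/sqs/s3"
	"github.com/sqs/s3/s3util"
)

func main() {
	// Fill in the embedded *s3.Keys on the package-level DefaultConfig.
	// Credentials could come from any source; env vars are assumed here.
	s3util.DefaultConfig.Keys = &s3.Keys{
		AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
		SecretKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
	}
	// DefaultConfig.Service is already s3.DefaultService, so Open, Create,
	// Delete, and NewFile can now be called with a nil *Config.
}
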

vendor/github.com/sqs/s3/s3util/delete.go (generated, vendored, new file, +32 lines)

@@ -0,0 +1,32 @@
package s3util
import (
"io"
"net/http"
"time"
)
// Delete deletes the S3 object at url. An HTTP status other than 204 (No
// Content) is considered an error.
//
// If c is nil, Delete uses DefaultConfig.
func Delete(url string, c *Config) (io.ReadCloser, error) {
if c == nil {
c = DefaultConfig
}
r, _ := http.NewRequest("DELETE", url, nil)
r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
c.Sign(r, *c.Keys)
client := c.Client
if client == nil {
client = http.DefaultClient
}
resp, err := client.Do(r)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusNoContent {
return nil, newRespError(resp)
}
return resp.Body, nil
}
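Usage sketch for Delete (not part of the diff), assuming DefaultConfig.Keys is already populated and using a hypothetical object URL:

package main

import (
	"log"

	"github.com/sqs/s3/s3util"
)

func main() {
	// Passing nil uses s3util.DefaultConfig; anything other than
	// 204 No Content comes back as an error.
	body, err := s3util.Delete("https://mybucket.s3.amazonaws.com/old-object", nil)
	if err != nil {
		log.Fatal(err)
	}
	// The returned body is the (empty) response body and still needs closing.
	body.Close()
}
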

vendor/github.com/sqs/s3/s3util/error.go (generated, vendored, new file, +29 lines)

@@ -0,0 +1,29 @@
package s3util
import (
"bytes"
"fmt"
"io"
"net/http"
)
type respError struct {
r *http.Response
b bytes.Buffer
}
func newRespError(r *http.Response) *respError {
e := new(respError)
e.r = r
io.Copy(&e.b, r.Body)
r.Body.Close()
return e
}
func (e *respError) Error() string {
return fmt.Sprintf(
"unwanted http status %d: %q",
e.r.StatusCode,
e.b.String(),
)
}

vendor/github.com/sqs/s3/s3util/open.go (generated, vendored, new file, +33 lines)

@@ -0,0 +1,33 @@
package s3util
import (
"io"
"net/http"
"time"
)
// Open requests the S3 object at url. An HTTP status other than 200 (OK)
// or 206 (Partial Content) is considered an error.
//
// If c is nil, Open uses DefaultConfig.
func Open(url string, c *Config) (io.ReadCloser, error) {
if c == nil {
c = DefaultConfig
}
// TODO(kr): maybe parallel range fetching
r, _ := http.NewRequest("GET", url, nil)
r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
c.Sign(r, *c.Keys)
client := c.Client
if client == nil {
client = http.DefaultClient
}
resp, err := client.Do(r)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 && resp.StatusCode != http.StatusPartialContent {
return nil, newRespError(resp)
}
return resp.Body, nil
}
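Usage sketch for Open (not part of the diff), again with a hypothetical object URL; the returned ReadCloser streams the object body and must be closed:

package main

import (
	"io"
	"log"
	"os"

	"github.com/sqs/s3/s3util"
)

func main() {
	// Open issues a signed GET using DefaultConfig (nil *Config).
	rc, err := s3util.Open("https://mybucket.s3.amazonaws.com/data.bin", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Stream the object to a local file.
	f, err := os.Create("data.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}
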

vendor/github.com/sqs/s3/s3util/readdir.go (generated, vendored, new file, +218 lines)

@@ -0,0 +1,218 @@
package s3util
import (
"bytes"
"encoding/xml"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// File represents an S3 object or directory.
type File struct {
url string
prefix string
config *Config
result *listObjectsResult
}
type fileInfo struct {
name string
size int64
dir bool
modTime time.Time
sys *Stat
}
// Stat contains information about an S3 object or directory.
// It is the "underlying data source" returned by method Sys
// for each FileInfo produced by this package:
//
//	fi.Sys().(*s3util.Stat)
//
// For the meaning of these fields, see
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html.
type Stat struct {
Key string
LastModified string
ETag string // ETag value, without double quotes.
Size string
StorageClass string
OwnerID string `xml:"Owner>ID"`
OwnerName string `xml:"Owner>DisplayName"`
}
type listObjectsResult struct {
IsTruncated bool
Contents []Stat
Directories []string `xml:"CommonPrefixes>Prefix"` // Suffix "/" trimmed
}
func (f *fileInfo) Name() string { return f.name }
func (f *fileInfo) Size() int64 { return f.size }
func (f *fileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
func (f *fileInfo) ModTime() time.Time {
if f.modTime.IsZero() && f.sys != nil {
// we return the zero value if a parse error ever happens.
f.modTime, _ = time.Parse(time.RFC3339Nano, f.sys.LastModified)
}
return f.modTime
}
func (f *fileInfo) IsDir() bool { return f.dir }
func (f *fileInfo) Sys() interface{} { return f.sys }
// NewFile returns a new File with the given URL and config.
//
// Set rawurl to a directory on S3, such as
// https://mybucket.s3.amazonaws.com/myfolder.
// The URL cannot have query parameters or a fragment.
// If c is nil, DefaultConfig will be used.
func NewFile(rawurl string, c *Config) (*File, error) {
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
if u.RawQuery != "" {
return nil, errors.New("url cannot have raw query parameters.")
}
if u.Fragment != "" {
return nil, errors.New("url cannot have a fragment.")
}
prefix := strings.TrimLeft(u.Path, "/")
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
u.Path = ""
return &File{u.String(), prefix, c, nil}, nil
}
// Readdir requests a list of entries in the S3 directory
// represented by f and returns a slice of up to n FileInfo
// values, in alphabetical order. Subsequent calls
// on the same File will yield further FileInfos.
// Only direct children are returned, not deeper descendants.
func (f *File) Readdir(n int) ([]os.FileInfo, error) {
if f.result != nil && !f.result.IsTruncated {
return make([]os.FileInfo, 0), io.EOF
}
reader, err := f.sendRequest(n)
if err != nil {
return nil, err
}
defer reader.Close()
return f.parseResponse(reader)
}
func (f *File) sendRequest(count int) (io.ReadCloser, error) {
c := f.config
if c == nil {
c = DefaultConfig
}
var buf bytes.Buffer
buf.WriteString(f.url)
buf.WriteString("?delimiter=%2F")
if f.prefix != "" {
buf.WriteString("&prefix=")
buf.WriteString(url.QueryEscape(f.prefix))
}
if count > 0 {
buf.WriteString("&max-keys=")
buf.WriteString(strconv.Itoa(count))
}
if f.result != nil && f.result.IsTruncated {
var lastDir, lastKey, marker string
if len(f.result.Directories) > 0 {
lastDir = f.result.Directories[len(f.result.Directories)-1]
}
if len(f.result.Contents) > 0 {
lastKey = f.result.Contents[len(f.result.Contents)-1].Key
}
if lastKey > lastDir {
marker = lastKey
} else {
marker = lastDir
}
if marker != "" {
buf.WriteString("&marker=")
buf.WriteString(url.QueryEscape(marker))
}
}
u := buf.String()
r, _ := http.NewRequest("GET", u, nil)
r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
c.Sign(r, *c.Keys)
client := c.Client
if client == nil {
client = http.DefaultClient
}
resp, err := client.Do(r)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, newRespError(resp)
}
return resp.Body, nil
}
func (f *File) parseResponse(reader io.Reader) ([]os.FileInfo, error) {
// Reading it all in now makes the XML decoding way faster.
bb, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
reader = bytes.NewReader(bb)
decoder := xml.NewDecoder(reader)
result := listObjectsResult{}
if err := decoder.Decode(&result); err != nil {
return nil, err
}
infos := make([]os.FileInfo, len(result.Contents)+len(result.Directories))
var size int64
var name string
var is_dir bool
for i, content := range result.Contents {
c := content
c.ETag = strings.Trim(c.ETag, `"`)
size, _ = strconv.ParseInt(c.Size, 10, 0)
if size == 0 && strings.HasSuffix(c.Key, "/") {
name = strings.TrimRight(c.Key, "/")
is_dir = true
} else {
name = c.Key
is_dir = false
}
infos[i] = &fileInfo{
name: name,
size: size,
dir: is_dir,
sys: &c,
}
}
for i, dir := range result.Directories {
infos[len(result.Contents)+i] = &fileInfo{
name: strings.TrimRight(dir, "/"),
size: 0,
dir: true,
}
}
f.result = &result
return infos, nil
}
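Usage sketch for NewFile and Readdir (not part of the diff), with a hypothetical folder URL; repeated Readdir calls page through a truncated listing until io.EOF:

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/sqs/s3/s3util"
)

func main() {
	// NewFile only parses the URL; the listing requests happen in Readdir.
	dir, err := s3util.NewFile("https://mybucket.s3.amazonaws.com/myfolder", nil)
	if err != nil {
		log.Fatal(err)
	}
	for {
		// Ask for up to 100 entries per request; truncated listings are
		// continued on the next call via the marker parameter.
		infos, err := dir.Readdir(100)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, fi := range infos {
			fmt.Println(fi.Name(), fi.Size(), fi.IsDir())
		}
	}
}
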

vendor/github.com/sqs/s3/s3util/uploader.go (generated, vendored, new file, +291 lines)

@@ -0,0 +1,291 @@
package s3util
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"sync"
"syscall"
"time"
"github.com/sqs/s3"
)
// defined by amazon
const (
minPartSize = 5 * 1024 * 1024
maxPartSize = 1<<31 - 1 // for 32-bit use; amz max is 5GiB
maxObjSize = 5 * 1024 * 1024 * 1024 * 1024
maxNPart = 10000
)
const (
concurrency = 5
nTry = 2
)
type part struct {
r io.ReadSeeker
len int64
// read by xml encoder
PartNumber int
ETag string
}
type uploader struct {
s3 s3.Service
keys s3.Keys
url string
client *http.Client
UploadId string // written by xml decoder
bufsz int64
buf []byte
off int
ch chan *part
part int
closed bool
err error
wg sync.WaitGroup
xml struct {
XMLName string `xml:"CompleteMultipartUpload"`
Part []*part
}
}
// Create creates an S3 object at url and sends multipart upload requests as
// data is written.
//
// If h is not nil, each of its entries is added to the HTTP request header.
// If c is nil, Create uses DefaultConfig.
func Create(url string, h http.Header, c *Config) (io.WriteCloser, error) {
if c == nil {
c = DefaultConfig
}
return newUploader(url, h, c)
}
// Sends an S3 multipart upload initiation request.
// See http://docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html.
// This initial request returns an UploadId that we use to identify
// subsequent PUT requests.
func newUploader(url string, h http.Header, c *Config) (u *uploader, err error) {
u = new(uploader)
u.s3 = *c.Service
u.url = url
u.keys = *c.Keys
u.client = c.Client
if u.client == nil {
u.client = http.DefaultClient
}
u.bufsz = minPartSize
r, err := http.NewRequest("POST", url+"?uploads", nil)
if err != nil {
return nil, err
}
r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
for k := range h {
for _, v := range h[k] {
r.Header.Add(k, v)
}
}
u.s3.Sign(r, u.keys)
resp, err := u.client.Do(r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, newRespError(resp)
}
err = xml.NewDecoder(resp.Body).Decode(u)
if err != nil {
return nil, err
}
u.ch = make(chan *part)
for i := 0; i < concurrency; i++ {
go u.worker()
}
return u, nil
}
func (u *uploader) Write(p []byte) (n int, err error) {
if u.closed {
return 0, syscall.EINVAL
}
if u.err != nil {
return 0, u.err
}
for n < len(p) {
if cap(u.buf) == 0 {
u.buf = make([]byte, int(u.bufsz))
// Increase part size (1.001x).
// This lets us reach the max object size (5TiB) while
// still doing minimal buffering for small objects.
u.bufsz = min(u.bufsz+u.bufsz/1000, maxPartSize)
}
r := copy(u.buf[u.off:], p[n:])
u.off += r
n += r
if u.off == len(u.buf) {
u.flush()
}
}
return n, nil
}
func (u *uploader) flush() {
u.wg.Add(1)
u.part++
p := &part{bytes.NewReader(u.buf[:u.off]), int64(u.off), u.part, ""}
u.xml.Part = append(u.xml.Part, p)
u.ch <- p
u.buf, u.off = nil, 0
}
func (u *uploader) worker() {
for p := range u.ch {
u.retryUploadPart(p)
}
}
// Calls putPart up to nTry times to recover from transient errors.
func (u *uploader) retryUploadPart(p *part) {
defer u.wg.Done()
defer func() { p.r = nil }() // free the large buffer
var err error
for i := 0; i < nTry; i++ {
p.r.Seek(0, 0)
err = u.putPart(p)
if err == nil {
return
}
}
u.err = err
}
// Uploads part p, reading its contents from p.r.
// Stores the ETag in p.ETag.
func (u *uploader) putPart(p *part) error {
v := url.Values{}
v.Set("partNumber", strconv.Itoa(p.PartNumber))
v.Set("uploadId", u.UploadId)
req, err := http.NewRequest("PUT", u.url+"?"+v.Encode(), p.r)
if err != nil {
return err
}
req.ContentLength = p.len
req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
u.s3.Sign(req, u.keys)
resp, err := u.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newRespError(resp)
}
s := resp.Header.Get("etag") // includes quote chars for some reason
if len(s) < 2 {
return fmt.Errorf("received invalid etag %q", s)
}
p.ETag = s[1 : len(s)-1]
return nil
}
func (u *uploader) Close() error {
if u.closed {
return syscall.EINVAL
}
if cap(u.buf) > 0 {
u.flush()
}
u.wg.Wait()
close(u.ch)
u.closed = true
if u.err != nil {
u.abort()
return u.err
}
if u.part == 0 {
// Can't upload an empty file with multipart uploads.
u.abort()
if u.err != nil {
return u.err
}
req, err := http.NewRequest("PUT", u.url, bytes.NewReader(nil))
if err != nil {
return err
}
req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
u.s3.Sign(req, u.keys)
resp, err := u.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return newRespError(resp)
}
resp.Body.Close()
return nil
}
body, err := xml.Marshal(u.xml)
if err != nil {
return err
}
b := bytes.NewBuffer(body)
v := url.Values{}
v.Set("uploadId", u.UploadId)
req, err := http.NewRequest("POST", u.url+"?"+v.Encode(), b)
if err != nil {
return err
}
req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
u.s3.Sign(req, u.keys)
resp, err := u.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return newRespError(resp)
}
resp.Body.Close()
return nil
}
func (u *uploader) abort() {
// TODO(kr): devise a reasonable way to report an error here in addition
// to the error that caused the abort.
v := url.Values{}
v.Set("uploadId", u.UploadId)
s := u.url + "?" + v.Encode()
req, err := http.NewRequest("DELETE", s, nil)
if err != nil {
return
}
req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
u.s3.Sign(req, u.keys)
resp, err := u.client.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return
}
}
func min(a, b int64) int64 {
if a < b {
return a
}
return b
}
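Usage sketch for Create (not part of the diff), with a hypothetical destination URL; writes are buffered into parts of at least 5 MB and uploaded concurrently, and Close completes the multipart upload, so its error must be checked:

package main

import (
	"io"
	"log"
	"os"

	"github.com/sqs/s3/s3util"
)

func main() {
	// Create initiates the multipart upload; nil header and nil *Config
	// use no extra request headers and s3util.DefaultConfig.
	w, err := s3util.Create("https://mybucket.s3.amazonaws.com/backup.tar", nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Stream stdin into the object; parts are uploaded as they fill.
	if _, err := io.Copy(w, os.Stdin); err != nil {
		log.Fatal(err)
	}

	// Close flushes the final part and completes (or aborts, on error)
	// the multipart upload.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}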