mirror of
https://github.com/willnorris/imageproxy.git
synced 2026-04-27 22:06:24 +02:00
remove vendor directory
imageproxy should be built with go1.11 or later with modules support enabled.
This commit is contained in:
parent
44e7b31616
commit
7d53c5100d
640 changed files with 0 additions and 208060 deletions
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
437
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
437
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
|
|
@ -1,437 +0,0 @@
|
||||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package metadata provides access to Google Compute Engine (GCE)
|
|
||||||
// metadata and API service accounts.
|
|
||||||
//
|
|
||||||
// This package is a wrapper around the GCE metadata service,
|
|
||||||
// as documented at https://developers.google.com/compute/docs/metadata.
|
|
||||||
package metadata // import "cloud.google.com/go/compute/metadata"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/net/context/ctxhttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// metadataIP is the documented metadata server IP address.
|
|
||||||
metadataIP = "169.254.169.254"
|
|
||||||
|
|
||||||
// metadataHostEnv is the environment variable specifying the
|
|
||||||
// GCE metadata hostname. If empty, the default value of
|
|
||||||
// metadataIP ("169.254.169.254") is used instead.
|
|
||||||
// This is variable name is not defined by any spec, as far as
|
|
||||||
// I know; it was made up for the Go package.
|
|
||||||
metadataHostEnv = "GCE_METADATA_HOST"
|
|
||||||
|
|
||||||
userAgent = "gcloud-golang/0.1"
|
|
||||||
)
|
|
||||||
|
|
||||||
type cachedValue struct {
|
|
||||||
k string
|
|
||||||
trim bool
|
|
||||||
mu sync.Mutex
|
|
||||||
v string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
|
||||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
|
||||||
instID = &cachedValue{k: "instance/id", trim: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
metaClient = &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Dial: (&net.Dialer{
|
|
||||||
Timeout: 2 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
}).Dial,
|
|
||||||
ResponseHeaderTimeout: 2 * time.Second,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
subscribeClient = &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Dial: (&net.Dialer{
|
|
||||||
Timeout: 2 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
}).Dial,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// NotDefinedError is returned when requested metadata is not defined.
|
|
||||||
//
|
|
||||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// This error is not returned if the value is defined to be the empty
|
|
||||||
// string.
|
|
||||||
type NotDefinedError string
|
|
||||||
|
|
||||||
func (suffix NotDefinedError) Error() string {
|
|
||||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
|
||||||
// 169.254.169.254 will be used instead.
|
|
||||||
//
|
|
||||||
// If the requested metadata is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
func Get(suffix string) (string, error) {
|
|
||||||
val, _, err := getETag(metaClient, suffix)
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// getETag returns a value from the metadata service as well as the associated
|
|
||||||
// ETag using the provided client. This func is otherwise equivalent to Get.
|
|
||||||
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
|
|
||||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
|
||||||
// a container, which is an important use-case for local testing of cloud
|
|
||||||
// deployments. To enable spoofing of the metadata service, the environment
|
|
||||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
|
||||||
// requests shall go.
|
|
||||||
host := os.Getenv(metadataHostEnv)
|
|
||||||
if host == "" {
|
|
||||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
|
||||||
// binaries built with the "netgo" tag and without cgo won't
|
|
||||||
// know the search suffix for "metadata" is
|
|
||||||
// ".google.internal", and this IP address is documented as
|
|
||||||
// being stable anyway.
|
|
||||||
host = metadataIP
|
|
||||||
}
|
|
||||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
|
||||||
req, _ := http.NewRequest("GET", url, nil)
|
|
||||||
req.Header.Set("Metadata-Flavor", "Google")
|
|
||||||
req.Header.Set("User-Agent", userAgent)
|
|
||||||
res, err := client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode == http.StatusNotFound {
|
|
||||||
return "", "", NotDefinedError(suffix)
|
|
||||||
}
|
|
||||||
if res.StatusCode != 200 {
|
|
||||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
|
||||||
}
|
|
||||||
all, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
return string(all), res.Header.Get("Etag"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTrimmed(suffix string) (s string, err error) {
|
|
||||||
s, err = Get(suffix)
|
|
||||||
s = strings.TrimSpace(s)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cachedValue) get() (v string, err error) {
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.mu.Lock()
|
|
||||||
if c.v != "" {
|
|
||||||
return c.v, nil
|
|
||||||
}
|
|
||||||
if c.trim {
|
|
||||||
v, err = getTrimmed(c.k)
|
|
||||||
} else {
|
|
||||||
v, err = Get(c.k)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
c.v = v
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
onGCEOnce sync.Once
|
|
||||||
onGCE bool
|
|
||||||
)
|
|
||||||
|
|
||||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
|
||||||
func OnGCE() bool {
|
|
||||||
onGCEOnce.Do(initOnGCE)
|
|
||||||
return onGCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func initOnGCE() {
|
|
||||||
onGCE = testOnGCE()
|
|
||||||
}
|
|
||||||
|
|
||||||
func testOnGCE() bool {
|
|
||||||
// The user explicitly said they're on GCE, so trust them.
|
|
||||||
if os.Getenv(metadataHostEnv) != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
resc := make(chan bool, 2)
|
|
||||||
|
|
||||||
// Try two strategies in parallel.
|
|
||||||
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
|
||||||
go func() {
|
|
||||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
|
||||||
req.Header.Set("User-Agent", userAgent)
|
|
||||||
res, err := ctxhttp.Do(ctx, metaClient, req)
|
|
||||||
if err != nil {
|
|
||||||
resc <- false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
addrs, err := net.LookupHost("metadata.google.internal")
|
|
||||||
if err != nil || len(addrs) == 0 {
|
|
||||||
resc <- false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resc <- strsContains(addrs, metadataIP)
|
|
||||||
}()
|
|
||||||
|
|
||||||
tryHarder := systemInfoSuggestsGCE()
|
|
||||||
if tryHarder {
|
|
||||||
res := <-resc
|
|
||||||
if res {
|
|
||||||
// The first strategy succeeded, so let's use it.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Wait for either the DNS or metadata server probe to
|
|
||||||
// contradict the other one and say we are running on
|
|
||||||
// GCE. Give it a lot of time to do so, since the system
|
|
||||||
// info already suggests we're running on a GCE BIOS.
|
|
||||||
timer := time.NewTimer(5 * time.Second)
|
|
||||||
defer timer.Stop()
|
|
||||||
select {
|
|
||||||
case res = <-resc:
|
|
||||||
return res
|
|
||||||
case <-timer.C:
|
|
||||||
// Too slow. Who knows what this system is.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// There's no hint from the system info that we're running on
|
|
||||||
// GCE, so use the first probe's result as truth, whether it's
|
|
||||||
// true or false. The goal here is to optimize for speed for
|
|
||||||
// users who are NOT running on GCE. We can't assume that
|
|
||||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
|
||||||
// address is fast. Worst case this should return when the
|
|
||||||
// metaClient's Transport.ResponseHeaderTimeout or
|
|
||||||
// Transport.Dial.Timeout fires (in two seconds).
|
|
||||||
return <-resc
|
|
||||||
}
|
|
||||||
|
|
||||||
// systemInfoSuggestsGCE reports whether the local system (without
|
|
||||||
// doing network requests) suggests that we're running on GCE. If this
|
|
||||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
|
||||||
// server.
|
|
||||||
func systemInfoSuggestsGCE() bool {
|
|
||||||
if runtime.GOOS != "linux" {
|
|
||||||
// We don't have any non-Linux clues available, at least yet.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
|
||||||
name := strings.TrimSpace(string(slurp))
|
|
||||||
return name == "Google" || name == "Google Compute Engine"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe subscribes to a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
// The suffix may contain query parameters.
|
|
||||||
//
|
|
||||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
|
||||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
|
||||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
|
||||||
// is deleted. Subscribe returns the error value returned from the last call to
|
|
||||||
// fn, which may be nil when ok == false.
|
|
||||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
|
||||||
const failedSubscribeSleep = time.Second * 5
|
|
||||||
|
|
||||||
// First check to see if the metadata value exists at all.
|
|
||||||
val, lastETag, err := getETag(subscribeClient, suffix)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fn(val, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ok := true
|
|
||||||
if strings.ContainsRune(suffix, '?') {
|
|
||||||
suffix += "&wait_for_change=true&last_etag="
|
|
||||||
} else {
|
|
||||||
suffix += "?wait_for_change=true&last_etag="
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
|
|
||||||
if err != nil {
|
|
||||||
if _, deleted := err.(NotDefinedError); !deleted {
|
|
||||||
time.Sleep(failedSubscribeSleep)
|
|
||||||
continue // Retry on other errors.
|
|
||||||
}
|
|
||||||
ok = false
|
|
||||||
}
|
|
||||||
lastETag = etag
|
|
||||||
|
|
||||||
if err := fn(val, ok); err != nil || !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
|
|
||||||
func ProjectID() (string, error) { return projID.get() }
|
|
||||||
|
|
||||||
// NumericProjectID returns the current instance's numeric project ID.
|
|
||||||
func NumericProjectID() (string, error) { return projNum.get() }
|
|
||||||
|
|
||||||
// InternalIP returns the instance's primary internal IP address.
|
|
||||||
func InternalIP() (string, error) {
|
|
||||||
return getTrimmed("instance/network-interfaces/0/ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
|
|
||||||
func ExternalIP() (string, error) {
|
|
||||||
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname returns the instance's hostname. This will be of the form
|
|
||||||
// "<instanceID>.c.<projID>.internal".
|
|
||||||
func Hostname() (string, error) {
|
|
||||||
return getTrimmed("instance/hostname")
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func InstanceTags() ([]string, error) {
|
|
||||||
var s []string
|
|
||||||
j, err := Get("instance/tags")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceID returns the current VM's numeric instance ID.
|
|
||||||
func InstanceID() (string, error) {
|
|
||||||
return instID.get()
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func InstanceName() (string, error) {
|
|
||||||
host, err := Hostname()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return strings.Split(host, ".")[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func Zone() (string, error) {
|
|
||||||
zone, err := getTrimmed("instance/zone")
|
|
||||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributes returns the list of user-defined attributes,
|
|
||||||
// assigned when initially creating a GCE VM instance. The value of an
|
|
||||||
// attribute can be obtained with InstanceAttributeValue.
|
|
||||||
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
|
||||||
|
|
||||||
// ProjectAttributes returns the list of user-defined attributes
|
|
||||||
// applying to the project as a whole, not just this VM. The value of
|
|
||||||
// an attribute can be obtained with ProjectAttributeValue.
|
|
||||||
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
|
||||||
|
|
||||||
func lines(suffix string) ([]string, error) {
|
|
||||||
j, err := Get(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
|
||||||
for i := range s {
|
|
||||||
s[i] = strings.TrimSpace(s[i])
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributeValue returns the value of the provided VM
|
|
||||||
// instance attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func InstanceAttributeValue(attr string) (string, error) {
|
|
||||||
return Get("instance/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectAttributeValue returns the value of the provided
|
|
||||||
// project attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func ProjectAttributeValue(attr string) (string, error) {
|
|
||||||
return Get("project/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scopes returns the service account scopes for the given account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func Scopes(serviceAccount string) ([]string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
|
||||||
}
|
|
||||||
|
|
||||||
func strsContains(ss []string, s string) bool {
|
|
||||||
for _, v := range ss {
|
|
||||||
if v == s {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
256
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
256
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
|
|
@ -1,256 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package iam supports the resource-specific operations of Google Cloud
|
|
||||||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
|
||||||
// See https://cloud.google.com/iam for more about IAM.
|
|
||||||
//
|
|
||||||
// Users of the Google Cloud Libraries will typically not use this package
|
|
||||||
// directly. Instead they will begin with some resource that supports IAM, like
|
|
||||||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
|
||||||
package iam
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
pb "google.golang.org/genproto/googleapis/iam/v1"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
|
||||||
type client interface {
|
|
||||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
|
||||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
|
||||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
|
||||||
type grpcClient struct {
|
|
||||||
c pb.IAMPolicyClient
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
|
||||||
proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto, nil
|
|
||||||
}
|
|
||||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
|
||||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
|
||||||
Resource: resource,
|
|
||||||
Policy: p,
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
|
||||||
res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
|
||||||
Resource: resource,
|
|
||||||
Permissions: perms,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return res.Permissions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Handle provides IAM operations for a resource.
|
|
||||||
type Handle struct {
|
|
||||||
c client
|
|
||||||
resource string
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
|
||||||
//
|
|
||||||
// InternalNewHandle returns a Handle for resource.
|
|
||||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
|
||||||
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
|
|
||||||
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
|
||||||
//
|
|
||||||
// InternalNewHandleClient returns a Handle for resource using the given
|
|
||||||
// client implementation.
|
|
||||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
|
||||||
return &Handle{
|
|
||||||
c: c,
|
|
||||||
resource: resource,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Policy retrieves the IAM policy for the resource.
|
|
||||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
|
||||||
proto, err := h.c.Get(ctx, h.resource)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Policy{InternalProto: proto}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
|
||||||
//
|
|
||||||
// If policy was created from a prior call to Get, then the modification will
|
|
||||||
// only succeed if the policy has not changed since the Get.
|
|
||||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
|
||||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
|
||||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
|
||||||
return h.c.Test(ctx, h.resource, permissions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A RoleName is a name representing a collection of permissions.
|
|
||||||
type RoleName string
|
|
||||||
|
|
||||||
// Common role names.
|
|
||||||
const (
|
|
||||||
Owner RoleName = "roles/owner"
|
|
||||||
Editor RoleName = "roles/editor"
|
|
||||||
Viewer RoleName = "roles/viewer"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
|
||||||
AllUsers = "allUsers"
|
|
||||||
|
|
||||||
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
|
||||||
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Policy is a list of Bindings representing roles
|
|
||||||
// granted to members.
|
|
||||||
//
|
|
||||||
// The zero Policy is a valid policy with no bindings.
|
|
||||||
type Policy struct {
|
|
||||||
// TODO(jba): when type aliases are available, put Policy into an internal package
|
|
||||||
// and provide an exported alias here.
|
|
||||||
|
|
||||||
// This field is exported for use by the Google Cloud Libraries only.
|
|
||||||
// It may become unexported in a future release.
|
|
||||||
InternalProto *pb.Policy
|
|
||||||
}
|
|
||||||
|
|
||||||
// Members returns the list of members with the supplied role.
|
|
||||||
// The return value should not be modified. Use Add and Remove
|
|
||||||
// to modify the members of a role.
|
|
||||||
func (p *Policy) Members(r RoleName) []string {
|
|
||||||
b := p.binding(r)
|
|
||||||
if b == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return b.Members
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasRole reports whether member has role r.
|
|
||||||
func (p *Policy) HasRole(member string, r RoleName) bool {
|
|
||||||
return memberIndex(member, p.binding(r)) >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds member member to role r if it is not already present.
|
|
||||||
// A new binding is created if there is no binding for the role.
|
|
||||||
func (p *Policy) Add(member string, r RoleName) {
|
|
||||||
b := p.binding(r)
|
|
||||||
if b == nil {
|
|
||||||
if p.InternalProto == nil {
|
|
||||||
p.InternalProto = &pb.Policy{}
|
|
||||||
}
|
|
||||||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
|
||||||
Role: string(r),
|
|
||||||
Members: []string{member},
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if memberIndex(member, b) < 0 {
|
|
||||||
b.Members = append(b.Members, member)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes member from role r if it is present.
|
|
||||||
func (p *Policy) Remove(member string, r RoleName) {
|
|
||||||
bi := p.bindingIndex(r)
|
|
||||||
if bi < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bindings := p.InternalProto.Bindings
|
|
||||||
b := bindings[bi]
|
|
||||||
mi := memberIndex(member, b)
|
|
||||||
if mi < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
|
||||||
// into the removed spot and shrink the slice.
|
|
||||||
if len(b.Members) == 1 {
|
|
||||||
// Remove binding.
|
|
||||||
last := len(bindings) - 1
|
|
||||||
bindings[bi] = bindings[last]
|
|
||||||
bindings[last] = nil
|
|
||||||
p.InternalProto.Bindings = bindings[:last]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Remove member.
|
|
||||||
// TODO(jba): worry about multiple copies of m?
|
|
||||||
last := len(b.Members) - 1
|
|
||||||
b.Members[mi] = b.Members[last]
|
|
||||||
b.Members[last] = ""
|
|
||||||
b.Members = b.Members[:last]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Roles returns the names of all the roles that appear in the Policy.
|
|
||||||
func (p *Policy) Roles() []RoleName {
|
|
||||||
if p.InternalProto == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var rns []RoleName
|
|
||||||
for _, b := range p.InternalProto.Bindings {
|
|
||||||
rns = append(rns, RoleName(b.Role))
|
|
||||||
}
|
|
||||||
return rns
|
|
||||||
}
|
|
||||||
|
|
||||||
// binding returns the Binding for the suppied role, or nil if there isn't one.
|
|
||||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
|
||||||
i := p.bindingIndex(r)
|
|
||||||
if i < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return p.InternalProto.Bindings[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Policy) bindingIndex(r RoleName) int {
|
|
||||||
if p.InternalProto == nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
for i, b := range p.InternalProto.Bindings {
|
|
||||||
if b.Role == string(r) {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
|
||||||
func memberIndex(m string, b *pb.Binding) int {
|
|
||||||
if b == nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
for i, mm := range b.Members {
|
|
||||||
if mm == m {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
54
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
54
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
|
|
@ -1,54 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Annotate prepends msg to the error message in err, attempting
|
|
||||||
// to preserve other information in err, like an error code.
|
|
||||||
//
|
|
||||||
// Annotate panics if err is nil.
|
|
||||||
//
|
|
||||||
// Annotate knows about these error types:
|
|
||||||
// - "google.golang.org/grpc/status".Status
|
|
||||||
// - "google.golang.org/api/googleapi".Error
|
|
||||||
// If the error is not one of these types, Annotate behaves
|
|
||||||
// like
|
|
||||||
// fmt.Errorf("%s: %v", msg, err)
|
|
||||||
func Annotate(err error, msg string) error {
|
|
||||||
if err == nil {
|
|
||||||
panic("Annotate called with nil")
|
|
||||||
}
|
|
||||||
if s, ok := status.FromError(err); ok {
|
|
||||||
p := s.Proto()
|
|
||||||
p.Message = msg + ": " + p.Message
|
|
||||||
return status.ErrorProto(p)
|
|
||||||
}
|
|
||||||
if g, ok := err.(*googleapi.Error); ok {
|
|
||||||
g.Message = msg + ": " + g.Message
|
|
||||||
return g
|
|
||||||
}
|
|
||||||
return fmt.Errorf("%s: %v", msg, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Annotatef uses format and args to format a string, then calls Annotate.
|
|
||||||
func Annotatef(err error, format string, args ...interface{}) error {
|
|
||||||
return Annotate(err, fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
108
vendor/cloud.google.com/go/internal/optional/optional.go
generated
vendored
108
vendor/cloud.google.com/go/internal/optional/optional.go
generated
vendored
|
|
@ -1,108 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package optional provides versions of primitive types that can
|
|
||||||
// be nil. These are useful in methods that update some of an API object's
|
|
||||||
// fields.
|
|
||||||
package optional
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// Bool is either a bool or nil.
	Bool interface{}

	// String is either a string or nil.
	String interface{}

	// Int is either an int or nil.
	Int interface{}

	// Uint is either a uint or nil.
	Uint interface{}

	// Float64 is either a float64 or nil.
	Float64 interface{}

	// Duration is either a time.Duration or nil.
	Duration interface{}
)

// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
	b, ok := v.(bool)
	if !ok {
		doPanic("Bool", v)
	}
	return b
}

// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
	s, ok := v.(string)
	if !ok {
		doPanic("String", v)
	}
	return s
}

// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
	i, ok := v.(int)
	if !ok {
		doPanic("Int", v)
	}
	return i
}

// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
	u, ok := v.(uint)
	if !ok {
		doPanic("Uint", v)
	}
	return u
}

// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
	f, ok := v.(float64)
	if !ok {
		doPanic("Float64", v)
	}
	return f
}

// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
	d, ok := v.(time.Duration)
	if !ok {
		doPanic("Duration", v)
	}
	return d
}

// doPanic reports a type mismatch for the named optional type.
func doPanic(capType string, v interface{}) {
	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
|
|
||||||
55
vendor/cloud.google.com/go/internal/retry.go
generated
vendored
55
vendor/cloud.google.com/go/internal/retry.go
generated
vendored
|
|
@ -1,55 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
gax "github.com/googleapis/gax-go"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Retry calls the supplied function f repeatedly according to the provided
|
|
||||||
// backoff parameters. It returns when one of the following occurs:
|
|
||||||
// When f's first return value is true, Retry immediately returns with f's second
|
|
||||||
// return value.
|
|
||||||
// When the provided context is done, Retry returns with an error that
|
|
||||||
// includes both ctx.Error() and the last error returned by f.
|
|
||||||
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
|
|
||||||
return retry(ctx, bo, f, gax.Sleep)
|
|
||||||
}
|
|
||||||
|
|
||||||
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
|
|
||||||
sleep func(context.Context, time.Duration) error) error {
|
|
||||||
var lastErr error
|
|
||||||
for {
|
|
||||||
stop, err := f()
|
|
||||||
if stop {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Remember the last "real" error from f.
|
|
||||||
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
|
|
||||||
lastErr = err
|
|
||||||
}
|
|
||||||
p := bo.Pause()
|
|
||||||
if cerr := sleep(ctx, p); cerr != nil {
|
|
||||||
if lastErr != nil {
|
|
||||||
return Annotatef(lastErr, "retry failed with %v; last error", cerr)
|
|
||||||
}
|
|
||||||
return cerr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
71
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
71
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
|
|
@ -1,71 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:generate ./update_version.sh
|
|
||||||
|
|
||||||
// Package version contains version information for Google Cloud Client
|
|
||||||
// Libraries for Go, as reported in request headers.
|
|
||||||
package version
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20180118"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}

// goVersion is computed once from the running toolchain's version string.
var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

// goVer normalizes a runtime.Version() string into a semver-like form,
// e.g. "go1.8" -> "1.8.0" and "go1.9.2rc3" -> "1.9.2-rc3". Development
// builds yield the commit identifier; unrecognized inputs yield "".
func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		v := strings.TrimPrefix(s, develPrefix)
		if i := strings.IndexFunc(v, unicode.IsSpace); i >= 0 {
			v = v[:i]
		}
		return v
	}

	if !strings.HasPrefix(s, "go1") {
		return ""
	}
	v := s[2:] // drop the "go" prefix
	prerelease := ""
	if i := strings.IndexFunc(v, notSemverRune); i >= 0 {
		v, prerelease = v[:i], v[i:]
	}
	// Pad out to three numeric components.
	switch {
	case strings.HasSuffix(v, "."):
		v += "0"
	case strings.Count(v, ".") < 2:
		v += ".0"
	}
	if prerelease != "" {
		v += "-" + prerelease
	}
	return v
}

// notSemverRune reports whether r cannot appear in a numeric version core.
func notSemverRune(r rune) bool {
	return !strings.ContainsRune("0123456789.", r)
}
|
|
||||||
235
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
235
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
|
|
@ -1,235 +0,0 @@
|
||||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ACLRole is the level of access to grant.
type ACLRole string

// The access levels a grant may carry.
const (
	RoleOwner  ACLRole = "OWNER"
	RoleReader ACLRole = "READER"
	RoleWriter ACLRole = "WRITER"
)

// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

// Predefined ACL entities.
const (
	AllUsers              ACLEntity = "allUsers"
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)

// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
type ACLRule struct {
	Entity ACLEntity
	Role   ACLRole
}
|
|
||||||
|
|
||||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
|
||||||
type ACLHandle struct {
|
|
||||||
c *Client
|
|
||||||
bucket string
|
|
||||||
object string
|
|
||||||
isDefault bool
|
|
||||||
userProject string // for requester-pays buckets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete permanently deletes the ACL entry for the given entity.
|
|
||||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
if a.object != "" {
|
|
||||||
return a.objectDelete(ctx, entity)
|
|
||||||
}
|
|
||||||
if a.isDefault {
|
|
||||||
return a.bucketDefaultDelete(ctx, entity)
|
|
||||||
}
|
|
||||||
return a.bucketDelete(ctx, entity)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the permission level for the given entity.
|
|
||||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
|
||||||
if a.object != "" {
|
|
||||||
return a.objectSet(ctx, entity, role, false)
|
|
||||||
}
|
|
||||||
if a.isDefault {
|
|
||||||
return a.objectSet(ctx, entity, role, true)
|
|
||||||
}
|
|
||||||
return a.bucketSet(ctx, entity, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
// List retrieves ACL entries.
|
|
||||||
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
if a.object != "" {
|
|
||||||
return a.objectList(ctx)
|
|
||||||
}
|
|
||||||
if a.isDefault {
|
|
||||||
return a.bucketDefaultList(ctx)
|
|
||||||
}
|
|
||||||
return a.bucketList(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
var acls *raw.ObjectAccessControls
|
|
||||||
var err error
|
|
||||||
err = runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
acls, err = req.Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return toACLRules(acls.Items), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
return runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
return req.Do()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
var acls *raw.BucketAccessControls
|
|
||||||
var err error
|
|
||||||
err = runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
acls, err = req.Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r := make([]ACLRule, len(acls.Items))
|
|
||||||
for i, v := range acls.Items {
|
|
||||||
r[i].Entity = ACLEntity(v.Entity)
|
|
||||||
r[i].Role = ACLRole(v.Role)
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
|
||||||
acl := &raw.BucketAccessControl{
|
|
||||||
Bucket: a.bucket,
|
|
||||||
Entity: string(entity),
|
|
||||||
Role: string(role),
|
|
||||||
}
|
|
||||||
err := runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
_, err := req.Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
err := runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
return req.Do()
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
var acls *raw.ObjectAccessControls
|
|
||||||
var err error
|
|
||||||
err = runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
acls, err = req.Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return toACLRules(acls.Items), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
|
||||||
type setRequest interface {
|
|
||||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
|
||||||
Header() http.Header
|
|
||||||
}
|
|
||||||
|
|
||||||
acl := &raw.ObjectAccessControl{
|
|
||||||
Bucket: a.bucket,
|
|
||||||
Entity: string(entity),
|
|
||||||
Role: string(role),
|
|
||||||
}
|
|
||||||
var req setRequest
|
|
||||||
if isBucketDefault {
|
|
||||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
|
||||||
} else {
|
|
||||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
|
||||||
}
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
return runWithRetry(ctx, func() error {
|
|
||||||
_, err := req.Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
return runWithRetry(ctx, func() error {
|
|
||||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
|
||||||
a.configureCall(req, ctx)
|
|
||||||
return req.Do()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) configureCall(call interface {
|
|
||||||
Header() http.Header
|
|
||||||
}, ctx context.Context) {
|
|
||||||
vc := reflect.ValueOf(call)
|
|
||||||
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
|
|
||||||
if a.userProject != "" {
|
|
||||||
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
|
|
||||||
}
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
}
|
|
||||||
|
|
||||||
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
|
||||||
r := make([]ACLRule, 0, len(items))
|
|
||||||
for _, item := range items {
|
|
||||||
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
767
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
767
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
|
|
@ -1,767 +0,0 @@
|
||||||
// Copyright 2014 Google Inc. LiveAndArchived Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal/optional"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/api/iterator"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BucketHandle provides operations on a Google Cloud Storage bucket.
|
|
||||||
// Use Client.Bucket to get a handle.
|
|
||||||
type BucketHandle struct {
|
|
||||||
c *Client
|
|
||||||
name string
|
|
||||||
acl ACLHandle
|
|
||||||
defaultObjectACL ACLHandle
|
|
||||||
conds *BucketConditions
|
|
||||||
userProject string // project for Requester Pays buckets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bucket returns a BucketHandle, which provides operations on the named bucket.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
//
|
|
||||||
// The supplied name must contain only lowercase letters, numbers, dashes,
|
|
||||||
// underscores, and dots. The full specification for valid bucket names can be
|
|
||||||
// found at:
|
|
||||||
// https://cloud.google.com/storage/docs/bucket-naming
|
|
||||||
func (c *Client) Bucket(name string) *BucketHandle {
|
|
||||||
return &BucketHandle{
|
|
||||||
c: c,
|
|
||||||
name: name,
|
|
||||||
acl: ACLHandle{
|
|
||||||
c: c,
|
|
||||||
bucket: name,
|
|
||||||
},
|
|
||||||
defaultObjectACL: ACLHandle{
|
|
||||||
c: c,
|
|
||||||
bucket: name,
|
|
||||||
isDefault: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create creates the Bucket in the project.
|
|
||||||
// If attrs is nil the API defaults will be used.
|
|
||||||
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
|
|
||||||
var bkt *raw.Bucket
|
|
||||||
if attrs != nil {
|
|
||||||
bkt = attrs.toRawBucket()
|
|
||||||
} else {
|
|
||||||
bkt = &raw.Bucket{}
|
|
||||||
}
|
|
||||||
bkt.Name = b.name
|
|
||||||
// If there is lifecycle information but no location, explicitly set
|
|
||||||
// the location. This is a GCS quirk/bug.
|
|
||||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
|
||||||
bkt.Location = "US"
|
|
||||||
}
|
|
||||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes the Bucket.
|
|
||||||
func (b *BucketHandle) Delete(ctx context.Context) error {
|
|
||||||
req, err := b.newDeleteCall()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
|
|
||||||
req := b.c.raw.Buckets.Delete(b.name)
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b.userProject != "" {
|
|
||||||
req.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
|
|
||||||
// This controls who can list, create or overwrite the objects in a bucket.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
func (b *BucketHandle) ACL() *ACLHandle {
|
|
||||||
return &b.acl
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
|
|
||||||
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
|
|
||||||
return &b.defaultObjectACL
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object returns an ObjectHandle, which provides operations on the named object.
|
|
||||||
// This call does not perform any network operations.
|
|
||||||
//
|
|
||||||
// name must consist entirely of valid UTF-8-encoded runes. The full specification
|
|
||||||
// for valid object names can be found at:
|
|
||||||
// https://cloud.google.com/storage/docs/bucket-naming
|
|
||||||
func (b *BucketHandle) Object(name string) *ObjectHandle {
|
|
||||||
return &ObjectHandle{
|
|
||||||
c: b.c,
|
|
||||||
bucket: b.name,
|
|
||||||
object: name,
|
|
||||||
acl: ACLHandle{
|
|
||||||
c: b.c,
|
|
||||||
bucket: b.name,
|
|
||||||
object: name,
|
|
||||||
userProject: b.userProject,
|
|
||||||
},
|
|
||||||
gen: -1,
|
|
||||||
userProject: b.userProject,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attrs returns the metadata for the bucket.
|
|
||||||
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
|
|
||||||
req, err := b.newGetCall()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var resp *raw.Bucket
|
|
||||||
err = runWithRetry(ctx, func() error {
|
|
||||||
resp, err = req.Context(ctx).Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
|
|
||||||
return nil, ErrBucketNotExist
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return newBucket(resp), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
|
|
||||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b.userProject != "" {
|
|
||||||
req.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update updates the bucket's metadata as described by uattrs and returns
// the bucket's attributes after the update. It is a single Patch request;
// unlike other methods here it is not retried.
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
	req, err := b.newPatchCall(&uattrs)
	if err != nil {
		return nil, err
	}
	// TODO(jba): retry iff metagen is set?
	rb, err := req.Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return newBucket(rb), nil
}
|
|
||||||
|
|
||||||
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
|
|
||||||
rb := uattrs.toRawBucket()
|
|
||||||
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b.userProject != "" {
|
|
||||||
req.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
// Read-only fields are ignored by BucketHandle.Create.
type BucketAttrs struct {
	// Name is the name of the bucket.
	// This field is read-only.
	Name string

	// ACL is the list of access control rules on the bucket.
	ACL []ACLRule

	// DefaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	DefaultObjectACL []ACLRule

	// Location is the location of the bucket. It defaults to "US".
	Location string

	// MetaGeneration is the metadata generation of the bucket.
	// This field is read-only.
	MetaGeneration int64

	// StorageClass is the default storage class of the bucket. This defines
	// how objects in the bucket are stored and determines the SLA
	// and the cost of storage. Typical values are "MULTI_REGIONAL",
	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
	// the bucket's location settings.
	StorageClass string

	// Created is the creation time of the bucket.
	// This field is read-only.
	Created time.Time

	// VersioningEnabled reports whether this bucket has versioning enabled.
	VersioningEnabled bool

	// Labels are the bucket's labels.
	Labels map[string]string

	// RequesterPays reports whether the bucket is a Requester Pays bucket.
	// Clients performing operations on Requester Pays buckets must provide
	// a user project (see BucketHandle.UserProject), which will be billed
	// for the operations.
	RequesterPays bool

	// Lifecycle is the lifecycle configuration for objects in the bucket.
	Lifecycle Lifecycle
}
|
|
||||||
|
|
||||||
// Lifecycle is the lifecycle configuration for objects in the bucket.
// An empty Lifecycle (no rules) means no lifecycle management.
type Lifecycle struct {
	// Rules is the set of lifecycle rules; each pairs an action with
	// the conditions under which it fires.
	Rules []LifecycleRule
}
|
|
||||||
|
|
||||||
const (
	// rfc3339Date is an RFC 3339 layout with only the date segment, used
	// for CreatedBefore in LifecycleRule.
	rfc3339Date = "2006-01-02"

	// DeleteAction is a lifecycle action that deletes live and/or archived
	// objects. Takes precedence over SetStorageClass actions.
	DeleteAction = "Delete"

	// SetStorageClassAction changes the storage class of live and/or archived
	// objects.
	SetStorageClassAction = "SetStorageClass"
)
|
|
||||||
|
|
||||||
// LifecycleRule is a lifecycle configuration rule.
//
// When all the configured conditions are met by an object in the bucket, the
// configured action will automatically be taken on that object.
type LifecycleRule struct {
	// Action is the action to take when all of the associated conditions are
	// met.
	Action LifecycleAction

	// Condition is the set of conditions that must be met for the associated
	// action to be taken.
	Condition LifecycleCondition
}
|
|
||||||
|
|
||||||
// LifecycleAction is a lifecycle configuration action.
type LifecycleAction struct {
	// Type is the type of action to take on matching objects.
	//
	// Acceptable values are "Delete" to delete matching objects and
	// "SetStorageClass" to set the storage class defined in StorageClass on
	// matching objects. See the DeleteAction and SetStorageClassAction
	// constants.
	Type string

	// StorageClass is the storage class to set on matching objects if the Action
	// is "SetStorageClass". Ignored otherwise.
	StorageClass string
}
|
|
||||||
|
|
||||||
// Liveness specifies whether the object is live or not.
type Liveness int

const (
	// LiveAndArchived includes both live and archived objects.
	// This is the zero value, so it is the default for LifecycleCondition.
	LiveAndArchived Liveness = iota
	// Live specifies that the object is still live.
	Live
	// Archived specifies that the object is archived.
	Archived
)
|
|
||||||
|
|
||||||
// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
	// AgeInDays is the age of the object in days.
	AgeInDays int64

	// CreatedBefore is the time the object was created.
	//
	// This condition is satisfied when an object is created before midnight of
	// the specified date in UTC. Only the date portion is sent to the service
	// (see rfc3339Date).
	CreatedBefore time.Time

	// Liveness specifies the object's liveness. Relevant only for versioned objects
	Liveness Liveness

	// MatchesStorageClasses is the condition matching the object's storage
	// class.
	//
	// Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE",
	// "STANDARD", and "DURABLE_REDUCED_AVAILABILITY".
	MatchesStorageClasses []string

	// NumNewerVersions is the condition matching objects with a number of newer versions.
	//
	// If the value is N, this condition is satisfied when there are at least N
	// versions (including the live version) newer than this version of the
	// object.
	NumNewerVersions int64
}
|
|
||||||
|
|
||||||
func newBucket(b *raw.Bucket) *BucketAttrs {
|
|
||||||
if b == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
bucket := &BucketAttrs{
|
|
||||||
Name: b.Name,
|
|
||||||
Location: b.Location,
|
|
||||||
MetaGeneration: b.Metageneration,
|
|
||||||
StorageClass: b.StorageClass,
|
|
||||||
Created: convertTime(b.TimeCreated),
|
|
||||||
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
|
|
||||||
Labels: b.Labels,
|
|
||||||
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
|
|
||||||
Lifecycle: toLifecycle(b.Lifecycle),
|
|
||||||
}
|
|
||||||
acl := make([]ACLRule, len(b.Acl))
|
|
||||||
for i, rule := range b.Acl {
|
|
||||||
acl[i] = ACLRule{
|
|
||||||
Entity: ACLEntity(rule.Entity),
|
|
||||||
Role: ACLRole(rule.Role),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bucket.ACL = acl
|
|
||||||
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
|
|
||||||
for i, rule := range b.DefaultObjectAcl {
|
|
||||||
objACL[i] = ACLRule{
|
|
||||||
Entity: ACLEntity(rule.Entity),
|
|
||||||
Role: ACLRole(rule.Role),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bucket.DefaultObjectACL = objACL
|
|
||||||
return bucket
|
|
||||||
}
|
|
||||||
|
|
||||||
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
|
|
||||||
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
|
|
||||||
var acl []*raw.BucketAccessControl
|
|
||||||
if len(b.ACL) > 0 {
|
|
||||||
acl = make([]*raw.BucketAccessControl, len(b.ACL))
|
|
||||||
for i, rule := range b.ACL {
|
|
||||||
acl[i] = &raw.BucketAccessControl{
|
|
||||||
Entity: string(rule.Entity),
|
|
||||||
Role: string(rule.Role),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dACL := toRawObjectACL(b.DefaultObjectACL)
|
|
||||||
// Copy label map.
|
|
||||||
var labels map[string]string
|
|
||||||
if len(b.Labels) > 0 {
|
|
||||||
labels = make(map[string]string, len(b.Labels))
|
|
||||||
for k, v := range b.Labels {
|
|
||||||
labels[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Ignore VersioningEnabled if it is false. This is OK because
|
|
||||||
// we only call this method when creating a bucket, and by default
|
|
||||||
// new buckets have versioning off.
|
|
||||||
var v *raw.BucketVersioning
|
|
||||||
if b.VersioningEnabled {
|
|
||||||
v = &raw.BucketVersioning{Enabled: true}
|
|
||||||
}
|
|
||||||
var bb *raw.BucketBilling
|
|
||||||
if b.RequesterPays {
|
|
||||||
bb = &raw.BucketBilling{RequesterPays: true}
|
|
||||||
}
|
|
||||||
return &raw.Bucket{
|
|
||||||
Name: b.Name,
|
|
||||||
DefaultObjectAcl: dACL,
|
|
||||||
Location: b.Location,
|
|
||||||
StorageClass: b.StorageClass,
|
|
||||||
Acl: acl,
|
|
||||||
Versioning: v,
|
|
||||||
Labels: labels,
|
|
||||||
Billing: bb,
|
|
||||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BucketAttrsToUpdate describes the bucket attribute changes to apply in a
// call to BucketHandle.Update. Nil optional fields are left unchanged.
type BucketAttrsToUpdate struct {
	// VersioningEnabled, if set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool

	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool

	// setLabels holds labels to add or modify; populated via SetLabel.
	setLabels map[string]string
	// deleteLabels holds label names to remove; populated via DeleteLabel.
	deleteLabels map[string]bool
}
|
|
||||||
|
|
||||||
// SetLabel causes a label to be added or modified when ua is used
|
|
||||||
// in a call to Bucket.Update.
|
|
||||||
func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
|
|
||||||
if ua.setLabels == nil {
|
|
||||||
ua.setLabels = map[string]string{}
|
|
||||||
}
|
|
||||||
ua.setLabels[name] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteLabel causes a label to be deleted when ua is used in a
|
|
||||||
// call to Bucket.Update.
|
|
||||||
func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
|
|
||||||
if ua.deleteLabels == nil {
|
|
||||||
ua.deleteLabels = map[string]bool{}
|
|
||||||
}
|
|
||||||
ua.deleteLabels[name] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
|
|
||||||
rb := &raw.Bucket{}
|
|
||||||
if ua.VersioningEnabled != nil {
|
|
||||||
rb.Versioning = &raw.BucketVersioning{
|
|
||||||
Enabled: optional.ToBool(ua.VersioningEnabled),
|
|
||||||
ForceSendFields: []string{"Enabled"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ua.RequesterPays != nil {
|
|
||||||
rb.Billing = &raw.BucketBilling{
|
|
||||||
RequesterPays: optional.ToBool(ua.RequesterPays),
|
|
||||||
ForceSendFields: []string{"RequesterPays"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ua.setLabels != nil || ua.deleteLabels != nil {
|
|
||||||
rb.Labels = map[string]string{}
|
|
||||||
for k, v := range ua.setLabels {
|
|
||||||
rb.Labels[k] = v
|
|
||||||
}
|
|
||||||
if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
|
|
||||||
rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
|
|
||||||
}
|
|
||||||
for l := range ua.deleteLabels {
|
|
||||||
rb.NullFields = append(rb.NullFields, "Labels."+l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return rb
|
|
||||||
}
|
|
||||||
|
|
||||||
// If returns a new BucketHandle that applies a set of preconditions.
|
|
||||||
// Preconditions already set on the BucketHandle are ignored.
|
|
||||||
// Operations on the new handle will only occur if the preconditions are
|
|
||||||
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
|
|
||||||
// and MetagenerationNotMatch.
|
|
||||||
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
|
|
||||||
b2 := *b
|
|
||||||
b2.conds = &conds
|
|
||||||
return &b2
|
|
||||||
}
|
|
||||||
|
|
||||||
// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints. At most one of the two
// fields may be non-zero (see validate).
type BucketConditions struct {
	// MetagenerationMatch specifies that the bucket must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the bucket must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}
|
|
||||||
|
|
||||||
func (c *BucketConditions) validate(method string) error {
|
|
||||||
if *c == (BucketConditions{}) {
|
|
||||||
return fmt.Errorf("storage: %s: empty conditions", method)
|
|
||||||
}
|
|
||||||
if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
|
|
||||||
return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UserProject returns a new BucketHandle that passes the project ID as the user
|
|
||||||
// project for all subsequent calls. Calls with a user project will be billed to that
|
|
||||||
// project rather than to the bucket's owning project.
|
|
||||||
//
|
|
||||||
// A user project is required for all operations on Requester Pays buckets.
|
|
||||||
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
|
|
||||||
b2 := *b
|
|
||||||
b2.userProject = projectID
|
|
||||||
b2.acl.userProject = projectID
|
|
||||||
b2.defaultObjectACL.userProject = projectID
|
|
||||||
return &b2
|
|
||||||
}
|
|
||||||
|
|
||||||
// applyBucketConds modifies the provided call using the conditions in conds.
|
|
||||||
// call is something that quacks like a *raw.WhateverCall.
|
|
||||||
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
|
|
||||||
if conds == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := conds.validate(method); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cval := reflect.ValueOf(call)
|
|
||||||
switch {
|
|
||||||
case conds.MetagenerationMatch != 0:
|
|
||||||
if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
|
|
||||||
return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
|
|
||||||
}
|
|
||||||
case conds.MetagenerationNotMatch != 0:
|
|
||||||
if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
|
|
||||||
return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
|
||||||
var rl raw.BucketLifecycle
|
|
||||||
if len(l.Rules) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for _, r := range l.Rules {
|
|
||||||
rr := &raw.BucketLifecycleRule{
|
|
||||||
Action: &raw.BucketLifecycleRuleAction{
|
|
||||||
Type: r.Action.Type,
|
|
||||||
StorageClass: r.Action.StorageClass,
|
|
||||||
},
|
|
||||||
Condition: &raw.BucketLifecycleRuleCondition{
|
|
||||||
Age: r.Condition.AgeInDays,
|
|
||||||
MatchesStorageClass: r.Condition.MatchesStorageClasses,
|
|
||||||
NumNewerVersions: r.Condition.NumNewerVersions,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
switch r.Condition.Liveness {
|
|
||||||
case LiveAndArchived:
|
|
||||||
rr.Condition.IsLive = nil
|
|
||||||
case Live:
|
|
||||||
rr.Condition.IsLive = googleapi.Bool(true)
|
|
||||||
case Archived:
|
|
||||||
rr.Condition.IsLive = googleapi.Bool(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !r.Condition.CreatedBefore.IsZero() {
|
|
||||||
rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
|
|
||||||
}
|
|
||||||
rl.Rule = append(rl.Rule, rr)
|
|
||||||
}
|
|
||||||
return &rl
|
|
||||||
}
|
|
||||||
|
|
||||||
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
|
||||||
var l Lifecycle
|
|
||||||
if rl == nil {
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
for _, rr := range rl.Rule {
|
|
||||||
r := LifecycleRule{
|
|
||||||
Action: LifecycleAction{
|
|
||||||
Type: rr.Action.Type,
|
|
||||||
StorageClass: rr.Action.StorageClass,
|
|
||||||
},
|
|
||||||
Condition: LifecycleCondition{
|
|
||||||
AgeInDays: rr.Condition.Age,
|
|
||||||
MatchesStorageClasses: rr.Condition.MatchesStorageClass,
|
|
||||||
NumNewerVersions: rr.Condition.NumNewerVersions,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case rr.Condition.IsLive == nil:
|
|
||||||
r.Condition.Liveness = LiveAndArchived
|
|
||||||
case *rr.Condition.IsLive == true:
|
|
||||||
r.Condition.Liveness = Live
|
|
||||||
case *rr.Condition.IsLive == false:
|
|
||||||
r.Condition.Liveness = Archived
|
|
||||||
}
|
|
||||||
|
|
||||||
if rr.Condition.CreatedBefore != "" {
|
|
||||||
r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
|
|
||||||
}
|
|
||||||
l.Rules = append(l.Rules, r)
|
|
||||||
}
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Objects returns an iterator over the objects in the bucket that match the Query q.
|
|
||||||
// If q is nil, no filtering is done.
|
|
||||||
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
|
|
||||||
it := &ObjectIterator{
|
|
||||||
ctx: ctx,
|
|
||||||
bucket: b,
|
|
||||||
}
|
|
||||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
|
||||||
it.fetch,
|
|
||||||
func() int { return len(it.items) },
|
|
||||||
func() interface{} { b := it.items; it.items = nil; return b })
|
|
||||||
if q != nil {
|
|
||||||
it.query = *q
|
|
||||||
}
|
|
||||||
return it
|
|
||||||
}
|
|
||||||
|
|
||||||
// An ObjectIterator is an iterator over ObjectAttrs.
type ObjectIterator struct {
	ctx    context.Context // context for the underlying List calls
	bucket *BucketHandle   // bucket being listed
	query  Query           // filtering options (zero value lists everything)
	// pageInfo and nextFunc implement the standard iterator protocol;
	// see google.golang.org/api/iterator.
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []*ObjectAttrs // buffered results for the current page
}
|
|
||||||
|
|
||||||
// PageInfo supports pagination (page size and tokens).
// See the google.golang.org/api/iterator package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
|
||||||
|
|
||||||
// Next returns the next result. Its second return value is iterator.Done if
|
|
||||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
|
||||||
// calls will return iterator.Done.
|
|
||||||
//
|
|
||||||
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
|
|
||||||
// have a non-empty Prefix field, and a zero value for all other fields. These
|
|
||||||
// represent prefixes.
|
|
||||||
func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
|
||||||
if err := it.nextFunc(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
item := it.items[0]
|
|
||||||
it.items = it.items[1:]
|
|
||||||
return item, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
|
|
||||||
req := it.bucket.c.raw.Objects.List(it.bucket.name)
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
req.Projection("full")
|
|
||||||
req.Delimiter(it.query.Delimiter)
|
|
||||||
req.Prefix(it.query.Prefix)
|
|
||||||
req.Versions(it.query.Versions)
|
|
||||||
req.PageToken(pageToken)
|
|
||||||
if it.bucket.userProject != "" {
|
|
||||||
req.UserProject(it.bucket.userProject)
|
|
||||||
}
|
|
||||||
if pageSize > 0 {
|
|
||||||
req.MaxResults(int64(pageSize))
|
|
||||||
}
|
|
||||||
var resp *raw.Objects
|
|
||||||
var err error
|
|
||||||
err = runWithRetry(it.ctx, func() error {
|
|
||||||
resp, err = req.Context(it.ctx).Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
|
|
||||||
err = ErrBucketNotExist
|
|
||||||
}
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
for _, item := range resp.Items {
|
|
||||||
it.items = append(it.items, newObject(item))
|
|
||||||
}
|
|
||||||
for _, prefix := range resp.Prefixes {
|
|
||||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
|
||||||
}
|
|
||||||
return resp.NextPageToken, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(jbd): Add storage.buckets.update.
|
|
||||||
|
|
||||||
// Buckets returns an iterator over the buckets in the project. You may
|
|
||||||
// optionally set the iterator's Prefix field to restrict the list to buckets
|
|
||||||
// whose names begin with the prefix. By default, all buckets in the project
|
|
||||||
// are returned.
|
|
||||||
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
|
|
||||||
it := &BucketIterator{
|
|
||||||
ctx: ctx,
|
|
||||||
client: c,
|
|
||||||
projectID: projectID,
|
|
||||||
}
|
|
||||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
|
||||||
it.fetch,
|
|
||||||
func() int { return len(it.buckets) },
|
|
||||||
func() interface{} { b := it.buckets; it.buckets = nil; return b })
|
|
||||||
return it
|
|
||||||
}
|
|
||||||
|
|
||||||
// A BucketIterator is an iterator over BucketAttrs.
type BucketIterator struct {
	// Prefix restricts the iterator to buckets whose names begin with it.
	Prefix string

	ctx       context.Context // context for the underlying List calls
	client    *Client         // client used to issue the calls
	projectID string          // project whose buckets are listed
	buckets   []*BucketAttrs  // buffered results for the current page
	// pageInfo and nextFunc implement the standard iterator protocol;
	// see google.golang.org/api/iterator.
	pageInfo *iterator.PageInfo
	nextFunc func() error
}
|
|
||||||
|
|
||||||
// Next returns the next result. Its second return value is iterator.Done if
|
|
||||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
|
||||||
// calls will return iterator.Done.
|
|
||||||
func (it *BucketIterator) Next() (*BucketAttrs, error) {
|
|
||||||
if err := it.nextFunc(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
b := it.buckets[0]
|
|
||||||
it.buckets = it.buckets[1:]
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PageInfo supports pagination (page size and tokens).
// See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
|
||||||
|
|
||||||
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
|
|
||||||
req := it.client.raw.Buckets.List(it.projectID)
|
|
||||||
setClientHeader(req.Header())
|
|
||||||
req.Projection("full")
|
|
||||||
req.Prefix(it.Prefix)
|
|
||||||
req.PageToken(pageToken)
|
|
||||||
if pageSize > 0 {
|
|
||||||
req.MaxResults(int64(pageSize))
|
|
||||||
}
|
|
||||||
var resp *raw.Buckets
|
|
||||||
var err error
|
|
||||||
err = runWithRetry(it.ctx, func() error {
|
|
||||||
resp, err = req.Context(it.ctx).Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
for _, item := range resp.Items {
|
|
||||||
it.buckets = append(it.buckets, newBucket(item))
|
|
||||||
}
|
|
||||||
return resp.NextPageToken, nil
|
|
||||||
}
|
|
||||||
201
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
201
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
|
|
@ -1,201 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CopierFrom creates a Copier that can copy src to dst.
|
|
||||||
// You can immediately call Run on the returned Copier, or
|
|
||||||
// you can configure it first.
|
|
||||||
//
|
|
||||||
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
|
||||||
// in which case the user project of src is billed.
|
|
||||||
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
|
||||||
return &Copier{dst: dst, src: src}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Copier copies a source object to a destination.
type Copier struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Copier. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// RewriteToken can be set before calling Run to resume a copy
	// operation. After Run returns a non-nil error, RewriteToken will
	// have been updated to contain the value needed to resume the copy.
	RewriteToken string

	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
	// operation. If ProgressFunc is not nil and copying requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
	// ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far and the total size in bytes of the source object.
	//
	// ProgressFunc is intended to make upload progress available to the
	// application. For example, the implementation of ProgressFunc may update
	// a progress bar in the application's UI, or log the result of
	// float64(copiedBytes)/float64(totalBytes).
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(copiedBytes, totalBytes uint64)

	// dst and src are the destination and source handles, set by CopierFrom.
	dst, src *ObjectHandle
}
|
|
||||||
|
|
||||||
// Run performs the copy.
|
|
||||||
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
|
|
||||||
if err := c.src.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := c.dst.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Convert destination attributes to raw form, omitting the bucket.
|
|
||||||
// If the bucket is included but name or content-type aren't, the service
|
|
||||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
|
||||||
// does not cause any problems.
|
|
||||||
rawObject := c.ObjectAttrs.toRawObject("")
|
|
||||||
for {
|
|
||||||
res, err := c.callRewrite(ctx, rawObject)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if c.ProgressFunc != nil {
|
|
||||||
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
|
||||||
}
|
|
||||||
if res.Done { // Finished successfully.
|
|
||||||
return newObject(res.Resource), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// callRewrite issues one Objects.Rewrite RPC for this copy, carrying over any
// RewriteToken from a previous call so the service can resume where it left
// off. It stores the response's token back on c and returns the raw response;
// the caller loops until the response's Done field is set.
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
	call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)

	call.Context(ctx).Projection("full")
	// Resume a multi-call rewrite from where the previous call stopped.
	if c.RewriteToken != "" {
		call.RewriteToken(c.RewriteToken)
	}
	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	// Bill the destination's user project when set; otherwise fall back to
	// the source's (for Requester Pays buckets).
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	} else if c.src.userProject != "" {
		call.UserProject(c.src.userProject)
	}
	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
		return nil, err
	}
	// Apply the destination key, then the source key; the final boolean
	// presumably distinguishes copy-source headers — confirm against
	// setEncryptionHeaders.
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
		return nil, err
	}
	var res *raw.RewriteResponse
	var err error
	setClientHeader(call.Header())
	// res is captured from the last (successful) attempt of the retry loop.
	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	// Remember the token so the next call can continue the rewrite.
	c.RewriteToken = res.RewriteToken
	return res, nil
}
|
|
||||||
|
|
||||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
|
||||||
// You can immediately call Run on the returned Composer, or you can
|
|
||||||
// configure it first.
|
|
||||||
//
|
|
||||||
// The encryption key for the destination object will be used to decrypt all
|
|
||||||
// source objects and encrypt the destination object. It is an error
|
|
||||||
// to specify an encryption key for any of the source objects.
|
|
||||||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
|
||||||
return &Composer{dst: dst, srcs: srcs}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Composer composes source objects into a destination object.
//
// For Requester Pays buckets, the user project of dst is billed.
type Composer struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Composer. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// dst is the destination object; srcs are the objects to be concatenated,
	// in order. Both are set by ObjectHandle.ComposerFrom.
	dst  *ObjectHandle
	srcs []*ObjectHandle
}
|
|
||||||
|
|
||||||
// Run performs the compose operation.
|
|
||||||
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
|
|
||||||
if err := c.dst.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if len(c.srcs) == 0 {
|
|
||||||
return nil, errors.New("storage: at least one source object must be specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
req := &raw.ComposeRequest{}
|
|
||||||
// Compose requires a non-empty Destination, so we always set it,
|
|
||||||
// even if the caller-provided ObjectAttrs is the zero value.
|
|
||||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
|
||||||
for _, src := range c.srcs {
|
|
||||||
if err := src.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if src.bucket != c.dst.bucket {
|
|
||||||
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
|
||||||
}
|
|
||||||
if src.encryptionKey != nil {
|
|
||||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
|
||||||
}
|
|
||||||
srcObj := &raw.ComposeRequestSourceObjects{
|
|
||||||
Name: src.object,
|
|
||||||
}
|
|
||||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
|
||||||
}
|
|
||||||
|
|
||||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
|
||||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if c.dst.userProject != "" {
|
|
||||||
call.UserProject(c.dst.userProject)
|
|
||||||
}
|
|
||||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var obj *raw.Object
|
|
||||||
var err error
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return newObject(obj), nil
|
|
||||||
}
|
|
||||||
166
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
166
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
|
|
@ -1,166 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package storage provides an easy way to work with Google Cloud Storage.
|
|
||||||
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
|
||||||
|
|
||||||
More information about Google Cloud Storage is available at
|
|
||||||
https://cloud.google.com/storage/docs.
|
|
||||||
|
|
||||||
All of the methods of this package use exponential backoff to retry calls
|
|
||||||
that fail with certain errors, as described in
|
|
||||||
https://cloud.google.com/storage/docs/exponential-backoff.
|
|
||||||
|
|
||||||
|
|
||||||
Creating a Client
|
|
||||||
|
|
||||||
To start working with this package, create a client:
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
client, err := storage.NewClient(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
The client will use your default application credentials.
|
|
||||||
|
|
||||||
If you only wish to access public data, you can create
|
|
||||||
an unauthenticated client with
|
|
||||||
|
|
||||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
|
||||||
|
|
||||||
Buckets
|
|
||||||
|
|
||||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
|
||||||
bucket, make a bucket handle:
|
|
||||||
|
|
||||||
bkt := client.Bucket(bucketName)
|
|
||||||
|
|
||||||
A handle is a reference to a bucket. You can have a handle even if the
|
|
||||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
|
||||||
call Create on the handle:
|
|
||||||
|
|
||||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
Note that although buckets are associated with projects, bucket names are
|
|
||||||
global across all projects.
|
|
||||||
|
|
||||||
Each bucket has associated metadata, represented in this package by
|
|
||||||
BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
|
||||||
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
|
||||||
Attrs:
|
|
||||||
|
|
||||||
attrs, err := bkt.Attrs(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
|
||||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
|
||||||
|
|
||||||
Objects
|
|
||||||
|
|
||||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
|
||||||
refer to objects using a handle, just as with buckets. You can use the
|
|
||||||
standard Go io.Reader and io.Writer interfaces to read and write
|
|
||||||
object data:
|
|
||||||
|
|
||||||
obj := bkt.Object("data")
|
|
||||||
// Write something to obj.
|
|
||||||
// w implements io.Writer.
|
|
||||||
w := obj.NewWriter(ctx)
|
|
||||||
// Write some text to obj. This will overwrite whatever is there.
|
|
||||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
// Close, just like writing a file.
|
|
||||||
if err := w.Close(); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read it back.
|
|
||||||
r, err := obj.NewReader(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
// Prints "This object contains text."
|
|
||||||
|
|
||||||
Objects also have attributes, which you can fetch with Attrs:
|
|
||||||
|
|
||||||
objAttrs, err := obj.Attrs(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
|
||||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
|
||||||
|
|
||||||
ACLs
|
|
||||||
|
|
||||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
|
||||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
|
||||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
|
||||||
access at the project level (see
|
|
||||||
https://cloud.google.com/storage/docs/access-control/iam).
|
|
||||||
|
|
||||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
|
||||||
|
|
||||||
acls, err := obj.ACL().List(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
for _, rule := range acls {
|
|
||||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
|
||||||
}
|
|
||||||
|
|
||||||
You can also set and delete ACLs.
|
|
||||||
|
|
||||||
Conditions
|
|
||||||
|
|
||||||
Every object has a generation and a metageneration. The generation changes
|
|
||||||
whenever the content changes, and the metageneration changes whenever the
|
|
||||||
metadata changes. Conditions let you check these values before an operation;
|
|
||||||
the operation only executes if the conditions match. You can use conditions to
|
|
||||||
prevent race conditions in read-modify-write operations.
|
|
||||||
|
|
||||||
For example, say you've read an object's metadata into objAttrs. Now
|
|
||||||
you want to write to that object, but only if its contents haven't changed
|
|
||||||
since you read it. Here is how to express that:
|
|
||||||
|
|
||||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
|
||||||
// Proceed with writing as above.
|
|
||||||
|
|
||||||
Signed URLs
|
|
||||||
|
|
||||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
|
||||||
You don't need to create a client to do this. See the documentation of
|
|
||||||
SignedURL for details.
|
|
||||||
|
|
||||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Println(url)
|
|
||||||
|
|
||||||
Authentication
|
|
||||||
|
|
||||||
See examples of authorization and authentication at
|
|
||||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
|
||||||
*/
|
|
||||||
package storage // import "cloud.google.com/go/storage"
|
|
||||||
30
vendor/cloud.google.com/go/storage/go110.go
generated
vendored
30
vendor/cloud.google.com/go/storage/go110.go
generated
vendored
|
|
@ -1,30 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build go1.10
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import "google.golang.org/api/googleapi"
|
|
||||||
|
|
||||||
func shouldRetry(err error) bool {
|
|
||||||
switch e := err.(type) {
|
|
||||||
case *googleapi.Error:
|
|
||||||
// Retry on 429 and 5xx, according to
|
|
||||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
|
||||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
26
vendor/cloud.google.com/go/storage/go17.go
generated
vendored
26
vendor/cloud.google.com/go/storage/go17.go
generated
vendored
|
|
@ -1,26 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func withContext(r *http.Request, ctx context.Context) *http.Request {
|
|
||||||
return r.WithContext(ctx)
|
|
||||||
}
|
|
||||||
121
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
121
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
|
|
@ -1,121 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cloud.google.com/go/iam"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IAM provides access to IAM access control for the bucket.
|
|
||||||
func (b *BucketHandle) IAM() *iam.Handle {
|
|
||||||
return iam.InternalNewHandleClient(&iamClient{
|
|
||||||
raw: b.c.raw,
|
|
||||||
userProject: b.userProject,
|
|
||||||
}, b.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// iamClient implements the iam.client interface.
type iamClient struct {
	// raw is the underlying Storage JSON API service.
	raw *raw.Service
	// userProject, if non-empty, is the project billed for each request
	// (Requester Pays buckets).
	userProject string
}
|
|
||||||
|
|
||||||
// Get fetches the IAM policy of the bucket named by resource, retrying
// transient errors, and converts it from the raw storage form to iampb form.
func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
	call := c.raw.Buckets.GetIamPolicy(resource)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	var rp *raw.Policy
	var err error
	// rp is captured from the last (successful) attempt of the retry loop.
	err = runWithRetry(ctx, func() error {
		rp, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return iamFromStoragePolicy(rp), nil
}
|
|
||||||
|
|
||||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
|
|
||||||
rp := iamToStoragePolicy(p)
|
|
||||||
call := c.raw.Buckets.SetIamPolicy(resource, rp)
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
if c.userProject != "" {
|
|
||||||
call.UserProject(c.userProject)
|
|
||||||
}
|
|
||||||
return runWithRetry(ctx, func() error {
|
|
||||||
_, err := call.Context(ctx).Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test asks the service which of perms the caller holds on the bucket named
// by resource, retrying transient errors, and returns the granted subset.
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	call := c.raw.Buckets.TestIamPermissions(resource, perms)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	var res *raw.TestIamPermissionsResponse
	var err error
	// res is captured from the last (successful) attempt of the retry loop.
	err = runWithRetry(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}
|
|
||||||
|
|
||||||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
|
|
||||||
return &raw.Policy{
|
|
||||||
Bindings: iamToStorageBindings(ip.Bindings),
|
|
||||||
Etag: string(ip.Etag),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
|
|
||||||
var rbs []*raw.PolicyBindings
|
|
||||||
for _, ib := range ibs {
|
|
||||||
rbs = append(rbs, &raw.PolicyBindings{
|
|
||||||
Role: ib.Role,
|
|
||||||
Members: ib.Members,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return rbs
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
|
|
||||||
return &iampb.Policy{
|
|
||||||
Bindings: iamFromStorageBindings(rp.Bindings),
|
|
||||||
Etag: []byte(rp.Etag),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
|
|
||||||
var ibs []*iampb.Binding
|
|
||||||
for _, rb := range rbs {
|
|
||||||
ibs = append(ibs, &iampb.Binding{
|
|
||||||
Role: rb.Role,
|
|
||||||
Members: rb.Members,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return ibs
|
|
||||||
}
|
|
||||||
36
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
36
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
|
|
@ -1,36 +0,0 @@
|
||||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"cloud.google.com/go/internal"
|
|
||||||
gax "github.com/googleapis/gax-go"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// Backoff timing is delegated to internal.Retry with a zero-value gax.Backoff
// (that package's defaults); shouldRetry decides which errors are transient.
func runWithRetry(ctx context.Context, call func() error) error {
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil // success: stop retrying
		}
		if shouldRetry(err) {
			return false, nil // transient: try again after backoff
		}
		return true, err // permanent: give up and surface the error
	})
}
|
|
||||||
40
vendor/cloud.google.com/go/storage/not_go110.go
generated
vendored
40
vendor/cloud.google.com/go/storage/not_go110.go
generated
vendored
|
|
@ -1,40 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build !go1.10
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
func shouldRetry(err error) bool {
|
|
||||||
switch e := err.(type) {
|
|
||||||
case *googleapi.Error:
|
|
||||||
// Retry on 429 and 5xx, according to
|
|
||||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
|
||||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
|
||||||
case *url.Error:
|
|
||||||
// Retry on REFUSED_STREAM.
|
|
||||||
// Unfortunately the error type is unexported, so we resort to string
|
|
||||||
// matching.
|
|
||||||
return strings.Contains(e.Error(), "REFUSED_STREAM")
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
26
vendor/cloud.google.com/go/storage/not_go17.go
generated
vendored
26
vendor/cloud.google.com/go/storage/not_go17.go
generated
vendored
|
|
@ -1,26 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build !go1.7
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func withContext(r *http.Request, _ interface{}) *http.Request {
|
|
||||||
// In Go 1.6 and below, ignore the context.
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
179
vendor/cloud.google.com/go/storage/notifications.go
generated
vendored
179
vendor/cloud.google.com/go/storage/notifications.go
generated
vendored
|
|
@ -1,179 +0,0 @@
|
||||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
	// The ID of the notification.
	ID string

	// The ID of the topic to which this subscription publishes.
	TopicID string

	// The ID of the project to which the topic belongs.
	TopicProjectID string

	// Only send notifications about listed event types. If empty, send notifications
	// for all event types.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
	EventTypes []string

	// If present, only apply this notification configuration to object names that
	// begin with this prefix.
	ObjectNamePrefix string

	// An optional list of additional attributes to attach to each Cloud PubSub
	// message published for this notification subscription.
	CustomAttributes map[string]string

	// The contents of the message payload.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
	PayloadFormat string
}
|
|
||||||
|
|
||||||
// Values for Notification.PayloadFormat.
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
const (
	// Send no payload with notification messages.
	NoPayload = "NONE"

	// Send object metadata as JSON with notification messages.
	JSONPayload = "JSON_API_V1"
)
|
|
||||||
|
|
||||||
// Values for Notification.EventTypes.
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
const (
	// Event that occurs when an object is successfully created.
	ObjectFinalizeEvent = "OBJECT_FINALIZE"

	// Event that occurs when the metadata of an existing object changes.
	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"

	// Event that occurs when an object is permanently deleted.
	ObjectDeleteEvent = "OBJECT_DELETE"

	// Event that occurs when the live version of an object becomes an
	// archived version.
	ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
|
|
||||||
|
|
||||||
func toNotification(rn *raw.Notification) *Notification {
|
|
||||||
n := &Notification{
|
|
||||||
ID: rn.Id,
|
|
||||||
EventTypes: rn.EventTypes,
|
|
||||||
ObjectNamePrefix: rn.ObjectNamePrefix,
|
|
||||||
CustomAttributes: rn.CustomAttributes,
|
|
||||||
PayloadFormat: rn.PayloadFormat,
|
|
||||||
}
|
|
||||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// topicRE matches the full PubSub topic resource name returned by the
// service, capturing the project ID and topic ID.
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")

// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
	m := topicRE.FindStringSubmatch(nt)
	if m == nil {
		return "?", "?"
	}
	projectID, topicID = m[1], m[2]
	return projectID, topicID
}
|
|
||||||
|
|
||||||
func toRawNotification(n *Notification) *raw.Notification {
|
|
||||||
return &raw.Notification{
|
|
||||||
Id: n.ID,
|
|
||||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
|
||||||
n.TopicProjectID, n.TopicID),
|
|
||||||
EventTypes: n.EventTypes,
|
|
||||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
|
||||||
CustomAttributes: n.CustomAttributes,
|
|
||||||
PayloadFormat: string(n.PayloadFormat),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
|
||||||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
|
||||||
// returned Notification's ID can be used to refer to it.
|
|
||||||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*Notification, error) {
|
|
||||||
if n.ID != "" {
|
|
||||||
return nil, errors.New("storage: AddNotification: ID must not be set")
|
|
||||||
}
|
|
||||||
if n.TopicProjectID == "" {
|
|
||||||
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
|
|
||||||
}
|
|
||||||
if n.TopicID == "" {
|
|
||||||
return nil, errors.New("storage: AddNotification: missing TopicID")
|
|
||||||
}
|
|
||||||
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
if b.userProject != "" {
|
|
||||||
call.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
rn, err := call.Context(ctx).Do()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return toNotification(rn), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (map[string]*Notification, error) {
	call := b.c.raw.Notifications.List(b.name)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	var res *raw.Notifications
	var err error
	// res is captured from the last (successful) attempt of the retry loop.
	err = runWithRetry(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return notificationsToMap(res.Items), nil
}
|
|
||||||
|
|
||||||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
|
||||||
m := map[string]*Notification{}
|
|
||||||
for _, rn := range rns {
|
|
||||||
m[rn.Id] = toNotification(rn)
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteNotification deletes the notification with the given ID.
|
|
||||||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) error {
|
|
||||||
call := b.c.raw.Notifications.Delete(b.name, id)
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
if b.userProject != "" {
|
|
||||||
call.UserProject(b.userProject)
|
|
||||||
}
|
|
||||||
return call.Context(ctx).Do()
|
|
||||||
}
|
|
||||||
94
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
94
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
|
|
@ -1,94 +0,0 @@
|
||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"hash/crc32"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// crc32cTable is the CRC32-Castagnoli polynomial table used by Reader.Read
// to verify downloaded object content against the server-sent checksum.
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
|
||||||
|
|
||||||
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
	body            io.ReadCloser // object content stream
	remain, size    int64         // remain is -1 when the remaining length is unknown
	contentType     string
	contentEncoding string
	cacheControl    string
	checkCRC        bool   // should we check the CRC?
	wantCRC         uint32 // the CRC32c value the server sent in the header
	gotCRC          uint32 // running crc
	checkedCRC      bool   // did we check the CRC? (For tests.)
}
|
|
||||||
|
|
||||||
// Close closes the Reader. It must be called when done reading.
// Closing releases the underlying body stream.
func (r *Reader) Close() error {
	return r.body.Close()
}
|
|
||||||
|
|
||||||
func (r *Reader) Read(p []byte) (int, error) {
|
|
||||||
n, err := r.body.Read(p)
|
|
||||||
if r.remain != -1 {
|
|
||||||
r.remain -= int64(n)
|
|
||||||
}
|
|
||||||
if r.checkCRC {
|
|
||||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
|
|
||||||
// Check CRC here. It would be natural to check it in Close, but
|
|
||||||
// everybody defers Close on the assumption that it doesn't return
|
|
||||||
// anything worth looking at.
|
|
||||||
if r.remain == 0 { // Only check if we have Content-Length.
|
|
||||||
r.checkedCRC = true
|
|
||||||
if r.gotCRC != r.wantCRC {
|
|
||||||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
|
|
||||||
r.gotCRC, r.wantCRC)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
func (r *Reader) Size() int64 {
	return r.size
}
|
|
||||||
|
|
||||||
// Remain returns the number of bytes left to read, or -1 if unknown
// (the -1 sentinel is used when no Content-Length was available).
func (r *Reader) Remain() int64 {
	return r.remain
}
|
|
||||||
|
|
||||||
// ContentType returns the content type of the object.
// The value is fixed for the lifetime of the Reader.
func (r *Reader) ContentType() string {
	return r.contentType
}
|
|
||||||
|
|
||||||
// ContentEncoding returns the content encoding of the object.
// The value is fixed for the lifetime of the Reader.
func (r *Reader) ContentEncoding() string {
	return r.contentEncoding
}
|
|
||||||
|
|
||||||
// CacheControl returns the cache control of the object.
// The value is fixed for the lifetime of the Reader.
func (r *Reader) CacheControl() string {
	return r.cacheControl
}
|
|
||||||
1201
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
1201
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
File diff suppressed because it is too large
Load diff
215
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
215
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
|
|
@ -1,215 +0,0 @@
|
||||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Writer writes a Cloud Storage object.
type Writer struct {
	// ObjectAttrs are optional attributes to set on the object. Any attributes
	// must be initialized before the first Write call. Nil or zero-valued
	// attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Writer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data written does not match the checksum,
	// the write will be rejected.
	SendCRC32C bool

	// ChunkSize controls the maximum number of bytes of the object that the
	// Writer will attempt to send to the server in a single request. Objects
	// smaller than the size will be sent in a single request, while larger
	// objects will be split over multiple requests. The size will be rounded up
	// to the nearest multiple of 256K. If zero, chunking will be disabled and
	// the object will be uploaded in a single request.
	//
	// ChunkSize will default to a reasonable value. Any custom configuration
	// must be done before the first Write call.
	ChunkSize int

	// ProgressFunc can be used to monitor the progress of a large write
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)

	ctx context.Context
	o   *ObjectHandle

	opened bool           // set once open has been called
	pw     *io.PipeWriter // Write feeds this pipe; the upload goroutine drains it

	donec chan struct{} // closed after err and obj are set.
	obj   *ObjectAttrs  // attributes of the written object; set by the upload goroutine

	mu  sync.Mutex
	err error // guarded by mu; first error recorded by the upload goroutine
}
|
|
||||||
|
|
||||||
// open begins the upload: it validates the object attributes, creates the
// pipe that Write feeds, and starts a goroutine that streams the pipe's
// contents to the service. The goroutine records its outcome in w.err and
// w.obj (under w.mu) and closes w.donec when it finishes.
func (w *Writer) open() error {
	attrs := w.ObjectAttrs
	// Check the developer didn't change the object Name (this is unfortunate, but
	// we don't want to store an object under the wrong name).
	if attrs.Name != w.o.object {
		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
	}
	if !utf8.ValidString(attrs.Name) {
		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
	}
	pr, pw := io.Pipe()
	w.pw = pw
	w.opened = true

	if w.ChunkSize < 0 {
		return errors.New("storage: Writer.ChunkSize must be non-negative")
	}
	mediaOpts := []googleapi.MediaOption{
		googleapi.ChunkSize(w.ChunkSize),
	}
	if c := attrs.ContentType; c != "" {
		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
	}

	go func() {
		defer close(w.donec)

		rawObj := attrs.toRawObject(w.o.bucket)
		if w.SendCRC32C {
			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
		}
		if w.MD5 != nil {
			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
		}
		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
			Media(pr, mediaOpts...).
			Projection("full").
			Context(w.ctx)
		if w.ProgressFunc != nil {
			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
		}
		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
			// Record the failure and unblock any pending Write calls.
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		var resp *raw.Object
		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
		if err == nil {
			if w.o.userProject != "" {
				call.UserProject(w.o.userProject)
			}
			setClientHeader(call.Header())
			// If the chunk size is zero, then no chunking is done on the Reader,
			// which means we cannot retry: the first call will read the data, and if
			// it fails, there is no way to re-read.
			if w.ChunkSize == 0 {
				resp, err = call.Do()
			} else {
				// We will only retry here if the initial POST, which obtains a URI for
				// the resumable upload, fails with a retryable error. The upload itself
				// has its own retry logic.
				err = runWithRetry(w.ctx, func() error {
					var err2 error
					resp, err2 = call.Do()
					return err2
				})
			}
		}
		if err != nil {
			// Record the failure and unblock any pending Write calls.
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		w.obj = newObject(resp)
	}()
	return nil
}
|
|
||||||
|
|
||||||
// Write appends to w. It implements the io.Writer interface.
|
|
||||||
//
|
|
||||||
// Since writes happen asynchronously, Write may return a nil
|
|
||||||
// error even though the write failed (or will fail). Always
|
|
||||||
// use the error returned from Writer.Close to determine if
|
|
||||||
// the upload was successful.
|
|
||||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
|
||||||
w.mu.Lock()
|
|
||||||
werr := w.err
|
|
||||||
w.mu.Unlock()
|
|
||||||
if werr != nil {
|
|
||||||
return 0, werr
|
|
||||||
}
|
|
||||||
if !w.opened {
|
|
||||||
if err := w.open(); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return w.pw.Write(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close completes the write operation and flushes any buffered data.
|
|
||||||
// If Close doesn't return an error, metadata about the written object
|
|
||||||
// can be retrieved by calling Attrs.
|
|
||||||
func (w *Writer) Close() error {
|
|
||||||
if !w.opened {
|
|
||||||
if err := w.open(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := w.pw.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
<-w.donec
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
return w.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseWithError aborts the write operation with the provided error.
|
|
||||||
// CloseWithError always returns nil.
|
|
||||||
//
|
|
||||||
// Deprecated: cancel the context passed to NewWriter instead.
|
|
||||||
func (w *Writer) CloseWithError(err error) error {
|
|
||||||
if !w.opened {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return w.pw.CloseWithError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
// (The value is populated by the upload goroutine started in open.)
func (w *Writer) Attrs() *ObjectAttrs {
	return w.obj
}
|
|
||||||
202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE
generated
vendored
202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright 2016 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE
generated
vendored
5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE
generated
vendored
|
|
@ -1,5 +0,0 @@
|
||||||
Microsoft Azure-SDK-for-Go
|
|
||||||
Copyright 2014-2017 Microsoft
|
|
||||||
|
|
||||||
This product includes software developed at
|
|
||||||
the Microsoft Corporation (https://www.microsoft.com).
|
|
||||||
73
vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
generated
vendored
73
vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
generated
vendored
|
|
@ -1,73 +0,0 @@
|
||||||
# Azure Storage SDK for Go
|
|
||||||
|
|
||||||
The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform REST operations against the [Azure Storage Service](https://docs.microsoft.com/en-us/azure/storage/). To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](https://github.com/Azure/azure-sdk-for-go/tree/master/management/storageservice) package.
|
|
||||||
|
|
||||||
This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
|
|
||||||
|
|
||||||
# Getting Started
|
|
||||||
|
|
||||||
1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for-go/storage`
|
|
||||||
1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account).
|
|
||||||
- Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library.
|
|
||||||
- This option is production ready, but can also be used for development.
|
|
||||||
1. (Optional, Windows only) Download and start the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
|
|
||||||
1. Check out our existing [samples](https://github.com/Azure-Samples?q=Storage&language=go).
|
|
||||||
|
|
||||||
# Contributing
|
|
||||||
|
|
||||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
|
||||||
|
|
||||||
When contributing, please conform to the following practices:
|
|
||||||
- Run [gofmt](https://golang.org/cmd/gofmt/) to use standard go formatting.
|
|
||||||
- Run [golint](https://github.com/golang/lint) to conform to standard naming conventions.
|
|
||||||
- Run [go vet](https://golang.org/cmd/vet/) to catch common Go mistakes.
|
|
||||||
- Use [GoASTScanner/gas](https://github.com/GoASTScanner/gas) to ensure there are no common security violations in your contribution.
|
|
||||||
- Run [go test](https://golang.org/cmd/go/#hdr-Test_packages) to catch possible bugs in the code: `go test ./storage/...`.
|
|
||||||
- This project uses HTTP recordings for testing.
|
|
||||||
- The recorder should be attached to the client before calling the functions to test and later stopped.
|
|
||||||
- If you updated an existing test, its recording might need to be updated. Run `go test ./storage/... -ow -check.f TestName` to rerecord the test.
|
|
||||||
- Important note: all HTTP requests in the recording must be unique: different bodies, headers (`User-Agent`, `Authorization` and `Date` or `x-ms-date` headers are ignored), URLs and methods. As opposed to the example above, the following test is not suitable for recording:
|
|
||||||
|
|
||||||
``` go
|
|
||||||
func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
|
|
||||||
cli := getQueueClient(c)
|
|
||||||
rec := cli.client.appendRecorder(c)
|
|
||||||
defer rec.Stop()
|
|
||||||
|
|
||||||
queue := cli.GetQueueReference(queueName(c))
|
|
||||||
ok, err := queue.Exists()
|
|
||||||
c.Assert(err, chk.IsNil)
|
|
||||||
c.Assert(ok, chk.Equals, false)
|
|
||||||
|
|
||||||
c.Assert(queue.Create(nil), chk.IsNil)
|
|
||||||
defer queue.Delete(nil)
|
|
||||||
|
|
||||||
ok, err = queue.Exists() // This is the very same request as the one 5 lines above
|
|
||||||
// The test replayer gets confused and the test fails in the last line
|
|
||||||
c.Assert(err, chk.IsNil)
|
|
||||||
c.Assert(ok, chk.Equals, true)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- On the other side, this test does not repeat requests: the URLs are different.
|
|
||||||
|
|
||||||
``` go
|
|
||||||
func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
|
|
||||||
cli := getQueueClient(c)
|
|
||||||
rec := cli.client.appendRecorder(c)
|
|
||||||
defer rec.Stop()
|
|
||||||
|
|
||||||
queue1 := cli.GetQueueReference(queueName(c, "nonexistent"))
|
|
||||||
ok, err := queue1.Exists()
|
|
||||||
c.Assert(err, chk.IsNil)
|
|
||||||
c.Assert(ok, chk.Equals, false)
|
|
||||||
|
|
||||||
queue2 := cli.GetQueueReference(queueName(c, "existing"))
|
|
||||||
c.Assert(queue2.Create(nil), chk.IsNil)
|
|
||||||
defer queue2.Delete(nil)
|
|
||||||
|
|
||||||
ok, err = queue2.Exists()
|
|
||||||
c.Assert(err, chk.IsNil)
|
|
||||||
c.Assert(ok, chk.Equals, true)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
91
vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
generated
vendored
91
vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
generated
vendored
|
|
@ -1,91 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PutAppendBlob initializes an empty append blob with specified name. An
|
|
||||||
// append blob must be created using this method before appending blocks.
|
|
||||||
//
|
|
||||||
// See CreateBlockBlobFromReader for more info on creating blobs.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
|
||||||
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
|
||||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return b.respondCreation(resp, BlobTypeAppend)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendBlockOptions includes the options for an append block operation
|
|
||||||
type AppendBlockOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
MaxSize *uint `header:"x-ms-blob-condition-maxsize"`
|
|
||||||
AppendPosition *uint `header:"x-ms-blob-condition-appendpos"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
ContentMD5 bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendBlock appends a block to an append blob.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
|
|
||||||
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
|
|
||||||
params := url.Values{"comp": {"appendblock"}}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
if options.ContentMD5 {
|
|
||||||
md5sum := md5.Sum(chunk)
|
|
||||||
headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return b.respondCreation(resp, BlobTypeAppend)
|
|
||||||
}
|
|
||||||
246
vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
generated
vendored
246
vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
generated
vendored
|
|
@ -1,246 +0,0 @@
|
||||||
// Package storage provides clients for Microsoft Azure Storage Services.
|
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
|
|
||||||
|
|
||||||
type authentication string
|
|
||||||
|
|
||||||
const (
|
|
||||||
sharedKey authentication = "sharedKey"
|
|
||||||
sharedKeyForTable authentication = "sharedKeyTable"
|
|
||||||
sharedKeyLite authentication = "sharedKeyLite"
|
|
||||||
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
|
||||||
|
|
||||||
// headers
|
|
||||||
headerAcceptCharset = "Accept-Charset"
|
|
||||||
headerAuthorization = "Authorization"
|
|
||||||
headerContentLength = "Content-Length"
|
|
||||||
headerDate = "Date"
|
|
||||||
headerXmsDate = "x-ms-date"
|
|
||||||
headerXmsVersion = "x-ms-version"
|
|
||||||
headerContentEncoding = "Content-Encoding"
|
|
||||||
headerContentLanguage = "Content-Language"
|
|
||||||
headerContentType = "Content-Type"
|
|
||||||
headerContentMD5 = "Content-MD5"
|
|
||||||
headerIfModifiedSince = "If-Modified-Since"
|
|
||||||
headerIfMatch = "If-Match"
|
|
||||||
headerIfNoneMatch = "If-None-Match"
|
|
||||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
|
||||||
headerRange = "Range"
|
|
||||||
headerDataServiceVersion = "DataServiceVersion"
|
|
||||||
headerMaxDataServiceVersion = "MaxDataServiceVersion"
|
|
||||||
headerContentTransferEncoding = "Content-Transfer-Encoding"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
|
||||||
if !c.sasClient {
|
|
||||||
authHeader, err := c.getSharedKey(verb, url, headers, auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
headers[headerAuthorization] = authHeader
|
|
||||||
}
|
|
||||||
return headers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
|
|
||||||
canRes, err := c.buildCanonicalizedResource(url, auth, false)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return c.createAuthorizationHeader(canString, auth), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
|
|
||||||
errMsg := "buildCanonicalizedResource error: %s"
|
|
||||||
u, err := url.Parse(uri)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
cr := bytes.NewBufferString("")
|
|
||||||
if c.accountName != StorageEmulatorAccountName || !sas {
|
|
||||||
cr.WriteString("/")
|
|
||||||
cr.WriteString(c.getCanonicalizedAccountName())
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
// Any portion of the CanonicalizedResource string that is derived from
|
|
||||||
// the resource's URI should be encoded exactly as it is in the URI.
|
|
||||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
|
||||||
cr.WriteString(u.EscapedPath())
|
|
||||||
}
|
|
||||||
|
|
||||||
params, err := url.ParseQuery(u.RawQuery)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
|
|
||||||
if auth == sharedKey {
|
|
||||||
if len(params) > 0 {
|
|
||||||
cr.WriteString("\n")
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
for key := range params {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
completeParams := []string{}
|
|
||||||
for _, key := range keys {
|
|
||||||
if len(params[key]) > 1 {
|
|
||||||
sort.Strings(params[key])
|
|
||||||
}
|
|
||||||
|
|
||||||
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
|
|
||||||
}
|
|
||||||
cr.WriteString(strings.Join(completeParams, "\n"))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// search for "comp" parameter, if exists then add it to canonicalizedresource
|
|
||||||
if v, ok := params["comp"]; ok {
|
|
||||||
cr.WriteString("?comp=" + v[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(cr.Bytes()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) getCanonicalizedAccountName() string {
|
|
||||||
// since we may be trying to access a secondary storage account, we need to
|
|
||||||
// remove the -secondary part of the storage name
|
|
||||||
return strings.TrimSuffix(c.accountName, "-secondary")
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
|
|
||||||
contentLength := headers[headerContentLength]
|
|
||||||
if contentLength == "0" {
|
|
||||||
contentLength = ""
|
|
||||||
}
|
|
||||||
date := headers[headerDate]
|
|
||||||
if v, ok := headers[headerXmsDate]; ok {
|
|
||||||
if auth == sharedKey || auth == sharedKeyLite {
|
|
||||||
date = ""
|
|
||||||
} else {
|
|
||||||
date = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var canString string
|
|
||||||
switch auth {
|
|
||||||
case sharedKey:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers[headerContentEncoding],
|
|
||||||
headers[headerContentLanguage],
|
|
||||||
contentLength,
|
|
||||||
headers[headerContentMD5],
|
|
||||||
headers[headerContentType],
|
|
||||||
date,
|
|
||||||
headers[headerIfModifiedSince],
|
|
||||||
headers[headerIfMatch],
|
|
||||||
headers[headerIfNoneMatch],
|
|
||||||
headers[headerIfUnmodifiedSince],
|
|
||||||
headers[headerRange],
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case sharedKeyForTable:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers[headerContentMD5],
|
|
||||||
headers[headerContentType],
|
|
||||||
date,
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case sharedKeyLite:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
verb,
|
|
||||||
headers[headerContentMD5],
|
|
||||||
headers[headerContentType],
|
|
||||||
date,
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
case sharedKeyLiteForTable:
|
|
||||||
canString = strings.Join([]string{
|
|
||||||
date,
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("%s authentication is not supported yet", auth)
|
|
||||||
}
|
|
||||||
return canString, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedHeader(headers map[string]string) string {
|
|
||||||
cm := make(map[string]string)
|
|
||||||
|
|
||||||
for k, v := range headers {
|
|
||||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
|
||||||
if strings.HasPrefix(headerName, "x-ms-") {
|
|
||||||
cm[headerName] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cm) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
for key := range cm {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
ch := bytes.NewBufferString("")
|
|
||||||
|
|
||||||
for _, key := range keys {
|
|
||||||
ch.WriteString(key)
|
|
||||||
ch.WriteRune(':')
|
|
||||||
ch.WriteString(cm[key])
|
|
||||||
ch.WriteRune('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSuffix(string(ch.Bytes()), "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
|
|
||||||
signature := c.computeHmac256(canonicalizedString)
|
|
||||||
var key string
|
|
||||||
switch auth {
|
|
||||||
case sharedKey, sharedKeyForTable:
|
|
||||||
key = "SharedKey"
|
|
||||||
case sharedKeyLite, sharedKeyLiteForTable:
|
|
||||||
key = "SharedKeyLite"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
|
|
||||||
}
|
|
||||||
632
vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
generated
vendored
632
vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
generated
vendored
|
|
@ -1,632 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Blob is an entry in BlobListResponse.
|
|
||||||
type Blob struct {
|
|
||||||
Container *Container
|
|
||||||
Name string `xml:"Name"`
|
|
||||||
Snapshot time.Time `xml:"Snapshot"`
|
|
||||||
Properties BlobProperties `xml:"Properties"`
|
|
||||||
Metadata BlobMetadata `xml:"Metadata"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutBlobOptions includes the options any put blob operation
|
|
||||||
// (page, block, append)
|
|
||||||
type PutBlobOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
Origin string `header:"Origin"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlobMetadata is a set of custom name/value pairs.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
|
|
||||||
type BlobMetadata map[string]string
|
|
||||||
|
|
||||||
type blobMetadataEntries struct {
|
|
||||||
Entries []blobMetadataEntry `xml:",any"`
|
|
||||||
}
|
|
||||||
type blobMetadataEntry struct {
|
|
||||||
XMLName xml.Name
|
|
||||||
Value string `xml:",chardata"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalXML converts the xml:Metadata into Metadata map
|
|
||||||
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
|
||||||
var entries blobMetadataEntries
|
|
||||||
if err := d.DecodeElement(&entries, &start); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, entry := range entries.Entries {
|
|
||||||
if *bm == nil {
|
|
||||||
*bm = make(BlobMetadata)
|
|
||||||
}
|
|
||||||
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalXML implements the xml.Marshaler interface. It encodes
|
|
||||||
// metadata name/value pairs as they would appear in an Azure
|
|
||||||
// ListBlobs response.
|
|
||||||
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
|
|
||||||
entries := make([]blobMetadataEntry, 0, len(bm))
|
|
||||||
for k, v := range bm {
|
|
||||||
entries = append(entries, blobMetadataEntry{
|
|
||||||
XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
|
|
||||||
Value: v,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return enc.EncodeElement(blobMetadataEntries{
|
|
||||||
Entries: entries,
|
|
||||||
}, start)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlobProperties contains various properties of a blob
|
|
||||||
// returned in various endpoints like ListBlobs or GetBlobProperties.
|
|
||||||
type BlobProperties struct {
|
|
||||||
LastModified TimeRFC1123 `xml:"Last-Modified"`
|
|
||||||
Etag string `xml:"Etag"`
|
|
||||||
ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
|
|
||||||
ContentLength int64 `xml:"Content-Length"`
|
|
||||||
ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"`
|
|
||||||
ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
|
|
||||||
CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
|
|
||||||
ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"`
|
|
||||||
ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
|
|
||||||
BlobType BlobType `xml:"BlobType"`
|
|
||||||
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
|
|
||||||
CopyID string `xml:"CopyId"`
|
|
||||||
CopyStatus string `xml:"CopyStatus"`
|
|
||||||
CopySource string `xml:"CopySource"`
|
|
||||||
CopyProgress string `xml:"CopyProgress"`
|
|
||||||
CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"`
|
|
||||||
CopyStatusDescription string `xml:"CopyStatusDescription"`
|
|
||||||
LeaseStatus string `xml:"LeaseStatus"`
|
|
||||||
LeaseState string `xml:"LeaseState"`
|
|
||||||
LeaseDuration string `xml:"LeaseDuration"`
|
|
||||||
ServerEncrypted bool `xml:"ServerEncrypted"`
|
|
||||||
IncrementalCopy bool `xml:"IncrementalCopy"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlobType defines the type of the Azure Blob.
|
|
||||||
type BlobType string
|
|
||||||
|
|
||||||
// Types of page blobs
|
|
||||||
const (
|
|
||||||
BlobTypeBlock BlobType = "BlockBlob"
|
|
||||||
BlobTypePage BlobType = "PageBlob"
|
|
||||||
BlobTypeAppend BlobType = "AppendBlob"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *Blob) buildPath() string {
|
|
||||||
return b.Container.buildPath() + "/" + b.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exists returns true if a blob with given name exists on the specified
|
|
||||||
// container of the storage account.
|
|
||||||
func (b *Blob) Exists() (bool, error) {
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
|
||||||
return resp.statusCode == http.StatusOK, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetURL gets the canonical URL to the blob with the specified name in the
|
|
||||||
// specified container.
|
|
||||||
// This method does not create a publicly accessible URL if the blob or container
|
|
||||||
// is private and this method does not check if the blob exists.
|
|
||||||
func (b *Blob) GetURL() string {
|
|
||||||
container := b.Container.Name
|
|
||||||
if container == "" {
|
|
||||||
container = "$root"
|
|
||||||
}
|
|
||||||
return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlobRangeOptions includes the options for a get blob range operation
|
|
||||||
type GetBlobRangeOptions struct {
|
|
||||||
Range *BlobRange
|
|
||||||
GetRangeContentMD5 bool
|
|
||||||
*GetBlobOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlobOptions includes the options for a get blob operation
|
|
||||||
type GetBlobOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
Snapshot *time.Time
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
Origin string `header:"Origin"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlobRange represents the bytes range to be get
|
|
||||||
type BlobRange struct {
|
|
||||||
Start uint64
|
|
||||||
End uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (br BlobRange) String() string {
|
|
||||||
if br.End == 0 {
|
|
||||||
return fmt.Sprintf("bytes=%d-", br.Start)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a stream to read the blob. Caller must call both Read and Close()
|
|
||||||
// to correctly close the underlying connection.
|
|
||||||
//
|
|
||||||
// See the GetRange method for use with a Range header.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
|
|
||||||
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
|
|
||||||
rangeOptions := GetBlobRangeOptions{
|
|
||||||
GetBlobOptions: options,
|
|
||||||
}
|
|
||||||
resp, err := b.getRange(&rangeOptions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := b.writeProperties(resp.headers, true); err != nil {
|
|
||||||
return resp.body, err
|
|
||||||
}
|
|
||||||
return resp.body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRange reads the specified range of a blob to a stream. The bytesRange
|
|
||||||
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
|
|
||||||
// Caller must call both Read and Close()// to correctly close the underlying
|
|
||||||
// connection.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
|
|
||||||
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
|
|
||||||
resp, err := b.getRange(options)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Content-Length header should not be updated, as the service returns the range length
|
|
||||||
// (which is not alwys the full blob length)
|
|
||||||
if err := b.writeProperties(resp.headers, false); err != nil {
|
|
||||||
return resp.body, err
|
|
||||||
}
|
|
||||||
return resp.body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
if options.Range != nil {
|
|
||||||
headers["Range"] = options.Range.String()
|
|
||||||
if options.GetRangeContentMD5 {
|
|
||||||
headers["x-ms-range-get-content-md5"] = "true"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if options.GetBlobOptions != nil {
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
params = addSnapshot(params, options.Snapshot)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SnapshotOptions includes the options for a snapshot blob operation
|
|
||||||
type SnapshotOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateSnapshot creates a snapshot for a blob
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
|
|
||||||
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
|
|
||||||
params := url.Values{"comp": {"snapshot"}}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil || resp == nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
|
|
||||||
if snapshotResponse != "" {
|
|
||||||
snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &snapshotTimestamp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, errors.New("Snapshot not created")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlobPropertiesOptions includes the options for a get blob properties operation
|
|
||||||
type GetBlobPropertiesOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
Snapshot *time.Time
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetProperties provides various information about the specified blob.
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
|
|
||||||
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
params = addSnapshot(params, options.Snapshot)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return b.writeProperties(resp.headers, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
contentLength := b.Properties.ContentLength
|
|
||||||
if includeContentLen {
|
|
||||||
contentLengthStr := h.Get("Content-Length")
|
|
||||||
if contentLengthStr != "" {
|
|
||||||
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var sequenceNum int64
|
|
||||||
sequenceNumStr := h.Get("x-ms-blob-sequence-number")
|
|
||||||
if sequenceNumStr != "" {
|
|
||||||
sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lastModified, err := getTimeFromHeaders(h, "Last-Modified")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
b.Properties = BlobProperties{
|
|
||||||
LastModified: TimeRFC1123(*lastModified),
|
|
||||||
Etag: h.Get("Etag"),
|
|
||||||
ContentMD5: h.Get("Content-MD5"),
|
|
||||||
ContentLength: contentLength,
|
|
||||||
ContentEncoding: h.Get("Content-Encoding"),
|
|
||||||
ContentType: h.Get("Content-Type"),
|
|
||||||
ContentDisposition: h.Get("Content-Disposition"),
|
|
||||||
CacheControl: h.Get("Cache-Control"),
|
|
||||||
ContentLanguage: h.Get("Content-Language"),
|
|
||||||
SequenceNumber: sequenceNum,
|
|
||||||
CopyCompletionTime: TimeRFC1123(*copyCompletionTime),
|
|
||||||
CopyStatusDescription: h.Get("x-ms-copy-status-description"),
|
|
||||||
CopyID: h.Get("x-ms-copy-id"),
|
|
||||||
CopyProgress: h.Get("x-ms-copy-progress"),
|
|
||||||
CopySource: h.Get("x-ms-copy-source"),
|
|
||||||
CopyStatus: h.Get("x-ms-copy-status"),
|
|
||||||
BlobType: BlobType(h.Get("x-ms-blob-type")),
|
|
||||||
LeaseStatus: h.Get("x-ms-lease-status"),
|
|
||||||
LeaseState: h.Get("x-ms-lease-state"),
|
|
||||||
}
|
|
||||||
b.writeMetadata(h)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBlobPropertiesOptions contains various properties of a blob and is an entry
|
|
||||||
// in SetProperties
|
|
||||||
type SetBlobPropertiesOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
Origin string `header:"Origin"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
SequenceNumberAction *SequenceNumberAction
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SequenceNumberAction defines how the blob's sequence number should be modified
|
|
||||||
type SequenceNumberAction string
|
|
||||||
|
|
||||||
// Options for sequence number action
|
|
||||||
const (
|
|
||||||
SequenceNumberActionMax SequenceNumberAction = "max"
|
|
||||||
SequenceNumberActionUpdate SequenceNumberAction = "update"
|
|
||||||
SequenceNumberActionIncrement SequenceNumberAction = "increment"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SetProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
	params := url.Values{"comp": {"properties"}}
	// Headers derived from b.Properties first; option headers merged on top.
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	if b.Properties.BlobType == BlobTypePage {
		// Page blobs additionally carry their content length and, optionally,
		// a sequence-number action.
		headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
		if options != nil && options.SequenceNumberAction != nil {
			headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
			// "increment" must not send an explicit sequence number; the
			// service computes it.
			if *options.SequenceNumberAction != SequenceNumberActionIncrement {
				headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
			}
		}
	}

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	// Drain the body so the connection can be reused; success is 200 OK.
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
|
|
||||||
|
|
||||||
// SetBlobMetadataOptions includes the options for a set blob metadata operation.
// Fields with a `header` tag are copied into the request headers via
// headersFromStruct; Timeout becomes the `timeout` query parameter.
type SetBlobMetadataOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// SetMetadata replaces the metadata for the specified blob.
//
// The metadata sent is taken from b.Metadata; any metadata not present
// there is removed on the service side (this is a replace, not a merge).
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	// b.Metadata is flattened into x-ms-meta-* request headers.
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	// Drain the body so the connection can be reused; success is 200 OK.
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
|
|
||||||
|
|
||||||
// GetBlobMetadataOptions includes the options for a get blob metadata operation.
// Snapshot selects a specific blob snapshot (added as the `snapshot` query
// parameter); tagged fields become request headers.
type GetBlobMetadataOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// GetMetadata returns all user-defined metadata for the specified blob,
// storing the result in b.Metadata (existing contents are replaced).
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return err
	}

	// Metadata arrives as x-ms-meta-* response headers.
	b.writeMetadata(resp.headers)
	return nil
}
|
|
||||||
|
|
||||||
func (b *Blob) writeMetadata(h http.Header) {
|
|
||||||
b.Metadata = BlobMetadata(writeMetadata(h))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteBlobOptions includes the options for a delete blob operation.
// DeleteSnapshots controls the x-ms-delete-snapshots header: true deletes the
// blob together with its snapshots ("include"), false deletes only the
// snapshots ("only"), nil omits the header.
type DeleteBlobOptions struct {
	Timeout           uint
	Snapshot          *time.Time // delete a specific snapshot instead of the base blob
	LeaseID           string     `header:"x-ms-lease-id"`
	DeleteSnapshots   *bool
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// Delete deletes the given blob from the specified container.
// If the blob does not exist at the time of the Delete Blob operation, it
// returns an error. Use DeleteIfExists when absence should not be an error.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) Delete(options *DeleteBlobOptions) error {
	resp, err := b.delete(options)
	if err != nil {
		return err
	}
	// Drain the body so the connection can be reused; success is 202 Accepted.
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
|
|
||||||
|
|
||||||
// DeleteIfExists deletes the given blob from the specified container If the
|
|
||||||
// blob is deleted with this call, returns true. Otherwise returns false.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
|
|
||||||
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
|
|
||||||
resp, err := b.delete(options)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
|
||||||
return resp.statusCode == http.StatusAccepted, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// delete issues the DELETE request shared by Delete and DeleteIfExists and
// returns the raw response; callers are responsible for draining/closing the
// body and interpreting the status code.
func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
		if options.DeleteSnapshots != nil {
			// true => delete blob and snapshots; false => delete snapshots only.
			if *options.DeleteSnapshots {
				headers["x-ms-delete-snapshots"] = "include"
			} else {
				headers["x-ms-delete-snapshots"] = "only"
			}
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
}
|
|
||||||
|
|
||||||
// pathForResource constructs the request path for a container, or for a
// blob within it when name is non-empty.
func pathForResource(container, name string) string {
	if name == "" {
		return "/" + container
	}
	return "/" + container + "/" + name
}
|
|
||||||
|
|
||||||
func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error {
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
err := checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
b.Properties.BlobType = bt
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
170
vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
generated
vendored
170
vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
generated
vendored
|
|
@ -1,170 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OverrideHeaders defines overridable response headers in
// a request using a SAS URI. Non-empty fields are encoded as the
// rscc/rscd/rsce/rscl/rsct query parameters and included in the
// string-to-sign.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type OverrideHeaders struct {
	CacheControl       string
	ContentDisposition string
	ContentEncoding    string
	ContentLanguage    string
	ContentType        string
}
|
|
||||||
|
|
||||||
// BlobSASOptions are options to construct a blob SAS URI.
// It embeds the permission set, the overridable response headers, and the
// common SAS options (start/expiry, identifier, IP range, protocol).
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type BlobSASOptions struct {
	BlobServiceSASPermissions
	OverrideHeaders
	SASOptions
}
|
|
||||||
|
|
||||||
// BlobServiceSASPermissions includes the available permissions for
// a blob service SAS URI.
type BlobServiceSASPermissions struct {
	Read   bool
	Add    bool
	Create bool
	Write  bool
	Delete bool
}

// buildString renders the enabled permissions as the SAS "sp" parameter
// value, emitting the flag letters in the order the service requires:
// r, a, c, w, d.
func (p BlobServiceSASPermissions) buildString() string {
	flags := []struct {
		enabled bool
		letter  string
	}{
		{p.Read, "r"},
		{p.Add, "a"},
		{p.Create, "c"},
		{p.Write, "w"},
		{p.Delete, "d"},
	}
	permissions := ""
	for _, f := range flags {
		if f.enabled {
			permissions += f.letter
		}
	}
	return permissions
}
|
|
||||||
|
|
||||||
// GetSASURI creates a URL to the blob which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
	uri := b.GetURL()
	// "b" marks the signed resource as a blob (vs "c" for container).
	signedResource := "b"
	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
	if err != nil {
		return "", err
	}

	permissions := options.BlobServiceSASPermissions.buildString()
	return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
|
|
||||||
|
|
||||||
// blobAndFileSASURI builds a service SAS URI for a blob or file resource:
// it assembles the string-to-sign, computes the HMAC-SHA256 signature with
// the account key, and appends the SAS query parameters to uri.
func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
	// A zero Start time omits the "st" component from the signature.
	start := ""
	if options.Start != (time.Time{}) {
		start = options.Start.UTC().Format(time.RFC3339)
	}

	expiry := options.Expiry.UTC().Format(time.RFC3339)

	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	protocols := ""
	if options.UseHTTPS {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers)
	if err != nil {
		return "", err
	}

	sig := c.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {c.apiVersion},
		"se":  {expiry},
		"sr":  {signedResource},
		"sp":  {permissions},
		"sig": {sig},
	}

	// spr (protocol) and sip (IP range) are only understood by API
	// versions 2015-04-05 and later.
	if c.apiVersion >= "2015-04-05" {
		if protocols != "" {
			sasParams.Add("spr", protocols)
		}
		if options.IP != "" {
			sasParams.Add("sip", options.IP)
		}
	}

	// Add override response headers
	addQueryParameter(sasParams, "rscc", headers.CacheControl)
	addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
	addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
	addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
	addQueryParameter(sasParams, "rsct", headers.ContentType)

	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	// NOTE(review): any query already present on uri is replaced, not merged.
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
|
|
||||||
|
|
||||||
func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) {
|
|
||||||
rscc := headers.CacheControl
|
|
||||||
rscd := headers.ContentDisposition
|
|
||||||
rsce := headers.ContentEncoding
|
|
||||||
rscl := headers.ContentLanguage
|
|
||||||
rsct := headers.ContentType
|
|
||||||
|
|
||||||
if signedVersion >= "2015-02-21" {
|
|
||||||
canonicalizedResource = "/blob" + canonicalizedResource
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
|
|
||||||
if signedVersion >= "2015-04-05" {
|
|
||||||
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
|
||||||
if signedVersion >= "2013-08-15" {
|
|
||||||
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
|
|
||||||
}
|
|
||||||
182
vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
generated
vendored
182
vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
generated
vendored
|
|
@ -1,182 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service. Obtain one via Client.GetBlobService; the embedded client and
// authentication scheme are used for every request issued through it.
type BlobStorageClient struct {
	client Client
	auth   authentication
}
|
|
||||||
|
|
||||||
// GetServiceProperties gets the properties of your storage account's blob service.
// It delegates to the shared service-properties implementation on the client.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
	return b.client.getServiceProperties(blobServiceName, b.auth)
}
|
|
||||||
|
|
||||||
// SetServiceProperties sets the properties of your storage account's blob service.
// It delegates to the shared service-properties implementation on the client.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
	return b.client.setServiceProperties(props, blobServiceName, b.auth)
}
|
|
||||||
|
|
||||||
// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call. Zero values are omitted from the request; see
// getParameters for the query-string encoding.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
	Prefix     string // only containers whose names begin with this prefix
	Marker     string // continuation token from a previous ListContainers call
	Include    string // extra datasets to include (e.g. "metadata")
	MaxResults uint   // page size; 0 uses the service default
	Timeout    uint   // server-side timeout in seconds
}
|
|
||||||
|
|
||||||
// GetContainerReference returns a Container object for the specified container name.
|
|
||||||
func (b *BlobStorageClient) GetContainerReference(name string) *Container {
|
|
||||||
return &Container{
|
|
||||||
bsc: b,
|
|
||||||
Name: name,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContainerReferenceFromSASURI returns a Container object for the specified
// container SAS URI. The container name is taken from the first path segment;
// an error is returned if the URI has no path segments.
func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
	path := strings.Split(sasuri.Path, "/")
	if len(path) <= 1 {
		return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
	}
	// A SAS client authenticates with the signature embedded in the URI
	// rather than an account key.
	cli := newSASClient().GetBlobService()
	return &Container{
		bsc:    &cli,
		Name:   path[1],
		sasuri: sasuri,
	}, nil
}
|
|
||||||
|
|
||||||
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	uri := b.client.getEndpoint(blobServiceName, "", q)
	headers := b.client.getStandardHeaders()

	// Local alias types mirror Container/ContainerListResponse so the XML
	// can be unmarshalled without exposing unexported fields; the result is
	// converted back below with bsc wired to this client.
	type ContainerAlias struct {
		bsc        *BlobStorageClient
		Name       string              `xml:"Name"`
		Properties ContainerProperties `xml:"Properties"`
		Metadata   BlobMetadata
		sasuri     url.URL
	}
	type ContainerListResponseAlias struct {
		XMLName    xml.Name         `xml:"EnumerationResults"`
		Xmlns      string           `xml:"xmlns,attr"`
		Prefix     string           `xml:"Prefix"`
		Marker     string           `xml:"Marker"`
		NextMarker string           `xml:"NextMarker"`
		MaxResults int64            `xml:"MaxResults"`
		Containers []ContainerAlias `xml:"Containers>Container"`
	}

	var outAlias ContainerListResponseAlias
	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &outAlias)
	if err != nil {
		return nil, err
	}

	// Copy alias fields into the public response type.
	out := ContainerListResponse{
		XMLName:    outAlias.XMLName,
		Xmlns:      outAlias.Xmlns,
		Prefix:     outAlias.Prefix,
		Marker:     outAlias.Marker,
		NextMarker: outAlias.NextMarker,
		MaxResults: outAlias.MaxResults,
		Containers: make([]Container, len(outAlias.Containers)),
	}
	for i, cnt := range outAlias.Containers {
		out.Containers[i] = Container{
			bsc:        &b,
			Name:       cnt.Name,
			Properties: cnt.Properties,
			Metadata:   map[string]string(cnt.Metadata),
			sasuri:     cnt.sasuri,
		}
	}

	return &out, err
}
|
|
||||||
|
|
||||||
func (p ListContainersParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
|
|
||||||
if p.Prefix != "" {
|
|
||||||
out.Set("prefix", p.Prefix)
|
|
||||||
}
|
|
||||||
if p.Marker != "" {
|
|
||||||
out.Set("marker", p.Marker)
|
|
||||||
}
|
|
||||||
if p.Include != "" {
|
|
||||||
out.Set("include", p.Include)
|
|
||||||
}
|
|
||||||
if p.MaxResults != 0 {
|
|
||||||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
|
||||||
}
|
|
||||||
if p.Timeout != 0 {
|
|
||||||
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
|
||||||
}
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeMetadata(h http.Header) map[string]string {
|
|
||||||
metadata := make(map[string]string)
|
|
||||||
for k, v := range h {
|
|
||||||
// Can't trust CanonicalHeaderKey() to munge case
|
|
||||||
// reliably. "_" is allowed in identifiers:
|
|
||||||
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
|
||||||
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
|
||||||
// http://tools.ietf.org/html/rfc7230#section-3.2
|
|
||||||
// ...but "_" is considered invalid by
|
|
||||||
// CanonicalMIMEHeaderKey in
|
|
||||||
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
|
||||||
// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// metadata["lol"] = content of the last X-Ms-Meta-Lol header
|
|
||||||
k = k[len(userDefinedMetadataHeaderPrefix):]
|
|
||||||
metadata[k] = v[len(v)-1]
|
|
||||||
}
|
|
||||||
return metadata
|
|
||||||
}
|
|
||||||
270
vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
generated
vendored
270
vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
generated
vendored
|
|
@ -1,270 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string

// Filters for listing blocks in block blobs, sent as the `blocklisttype`
// query parameter.
const (
	BlockListTypeAll         BlockListType = "all"
	BlockListTypeCommitted   BlockListType = "committed"
	BlockListTypeUncommitted BlockListType = "uncommitted"
)
|
|
||||||
|
|
||||||
// Maximum sizes (per REST API) for various concepts. These limits are
// enforced by the service, not by this SDK.
const (
	MaxBlobBlockSize = 100 * 1024 * 1024 // largest single block in a block blob (100 MiB)
	MaxBlobPageSize  = 4 * 1024 * 1024   // largest single page write to a page blob (4 MiB)
)
|
|
||||||
|
|
||||||
// BlockStatus defines states a block for a block blob can be in.
type BlockStatus string

// List of statuses that can be used to refer to a block in a block list.
const (
	// BlockStatusUncommitted refers to a block uploaded but not yet committed.
	BlockStatusUncommitted BlockStatus = "Uncommitted"
	// BlockStatusCommitted refers to a block already committed to the blob.
	BlockStatusCommitted BlockStatus = "Committed"
	// BlockStatusLatest refers to whichever version of the block was
	// uploaded most recently, committed or not.
	BlockStatusLatest BlockStatus = "Latest"
)
|
|
||||||
|
|
||||||
// Block is used to create Block entities for a Put Block List call.
type Block struct {
	ID     string      // base64-encoded block ID; must be consistent in length within a blob
	Status BlockStatus // which uploaded version of the block to commit
}
|
|
||||||
|
|
||||||
// BlockListResponse contains the response fields from a Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
	XMLName           xml.Name        `xml:"BlockList"`
	CommittedBlocks   []BlockResponse `xml:"CommittedBlocks>Block"`
	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}
|
|
||||||
|
|
||||||
// BlockResponse contains the information for a single block returned
// in a Get Block List call.
type BlockResponse struct {
	Name string `xml:"Name"` // the block's ID
	Size int64  `xml:"Size"` // the block's size in bytes
}
|
|
||||||
|
|
||||||
// CreateBlockBlob initializes an empty block blob with no blocks, by
// delegating to CreateBlockBlobFromReader with a nil reader.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
	return b.CreateBlockBlobFromReader(nil, options)
}
|
|
||||||
|
|
||||||
// CreateBlockBlobFromReader initializes a block blob using data from
// reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeBlock)

	headers["Content-Length"] = "0"
	var n int64
	var err error
	if blob != nil {
		// The service requires an exact Content-Length. If the reader can
		// report its length cheaply, use that; otherwise buffer the whole
		// stream in memory to count it (and send from the buffer instead).
		type lener interface {
			Len() int
		}
		// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
		if l, ok := blob.(lener); ok {
			n = int64(l.Len())
		} else {
			var buf bytes.Buffer
			n, err = io.Copy(&buf, blob)
			if err != nil {
				return err
			}
			blob = &buf
		}

		headers["Content-Length"] = strconv.FormatInt(n, 10)
	}
	b.Properties.ContentLength = n

	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	// Expect 201 Created and record the blob type on success.
	return b.respondCreation(resp, BlobTypeBlock)
}
|
|
||||||
|
|
||||||
// PutBlockOptions includes the options for a put block operation.
// Tagged fields are copied into the request headers; Timeout becomes
// the `timeout` query parameter.
type PutBlockOptions struct {
	Timeout    uint
	LeaseID    string `header:"x-ms-lease-id"`
	ContentMD5 string `header:"Content-MD5"` // optional transactional MD5 of the chunk
	RequestID  string `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// PutBlock saves the given data chunk to the specified block blob with
// the given ID. It is a convenience wrapper over PutBlockWithLength for
// in-memory chunks.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
	return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}
|
|
||||||
|
|
||||||
// PutBlockWithLength saves the given data stream of exactly the specified size
// to the block blob with the given ID. It is an alternative to PutBlock where
// data comes as a stream but the length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
	query := url.Values{
		"comp":    {"block"},
		"blockid": {blockID},
	}
	headers := b.Container.bsc.client.getStandardHeaders()
	// size is trusted; no verification against the reader is performed.
	headers["Content-Length"] = fmt.Sprintf("%v", size)

	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	// Expect 201 Created and record the blob type on success.
	return b.respondCreation(resp, BlobTypeBlock)
}
|
|
||||||
|
|
||||||
// PutBlockListOptions includes the options for a put block list operation.
// The header-tagged fields are serialized into request headers by
// headersFromStruct.
type PutBlockListOptions struct {
	Timeout           uint       // request timeout in seconds (sent as a query parameter)
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// PutBlockList saves list of blocks to the specified block blob.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
|
|
||||||
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
|
|
||||||
params := url.Values{"comp": {"blocklist"}}
|
|
||||||
blockListXML := prepareBlockListRequest(blocks)
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
|
||||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBlockListOptions includes the options for a get block list operation.
// The header-tagged fields are serialized into request headers by
// headersFromStruct.
type GetBlockListOptions struct {
	Timeout   uint       // request timeout in seconds (sent as a query parameter)
	Snapshot  *time.Time // when set, targets the snapshot taken at this time
	LeaseID   string     `header:"x-ms-lease-id"`
	RequestID string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// GetBlockList retrieves list of blocks in the specified block blob.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
|
|
||||||
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
|
|
||||||
params := url.Values{
|
|
||||||
"comp": {"blocklist"},
|
|
||||||
"blocklisttype": {string(blockType)},
|
|
||||||
}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
params = addSnapshot(params, options.Snapshot)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
var out BlockListResponse
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
988
vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
generated
vendored
988
vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
generated
vendored
|
|
@ -1,988 +0,0 @@
|
||||||
// Package storage provides clients for Microsoft Azure Storage Services.
|
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"encoding/xml"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"mime"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
"github.com/Azure/go-autorest/autorest/azure"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// DefaultBaseURL is the domain name used for storage requests in the
	// public cloud when a default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2016-05-31"

	defaultUseHTTPS      = true
	defaultRetryAttempts = 5
	defaultRetryDuration = time.Second * 5

	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
	StorageEmulatorAccountName = "devstoreaccount1"

	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	// service names used to build per-service endpoint host names
	blobServiceName  = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
	fileServiceName  = "file"

	// fixed host:port addresses used by the local Azure Storage Emulator
	storageEmulatorBlob  = "127.0.0.1:10000"
	storageEmulatorTable = "127.0.0.1:10002"
	storageEmulatorQueue = "127.0.0.1:10001"

	userAgentHeader = "User-Agent"

	// prefix for user-defined metadata request/response headers
	userDefinedMetadataHeaderPrefix = "x-ms-meta-"

	// lowercased key names recognized in connection strings
	connectionStringAccountName      = "accountname"
	connectionStringAccountKey       = "accountkey"
	connectionStringEndpointSuffix   = "endpointsuffix"
	connectionStringEndpointProtocol = "defaultendpointsprotocol"

	connectionStringBlobEndpoint  = "blobendpoint"
	connectionStringFileEndpoint  = "fileendpoint"
	connectionStringQueueEndpoint = "queueendpoint"
	connectionStringTableEndpoint = "tableendpoint"
	connectionStringSAS           = "sharedaccesssignature"
)
|
|
||||||
|
|
||||||
var (
	// validStorageAccount matches legal storage account names:
	// 3-24 characters, lowercase letters and digits only.
	validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")

	// defaultValidStatusCodes lists the HTTP statuses that the
	// DefaultSender treats as transient and retries.
	defaultValidStatusCodes = []int{
		http.StatusRequestTimeout,      // 408
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout,      // 504
	}
)
|
|
||||||
|
|
||||||
// Sender sends a request. It is the extension point that lets callers
// customize how (and how often) the Client issues HTTP requests.
type Sender interface {
	Send(*Client, *http.Request) (*http.Response, error)
}
|
|
||||||
|
|
||||||
// DefaultSender is the default sender for the client. It implements
// an automatic retry strategy.
type DefaultSender struct {
	RetryAttempts    int           // maximum number of attempts
	RetryDuration    time.Duration // base duration for exponential backoff between attempts
	ValidStatusCodes []int         // response statuses considered transient and worth retrying
	attempts         int           // used for testing
}
|
|
||||||
|
|
||||||
// Send is the default retry strategy in the client. It wraps the request
// in an autorest retriable request (so the body can be replayed), sends it
// up to ds.RetryAttempts times, and backs off exponentially between
// attempts. It returns early on transport errors or on any status not
// listed in ds.ValidStatusCodes.
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
	rr := autorest.NewRetriableRequest(req)
	for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
		// re-prepare so the body can be re-read on each attempt
		err = rr.Prepare()
		if err != nil {
			return resp, err
		}
		resp, err = c.HTTPClient.Do(rr.Request())
		if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
			// transport error, or a status that is not retryable: give up
			return resp, err
		}
		autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
		// NOTE(review): attempts is recorded inside the loop and then
		// incremented once more below; ds.attempts is test-only bookkeeping.
		ds.attempts = attempts
	}
	ds.attempts++
	return resp, err
}
|
|
||||||
|
|
||||||
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
	// HTTPClient is the http.Client used to initiate API
	// requests. http.DefaultClient is used when creating a
	// client.
	HTTPClient *http.Client

	// Sender is an interface that sends the request. Clients are
	// created with a DefaultSender. The DefaultSender has an
	// automatic retry strategy built in. The Sender can be customized.
	Sender Sender

	accountName      string     // storage account name
	accountKey       []byte     // decoded shared key (empty for SAS clients)
	useHTTPS         bool       // whether endpoints are built with the https scheme
	UseSharedKeyLite bool       // when true, services authenticate with SharedKeyLite
	baseURL          string     // endpoint suffix, e.g. "core.windows.net"
	apiVersion       string     // x-ms-version sent with every request
	userAgent        string     // User-Agent sent with every request
	sasClient        bool       // true when the client authenticates via SAS
	accountSASToken  url.Values // account SAS query parameters (nil for service SAS)
}
|
|
||||||
|
|
||||||
// storageResponse is the internal, minimal view of an HTTP response
// passed between the client and the service wrappers.
type storageResponse struct {
	statusCode int
	headers    http.Header
	body       io.ReadCloser
}

// odataResponse is a storageResponse that additionally carries a parsed
// OData error payload (used by the table service).
type odataResponse struct {
	storageResponse
	odata odataErrorWrapper
}
|
|
||||||
|
|
||||||
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
	Code                      string `xml:"Code"`
	Message                   string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName        string `xml:"QueryParameterName"`
	QueryParameterValue       string `xml:"QueryParameterValue"`
	Reason                    string `xml:"Reason"`
	Lang                      string
	StatusCode                int    // HTTP status of the failed response
	RequestID                 string // x-ms-request-id debug header
	Date                      string // Date debug header
	APIVersion                string // x-ms-version debug header
}
|
|
||||||
|
|
||||||
// odataErrorMessage is the localized message part of an OData error payload.
type odataErrorMessage struct {
	Lang  string `json:"lang"`
	Value string `json:"value"`
}

// odataError is the code/message pair inside an OData error payload.
type odataError struct {
	Code    string            `json:"code"`
	Message odataErrorMessage `json:"message"`
}

// odataErrorWrapper matches the top-level {"odata.error": ...} JSON object
// returned by the table service.
type odataErrorWrapper struct {
	Err odataError `json:"odata.error"`
}
|
|
||||||
|
|
||||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // status codes the caller would have accepted
	got     int   // status code actually returned by the service
}
|
|
||||||
|
|
||||||
func (e UnexpectedStatusCodeError) Error() string {
|
|
||||||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
|
|
||||||
|
|
||||||
got := s(e.got)
|
|
||||||
expected := []string{}
|
|
||||||
for _, v := range e.allowed {
|
|
||||||
expected = append(expected, s(v))
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}
|
|
||||||
|
|
||||||
// NewClientFromConnectionString creates a Client from the connection string.
|
|
||||||
func NewClientFromConnectionString(input string) (Client, error) {
|
|
||||||
// build a map of connection string key/value pairs
|
|
||||||
parts := map[string]string{}
|
|
||||||
for _, pair := range strings.Split(input, ";") {
|
|
||||||
if pair == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
equalDex := strings.IndexByte(pair, '=')
|
|
||||||
if equalDex <= 0 {
|
|
||||||
return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
|
|
||||||
}
|
|
||||||
|
|
||||||
value := strings.TrimSpace(pair[equalDex+1:])
|
|
||||||
key := strings.TrimSpace(strings.ToLower(pair[:equalDex]))
|
|
||||||
parts[key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: validate parameter sets?
|
|
||||||
|
|
||||||
if parts[connectionStringAccountName] == StorageEmulatorAccountName {
|
|
||||||
return NewEmulatorClient()
|
|
||||||
}
|
|
||||||
|
|
||||||
if parts[connectionStringSAS] != "" {
|
|
||||||
endpoint := ""
|
|
||||||
if parts[connectionStringBlobEndpoint] != "" {
|
|
||||||
endpoint = parts[connectionStringBlobEndpoint]
|
|
||||||
} else if parts[connectionStringFileEndpoint] != "" {
|
|
||||||
endpoint = parts[connectionStringFileEndpoint]
|
|
||||||
} else if parts[connectionStringQueueEndpoint] != "" {
|
|
||||||
endpoint = parts[connectionStringQueueEndpoint]
|
|
||||||
} else {
|
|
||||||
endpoint = parts[connectionStringTableEndpoint]
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS])
|
|
||||||
}
|
|
||||||
|
|
||||||
useHTTPS := defaultUseHTTPS
|
|
||||||
if parts[connectionStringEndpointProtocol] != "" {
|
|
||||||
useHTTPS = parts[connectionStringEndpointProtocol] == "https"
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
|
|
||||||
parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBasicClient constructs a Client with given storage service name and
|
|
||||||
// key.
|
|
||||||
func NewBasicClient(accountName, accountKey string) (Client, error) {
|
|
||||||
if accountName == StorageEmulatorAccountName {
|
|
||||||
return NewEmulatorClient()
|
|
||||||
}
|
|
||||||
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
|
|
||||||
// key in the referenced cloud.
|
|
||||||
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
|
|
||||||
if accountName == StorageEmulatorAccountName {
|
|
||||||
return NewEmulatorClient()
|
|
||||||
}
|
|
||||||
return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEmulatorClient constructs a Client intended to only work with the
// Azure Storage Emulator (fixed account name/key, HTTP instead of HTTPS).
func NewEmulatorClient() (Client, error) {
	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
|
|
||||||
|
|
||||||
// NewClient constructs a Client. This should be used if the caller wants
|
|
||||||
// to specify whether to use HTTPS, a specific REST API version or a custom
|
|
||||||
// storage endpoint than Azure Public Cloud.
|
|
||||||
func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
|
|
||||||
var c Client
|
|
||||||
if !IsValidStorageAccount(accountName) {
|
|
||||||
return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
|
|
||||||
} else if accountKey == "" {
|
|
||||||
return c, fmt.Errorf("azure: account key required")
|
|
||||||
} else if serviceBaseURL == "" {
|
|
||||||
return c, fmt.Errorf("azure: base storage service url required")
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := base64.StdEncoding.DecodeString(accountKey)
|
|
||||||
if err != nil {
|
|
||||||
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c = Client{
|
|
||||||
HTTPClient: http.DefaultClient,
|
|
||||||
accountName: accountName,
|
|
||||||
accountKey: key,
|
|
||||||
useHTTPS: useHTTPS,
|
|
||||||
baseURL: serviceBaseURL,
|
|
||||||
apiVersion: apiVersion,
|
|
||||||
sasClient: false,
|
|
||||||
UseSharedKeyLite: false,
|
|
||||||
Sender: &DefaultSender{
|
|
||||||
RetryAttempts: defaultRetryAttempts,
|
|
||||||
ValidStatusCodes: defaultValidStatusCodes,
|
|
||||||
RetryDuration: defaultRetryDuration,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c.userAgent = c.getDefaultUserAgent()
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValidStorageAccount checks if the storage account name is valid
// (3-24 lowercase letters and digits).
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
func IsValidStorageAccount(account string) bool {
	return validStorageAccount.MatchString(account)
}
|
|
||||||
|
|
||||||
// NewAccountSASClient contructs a client that uses accountSAS authorization
|
|
||||||
// for its operations.
|
|
||||||
func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
|
|
||||||
c := newSASClient()
|
|
||||||
c.accountSASToken = token
|
|
||||||
c.accountName = account
|
|
||||||
c.baseURL = env.StorageEndpointSuffix
|
|
||||||
|
|
||||||
// Get API version and protocol from token
|
|
||||||
c.apiVersion = token.Get("sv")
|
|
||||||
c.useHTTPS = token.Get("spr") == "https"
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
// for its operations using the specified endpoint and SAS token.
//
// The account name and base URL are derived from the endpoint's host name,
// and the API version and protocol are read from the token's "sv" and
// "spr" parameters.
func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return Client{}, err
	}

	token, err := url.ParseQuery(sasToken)
	if err != nil {
		return Client{}, err
	}

	// the host name will look something like this
	// - foo.blob.core.windows.net
	// "foo" is the account name
	// "core.windows.net" is the baseURL

	// find the first dot to get account name
	i1 := strings.IndexByte(u.Host, '.')
	if i1 < 0 {
		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
	}

	// now find the second dot to get the base URL
	// (i2 is relative to the substring after the first dot, so the
	// second dot sits at absolute index i1+1+i2; the base URL starts
	// one character past it, i.e. at i1+i2+2)
	i2 := strings.IndexByte(u.Host[i1+1:], '.')
	if i2 < 0 {
		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
	}

	c := newSASClient()
	c.accountSASToken = token
	c.accountName = u.Host[:i1]
	c.baseURL = u.Host[i1+i2+2:]

	// Get API version and protocol from token
	c.apiVersion = token.Get("sv")
	c.useHTTPS = token.Get("spr") == "https"
	return c, nil
}
|
|
||||||
|
|
||||||
func newSASClient() Client {
|
|
||||||
c := Client{
|
|
||||||
HTTPClient: http.DefaultClient,
|
|
||||||
apiVersion: DefaultAPIVersion,
|
|
||||||
sasClient: true,
|
|
||||||
Sender: &DefaultSender{
|
|
||||||
RetryAttempts: defaultRetryAttempts,
|
|
||||||
ValidStatusCodes: defaultValidStatusCodes,
|
|
||||||
RetryDuration: defaultRetryDuration,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c.userAgent = c.getDefaultUserAgent()
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// isServiceSASClient reports whether this client authorizes with a
// service-level SAS (a SAS client that carries no account SAS token).
func (c Client) isServiceSASClient() bool {
	return c.sasClient && c.accountSASToken == nil
}
|
|
||||||
|
|
||||||
// isAccountSASClient reports whether this client authorizes with an
// account-level SAS token (appended to every request's query string).
func (c Client) isAccountSASClient() bool {
	return c.sasClient && c.accountSASToken != nil
}
|
|
||||||
|
|
||||||
// getDefaultUserAgent builds the base User-Agent string from the Go
// runtime version, platform, SDK version and configured API version.
func (c Client) getDefaultUserAgent() string {
	return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		sdkVersion,
		c.apiVersion,
	)
}
|
|
||||||
|
|
||||||
// AddToUserAgent adds an extension to the current user agent
|
|
||||||
func (c *Client) AddToUserAgent(extension string) error {
|
|
||||||
if extension != "" {
|
|
||||||
c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// protectUserAgent is used in funcs that include extraheaders as a parameter.
|
|
||||||
// It prevents the User-Agent header to be overwritten, instead if it happens to
|
|
||||||
// be present, it gets added to the current User-Agent. Use it before getStandardHeaders
|
|
||||||
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
|
|
||||||
if v, ok := extraheaders[userAgentHeader]; ok {
|
|
||||||
c.AddToUserAgent(v)
|
|
||||||
delete(extraheaders, userAgentHeader)
|
|
||||||
}
|
|
||||||
return extraheaders
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) getBaseURL(service string) *url.URL {
|
|
||||||
scheme := "http"
|
|
||||||
if c.useHTTPS {
|
|
||||||
scheme = "https"
|
|
||||||
}
|
|
||||||
host := ""
|
|
||||||
if c.accountName == StorageEmulatorAccountName {
|
|
||||||
switch service {
|
|
||||||
case blobServiceName:
|
|
||||||
host = storageEmulatorBlob
|
|
||||||
case tableServiceName:
|
|
||||||
host = storageEmulatorTable
|
|
||||||
case queueServiceName:
|
|
||||||
host = storageEmulatorQueue
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &url.URL{
|
|
||||||
Scheme: scheme,
|
|
||||||
Host: host,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) getEndpoint(service, path string, params url.Values) string {
|
|
||||||
u := c.getBaseURL(service)
|
|
||||||
|
|
||||||
// API doesn't accept path segments not starting with '/'
|
|
||||||
if !strings.HasPrefix(path, "/") {
|
|
||||||
path = fmt.Sprintf("/%v", path)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.accountName == StorageEmulatorAccountName {
|
|
||||||
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
u.Path = path
|
|
||||||
u.RawQuery = params.Encode()
|
|
||||||
return u.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccountSASTokenOptions includes options for constructing
// an account SAS token.
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
type AccountSASTokenOptions struct {
	APIVersion    string        // signed version ("sv"); defaults to the client's API version
	Services      Services      // services the token grants access to ("ss")
	ResourceTypes ResourceTypes // resource types the token grants access to ("srt")
	Permissions   Permissions   // permissions granted ("sp")
	Start         time.Time     // optional start of validity ("st"); zero value omits it
	Expiry        time.Time     // end of validity ("se")
	IP            string        // optional allowed IP or range ("sip")
	UseHTTPS      bool          // when true, restrict the token to HTTPS only ("spr")
}
|
|
||||||
|
|
||||||
// Services specify services accessible with an account SAS.
type Services struct {
	Blob  bool
	Queue bool
	Table bool
	File  bool
}

// ResourceTypes specify the resources accessible with an
// account SAS.
type ResourceTypes struct {
	Service   bool
	Container bool
	Object    bool
}

// Permissions specifies permissions for an account SAS.
type Permissions struct {
	Read    bool
	Write   bool
	Delete  bool
	List    bool
	Add     bool
	Create  bool
	Update  bool
	Process bool
}
|
|
||||||
|
|
||||||
// GetAccountSASToken creates an account SAS token: it encodes the
// requested services, resource types, permissions and validity window
// into the canonical string-to-sign, signs it with the account key, and
// returns the resulting query parameters.
//
// NOTE(review): the order and content of the string-to-sign fields are
// fixed by the service protocol; do not reorder them.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
	if options.APIVersion == "" {
		options.APIVersion = c.apiVersion
	}

	// lexicographic comparison works because versions are YYYY-MM-DD strings
	if options.APIVersion < "2015-04-05" {
		return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
	}

	// build services string
	services := ""
	if options.Services.Blob {
		services += "b"
	}
	if options.Services.Queue {
		services += "q"
	}
	if options.Services.Table {
		services += "t"
	}
	if options.Services.File {
		services += "f"
	}

	// build resources string
	resources := ""
	if options.ResourceTypes.Service {
		resources += "s"
	}
	if options.ResourceTypes.Container {
		resources += "c"
	}
	if options.ResourceTypes.Object {
		resources += "o"
	}

	// build permissions string
	permissions := ""
	if options.Permissions.Read {
		permissions += "r"
	}
	if options.Permissions.Write {
		permissions += "w"
	}
	if options.Permissions.Delete {
		permissions += "d"
	}
	if options.Permissions.List {
		permissions += "l"
	}
	if options.Permissions.Add {
		permissions += "a"
	}
	if options.Permissions.Create {
		permissions += "c"
	}
	if options.Permissions.Update {
		permissions += "u"
	}
	if options.Permissions.Process {
		permissions += "p"
	}

	// build start time, if exists
	start := ""
	if options.Start != (time.Time{}) {
		start = options.Start.Format(time.RFC3339)
		// For some reason I don't understand, it fails when the rest of the string is included
		// (only the YYYY-MM-DD date portion is kept)
		start = start[:10]
	}

	// build expiry time
	expiry := options.Expiry.Format(time.RFC3339)
	// For some reason I don't understand, it fails when the rest of the string is included
	// (only the YYYY-MM-DD date portion is kept)
	expiry = expiry[:10]

	protocol := "https,http"
	if options.UseHTTPS {
		protocol = "https"
	}

	stringToSign := strings.Join([]string{
		c.accountName,
		permissions,
		services,
		resources,
		start,
		expiry,
		options.IP,
		protocol,
		options.APIVersion,
		"",
	}, "\n")
	signature := c.computeHmac256(stringToSign)

	sasParams := url.Values{
		"sv":  {options.APIVersion},
		"ss":  {services},
		"srt": {resources},
		"sp":  {permissions},
		"se":  {expiry},
		"spr": {protocol},
		"sig": {signature},
	}
	if start != "" {
		sasParams.Add("st", start)
	}
	if options.IP != "" {
		sasParams.Add("sip", options.IP)
	}

	return sasParams, nil
}
|
|
||||||
|
|
||||||
// GetBlobService returns a BlobStorageClient which can operate on the blob
|
|
||||||
// service of the storage account.
|
|
||||||
func (c Client) GetBlobService() BlobStorageClient {
|
|
||||||
b := BlobStorageClient{
|
|
||||||
client: c,
|
|
||||||
}
|
|
||||||
b.client.AddToUserAgent(blobServiceName)
|
|
||||||
b.auth = sharedKey
|
|
||||||
if c.UseSharedKeyLite {
|
|
||||||
b.auth = sharedKeyLite
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetQueueService returns a QueueServiceClient which can operate on the queue
|
|
||||||
// service of the storage account.
|
|
||||||
func (c Client) GetQueueService() QueueServiceClient {
|
|
||||||
q := QueueServiceClient{
|
|
||||||
client: c,
|
|
||||||
}
|
|
||||||
q.client.AddToUserAgent(queueServiceName)
|
|
||||||
q.auth = sharedKey
|
|
||||||
if c.UseSharedKeyLite {
|
|
||||||
q.auth = sharedKeyLite
|
|
||||||
}
|
|
||||||
return q
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTableService returns a TableServiceClient which can operate on the table
|
|
||||||
// service of the storage account.
|
|
||||||
func (c Client) GetTableService() TableServiceClient {
|
|
||||||
t := TableServiceClient{
|
|
||||||
client: c,
|
|
||||||
}
|
|
||||||
t.client.AddToUserAgent(tableServiceName)
|
|
||||||
t.auth = sharedKeyForTable
|
|
||||||
if c.UseSharedKeyLite {
|
|
||||||
t.auth = sharedKeyLiteForTable
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileService returns a FileServiceClient which can operate on the file
|
|
||||||
// service of the storage account.
|
|
||||||
func (c Client) GetFileService() FileServiceClient {
|
|
||||||
f := FileServiceClient{
|
|
||||||
client: c,
|
|
||||||
}
|
|
||||||
f.client.AddToUserAgent(fileServiceName)
|
|
||||||
f.auth = sharedKey
|
|
||||||
if c.UseSharedKeyLite {
|
|
||||||
f.auth = sharedKeyLite
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) getStandardHeaders() map[string]string {
|
|
||||||
return map[string]string{
|
|
||||||
userAgentHeader: c.userAgent,
|
|
||||||
"x-ms-version": c.apiVersion,
|
|
||||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
|
|
||||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest(verb, url, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New("azure/storage: error creating request: " + err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// http.NewRequest() will automatically set req.ContentLength for a handful of types
|
|
||||||
// otherwise we will handle here.
|
|
||||||
if req.ContentLength < 1 {
|
|
||||||
if clstr, ok := headers["Content-Length"]; ok {
|
|
||||||
if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
|
|
||||||
req.ContentLength = cl
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range headers {
|
|
||||||
req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.isAccountSASClient() {
|
|
||||||
// append the SAS token to the query params
|
|
||||||
v := req.URL.Query()
|
|
||||||
v = mergeParams(v, c.accountSASToken)
|
|
||||||
req.URL.RawQuery = v.Encode()
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.Sender.Send(&c, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
|
|
||||||
var respBody []byte
|
|
||||||
respBody, err = readAndCloseBody(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
requestID, date, version := getDebugHeaders(resp.Header)
|
|
||||||
if len(respBody) == 0 {
|
|
||||||
// no error in response body, might happen in HEAD requests
|
|
||||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
|
||||||
} else {
|
|
||||||
storageErr := AzureStorageServiceError{
|
|
||||||
StatusCode: resp.StatusCode,
|
|
||||||
RequestID: requestID,
|
|
||||||
Date: date,
|
|
||||||
APIVersion: version,
|
|
||||||
}
|
|
||||||
// response contains storage service error object, unmarshal
|
|
||||||
if resp.Header.Get("Content-Type") == "application/xml" {
|
|
||||||
errIn := serviceErrFromXML(respBody, &storageErr)
|
|
||||||
if err != nil { // error unmarshaling the error response
|
|
||||||
err = errIn
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
errIn := serviceErrFromJSON(respBody, &storageErr)
|
|
||||||
if err != nil { // error unmarshaling the error response
|
|
||||||
err = errIn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = storageErr
|
|
||||||
}
|
|
||||||
return &storageResponse{
|
|
||||||
statusCode: resp.StatusCode,
|
|
||||||
headers: resp.Header,
|
|
||||||
body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
|
|
||||||
}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &storageResponse{
|
|
||||||
statusCode: resp.StatusCode,
|
|
||||||
headers: resp.Header,
|
|
||||||
body: resp.Body}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
|
|
||||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest(verb, url, body)
|
|
||||||
for k, v := range headers {
|
|
||||||
req.Header.Add(k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.Sender.Send(&c, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
respToRet := &odataResponse{}
|
|
||||||
respToRet.body = resp.Body
|
|
||||||
respToRet.statusCode = resp.StatusCode
|
|
||||||
respToRet.headers = resp.Header
|
|
||||||
|
|
||||||
statusCode := resp.StatusCode
|
|
||||||
if statusCode >= 400 && statusCode <= 505 {
|
|
||||||
var respBody []byte
|
|
||||||
respBody, err = readAndCloseBody(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
requestID, date, version := getDebugHeaders(resp.Header)
|
|
||||||
if len(respBody) == 0 {
|
|
||||||
// no error in response body, might happen in HEAD requests
|
|
||||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
|
||||||
return respToRet, req, resp, err
|
|
||||||
}
|
|
||||||
// try unmarshal as odata.error json
|
|
||||||
err = json.Unmarshal(respBody, &respToRet.odata)
|
|
||||||
}
|
|
||||||
|
|
||||||
return respToRet, req, resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
|
||||||
respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
|
|
||||||
return respToRet, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// execBatchOperationJSON executes a batch request and parses the nested
// multipart response: the outer batch part is located first, then the
// changeset part contained within it is decoded into the returned
// odataResponse.
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
	// execute common query, get back generated request, response etc... for more processing.
	respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
	if err != nil {
		return nil, err
	}

	// return the OData in the case of executing batch commands.
	// In this case we need to read the outer batch boundary and contents.
	// Then we read the changeset information within the batch
	var respBody []byte
	respBody, err = readAndCloseBody(resp.Body)
	if err != nil {
		return nil, err
	}

	// outer multipart body
	// NOTE(review): this indexes Content-Type without checking presence —
	// a response missing the header would panic; confirm the service always
	// sets it for batch responses.
	_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
	if err != nil {
		return nil, err
	}

	// batch details.
	batchBoundary := batchHeader["boundary"]
	batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
	if err != nil {
		return nil, err
	}

	// changeset details.
	err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
	if err != nil {
		return nil, err
	}

	return respToRet, nil
}
|
|
||||||
|
|
||||||
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
|
|
||||||
changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
|
|
||||||
changesetPart, err := changesetMultiReader.NextPart()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
changesetPartBufioReader := bufio.NewReader(changesetPart)
|
|
||||||
changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if changesetResp.StatusCode != http.StatusNoContent {
|
|
||||||
changesetBody, err := readAndCloseBody(changesetResp.Body)
|
|
||||||
err = json.Unmarshal(changesetBody, &respToRet.odata)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
respToRet.statusCode = changesetResp.StatusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// genBatchReader parses the outer multipart body of a batch response and
// returns a reader positioned at the first batch part, together with the
// boundary string of the nested changeset multipart (taken from that part's
// Content-Type header).
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
	// Locate the first part of the outer multipart response.
	outerReader := multipart.NewReader(strings.NewReader(string(respBody)), batchBoundary)
	batchPart, err := outerReader.NextPart()
	if err != nil {
		return nil, "", err
	}

	// The part's Content-Type carries the boundary of the nested changeset.
	_, changesetParams, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
	if err != nil {
		return nil, "", err
	}

	return bufio.NewReader(batchPart), changesetParams["boundary"], nil
}
|
|
||||||
|
|
||||||
// readAndCloseBody drains body to completion, always closes it, and returns
// the bytes read. A bare io.EOF is treated as success.
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
	defer body.Close()
	data, err := ioutil.ReadAll(body)
	if err == io.EOF {
		return data, nil
	}
	return data, err
}
|
|
||||||
|
|
||||||
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
|
|
||||||
if err := xml.Unmarshal(body, storageErr); err != nil {
|
|
||||||
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
|
|
||||||
odataError := odataErrorWrapper{}
|
|
||||||
if err := json.Unmarshal(body, &odataError); err != nil {
|
|
||||||
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
storageErr.Code = odataError.Err.Code
|
|
||||||
storageErr.Message = odataError.Err.Message.Value
|
|
||||||
storageErr.Lang = odataError.Err.Message.Lang
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
|
|
||||||
return AzureStorageServiceError{
|
|
||||||
StatusCode: code,
|
|
||||||
Code: status,
|
|
||||||
RequestID: requestID,
|
|
||||||
Date: date,
|
|
||||||
APIVersion: version,
|
|
||||||
Message: "no response body was available for error status code",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface, flattening every diagnostic field of
// the service error into a single string.
func (e AzureStorageServiceError) Error() string {
	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
		e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
}
|
|
||||||
|
|
||||||
// checkRespCode returns UnexpectedStatusError if the given response code is not
|
|
||||||
// one of the allowed status codes; otherwise nil.
|
|
||||||
func checkRespCode(respCode int, allowed []int) error {
|
|
||||||
for _, v := range allowed {
|
|
||||||
if respCode == v {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return UnexpectedStatusCodeError{allowed, respCode}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
|
|
||||||
metadata = c.protectUserAgent(metadata)
|
|
||||||
for k, v := range metadata {
|
|
||||||
h[userDefinedMetadataHeaderPrefix+k] = v
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDebugHeaders extracts the request ID, date, and service version headers
// used for error diagnostics from a response header set.
func getDebugHeaders(h http.Header) (requestID, date, version string) {
	return h.Get("x-ms-request-id"), h.Get("Date"), h.Get("x-ms-version")
}
|
|
||||||
38
vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
generated
vendored
38
vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go
generated
vendored
|
|
@ -1,38 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SASOptions includes options used by SAS URIs for different
// services and resources.
type SASOptions struct {
	APIVersion string
	// Start and Expiry presumably bound the validity window of the
	// signature — confirm against the signing code that consumes them.
	Start      time.Time
	Expiry     time.Time
	IP         string
	UseHTTPS   bool
	Identifier string
}
|
|
||||||
|
|
||||||
// addQueryParameter adds key=value to query unless value is empty, and
// returns the (mutated) values for chaining.
func addQueryParameter(query url.Values, key, value string) url.Values {
	if value == "" {
		return query
	}
	query.Add(key, value)
	return query
}
|
|
||||||
614
vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
generated
vendored
614
vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
generated
vendored
|
|
@ -1,614 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Container represents an Azure container.
type Container struct {
	bsc        *BlobStorageClient
	Name       string              `xml:"Name"`
	Properties ContainerProperties `xml:"Properties"`
	Metadata   map[string]string
	// sasuri holds the SAS URI for service-SAS clients; Exists and
	// ListBlobs merge its query parameters into their request URLs.
	sasuri url.URL
}
|
|
||||||
|
|
||||||
// Client returns the storage Client used by the Container reference.
func (c *Container) Client() *Client {
	return &c.bsc.client
}
|
|
||||||
|
|
||||||
func (c *Container) buildPath() string {
|
|
||||||
return fmt.Sprintf("/%s", c.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetURL gets the canonical URL to the container.
// This method does not create a publicly accessible URL if the container
// is private and this method does not check if the blob exists.
func (c *Container) GetURL() string {
	container := c.Name
	if container == "" {
		// an unnamed container addresses the account's root container
		container = "$root"
	}
	return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil)
}
|
|
||||||
|
|
||||||
// ContainerSASOptions are options to construct a container SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type ContainerSASOptions struct {
	ContainerSASPermissions
	OverrideHeaders
	SASOptions
}
|
|
||||||
|
|
||||||
// ContainerSASPermissions includes the available permissions for
// a container SAS URI. It extends the blob-service permissions with the
// container-only List permission.
type ContainerSASPermissions struct {
	BlobServiceSASPermissions
	List bool
}
|
|
||||||
|
|
||||||
// GetSASURI creates an URL to the container which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
	uri := c.GetURL()
	signedResource := "c" // "c" marks the signed resource as a container
	canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
	if err != nil {
		return "", err
	}

	// build permissions string; "l" (list) is the container-only extra
	permissions := options.BlobServiceSASPermissions.buildString()
	if options.List {
		permissions += "l"
	}

	return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
|
|
||||||
|
|
||||||
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
	LastModified  string `xml:"Last-Modified"`
	Etag          string `xml:"Etag"`
	LeaseStatus   string `xml:"LeaseStatus"`
	LeaseState    string `xml:"LeaseState"`
	LeaseDuration string `xml:"LeaseDuration"`
}
|
|
||||||
|
|
||||||
// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
	XMLName    xml.Name    `xml:"EnumerationResults"`
	Xmlns      string      `xml:"xmlns,attr"`
	Prefix     string      `xml:"Prefix"`
	Marker     string      `xml:"Marker"`
	NextMarker string      `xml:"NextMarker"`
	MaxResults int64       `xml:"MaxResults"`
	Containers []Container `xml:"Containers>Container"`
}
|
|
||||||
|
|
||||||
// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"`
	MaxResults int64    `xml:"MaxResults"`
	Blobs      []Blob   `xml:"Blobs>Blob"`

	// BlobPrefix is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	// The list here can be thought of as "folders" that may contain
	// other folders or blobs.
	BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`

	// Delimiter is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	Delimiter string `xml:"Delimiter"`
}
|
|
||||||
|
|
||||||
// IncludeBlobDataset has options to include in a list blobs operation.
// Each enabled flag adds the corresponding dataset name to the "include"
// query parameter (see ListBlobsParameters.getParameters).
type IncludeBlobDataset struct {
	Snapshots        bool
	Metadata         bool
	UncommittedBlobs bool
	Copy             bool
}
|
|
||||||
|
|
||||||
// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
	Prefix     string
	Delimiter  string
	Marker     string
	Include    *IncludeBlobDataset
	MaxResults uint
	Timeout    uint
	RequestID  string
}
|
|
||||||
|
|
||||||
func (p ListBlobsParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
|
|
||||||
if p.Prefix != "" {
|
|
||||||
out.Set("prefix", p.Prefix)
|
|
||||||
}
|
|
||||||
if p.Delimiter != "" {
|
|
||||||
out.Set("delimiter", p.Delimiter)
|
|
||||||
}
|
|
||||||
if p.Marker != "" {
|
|
||||||
out.Set("marker", p.Marker)
|
|
||||||
}
|
|
||||||
if p.Include != nil {
|
|
||||||
include := []string{}
|
|
||||||
include = addString(include, p.Include.Snapshots, "snapshots")
|
|
||||||
include = addString(include, p.Include.Metadata, "metadata")
|
|
||||||
include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
|
|
||||||
include = addString(include, p.Include.Copy, "copy")
|
|
||||||
fullInclude := strings.Join(include, ",")
|
|
||||||
out.Set("include", fullInclude)
|
|
||||||
}
|
|
||||||
if p.MaxResults != 0 {
|
|
||||||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
|
||||||
}
|
|
||||||
if p.Timeout != 0 {
|
|
||||||
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
|
||||||
}
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// addString appends text to datasets when include is true; otherwise the
// slice is returned unchanged.
func addString(datasets []string, include bool, text string) []string {
	if !include {
		return datasets
	}
	return append(datasets, text)
}
|
|
||||||
|
|
||||||
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string

// Access options for containers
const (
	ContainerAccessTypePrivate   ContainerAccessType = ""
	ContainerAccessTypeBlob      ContainerAccessType = "blob"
	ContainerAccessTypeContainer ContainerAccessType = "container"
)
|
|
||||||
|
|
||||||
// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool
	CanWrite   bool
	CanDelete  bool
}
|
|
||||||
|
|
||||||
// ContainerPermissions represents the container ACLs: the public access
// level plus the list of stored access policies.
type ContainerPermissions struct {
	AccessType     ContainerAccessType
	AccessPolicies []ContainerAccessPolicy
}
|
|
||||||
|
|
||||||
// ContainerAccessHeader references header used when setting/getting container ACL
const (
	ContainerAccessHeader string = "x-ms-blob-public-access"
)
|
|
||||||
|
|
||||||
// GetBlobReference returns a Blob object for the specified blob name.
// No network call is made; the reference only binds the name to this
// container.
func (c *Container) GetBlobReference(name string) *Blob {
	return &Blob{
		Container: c,
		Name:      name,
	}
}
|
|
||||||
|
|
||||||
// CreateContainerOptions includes the options for a create container operation.
// Fields with a `header` tag are sent as request headers via headersFromStruct.
type CreateContainerOptions struct {
	Timeout   uint
	Access    ContainerAccessType `header:"x-ms-blob-public-access"`
	RequestID string              `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// Create creates a blob container within the storage account
// with given name and access level. Returns error if container already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
func (c *Container) Create(options *CreateContainerOptions) error {
	resp, err := c.create(options)
	if err != nil {
		return err
	}
	// drain and close the body so the connection can be reused
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
|
|
||||||
|
|
||||||
// CreateIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists.
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
	resp, err := c.create(options)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		// 201 = newly created, 409 = already existed; both are non-errors here
		if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
			return resp.statusCode == http.StatusCreated, nil
		}
	}
	return false, err
}
|
|
||||||
|
|
||||||
// create issues the PUT request behind Create/CreateIfNotExists and returns
// the raw storage response for the caller to interpret.
func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) {
	query := url.Values{"restype": {"container"}}
	headers := c.bsc.client.getStandardHeaders()
	headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)

	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)

	return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
}
|
|
||||||
|
|
||||||
// Exists returns true if a container with given name exists
// on the storage account, otherwise returns false.
func (c *Container) Exists() (bool, error) {
	q := url.Values{"restype": {"container"}}
	var uri string
	if c.bsc.client.isServiceSASClient() {
		// service-SAS clients must carry the SAS token query parameters
		// from the stored SAS URI on every request
		q = mergeParams(q, c.sasuri.Query())
		newURI := c.sasuri
		newURI.RawQuery = q.Encode()
		uri = newURI.String()

	} else {
		uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
	}
	headers := c.bsc.client.getStandardHeaders()

	resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		// 200 means the container exists, 404 means it does not; any other
		// status falls through and is reported via err
		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusOK, nil
		}
	}
	return false, err
}
|
|
||||||
|
|
||||||
// SetContainerPermissionOptions includes options for a set container permissions operation.
// Fields with a `header` tag are sent as request headers via headersFromStruct.
type SetContainerPermissionOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// SetPermissions sets up container permissions
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
	body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
	if err != nil {
		return err
	}
	params := url.Values{
		"restype": {"container"},
		"comp":    {"acl"},
	}
	headers := c.bsc.client.getStandardHeaders()
	headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
	headers["Content-Length"] = strconv.Itoa(length)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	// NOTE(review): the detailed status-code error from checkRespCode is
	// discarded here and replaced with a generic message; kept as-is since
	// callers may match on this exact string.
	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return errors.New("Unable to set permissions")
	}

	return nil
}
|
|
||||||
|
|
||||||
// GetContainerPermissionOptions includes options for a get container permissions operation.
// Fields with a `header` tag are sent as request headers via headersFromStruct.
type GetContainerPermissionOptions struct {
	Timeout   uint
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
// If timeout is 0 then it will not be passed to Azure
// leaseID will only be passed to Azure if populated
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
	params := url.Values{
		"restype": {"container"},
		"comp":    {"acl"},
	}
	headers := c.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)

	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	// The policy list comes from the XML body; the public access level comes
	// from the response headers (see buildAccessPolicy).
	var ap AccessPolicy
	err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	return buildAccessPolicy(ap, &resp.headers), nil
}
|
|
||||||
|
|
||||||
// buildAccessPolicy converts the unmarshaled ACL document plus the
// x-ms-blob-public-access response header into a ContainerPermissions value.
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
	// containerAccess. Blob, Container, empty
	containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
	permissions := ContainerPermissions{
		AccessType:     ContainerAccessType(containerAccess),
		AccessPolicies: []ContainerAccessPolicy{},
	}

	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		capd := ContainerAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
		}
		// decode the permission string ("r", "w", "d" flags) into booleans
		capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
		capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
		capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")

		permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
	}
	return &permissions
}
|
|
||||||
|
|
||||||
// DeleteContainerOptions includes options for a delete container operation.
// Fields with a `header` tag are sent as request headers via headersFromStruct.
type DeleteContainerOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
|
|
||||||
|
|
||||||
// Delete deletes the container with given name on the storage
// account. If the container does not exist returns error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) Delete(options *DeleteContainerOptions) error {
	resp, err := c.delete(options)
	if err != nil {
		return err
	}
	// drain and close the body so the connection can be reused
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
|
|
||||||
|
|
||||||
// DeleteIfExists deletes the container with given name on the storage
// account if it exists. Returns true if container is deleted with this call, or
// false if the container did not exist at the time of the Delete Container
// operation.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
	resp, err := c.delete(options)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		// 202 = deleted by this call, 404 = did not exist; both are non-errors
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}
|
|
||||||
|
|
||||||
// delete issues the DELETE request behind Delete/DeleteIfExists and returns
// the raw storage response for the caller to interpret.
func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) {
	query := url.Values{"restype": {"container"}}
	headers := c.bsc.client.getStandardHeaders()

	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)

	return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
}
|
|
||||||
|
|
||||||
// ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{
		"restype": {"container"},
		"comp":    {"list"},
	})
	var uri string
	if c.bsc.client.isServiceSASClient() {
		// service-SAS clients must carry the SAS token query parameters
		// from the stored SAS URI on every request
		q = mergeParams(q, c.sasuri.Query())
		newURI := c.sasuri
		newURI.RawQuery = q.Encode()
		uri = newURI.String()
	} else {
		uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
	}

	headers := c.bsc.client.getStandardHeaders()
	headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)

	var out BlobListResponse
	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return out, err
	}
	defer resp.body.Close()

	err = xmlUnmarshal(resp.body, &out)
	// back-link each returned blob to this container reference
	for i := range out.Blobs {
		out.Blobs[i].Container = c
	}
	return out, err
}
|
|
||||||
|
|
||||||
// ContainerMetadataOptions includes options for container metadata operations
|
|
||||||
type ContainerMetadataOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata replaces the metadata for the specified container.
|
|
||||||
//
|
|
||||||
// Some keys may be converted to Camel-Case before sending. All keys
|
|
||||||
// are returned in lower case by GetBlobMetadata. HTTP header names
|
|
||||||
// are case-insensitive so case munging should not matter to other
|
|
||||||
// applications either.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
|
|
||||||
func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
|
|
||||||
params := url.Values{
|
|
||||||
"comp": {"metadata"},
|
|
||||||
"restype": {"container"},
|
|
||||||
}
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
|
||||||
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetadata returns all user-defined metadata for the specified container.
|
|
||||||
//
|
|
||||||
// All metadata keys will be returned in lower case. (HTTP header
|
|
||||||
// names are case-insensitive.)
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
|
|
||||||
func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
|
|
||||||
params := url.Values{
|
|
||||||
"comp": {"metadata"},
|
|
||||||
"restype": {"container"},
|
|
||||||
}
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
c.writeMetadata(resp.headers)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Container) writeMetadata(h http.Header) {
|
|
||||||
c.Metadata = writeMetadata(h)
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
|
|
||||||
sil := SignedIdentifiers{
|
|
||||||
SignedIdentifiers: []SignedIdentifier{},
|
|
||||||
}
|
|
||||||
for _, capd := range policies {
|
|
||||||
permission := capd.generateContainerPermissions()
|
|
||||||
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
|
|
||||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
|
||||||
}
|
|
||||||
return xmlMarshal(sil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
|
|
||||||
// generate the permissions string (rwd).
|
|
||||||
// still want the end user API to have bool flags.
|
|
||||||
permissions = ""
|
|
||||||
|
|
||||||
if capd.CanRead {
|
|
||||||
permissions += "r"
|
|
||||||
}
|
|
||||||
|
|
||||||
if capd.CanWrite {
|
|
||||||
permissions += "w"
|
|
||||||
}
|
|
||||||
|
|
||||||
if capd.CanDelete {
|
|
||||||
permissions += "d"
|
|
||||||
}
|
|
||||||
|
|
||||||
return permissions
|
|
||||||
}
|
|
||||||
237
vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
generated
vendored
237
vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
generated
vendored
|
|
@ -1,237 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
blobCopyStatusPending = "pending"
|
|
||||||
blobCopyStatusSuccess = "success"
|
|
||||||
blobCopyStatusAborted = "aborted"
|
|
||||||
blobCopyStatusFailed = "failed"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CopyOptions includes the options for a copy blob operation
|
|
||||||
type CopyOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
Source CopyOptionsConditions
|
|
||||||
Destiny CopyOptionsConditions
|
|
||||||
RequestID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncrementalCopyOptions includes the options for an incremental copy blob operation
|
|
||||||
type IncrementalCopyOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
Destination IncrementalCopyOptionsConditions
|
|
||||||
RequestID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyOptionsConditions includes some conditional options in a copy blob operation
|
|
||||||
type CopyOptionsConditions struct {
|
|
||||||
LeaseID string
|
|
||||||
IfModifiedSince *time.Time
|
|
||||||
IfUnmodifiedSince *time.Time
|
|
||||||
IfMatch string
|
|
||||||
IfNoneMatch string
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
|
|
||||||
type IncrementalCopyOptionsConditions struct {
|
|
||||||
IfModifiedSince *time.Time
|
|
||||||
IfUnmodifiedSince *time.Time
|
|
||||||
IfMatch string
|
|
||||||
IfNoneMatch string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy starts a blob copy operation and waits for the operation to
|
|
||||||
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
|
|
||||||
// obtained using the GetURL method.) There is no SLA on blob copy and therefore
|
|
||||||
// this helper method works faster on smaller files.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
|
||||||
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
|
|
||||||
copyID, err := b.StartCopy(sourceBlob, options)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.WaitForCopy(copyID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartCopy starts a blob copy operation.
|
|
||||||
// sourceBlob parameter must be a canonical URL to the blob (can be
|
|
||||||
// obtained using the GetURL method.)
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
|
||||||
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-copy-source"] = sourceBlob
|
|
||||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
|
||||||
// source
|
|
||||||
headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
|
|
||||||
headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
|
|
||||||
headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
|
|
||||||
headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
|
|
||||||
headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
|
|
||||||
//destiny
|
|
||||||
headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
|
|
||||||
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
|
|
||||||
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
|
|
||||||
headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
|
|
||||||
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
copyID := resp.headers.Get("x-ms-copy-id")
|
|
||||||
if copyID == "" {
|
|
||||||
return "", errors.New("Got empty copy id header")
|
|
||||||
}
|
|
||||||
return copyID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AbortCopyOptions includes the options for an abort blob operation
|
|
||||||
type AbortCopyOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
|
|
||||||
// copyID is generated from StartBlobCopy function.
|
|
||||||
// currentLeaseID is required IF the destination blob has an active lease on it.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
|
|
||||||
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
|
|
||||||
params := url.Values{
|
|
||||||
"comp": {"copy"},
|
|
||||||
"copyid": {copyID},
|
|
||||||
}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-copy-action"] = "abort"
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
|
|
||||||
func (b *Blob) WaitForCopy(copyID string) error {
|
|
||||||
for {
|
|
||||||
err := b.GetProperties(nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if b.Properties.CopyID != copyID {
|
|
||||||
return errBlobCopyIDMismatch
|
|
||||||
}
|
|
||||||
|
|
||||||
switch b.Properties.CopyStatus {
|
|
||||||
case blobCopyStatusSuccess:
|
|
||||||
return nil
|
|
||||||
case blobCopyStatusPending:
|
|
||||||
continue
|
|
||||||
case blobCopyStatusAborted:
|
|
||||||
return errBlobCopyAborted
|
|
||||||
case blobCopyStatusFailed:
|
|
||||||
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob
|
|
||||||
// sourceBlob parameter must be a valid snapshot URL of the original blob.
|
|
||||||
// THe original blob mut be public, or use a Shared Access Signature.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
|
|
||||||
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
|
|
||||||
params := url.Values{"comp": {"incrementalcopy"}}
|
|
||||||
|
|
||||||
// need formatting to 7 decimal places so it's friendly to Windows and *nix
|
|
||||||
snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
|
|
||||||
u, err := url.Parse(sourceBlobURL)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
query := u.Query()
|
|
||||||
query.Add("snapshot", snapshotTimeFormatted)
|
|
||||||
encodedQuery := query.Encode()
|
|
||||||
encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
|
|
||||||
u.RawQuery = encodedQuery
|
|
||||||
snapshotURL := u.String()
|
|
||||||
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-copy-source"] = snapshotURL
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
addTimeout(params, options.Timeout)
|
|
||||||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
|
||||||
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
|
|
||||||
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
|
|
||||||
headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
|
|
||||||
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// get URI of destination blob
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
copyID := resp.headers.Get("x-ms-copy-id")
|
|
||||||
if copyID == "" {
|
|
||||||
return "", errors.New("Got empty copy id header")
|
|
||||||
}
|
|
||||||
return copyID, nil
|
|
||||||
}
|
|
||||||
238
vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
generated
vendored
238
vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go
generated
vendored
|
|
@ -1,238 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Directory represents a directory on a share.
|
|
||||||
type Directory struct {
|
|
||||||
fsc *FileServiceClient
|
|
||||||
Metadata map[string]string
|
|
||||||
Name string `xml:"Name"`
|
|
||||||
parent *Directory
|
|
||||||
Properties DirectoryProperties
|
|
||||||
share *Share
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirectoryProperties contains various properties of a directory.
|
|
||||||
type DirectoryProperties struct {
|
|
||||||
LastModified string `xml:"Last-Modified"`
|
|
||||||
Etag string `xml:"Etag"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
|
||||||
// make a List Files and Directories call.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
|
||||||
type ListDirsAndFilesParameters struct {
|
|
||||||
Prefix string
|
|
||||||
Marker string
|
|
||||||
MaxResults uint
|
|
||||||
Timeout uint
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirsAndFilesListResponse contains the response fields from
|
|
||||||
// a List Files and Directories call.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
|
||||||
type DirsAndFilesListResponse struct {
|
|
||||||
XMLName xml.Name `xml:"EnumerationResults"`
|
|
||||||
Xmlns string `xml:"xmlns,attr"`
|
|
||||||
Marker string `xml:"Marker"`
|
|
||||||
MaxResults int64 `xml:"MaxResults"`
|
|
||||||
Directories []Directory `xml:"Entries>Directory"`
|
|
||||||
Files []File `xml:"Entries>File"`
|
|
||||||
NextMarker string `xml:"NextMarker"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// builds the complete directory path for this directory object.
|
|
||||||
func (d *Directory) buildPath() string {
|
|
||||||
path := ""
|
|
||||||
current := d
|
|
||||||
for current.Name != "" {
|
|
||||||
path = "/" + current.Name + path
|
|
||||||
current = current.parent
|
|
||||||
}
|
|
||||||
return d.share.buildPath() + path
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create this directory in the associated share.
|
|
||||||
// If a directory with the same name already exists, the operation fails.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
|
||||||
func (d *Directory) Create(options *FileRequestOptions) error {
|
|
||||||
// if this is the root directory exit early
|
|
||||||
if d.parent == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
params := prepareOptions(options)
|
|
||||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
d.updateEtagAndLastModified(headers)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateIfNotExists creates this directory under the associated share if the
|
|
||||||
// directory does not exists. Returns true if the directory is newly created or
|
|
||||||
// false if the directory already exists.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
|
||||||
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
|
|
||||||
// if this is the root directory exit early
|
|
||||||
if d.parent == nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
params := prepareOptions(options)
|
|
||||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
|
||||||
if resp.statusCode == http.StatusCreated {
|
|
||||||
d.updateEtagAndLastModified(resp.headers)
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, d.FetchAttributes(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes this directory. It must be empty in order to be deleted.
|
|
||||||
// If the directory does not exist the operation fails.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
|
||||||
func (d *Directory) Delete(options *FileRequestOptions) error {
|
|
||||||
return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteIfExists removes this directory if it exists.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
|
||||||
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
|
||||||
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
|
||||||
return resp.statusCode == http.StatusAccepted, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exists returns true if this directory exists.
|
|
||||||
func (d *Directory) Exists() (bool, error) {
|
|
||||||
exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
|
|
||||||
if exists {
|
|
||||||
d.updateEtagAndLastModified(headers)
|
|
||||||
}
|
|
||||||
return exists, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchAttributes retrieves metadata for this directory.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
|
|
||||||
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
|
|
||||||
params := prepareOptions(options)
|
|
||||||
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
d.updateEtagAndLastModified(headers)
|
|
||||||
d.Metadata = getMetadataFromHeaders(headers)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDirectoryReference returns a child Directory object for this directory.
|
|
||||||
func (d *Directory) GetDirectoryReference(name string) *Directory {
|
|
||||||
return &Directory{
|
|
||||||
fsc: d.fsc,
|
|
||||||
Name: name,
|
|
||||||
parent: d,
|
|
||||||
share: d.share,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileReference returns a child File object for this directory.
|
|
||||||
func (d *Directory) GetFileReference(name string) *File {
|
|
||||||
return &File{
|
|
||||||
fsc: d.fsc,
|
|
||||||
Name: name,
|
|
||||||
parent: d,
|
|
||||||
share: d.share,
|
|
||||||
mutex: &sync.Mutex{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListDirsAndFiles returns a list of files and directories under this directory.
|
|
||||||
// It also contains a pagination token and other response details.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
|
||||||
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
|
||||||
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
|
||||||
|
|
||||||
resp, err := d.fsc.listContent(d.buildPath(), q, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer resp.body.Close()
|
|
||||||
var out DirsAndFilesListResponse
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
|
||||||
return &out, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata replaces the metadata for this directory.
|
|
||||||
//
|
|
||||||
// Some keys may be converted to Camel-Case before sending. All keys
|
|
||||||
// are returned in lower case by GetDirectoryMetadata. HTTP header names
|
|
||||||
// are case-insensitive so case munging should not matter to other
|
|
||||||
// applications either.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
|
|
||||||
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
|
|
||||||
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
d.updateEtagAndLastModified(headers)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// updates Etag and last modified date
|
|
||||||
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
|
|
||||||
d.Properties.Etag = headers.Get("Etag")
|
|
||||||
d.Properties.LastModified = headers.Get("Last-Modified")
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL gets the canonical URL to this directory.
|
|
||||||
// This method does not create a publicly accessible URL if the directory
|
|
||||||
// is private and this method does not check if the directory exists.
|
|
||||||
func (d *Directory) URL() string {
|
|
||||||
return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
|
|
||||||
}
|
|
||||||
453
vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
generated
vendored
453
vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
generated
vendored
|
|
@ -1,453 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/satori/go.uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Annotating as secure for gas scanning
|
|
||||||
/* #nosec */
|
|
||||||
const (
|
|
||||||
partitionKeyNode = "PartitionKey"
|
|
||||||
rowKeyNode = "RowKey"
|
|
||||||
etagErrorTemplate = "Etag didn't match: %v"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation")
|
|
||||||
errNilPreviousResult = errors.New("The previous results page is nil")
|
|
||||||
errNilNextLink = errors.New("There are no more pages in this query results")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Entity represents an entity inside an Azure table.
|
|
||||||
type Entity struct {
|
|
||||||
Table *Table
|
|
||||||
PartitionKey string
|
|
||||||
RowKey string
|
|
||||||
TimeStamp time.Time
|
|
||||||
OdataMetadata string
|
|
||||||
OdataType string
|
|
||||||
OdataID string
|
|
||||||
OdataEtag string
|
|
||||||
OdataEditLink string
|
|
||||||
Properties map[string]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetEntityReference returns an Entity object with the specified
|
|
||||||
// partition key and row key.
|
|
||||||
func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
|
|
||||||
return &Entity{
|
|
||||||
PartitionKey: partitionKey,
|
|
||||||
RowKey: rowKey,
|
|
||||||
Table: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// EntityOptions includes options for entity operations.
|
|
||||||
type EntityOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetEntityOptions includes options for a get entity operation
|
|
||||||
type GetEntityOptions struct {
|
|
||||||
Select []string
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get gets the referenced entity. Which properties to get can be
|
|
||||||
// specified using the select option.
|
|
||||||
// See:
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
|
|
||||||
func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
|
|
||||||
if ml == EmptyPayload {
|
|
||||||
return errEmptyPayload
|
|
||||||
}
|
|
||||||
// RowKey and PartitionKey could be lost if not included in the query
|
|
||||||
// As those are the entity identifiers, it is best if they are not lost
|
|
||||||
rk := e.RowKey
|
|
||||||
pk := e.PartitionKey
|
|
||||||
|
|
||||||
query := url.Values{
|
|
||||||
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
|
|
||||||
}
|
|
||||||
headers := e.Table.tsc.client.getStandardHeaders()
|
|
||||||
headers[headerAccept] = string(ml)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
if len(options.Select) > 0 {
|
|
||||||
query.Add("$select", strings.Join(options.Select, ","))
|
|
||||||
}
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
|
||||||
resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
respBody, err := ioutil.ReadAll(resp.body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = json.Unmarshal(respBody, e)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.PartitionKey = pk
|
|
||||||
e.RowKey = rk
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert inserts the referenced entity in its table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addReturnContentHeaders(headers, ml)

	// POST to the table path (not the entity path) performs an insert.
	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	// Read the body up front so it is fully drained regardless of status.
	data, err := ioutil.ReadAll(resp.body)
	if err != nil {
		return err
	}

	if ml != EmptyPayload {
		// The service echoes the entity back with 201 Created; refresh e
		// (including OData metadata) from the response body.
		if err = checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
			return err
		}
		if err = e.UnmarshalJSON(data); err != nil {
			return err
		}
	} else {
		// With return-no-content the service responds 204 and no body.
		if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
			return err
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// Update updates the contents of an entity. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or if the ETag is different
// than the one in Azure.
// force == true sends If-Match: * and bypasses the ETag check.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
func (e *Entity) Update(force bool, options *EntityOptions) error {
	// PUT replaces the entire entity.
	return e.updateMerge(force, http.MethodPut, options)
}
|
|
||||||
|
|
||||||
// Merge merges the contents of entity specified with PartitionKey and RowKey
// with the content specified in Properties.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
func (e *Entity) Merge(force bool, options *EntityOptions) error {
	// MERGE is a Table-service-specific verb: it updates only the
	// properties present in the request body, leaving the rest intact.
	return e.updateMerge(force, "MERGE", options)
}
|
|
||||||
|
|
||||||
// Delete deletes the entity.
|
|
||||||
// The function fails if there is no entity with the same PartitionKey and
|
|
||||||
// RowKey in the table or if the ETag is different than the one in Azure.
|
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
|
|
||||||
func (e *Entity) Delete(force bool, options *EntityOptions) error {
|
|
||||||
query, headers := options.getParameters()
|
|
||||||
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
|
|
||||||
|
|
||||||
headers = addIfMatchHeader(headers, force, e.OdataEtag)
|
|
||||||
headers = addReturnContentHeaders(headers, EmptyPayload)
|
|
||||||
|
|
||||||
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
|
||||||
resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
if resp.statusCode == http.StatusPreconditionFailed {
|
|
||||||
return fmt.Errorf(etagErrorTemplate, err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.updateTimestamp(resp.headers)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrReplace inserts an entity or replaces the existing one.
// Unlike Update, it succeeds whether or not the entity already exists.
// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
func (e *Entity) InsertOrReplace(options *EntityOptions) error {
	return e.insertOr(http.MethodPut, options)
}
|
|
||||||
|
|
||||||
// InsertOrMerge inserts an entity or merges the existing one.
// Unlike Merge, it succeeds whether or not the entity already exists.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
func (e *Entity) InsertOrMerge(options *EntityOptions) error {
	return e.insertOr("MERGE", options)
}
|
|
||||||
|
|
||||||
func (e *Entity) buildPath() string {
|
|
||||||
return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON is a custom marshaller for entity.
// It flattens PartitionKey, RowKey and Properties into one JSON object and
// adds the @odata.type annotations the Table service requires for binary,
// datetime, GUID and int64 property values.
func (e *Entity) MarshalJSON() ([]byte, error) {
	completeMap := map[string]interface{}{}
	completeMap[partitionKeyNode] = e.PartitionKey
	completeMap[rowKeyNode] = e.RowKey
	for k, v := range e.Properties {
		// Key carrying the OData type annotation for property k.
		typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
		switch t := v.(type) {
		case []byte:
			completeMap[typeKey] = OdataBinary
			completeMap[k] = string(t)
		case time.Time:
			completeMap[typeKey] = OdataDateTime
			completeMap[k] = t.Format(time.RFC3339Nano)
		case uuid.UUID:
			completeMap[typeKey] = OdataGUID
			completeMap[k] = t.String()
		case int64:
			completeMap[typeKey] = OdataInt64
			completeMap[k] = fmt.Sprintf("%v", v)
		default:
			// Other types (string, bool, int32, float64, ...) need no
			// explicit OData type annotation.
			completeMap[k] = v
		}
		if strings.HasSuffix(k, OdataTypeSuffix) {
			// The caller supplied an explicit @odata.type annotation:
			// validate that its value is one of the supported types and
			// that the annotated property itself is present.
			if !(completeMap[k] == OdataBinary ||
				completeMap[k] == OdataDateTime ||
				completeMap[k] == OdataGUID ||
				completeMap[k] == OdataInt64) {
				return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
			}
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			if _, ok := completeMap[valueKey]; !ok {
				return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
			}
		}
	}
	return json.Marshal(completeMap)
}
|
|
||||||
|
|
||||||
// UnmarshalJSON is a custom unmarshaller for entities.
// It extracts the odata.* metadata fields, PartitionKey, RowKey and
// Timestamp, then converts annotated properties back to their Go types
// ([]byte, time.Time, uuid.UUID, int64) using the @odata.type annotations.
func (e *Entity) UnmarshalJSON(data []byte) error {
	errorTemplate := "Deserializing error: %v"

	props := map[string]interface{}{}
	err := json.Unmarshal(data, &props)
	if err != nil {
		return err
	}

	// deserialize metadata
	e.OdataMetadata = stringFromMap(props, "odata.metadata")
	e.OdataType = stringFromMap(props, "odata.type")
	e.OdataID = stringFromMap(props, "odata.id")
	e.OdataEtag = stringFromMap(props, "odata.etag")
	e.OdataEditLink = stringFromMap(props, "odata.editLink")
	e.PartitionKey = stringFromMap(props, partitionKeyNode)
	e.RowKey = stringFromMap(props, rowKeyNode)

	// deserialize timestamp
	timeStamp, ok := props["Timestamp"]
	if ok {
		str, ok := timeStamp.(string)
		if !ok {
			return fmt.Errorf(errorTemplate, "Timestamp casting error")
		}
		t, err := time.Parse(time.RFC3339Nano, str)
		if err != nil {
			return fmt.Errorf(errorTemplate, err)
		}
		e.TimeStamp = t
	}
	// Timestamp is surfaced via e.TimeStamp, not as a user property.
	delete(props, "Timestamp")
	delete(props, "Timestamp@odata.type")

	// deserialize entity (user defined fields)
	for k, v := range props {
		if strings.HasSuffix(k, OdataTypeSuffix) {
			// k is a type annotation; convert the matching value property.
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			str, ok := props[valueKey].(string)
			if !ok {
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
			}
			switch v {
			case OdataBinary:
				props[valueKey] = []byte(str)
			case OdataDateTime:
				t, err := time.Parse("2006-01-02T15:04:05Z", str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = t
			case OdataGUID:
				props[valueKey] = uuid.FromStringOrNil(str)
			case OdataInt64:
				i, err := strconv.ParseInt(str, 10, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = i
			default:
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
			}
			// Drop the annotation key; only the converted value remains.
			delete(props, k)
		}
	}

	e.Properties = props
	return nil
}
|
|
||||||
|
|
||||||
// getAndDelete removes key from props and returns the value it held,
// or nil when the key was not present.
func getAndDelete(props map[string]interface{}, key string) interface{} {
	value, ok := props[key]
	if !ok {
		return nil
	}
	delete(props, key)
	return value
}
|
|
||||||
|
|
||||||
func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
|
|
||||||
if force {
|
|
||||||
h[headerIfMatch] = "*"
|
|
||||||
} else {
|
|
||||||
h[headerIfMatch] = etag
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateEtagAndTimestamp refreshes e's OData ETag from the response
// headers, then updates the timestamp from the Date header.
func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
	e.OdataEtag = headers.Get(headerEtag)
	return e.updateTimestamp(headers)
}
|
|
||||||
|
|
||||||
func (e *Entity) updateTimestamp(headers http.Header) error {
|
|
||||||
str := headers.Get(headerDate)
|
|
||||||
t, err := time.Parse(time.RFC1123, str)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Update timestamp error: %v", err)
|
|
||||||
}
|
|
||||||
e.TimeStamp = t
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// insertOr is the shared implementation behind InsertOrReplace (PUT) and
// InsertOrMerge (MERGE): it upserts the entity at its own resource path.
func (e *Entity) insertOr(verb string, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addReturnContentHeaders(headers, EmptyPayload)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	// Upserts always succeed with 204 No Content.
	if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return e.updateEtagAndTimestamp(resp.headers)
}
|
|
||||||
|
|
||||||
func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
|
|
||||||
query, headers := options.getParameters()
|
|
||||||
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
|
|
||||||
|
|
||||||
body, err := json.Marshal(e)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers = addBodyRelatedHeaders(headers, len(body))
|
|
||||||
headers = addIfMatchHeader(headers, force, e.OdataEtag)
|
|
||||||
headers = addReturnContentHeaders(headers, EmptyPayload)
|
|
||||||
|
|
||||||
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
|
|
||||||
resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
if resp.statusCode == http.StatusPreconditionFailed {
|
|
||||||
return fmt.Errorf(etagErrorTemplate, err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.updateEtagAndTimestamp(resp.headers)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringFromMap removes key from props and returns its value as a string.
// Returns "" when the key is absent or the value is not a string (the
// previous unchecked type assertion panicked on non-string values).
func stringFromMap(props map[string]interface{}, key string) string {
	value, ok := props[key]
	if !ok {
		return ""
	}
	delete(props, key)
	s, _ := value.(string)
	return s
}
|
|
||||||
|
|
||||||
func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
|
|
||||||
query := url.Values{}
|
|
||||||
headers := map[string]string{}
|
|
||||||
if options != nil {
|
|
||||||
query = addTimeout(query, options.Timeout)
|
|
||||||
headers = headersFromStruct(*options)
|
|
||||||
}
|
|
||||||
return query, headers
|
|
||||||
}
|
|
||||||
476
vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
generated
vendored
476
vendor/github.com/Azure/azure-sdk-for-go/storage/file.go
generated
vendored
|
|
@ -1,476 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fourMB is the maximum size in bytes of a single file range (4 MiB).
const fourMB = uint64(4194304)

// oneTB is the maximum size in bytes of a single file (1 TiB).
const oneTB = uint64(1099511627776)

// File represents a file on a share.
type File struct {
	fsc      *FileServiceClient
	Metadata map[string]string
	Name     string `xml:"Name"`
	parent   *Directory
	Properties FileProperties `xml:"Properties"`
	share      *Share
	FileCopyProperties FileCopyState
	// mutex serializes property updates when multiple goroutines call
	// WriteRange concurrently on the same *File.
	mutex *sync.Mutex
}

// FileProperties contains various properties of a file.
// Fields with a `header` tag are sent as request headers by
// headersFromStruct; Etag and LastModified are response-only.
type FileProperties struct {
	CacheControl string `header:"x-ms-cache-control"`
	Disposition  string `header:"x-ms-content-disposition"`
	Encoding     string `header:"x-ms-content-encoding"`
	Etag         string
	Language     string `header:"x-ms-content-language"`
	LastModified string
	Length       uint64 `xml:"Content-Length" header:"x-ms-content-length"`
	MD5          string `header:"x-ms-content-md5"`
	Type         string `header:"x-ms-content-type"`
}

// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
	CompletionTime string
	ID             string `header:"x-ms-copy-id"`
	Progress       string
	Source         string
	Status         string `header:"x-ms-copy-status"`
	StatusDesc     string
}

// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
	Body       io.ReadCloser
	ContentMD5 string
}

// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but could expand.
type FileRequestOptions struct {
	Timeout uint // timeout duration in seconds.
}
|
|
||||||
|
|
||||||
func prepareOptions(options *FileRequestOptions) url.Values {
|
|
||||||
params := url.Values{}
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
}
|
|
||||||
return params
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileRanges contains a list of file range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRanges struct {
	ContentLength uint64
	LastModified  string
	ETag          string
	FileRanges    []FileRange `xml:"Range"`
}

// FileRange contains range information for a file.
// Start and End are inclusive byte offsets.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}
|
|
||||||
|
|
||||||
func (fr FileRange) String() string {
|
|
||||||
return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
|
|
||||||
}
|
|
||||||
|
|
||||||
// builds the complete file path for this file object
|
|
||||||
func (f *File) buildPath() string {
|
|
||||||
return f.parent.buildPath() + "/" + f.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearRange releases the specified range of space in a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
	var timeout *uint
	if options != nil {
		timeout = &options.Timeout
	}
	// A nil reader makes modifyRange issue an "x-ms-write: clear" request.
	headers, err := f.modifyRange(nil, fileRange, timeout, nil)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}
|
|
||||||
|
|
||||||
// Create creates a new file or replaces an existing one.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
|
|
||||||
func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
|
|
||||||
if maxSize > oneTB {
|
|
||||||
return fmt.Errorf("max file size is 1TB")
|
|
||||||
}
|
|
||||||
params := prepareOptions(options)
|
|
||||||
headers := headersFromStruct(f.Properties)
|
|
||||||
headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
|
|
||||||
headers["x-ms-type"] = "file"
|
|
||||||
|
|
||||||
outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
f.Properties.Length = maxSize
|
|
||||||
f.updateEtagAndLastModified(outputHeaders)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyFile operation copies a file/blob from the sourceURL to the path provided.
// The copy is asynchronous: the returned FileCopyProperties.ID and .Status
// can be used to track its progress.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
	extraHeaders := map[string]string{
		"x-ms-type":        "file",
		"x-ms-copy-source": sourceURL,
	}
	params := prepareOptions(options)

	// The service accepts the copy request with 202 Accepted.
	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
	return nil
}
|
|
||||||
|
|
||||||
// Delete immediately removes this file from the storage account.
// It fails if the file does not exist.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) Delete(options *FileRequestOptions) error {
	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
}
|
|
||||||
|
|
||||||
// DeleteIfExists removes this file if it exists.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
|
|
||||||
func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
|
||||||
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
|
||||||
return resp.statusCode == http.StatusAccepted, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileOptions includes options for a get file operation.
type GetFileOptions struct {
	Timeout uint // server-side timeout in seconds
	// GetContentMD5 requests the Content-MD5 of the returned range;
	// the range must then be at most 4 MiB.
	GetContentMD5 bool
}
|
|
||||||
|
|
||||||
// DownloadToStream operation downloads the file.
// On success the caller owns the returned ReadCloser and must Close it.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
	params := prepareOptions(options)
	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		// Drain and close the body here since it is not handed to the caller.
		readAndCloseBody(resp.body)
		return nil, err
	}
	return resp.body, nil
}
|
|
||||||
|
|
||||||
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
// On success the caller owns fs.Body and must Close it.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
	extraHeaders := map[string]string{
		"Range": fileRange.String(),
	}
	params := url.Values{}
	if options != nil {
		if options.GetContentMD5 {
			// The service only computes a range MD5 for ranges <= 4 MiB.
			if isRangeTooBig(fileRange) {
				return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
			}
			extraHeaders["x-ms-range-get-content-md5"] = "true"
		}
		params = addTimeout(params, options.Timeout)
	}

	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
	if err != nil {
		return fs, err
	}

	// 206 Partial Content for a range; 200 OK if the range covers the file.
	if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
		readAndCloseBody(resp.body)
		return fs, err
	}

	fs.Body = resp.body
	if options != nil && options.GetContentMD5 {
		fs.ContentMD5 = resp.headers.Get("Content-MD5")
	}
	return fs, nil
}
|
|
||||||
|
|
||||||
// Exists returns true if this file exists.
|
|
||||||
func (f *File) Exists() (bool, error) {
|
|
||||||
exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
|
|
||||||
if exists {
|
|
||||||
f.updateEtagAndLastModified(headers)
|
|
||||||
f.updateProperties(headers)
|
|
||||||
}
|
|
||||||
return exists, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchAttributes updates metadata and properties for this file.
// It issues a HEAD request and refreshes ETag, Last-Modified, the content
// properties and Metadata from the response headers.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
func (f *File) FetchAttributes(options *FileRequestOptions) error {
	params := prepareOptions(options)
	headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	f.updateProperties(headers)
	f.Metadata = getMetadataFromHeaders(headers)
	return nil
}
|
|
||||||
|
|
||||||
// returns true if the range is larger than 4MB
|
|
||||||
func isRangeTooBig(fileRange FileRange) bool {
|
|
||||||
if fileRange.End-fileRange.Start > fourMB {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListRangesOptions includes options for a list file ranges operation.
type ListRangesOptions struct {
	Timeout uint // server-side timeout in seconds
	// ListRange, when non-nil, restricts the listing to this byte range.
	ListRange *FileRange
}
|
|
||||||
|
|
||||||
// ListRanges returns the list of valid ranges for this file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
	params := url.Values{"comp": {"rangelist"}}

	// add optional range to list
	var headers map[string]string
	if options != nil {
		params = addTimeout(params, options.Timeout)
		if options.ListRange != nil {
			headers = make(map[string]string)
			headers["Range"] = options.ListRange.String()
		}
	}

	resp, err := f.fsc.listContent(f.buildPath(), params, headers)
	if err != nil {
		return nil, err
	}

	defer resp.body.Close()
	var cl uint64
	cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
	if err != nil {
		// Drain the body so the connection can be reused before bailing out.
		ioutil.ReadAll(resp.body)
		return nil, err
	}

	var out FileRanges
	out.ContentLength = cl
	out.ETag = resp.headers.Get("ETag")
	out.LastModified = resp.headers.Get("Last-Modified")

	err = xmlUnmarshal(resp.body, &out)
	return &out, err
}
|
|
||||||
|
|
||||||
// modifyRange issues a Put Range request against this file: it writes the
// given bytes to fileRange when bytes is non-nil, or clears the range when
// bytes is nil. It returns the response headers so callers can refresh
// Etag/Last-Modified. Not supported by the storage emulator.
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
	if err := f.fsc.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	if fileRange.End < fileRange.Start {
		return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if bytes != nil && isRangeTooBig(fileRange) {
		return nil, errors.New("range cannot exceed 4MB in size")
	}

	params := url.Values{"comp": {"range"}}
	if timeout != nil {
		params = addTimeout(params, *timeout)
	}

	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)

	// default to clear
	write := "clear"
	cl := uint64(0)

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		// Range bounds are inclusive, hence the +1.
		cl = (fileRange.End - fileRange.Start) + 1
	}

	extraHeaders := map[string]string{
		"Content-Length": strconv.FormatUint(cl, 10),
		"Range":          fileRange.String(),
		"x-ms-write":     write,
	}

	if contentMD5 != nil {
		extraHeaders["Content-MD5"] = *contentMD5
	}

	headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
	resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
	if err != nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)
	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
|
|
||||||
|
|
||||||
// SetMetadata replaces the metadata for this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
func (f *File) SetMetadata(options *FileRequestOptions) error {
	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}
|
|
||||||
|
|
||||||
// SetProperties sets system properties on this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetFileProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
func (f *File) SetProperties(options *FileRequestOptions) error {
	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}
|
|
||||||
|
|
||||||
// updateEtagAndLastModified refreshes f's cached Etag and last modified
// date from the given response headers.
func (f *File) updateEtagAndLastModified(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
}
|
|
||||||
|
|
||||||
// updates file properties from the specified HTTP header
|
|
||||||
func (f *File) updateProperties(header http.Header) {
|
|
||||||
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
f.Properties.Length = size
|
|
||||||
}
|
|
||||||
|
|
||||||
f.updateEtagAndLastModified(header)
|
|
||||||
f.Properties.CacheControl = header.Get("Cache-Control")
|
|
||||||
f.Properties.Disposition = header.Get("Content-Disposition")
|
|
||||||
f.Properties.Encoding = header.Get("Content-Encoding")
|
|
||||||
f.Properties.Language = header.Get("Content-Language")
|
|
||||||
f.Properties.MD5 = header.Get("Content-MD5")
|
|
||||||
f.Properties.Type = header.Get("Content-Type")
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private and this method does not check if the file exists.
func (f *File) URL() string {
	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
}
|
|
||||||
|
|
||||||
// WriteRangeOptions includes options for a write file range operation.
type WriteRangeOptions struct {
	Timeout uint // server-side timeout in seconds
	// ContentMD5, when set, is sent as Content-MD5 so the service can
	// verify the integrity of the uploaded range.
	ContentMD5 string
}
|
|
||||||
|
|
||||||
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
// a maximum size of 4MB.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
	if bytes == nil {
		return errors.New("bytes cannot be nil")
	}
	var timeout *uint
	var md5 *string
	if options != nil {
		timeout = &options.Timeout
		md5 = &options.ContentMD5
	}

	headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
	if err != nil {
		return err
	}
	// it's perfectly legal for multiple go routines to call WriteRange
	// on the same *File (e.g. concurrently writing non-overlapping ranges)
	// so we must take the file mutex before updating our properties.
	f.mutex.Lock()
	f.updateEtagAndLastModified(headers)
	f.mutex.Unlock()
	return nil
}
|
|
||||||
338
vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go
generated
vendored
338
vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go
generated
vendored
|
|
@ -1,338 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileServiceClient contains operations for Microsoft Azure File Service.
|
|
||||||
type FileServiceClient struct {
|
|
||||||
client Client
|
|
||||||
auth authentication
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListSharesParameters defines the set of customizable parameters to make a
|
|
||||||
// List Shares call.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
|
|
||||||
type ListSharesParameters struct {
|
|
||||||
Prefix string
|
|
||||||
Marker string
|
|
||||||
Include string
|
|
||||||
MaxResults uint
|
|
||||||
Timeout uint
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShareListResponse contains the response fields from
|
|
||||||
// ListShares call.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
|
|
||||||
type ShareListResponse struct {
|
|
||||||
XMLName xml.Name `xml:"EnumerationResults"`
|
|
||||||
Xmlns string `xml:"xmlns,attr"`
|
|
||||||
Prefix string `xml:"Prefix"`
|
|
||||||
Marker string `xml:"Marker"`
|
|
||||||
NextMarker string `xml:"NextMarker"`
|
|
||||||
MaxResults int64 `xml:"MaxResults"`
|
|
||||||
Shares []Share `xml:"Shares>Share"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type compType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
compNone compType = ""
|
|
||||||
compList compType = "list"
|
|
||||||
compMetadata compType = "metadata"
|
|
||||||
compProperties compType = "properties"
|
|
||||||
compRangeList compType = "rangelist"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (ct compType) String() string {
|
|
||||||
return string(ct)
|
|
||||||
}
|
|
||||||
|
|
||||||
type resourceType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
resourceDirectory resourceType = "directory"
|
|
||||||
resourceFile resourceType = ""
|
|
||||||
resourceShare resourceType = "share"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (rt resourceType) String() string {
|
|
||||||
return string(rt)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p ListSharesParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
|
|
||||||
if p.Prefix != "" {
|
|
||||||
out.Set("prefix", p.Prefix)
|
|
||||||
}
|
|
||||||
if p.Marker != "" {
|
|
||||||
out.Set("marker", p.Marker)
|
|
||||||
}
|
|
||||||
if p.Include != "" {
|
|
||||||
out.Set("include", p.Include)
|
|
||||||
}
|
|
||||||
if p.MaxResults != 0 {
|
|
||||||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
|
||||||
}
|
|
||||||
if p.Timeout != 0 {
|
|
||||||
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
|
||||||
}
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p ListDirsAndFilesParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
|
|
||||||
if p.Prefix != "" {
|
|
||||||
out.Set("prefix", p.Prefix)
|
|
||||||
}
|
|
||||||
if p.Marker != "" {
|
|
||||||
out.Set("marker", p.Marker)
|
|
||||||
}
|
|
||||||
if p.MaxResults != 0 {
|
|
||||||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
|
||||||
}
|
|
||||||
out = addTimeout(out, p.Timeout)
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns url.Values for the specified types
|
|
||||||
func getURLInitValues(comp compType, res resourceType) url.Values {
|
|
||||||
values := url.Values{}
|
|
||||||
if comp != compNone {
|
|
||||||
values.Set("comp", comp.String())
|
|
||||||
}
|
|
||||||
if res != resourceFile {
|
|
||||||
values.Set("restype", res.String())
|
|
||||||
}
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetShareReference returns a Share object for the specified share name.
|
|
||||||
func (f *FileServiceClient) GetShareReference(name string) *Share {
|
|
||||||
return &Share{
|
|
||||||
fsc: f,
|
|
||||||
Name: name,
|
|
||||||
Properties: ShareProperties{
|
|
||||||
Quota: -1,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListShares returns the list of shares in a storage account along with
|
|
||||||
// pagination token and other response details.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
|
|
||||||
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
|
|
||||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
|
||||||
|
|
||||||
var out ShareListResponse
|
|
||||||
resp, err := f.listContent("", q, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
|
||||||
|
|
||||||
// assign our client to the newly created Share objects
|
|
||||||
for i := range out.Shares {
|
|
||||||
out.Shares[i].fsc = &f
|
|
||||||
}
|
|
||||||
return &out, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetServiceProperties gets the properties of your storage account's file service.
|
|
||||||
// File service does not support logging
|
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
|
|
||||||
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
|
||||||
return f.client.getServiceProperties(fileServiceName, f.auth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetServiceProperties sets the properties of your storage account's file service.
|
|
||||||
// File service does not support logging
|
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
|
|
||||||
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
|
|
||||||
return f.client.setServiceProperties(props, fileServiceName, f.auth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// retrieves directory or share content
|
|
||||||
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
|
||||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
|
||||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
|
||||||
|
|
||||||
resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns true if the specified resource exists
|
|
||||||
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return false, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
|
|
||||||
headers := f.client.getStandardHeaders()
|
|
||||||
|
|
||||||
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
|
||||||
return resp.statusCode == http.StatusOK, resp.headers, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// creates a resource depending on the specified resource type
|
|
||||||
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
|
|
||||||
resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// creates a resource depending on the specified resource type, doesn't close the response body
|
|
||||||
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
values := getURLInitValues(compNone, res)
|
|
||||||
combinedParams := mergeParams(values, urlParams)
|
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
|
|
||||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
|
||||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
|
||||||
|
|
||||||
return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns HTTP header data for the specified directory or share
|
|
||||||
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
|
|
||||||
resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp.headers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// gets the specified resource, doesn't close the response body
|
|
||||||
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*storageResponse, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
params = mergeParams(params, getURLInitValues(comp, res))
|
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
|
||||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
|
||||||
|
|
||||||
return f.client.exec(verb, uri, headers, nil, f.auth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// deletes the resource and returns the response
|
|
||||||
func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
|
|
||||||
resp, err := f.deleteResourceNoClose(path, res, options)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
|
||||||
}
|
|
||||||
|
|
||||||
// deletes the resource and returns the response, doesn't close the response body
|
|
||||||
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*storageResponse, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
|
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, values)
|
|
||||||
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// merges metadata into extraHeaders and returns extraHeaders
|
|
||||||
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
|
|
||||||
if metadata == nil && extraHeaders == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if extraHeaders == nil {
|
|
||||||
extraHeaders = make(map[string]string)
|
|
||||||
}
|
|
||||||
for k, v := range metadata {
|
|
||||||
extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
|
|
||||||
}
|
|
||||||
return extraHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
// sets extra header data for the specified resource
|
|
||||||
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
|
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
|
|
||||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
|
||||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
|
||||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
|
||||||
|
|
||||||
resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
|
||||||
}
|
|
||||||
|
|
||||||
//checkForStorageEmulator determines if the client is setup for use with
|
|
||||||
//Azure Storage Emulator, and returns a relevant error
|
|
||||||
func (f FileServiceClient) checkForStorageEmulator() error {
|
|
||||||
if f.client.accountName == StorageEmulatorAccountName {
|
|
||||||
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
201
vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
generated
vendored
201
vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
generated
vendored
|
|
@ -1,201 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// lease constants.
|
|
||||||
const (
|
|
||||||
leaseHeaderPrefix = "x-ms-lease-"
|
|
||||||
headerLeaseID = "x-ms-lease-id"
|
|
||||||
leaseAction = "x-ms-lease-action"
|
|
||||||
leaseBreakPeriod = "x-ms-lease-break-period"
|
|
||||||
leaseDuration = "x-ms-lease-duration"
|
|
||||||
leaseProposedID = "x-ms-proposed-lease-id"
|
|
||||||
leaseTime = "x-ms-lease-time"
|
|
||||||
|
|
||||||
acquireLease = "acquire"
|
|
||||||
renewLease = "renew"
|
|
||||||
changeLease = "change"
|
|
||||||
releaseLease = "release"
|
|
||||||
breakLease = "break"
|
|
||||||
)
|
|
||||||
|
|
||||||
// leasePut is common PUT code for the various acquire/release/break etc functions.
|
|
||||||
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
|
|
||||||
params := url.Values{"comp": {"lease"}}
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp.headers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LeaseOptions includes options for all operations regarding leasing blobs
|
|
||||||
type LeaseOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
Origin string `header:"Origin"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AcquireLease creates a lease for a blob
|
|
||||||
// returns leaseID acquired
|
|
||||||
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
|
|
||||||
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|
||||||
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers[leaseAction] = acquireLease
|
|
||||||
|
|
||||||
if leaseTimeInSeconds == -1 {
|
|
||||||
// Do nothing, but don't trigger the following clauses.
|
|
||||||
} else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
|
|
||||||
leaseTimeInSeconds = 60
|
|
||||||
} else if leaseTimeInSeconds < 15 {
|
|
||||||
leaseTimeInSeconds = 15
|
|
||||||
}
|
|
||||||
|
|
||||||
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
|
|
||||||
|
|
||||||
if proposedLeaseID != "" {
|
|
||||||
headers[leaseProposedID] = proposedLeaseID
|
|
||||||
}
|
|
||||||
|
|
||||||
respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
|
||||||
|
|
||||||
if returnedLeaseID != "" {
|
|
||||||
return returnedLeaseID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", errors.New("LeaseID not returned")
|
|
||||||
}
|
|
||||||
|
|
||||||
// BreakLease breaks the lease for a blob
|
|
||||||
// Returns the timeout remaining in the lease in seconds
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|
||||||
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers[leaseAction] = breakLease
|
|
||||||
return b.breakLeaseCommon(headers, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BreakLeaseWithBreakPeriod breaks the lease for a blob
|
|
||||||
// breakPeriodInSeconds is used to determine how long until new lease can be created.
|
|
||||||
// Returns the timeout remaining in the lease in seconds
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|
||||||
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers[leaseAction] = breakLease
|
|
||||||
headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
|
|
||||||
return b.breakLeaseCommon(headers, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
// breakLeaseCommon is common code for both version of BreakLease (with and without break period)
|
|
||||||
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
|
|
||||||
|
|
||||||
respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
|
|
||||||
if breakTimeoutStr != "" {
|
|
||||||
breakTimeout, err = strconv.Atoi(breakTimeoutStr)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return breakTimeout, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangeLease changes a lease ID for a blob
|
|
||||||
// Returns the new LeaseID acquired
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|
||||||
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers[leaseAction] = changeLease
|
|
||||||
headers[headerLeaseID] = currentLeaseID
|
|
||||||
headers[leaseProposedID] = proposedLeaseID
|
|
||||||
|
|
||||||
respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
|
||||||
if newLeaseID != "" {
|
|
||||||
return newLeaseID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", errors.New("LeaseID not returned")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseLease releases the lease for a blob
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
|
||||||
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers[leaseAction] = releaseLease
|
|
||||||
headers[headerLeaseID] = currentLeaseID
|
|
||||||
|
|
||||||
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
|
|
||||||
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers[leaseAction] = renewLease
|
|
||||||
headers[headerLeaseID] = currentLeaseID
|
|
||||||
|
|
||||||
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
167
vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
generated
vendored
167
vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
generated
vendored
|
|
@ -1,167 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Message represents an Azure message.
|
|
||||||
type Message struct {
|
|
||||||
Queue *Queue
|
|
||||||
Text string `xml:"MessageText"`
|
|
||||||
ID string `xml:"MessageId"`
|
|
||||||
Insertion TimeRFC1123 `xml:"InsertionTime"`
|
|
||||||
Expiration TimeRFC1123 `xml:"ExpirationTime"`
|
|
||||||
PopReceipt string `xml:"PopReceipt"`
|
|
||||||
NextVisible TimeRFC1123 `xml:"TimeNextVisible"`
|
|
||||||
DequeueCount int `xml:"DequeueCount"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Message) buildPath() string {
|
|
||||||
return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutMessageOptions is the set of options can be specified for Put Messsage
|
|
||||||
// operation. A zero struct does not use any preferences for the request.
|
|
||||||
type PutMessageOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
VisibilityTimeout int
|
|
||||||
MessageTTL int
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put operation adds a new message to the back of the message queue.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
|
|
||||||
func (m *Message) Put(options *PutMessageOptions) error {
|
|
||||||
query := url.Values{}
|
|
||||||
headers := m.Queue.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
req := putMessageRequest{MessageText: m.Text}
|
|
||||||
body, nn, err := xmlMarshal(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers["Content-Length"] = strconv.Itoa(nn)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
if options.VisibilityTimeout != 0 {
|
|
||||||
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
if options.MessageTTL != 0 {
|
|
||||||
query.Set("messagettl", strconv.Itoa(options.MessageTTL))
|
|
||||||
}
|
|
||||||
query = addTimeout(query, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
|
|
||||||
resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
err = xmlUnmarshal(resp.body, m)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateMessageOptions is the set of options can be specified for Update Messsage
|
|
||||||
// operation. A zero struct does not use any preferences for the request.
|
|
||||||
type UpdateMessageOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
VisibilityTimeout int
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update operation updates the specified message.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
|
|
||||||
func (m *Message) Update(options *UpdateMessageOptions) error {
|
|
||||||
query := url.Values{}
|
|
||||||
if m.PopReceipt != "" {
|
|
||||||
query.Set("popreceipt", m.PopReceipt)
|
|
||||||
}
|
|
||||||
|
|
||||||
headers := m.Queue.qsc.client.getStandardHeaders()
|
|
||||||
req := putMessageRequest{MessageText: m.Text}
|
|
||||||
body, nn, err := xmlMarshal(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers["Content-Length"] = strconv.Itoa(nn)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
if options.VisibilityTimeout != 0 {
|
|
||||||
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
query = addTimeout(query, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
|
|
||||||
|
|
||||||
resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
m.PopReceipt = resp.headers.Get("x-ms-popreceipt")
|
|
||||||
nextTimeStr := resp.headers.Get("x-ms-time-next-visible")
|
|
||||||
if nextTimeStr != "" {
|
|
||||||
nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.NextVisible = TimeRFC1123(nextTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete operation deletes the specified message.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
|
||||||
func (m *Message) Delete(options *QueueServiceOptions) error {
|
|
||||||
params := url.Values{"popreceipt": {m.PopReceipt}}
|
|
||||||
headers := m.Queue.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
type putMessageRequest struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessage"`
|
|
||||||
MessageText string `xml:"MessageText"`
|
|
||||||
}
|
|
||||||
47
vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
generated
vendored
47
vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
generated
vendored
|
|
@ -1,47 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// MetadataLevel determines if operations should return a paylod,
|
|
||||||
// and it level of detail.
|
|
||||||
type MetadataLevel string
|
|
||||||
|
|
||||||
// This consts are meant to help with Odata supported operations
|
|
||||||
const (
|
|
||||||
OdataTypeSuffix = "@odata.type"
|
|
||||||
|
|
||||||
// Types
|
|
||||||
|
|
||||||
OdataBinary = "Edm.Binary"
|
|
||||||
OdataDateTime = "Edm.DateTime"
|
|
||||||
OdataGUID = "Edm.Guid"
|
|
||||||
OdataInt64 = "Edm.Int64"
|
|
||||||
|
|
||||||
// Query options
|
|
||||||
|
|
||||||
OdataFilter = "$filter"
|
|
||||||
OdataOrderBy = "$orderby"
|
|
||||||
OdataTop = "$top"
|
|
||||||
OdataSkip = "$skip"
|
|
||||||
OdataCount = "$count"
|
|
||||||
OdataExpand = "$expand"
|
|
||||||
OdataSelect = "$select"
|
|
||||||
OdataSearch = "$search"
|
|
||||||
|
|
||||||
EmptyPayload MetadataLevel = ""
|
|
||||||
NoMetadata MetadataLevel = "application/json;odata=nometadata"
|
|
||||||
MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
|
|
||||||
FullMetadata MetadataLevel = "application/json;odata=fullmetadata"
|
|
||||||
)
|
|
||||||
204
vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
generated
vendored
204
vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
generated
vendored
|
|
@ -1,204 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetPageRangesResponse contains the response fields from
|
|
||||||
// Get Page Ranges call.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
|
||||||
type GetPageRangesResponse struct {
|
|
||||||
XMLName xml.Name `xml:"PageList"`
|
|
||||||
PageList []PageRange `xml:"PageRange"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PageRange contains information about a page of a page blob from
|
|
||||||
// Get Pages Range call.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
|
||||||
type PageRange struct {
|
|
||||||
Start int64 `xml:"Start"`
|
|
||||||
End int64 `xml:"End"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errBlobCopyAborted = errors.New("storage: blob copy is aborted")
|
|
||||||
errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
|
|
||||||
)
|
|
||||||
|
|
||||||
// PutPageOptions includes the options for a put page operation
|
|
||||||
type PutPageOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"`
|
|
||||||
IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"`
|
|
||||||
IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"`
|
|
||||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
|
||||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
|
||||||
IfMatch string `header:"If-Match"`
|
|
||||||
IfNoneMatch string `header:"If-None-Match"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteRange writes a range of pages to a page blob.
|
|
||||||
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
|
||||||
// multiplies by 512.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
|
||||||
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
|
|
||||||
if bytes == nil {
|
|
||||||
return errors.New("bytes cannot be nil")
|
|
||||||
}
|
|
||||||
return b.modifyRange(blobRange, bytes, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearRange clears the given range in a page blob.
|
|
||||||
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
|
||||||
// multiplies by 512.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
|
||||||
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
|
|
||||||
return b.modifyRange(blobRange, nil, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
|
|
||||||
if blobRange.End < blobRange.Start {
|
|
||||||
return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
|
|
||||||
}
|
|
||||||
if blobRange.Start%512 != 0 {
|
|
||||||
return errors.New("the value for rangeStart must be a multiple of 512")
|
|
||||||
}
|
|
||||||
if blobRange.End%512 != 511 {
|
|
||||||
return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
|
|
||||||
}
|
|
||||||
|
|
||||||
params := url.Values{"comp": {"page"}}
|
|
||||||
|
|
||||||
// default to clear
|
|
||||||
write := "clear"
|
|
||||||
var cl uint64
|
|
||||||
|
|
||||||
// if bytes is not nil then this is an update operation
|
|
||||||
if bytes != nil {
|
|
||||||
write = "update"
|
|
||||||
cl = (blobRange.End - blobRange.Start) + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-blob-type"] = string(BlobTypePage)
|
|
||||||
headers["x-ms-page-write"] = write
|
|
||||||
headers["x-ms-range"] = blobRange.String()
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", cl)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPageRangesOptions includes the options for a get page ranges operation
|
|
||||||
type GetPageRangesOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
Snapshot *time.Time
|
|
||||||
PreviousSnapshot *time.Time
|
|
||||||
Range *BlobRange
|
|
||||||
LeaseID string `header:"x-ms-lease-id"`
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPageRanges returns the list of valid page ranges for a page blob.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
|
|
||||||
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
|
|
||||||
params := url.Values{"comp": {"pagelist"}}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
params = addSnapshot(params, options.Snapshot)
|
|
||||||
if options.PreviousSnapshot != nil {
|
|
||||||
params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
|
|
||||||
}
|
|
||||||
if options.Range != nil {
|
|
||||||
headers["Range"] = options.Range.String()
|
|
||||||
}
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
var out GetPageRangesResponse
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutPageBlob initializes an empty page blob with specified name and maximum
|
|
||||||
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
|
|
||||||
// be created using this method before writing pages.
|
|
||||||
//
|
|
||||||
// See CreateBlockBlobFromReader for more info on creating blobs.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
|
||||||
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
|
|
||||||
if b.Properties.ContentLength%512 != 0 {
|
|
||||||
return errors.New("Content length must be aligned to a 512-byte boundary")
|
|
||||||
}
|
|
||||||
|
|
||||||
params := url.Values{}
|
|
||||||
headers := b.Container.bsc.client.getStandardHeaders()
|
|
||||||
headers["x-ms-blob-type"] = string(BlobTypePage)
|
|
||||||
headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
|
|
||||||
headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
|
||||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return b.respondCreation(resp, BlobTypePage)
|
|
||||||
}
|
|
||||||
441
vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
generated
vendored
441
vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go
generated
vendored
|
|
@ -1,441 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// casing is per Golang's http.Header canonicalizing the header names.
|
|
||||||
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
|
|
||||||
)
|
|
||||||
|
|
||||||
// QueueAccessPolicy represents each access policy in the queue ACL.
|
|
||||||
type QueueAccessPolicy struct {
|
|
||||||
ID string
|
|
||||||
StartTime time.Time
|
|
||||||
ExpiryTime time.Time
|
|
||||||
CanRead bool
|
|
||||||
CanAdd bool
|
|
||||||
CanUpdate bool
|
|
||||||
CanProcess bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueuePermissions represents the queue ACLs.
|
|
||||||
type QueuePermissions struct {
|
|
||||||
AccessPolicies []QueueAccessPolicy
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetQueuePermissionOptions includes options for a set queue permissions operation
|
|
||||||
type SetQueuePermissionOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Queue represents an Azure queue.
|
|
||||||
type Queue struct {
|
|
||||||
qsc *QueueServiceClient
|
|
||||||
Name string
|
|
||||||
Metadata map[string]string
|
|
||||||
AproxMessageCount uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *Queue) buildPath() string {
|
|
||||||
return fmt.Sprintf("/%s", q.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *Queue) buildPathMessages() string {
|
|
||||||
return fmt.Sprintf("%s/messages", q.buildPath())
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueueServiceOptions includes options for some queue service operations
|
|
||||||
type QueueServiceOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create operation creates a queue under the given account.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
|
|
||||||
func (q *Queue) Create(options *QueueServiceOptions) error {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete operation permanently deletes the specified queue.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
|
|
||||||
func (q *Queue) Delete(options *QueueServiceOptions) error {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exists returns true if a queue with given name exists.
|
|
||||||
func (q *Queue) Exists() (bool, error) {
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
|
|
||||||
if resp != nil {
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
|
||||||
return resp.statusCode == http.StatusOK, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata operation sets user-defined metadata on the specified queue.
|
|
||||||
// Metadata is associated with the queue as name-value pairs.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
|
|
||||||
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
|
|
||||||
params := url.Values{"comp": {"metadata"}}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
|
||||||
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetadata operation retrieves user-defined metadata and queue
|
|
||||||
// properties on the specified queue. Metadata is associated with
|
|
||||||
// the queue as name-values pairs.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
|
|
||||||
//
|
|
||||||
// Because the way Golang's http client (and http.Header in particular)
|
|
||||||
// canonicalize header names, the returned metadata names would always
|
|
||||||
// be all lower case.
|
|
||||||
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
|
|
||||||
params := url.Values{"comp": {"metadata"}}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
|
|
||||||
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
aproxMessagesStr := resp.headers.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
|
|
||||||
if aproxMessagesStr != "" {
|
|
||||||
aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
q.AproxMessageCount = aproxMessages
|
|
||||||
}
|
|
||||||
|
|
||||||
q.Metadata = getMetadataFromHeaders(resp.headers)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessageReference returns a message object with the specified text.
|
|
||||||
func (q *Queue) GetMessageReference(text string) *Message {
|
|
||||||
return &Message{
|
|
||||||
Queue: q,
|
|
||||||
Text: text,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessagesOptions is the set of options can be specified for Get
|
|
||||||
// Messsages operation. A zero struct does not use any preferences for the
|
|
||||||
// request.
|
|
||||||
type GetMessagesOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
NumOfMessages int
|
|
||||||
VisibilityTimeout int
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type messages struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
|
||||||
Messages []Message `xml:"QueueMessage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessages operation retrieves one or more messages from the front of the
|
|
||||||
// queue.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
|
|
||||||
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
|
|
||||||
query := url.Values{}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
if options.NumOfMessages != 0 {
|
|
||||||
query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
|
|
||||||
}
|
|
||||||
if options.VisibilityTimeout != 0 {
|
|
||||||
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
query = addTimeout(query, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
|
|
||||||
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return []Message{}, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
var out messages
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
|
||||||
if err != nil {
|
|
||||||
return []Message{}, err
|
|
||||||
}
|
|
||||||
for i := range out.Messages {
|
|
||||||
out.Messages[i].Queue = q
|
|
||||||
}
|
|
||||||
return out.Messages, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessagesOptions is the set of options can be specified for Peek
|
|
||||||
// Messsage operation. A zero struct does not use any preferences for the
|
|
||||||
// request.
|
|
||||||
type PeekMessagesOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
NumOfMessages int
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessages retrieves one or more messages from the front of the queue, but
|
|
||||||
// does not alter the visibility of the message.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
|
|
||||||
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
|
|
||||||
query := url.Values{"peekonly": {"true"}} // Required for peek operation
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
if options.NumOfMessages != 0 {
|
|
||||||
query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
|
|
||||||
}
|
|
||||||
query = addTimeout(query, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
|
|
||||||
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return []Message{}, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
var out messages
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
|
||||||
if err != nil {
|
|
||||||
return []Message{}, err
|
|
||||||
}
|
|
||||||
for i := range out.Messages {
|
|
||||||
out.Messages[i].Queue = q
|
|
||||||
}
|
|
||||||
return out.Messages, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearMessages operation deletes all messages from the specified queue.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
|
|
||||||
func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
|
|
||||||
params := url.Values{}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)
|
|
||||||
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPermissions sets up queue permissions
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
|
|
||||||
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
|
|
||||||
body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
params := url.Values{
|
|
||||||
"comp": {"acl"},
|
|
||||||
}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
headers["Content-Length"] = strconv.Itoa(length)
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
|
||||||
return errors.New("Unable to set permissions")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
|
|
||||||
sil := SignedIdentifiers{
|
|
||||||
SignedIdentifiers: []SignedIdentifier{},
|
|
||||||
}
|
|
||||||
for _, qapd := range policies {
|
|
||||||
permission := qapd.generateQueuePermissions()
|
|
||||||
signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission)
|
|
||||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
|
||||||
}
|
|
||||||
return xmlMarshal(sil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
|
|
||||||
// generate the permissions string (raup).
|
|
||||||
// still want the end user API to have bool flags.
|
|
||||||
permissions = ""
|
|
||||||
|
|
||||||
if qapd.CanRead {
|
|
||||||
permissions += "r"
|
|
||||||
}
|
|
||||||
|
|
||||||
if qapd.CanAdd {
|
|
||||||
permissions += "a"
|
|
||||||
}
|
|
||||||
|
|
||||||
if qapd.CanUpdate {
|
|
||||||
permissions += "u"
|
|
||||||
}
|
|
||||||
|
|
||||||
if qapd.CanProcess {
|
|
||||||
permissions += "p"
|
|
||||||
}
|
|
||||||
|
|
||||||
return permissions
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetQueuePermissionOptions includes options for a get queue permissions operation
|
|
||||||
type GetQueuePermissionOptions struct {
|
|
||||||
Timeout uint
|
|
||||||
RequestID string `header:"x-ms-client-request-id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
|
|
||||||
// If timeout is 0 then it will not be passed to Azure
|
|
||||||
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
|
|
||||||
params := url.Values{
|
|
||||||
"comp": {"acl"},
|
|
||||||
}
|
|
||||||
headers := q.qsc.client.getStandardHeaders()
|
|
||||||
|
|
||||||
if options != nil {
|
|
||||||
params = addTimeout(params, options.Timeout)
|
|
||||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
|
||||||
}
|
|
||||||
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
|
||||||
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.body.Close()
|
|
||||||
|
|
||||||
var ap AccessPolicy
|
|
||||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return buildQueueAccessPolicy(ap, &resp.headers), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
|
|
||||||
permissions := QueuePermissions{
|
|
||||||
AccessPolicies: []QueueAccessPolicy{},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
|
||||||
qapd := QueueAccessPolicy{
|
|
||||||
ID: policy.ID,
|
|
||||||
StartTime: policy.AccessPolicy.StartTime,
|
|
||||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
|
||||||
}
|
|
||||||
qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
|
||||||
qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
|
|
||||||
qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
|
||||||
qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
|
|
||||||
|
|
||||||
permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
|
|
||||||
}
|
|
||||||
return &permissions
|
|
||||||
}
|
|
||||||
146
vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
generated
vendored
146
vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
generated
vendored
|
|
@ -1,146 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// QueueSASOptions are options to construct a blob SAS
|
|
||||||
// URI.
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
|
||||||
type QueueSASOptions struct {
|
|
||||||
QueueSASPermissions
|
|
||||||
SASOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueueSASPermissions includes the available permissions for
|
|
||||||
// a queue SAS URI.
|
|
||||||
type QueueSASPermissions struct {
|
|
||||||
Read bool
|
|
||||||
Add bool
|
|
||||||
Update bool
|
|
||||||
Process bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q QueueSASPermissions) buildString() string {
|
|
||||||
permissions := ""
|
|
||||||
|
|
||||||
if q.Read {
|
|
||||||
permissions += "r"
|
|
||||||
}
|
|
||||||
if q.Add {
|
|
||||||
permissions += "a"
|
|
||||||
}
|
|
||||||
if q.Update {
|
|
||||||
permissions += "u"
|
|
||||||
}
|
|
||||||
if q.Process {
|
|
||||||
permissions += "p"
|
|
||||||
}
|
|
||||||
return permissions
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSASURI creates an URL to the specified queue which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
	// The canonical resource path is a required component of the string-to-sign.
	canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	// A zero Start time means "no explicit start": the signed-start field stays empty.
	signedStart := ""
	if options.Start != (time.Time{}) {
		signedStart = options.Start.UTC().Format(time.RFC3339)
	}
	signedExpiry := options.Expiry.UTC().Format(time.RFC3339)

	// Allow both protocols unless the caller restricts the SAS to HTTPS.
	protocols := "https,http"
	if options.UseHTTPS {
		protocols = "https"
	}

	permissions := options.QueueSASPermissions.buildString()
	stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
	if err != nil {
		return "", err
	}

	// Sign the string and assemble the SAS query parameters.
	sig := q.qsc.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {q.qsc.client.apiVersion},
		"se":  {signedExpiry},
		"sp":  {permissions},
		"sig": {sig},
	}

	// Protocol ("spr") and source-IP ("sip") restrictions are only understood
	// by service version 2015-04-05 and later.
	if q.qsc.client.apiVersion >= "2015-04-05" {
		sasParams.Add("spr", protocols)
		addQueryParameter(sasParams, "sip", options.IP)
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
|
|
||||||
|
|
||||||
// queueSASStringToSign builds the newline-delimited string-to-sign for a
// queue SAS. The field layout depends on the service version; versions
// earlier than 2013-08-15 are not supported.
func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
	// From 2015-02-21 on, the canonical resource must carry the service name.
	resource := canonicalizedResource
	if signedVersion >= "2015-02-21" {
		resource = "/queue" + resource
	}

	switch {
	case signedVersion >= "2015-04-05":
		// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
			signedPermissions, signedStart, signedExpiry, resource,
			signedIdentifier, signedIP, protocols, signedVersion), nil
	case signedVersion >= "2013-08-15":
		// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s",
			signedPermissions, signedStart, signedExpiry, resource,
			signedIdentifier, signedVersion), nil
	}

	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
|
|
||||||
42
vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
generated
vendored
42
vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
generated
vendored
|
|
@ -1,42 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	// client is the underlying storage account client used for all requests.
	client Client
	// auth selects the authentication scheme applied to each request.
	auth authentication
}

// GetServiceProperties gets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return q.client.getServiceProperties(queueServiceName, q.auth)
}

// SetServiceProperties sets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
	return q.client.setServiceProperties(props, queueServiceName, q.auth)
}

// GetQueueReference returns a Queue object for the specified queue name.
// The reference is purely local: no request is made and the queue's
// existence is not verified.
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
	return &Queue{
		qsc:  q,
		Name: name,
	}
}
|
|
||||||
216
vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
generated
vendored
216
vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
generated
vendored
|
|
@ -1,216 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Share represents an Azure file share.
type Share struct {
	// fsc is the owning file service client, set when the reference is created.
	fsc *FileServiceClient
	// Name is the share's name within the storage account.
	Name string `xml:"Name"`
	// Properties holds the share's system properties as returned by the service.
	Properties ShareProperties `xml:"Properties"`
	// Metadata is the user-defined name/value metadata for the share.
	Metadata map[string]string
}

// ShareProperties contains various properties of a share.
type ShareProperties struct {
	// LastModified is the timestamp of the last modification, as reported by the service.
	LastModified string `xml:"Last-Modified"`
	// Etag is the entity tag used for optimistic concurrency.
	Etag string `xml:"Etag"`
	// Quota is the share quota; a value of 0 is treated as "not set" by
	// Create/SetProperties (no quota header is sent).
	Quota int `xml:"Quota"`
}
|
|
||||||
|
|
||||||
// builds the complete path for this share object.
|
|
||||||
func (s *Share) buildPath() string {
|
|
||||||
return fmt.Sprintf("/%s", s.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create this share under the associated account.
// If a share with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) Create(options *FileRequestOptions) error {
	extraheaders := map[string]string{}
	// Only send a quota header when a quota was explicitly set (> 0).
	if s.Properties.Quota > 0 {
		extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
	}

	params := prepareOptions(options)
	headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated})
	if err != nil {
		return err
	}

	// Keep the locally cached Etag/Last-Modified in sync with the response.
	s.updateEtagAndLastModified(headers)
	return nil
}

// CreateIfNotExists creates this share under the associated account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
	extraheaders := map[string]string{}
	if s.Properties.Quota > 0 {
		extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
	}

	params := prepareOptions(options)
	resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		// 201 Created: we made it. 409 Conflict: it already existed.
		// Any other status falls through and surfaces the original error.
		if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
			if resp.statusCode == http.StatusCreated {
				s.updateEtagAndLastModified(resp.headers)
				return true, nil
			}
			// Share already exists: refresh local state from the service.
			return false, s.FetchAttributes(nil)
		}
	}

	return false, err
}

// Delete marks this share for deletion. The share along with any files
// and directories contained within it are later deleted during garbage
// collection. If the share does not exist the operation fails
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) Delete(options *FileRequestOptions) error {
	return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
}

// DeleteIfExists operation marks this share for deletion if it exists.
// The boolean result reports whether a share was actually deleted.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		// 202 Accepted: the delete was queued. 404 Not Found: nothing to
		// delete. Both count as success for "if exists" semantics.
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

// Exists returns true if this share already exists
// on the storage account, otherwise returns false.
// On success it also refreshes the cached Etag, Last-Modified and quota.
func (s *Share) Exists() (bool, error) {
	exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
	if exists {
		s.updateEtagAndLastModified(headers)
		s.updateQuota(headers)
	}
	return exists, err
}

// FetchAttributes retrieves metadata and properties for this share.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
func (s *Share) FetchAttributes(options *FileRequestOptions) error {
	params := prepareOptions(options)
	// HEAD request: everything we need comes back in the response headers.
	headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	s.updateQuota(headers)
	s.Metadata = getMetadataFromHeaders(headers)

	return nil
}

// GetRootDirectoryReference returns a Directory object at the root of this share.
// The reference is local only; no request is made to the service.
func (s *Share) GetRootDirectoryReference() *Directory {
	return &Directory{
		fsc:   s.fsc,
		share: s,
	}
}

// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
	return s.fsc
}

// SetMetadata replaces the metadata for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
func (s *Share) SetMetadata(options *FileRequestOptions) error {
	headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}

// SetProperties sets system properties for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
func (s *Share) SetProperties(options *FileRequestOptions) error {
	extraheaders := map[string]string{}
	if s.Properties.Quota > 0 {
		// Values above 5120 are rejected locally before any request is made.
		if s.Properties.Quota > 5120 {
			return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
		}
		extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
	}

	headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}

// updateEtagAndLastModified caches the Etag and Last-Modified response
// headers on the share's properties.
func (s *Share) updateEtagAndLastModified(headers http.Header) {
	s.Properties.Etag = headers.Get("Etag")
	s.Properties.LastModified = headers.Get("Last-Modified")
}

// updateQuota caches the quota from the x-ms-share-quota response header.
// A missing or non-numeric header leaves the cached value untouched.
func (s *Share) updateQuota(headers http.Header) {
	quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
	if err == nil {
		s.Properties.Quota = quota
	}
}

// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private and this method does not check if the share exists.
func (s *Share) URL() string {
	return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}
|
|
||||||
61
vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
generated
vendored
61
vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
generated
vendored
|
|
@ -1,61 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AccessPolicyDetailsXML has specifics about an access policy
// annotated with XML details.
type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	Permission string    `xml:"Permission"`
}

// SignedIdentifier is a wrapper for a specific policy
// (a policy ID plus its access-policy details).
type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

// SignedIdentifiers part of the response from GetPermissions call.
type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

// AccessPolicy is the response type from the GetPermissions call.
type AccessPolicy struct {
	SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
}
|
|
||||||
|
|
||||||
// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
|
|
||||||
// AccessPolicy struct which will get converted to XML.
|
|
||||||
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
|
|
||||||
return SignedIdentifier{
|
|
||||||
ID: id,
|
|
||||||
AccessPolicy: AccessPolicyDetailsXML{
|
|
||||||
StartTime: startTime.UTC().Round(time.Second),
|
|
||||||
ExpiryTime: expiryTime.UTC().Round(time.Second),
|
|
||||||
Permission: permissions,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// updatePermissions reports whether permission occurs within the permissions
// string. (Despite the name, it only performs a substring lookup.)
func updatePermissions(permissions, permission string) bool {
	return strings.Index(permissions, permission) >= 0
}
|
|
||||||
131
vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
generated
vendored
131
vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
generated
vendored
|
|
@ -1,131 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServiceProperties represents the storage account service properties
type ServiceProperties struct {
	Logging       *Logging // analytics logging settings
	HourMetrics   *Metrics // hourly aggregated request metrics
	MinuteMetrics *Metrics // per-minute aggregated request metrics
	Cors          *Cors    // CORS rules for the service
}

// Logging represents the Azure Analytics Logging settings
type Logging struct {
	Version string
	Delete  bool // log delete requests
	Read    bool // log read requests
	Write   bool // log write requests
	RetentionPolicy *RetentionPolicy
}

// RetentionPolicy indicates if retention is enabled and for how many days
type RetentionPolicy struct {
	Enabled bool
	Days    *int
}

// Metrics provide request statistics.
type Metrics struct {
	Version     string
	Enabled     bool
	IncludeAPIs *bool // whether per-API summaries are included
	RetentionPolicy *RetentionPolicy
}

// Cors includes all the CORS rules
type Cors struct {
	CorsRule []CorsRule
}

// CorsRule includes all settings for a Cors rule
type CorsRule struct {
	AllowedOrigins  string
	AllowedMethods  string
	MaxAgeInSeconds int
	ExposedHeaders  string
	AllowedHeaders  string
}
|
|
||||||
|
|
||||||
// getServiceProperties issues a GET against the given service's
// "?restype=service&comp=properties" endpoint and unmarshals the XML
// response into a ServiceProperties value.
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
	query := url.Values{
		"restype": {"service"},
		"comp":    {"properties"},
	}
	uri := c.getEndpoint(service, "", query)
	headers := c.getStandardHeaders()

	resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	// Anything other than 200 OK is an error.
	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	var out ServiceProperties
	err = xmlUnmarshal(resp.body, &out)
	if err != nil {
		return nil, err
	}

	return &out, nil
}
|
|
||||||
|
|
||||||
// setServiceProperties serializes props to XML and PUTs it to the given
// service's "?restype=service&comp=properties" endpoint.
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
	query := url.Values{
		"restype": {"service"},
		"comp":    {"properties"},
	}
	uri := c.getEndpoint(service, "", query)

	// Ideally, StorageServiceProperties would be the output struct
	// This is to avoid golint stuttering, while generating the correct XML
	type StorageServiceProperties struct {
		Logging       *Logging
		HourMetrics   *Metrics
		MinuteMetrics *Metrics
		Cors          *Cors
	}
	input := StorageServiceProperties{
		Logging:       props.Logging,
		HourMetrics:   props.HourMetrics,
		MinuteMetrics: props.MinuteMetrics,
		Cors:          props.Cors,
	}

	body, length, err := xmlMarshal(input)
	if err != nil {
		return err
	}

	headers := c.getStandardHeaders()
	headers["Content-Length"] = strconv.Itoa(length)

	resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	// The service acknowledges the update with 202 Accepted.
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
|
|
||||||
419
vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
generated
vendored
419
vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
generated
vendored
|
|
@ -1,419 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Paths, headers and query parameters used by the table service for
// table operations and paginated-query continuation.
const (
	tablesURIPath                  = "/Tables"
	nextTableQueryParameter        = "NextTableName"
	headerNextPartitionKey         = "x-ms-continuation-NextPartitionKey"
	headerNextRowKey               = "x-ms-continuation-NextRowKey"
	nextPartitionKeyQueryParameter = "NextPartitionKey"
	nextRowKeyQueryParameter       = "NextRowKey"
)
|
|
||||||
|
|
||||||
// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
	ID         string    // unique identifier of the policy within the table's ACL
	StartTime  time.Time // when the policy becomes valid
	ExpiryTime time.Time // when the policy expires
	CanRead    bool
	CanAppend  bool
	CanUpdate  bool
	CanDelete  bool
}

// Table represents an Azure table.
type Table struct {
	// tsc is the owning table service client, set when the reference is created.
	tsc           *TableServiceClient
	Name          string `json:"TableName"`
	OdataEditLink string `json:"odata.editLink"`
	OdataID       string `json:"odata.id"`
	OdataMetadata string `json:"odata.metadata"`
	OdataType     string `json:"odata.type"`
}

// EntityQueryResult contains the response from
// ExecuteQuery and ExecuteQueryNextResults functions.
type EntityQueryResult struct {
	OdataMetadata string    `json:"odata.metadata"`
	Entities      []*Entity `json:"value"`
	// QueryNextLink carries the continuation state used for paging.
	QueryNextLink
	// table is the table the query ran against; used by NextResults.
	table *Table
}

// continuationToken holds the partition/row keys needed to resume a
// paginated entity query.
type continuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}
|
|
||||||
|
|
||||||
func (t *Table) buildPath() string {
|
|
||||||
return fmt.Sprintf("/%s", t.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Table) buildSpecificPath() string {
|
|
||||||
return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get gets the referenced table.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (t *Table) Get(timeout uint, ml MetadataLevel) error {
	// The response must carry some OData metadata to be useful.
	if ml == EmptyPayload {
		return errEmptyPayload
	}

	query := url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	}
	headers := t.tsc.client.getStandardHeaders()
	// The Accept header selects how much OData metadata the service returns.
	headers[headerAccept] = string(ml)

	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return err
	}

	// Populate the receiver in place from the JSON payload.
	respBody, err := ioutil.ReadAll(resp.body)
	if err != nil {
		return err
	}
	err = json.Unmarshal(respBody, t)
	if err != nil {
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
// Create creates the referenced table.
// This function fails if the name is not compliant
// with the specification or the tables already exists.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
	uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	})

	// The request body is a single-field JSON document naming the table.
	type createTableRequest struct {
		TableName string `json:"TableName"`
	}
	req := createTableRequest{TableName: t.Name}
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(req); err != nil {
		return err
	}

	headers := t.tsc.client.getStandardHeaders()
	headers = addReturnContentHeaders(headers, ml)
	headers = addBodyRelatedHeaders(headers, buf.Len())
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	// With no return content requested the service replies 204 No Content;
	// otherwise a successful create is 201 Created.
	if ml == EmptyPayload {
		if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
			return err
		}
	} else {
		if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
			return err
		}
	}

	// When a payload was requested, refresh the receiver from the response body.
	if ml != EmptyPayload {
		data, err := ioutil.ReadAll(resp.body)
		if err != nil {
			return err
		}
		err = json.Unmarshal(data, t)
		if err != nil {
			return err
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// Delete deletes the referenced table.
// This function fails if the table is not present.
// Be advised: Delete deletes all the entries that may be present.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
func (t *Table) Delete(timeout uint, options *TableOptions) error {
	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
		"timeout": {strconv.Itoa(int(timeout))},
	})

	headers := t.tsc.client.getStandardHeaders()
	// No response payload is wanted for a delete.
	headers = addReturnContentHeaders(headers, EmptyPayload)
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
|
|
||||||
|
|
||||||
// QueryOptions includes options for a query entities operation.
// Top, filter and select are OData query options.
type QueryOptions struct {
	Top       uint     // maximum number of entities to return ($top); 0 means unset
	Filter    string   // OData $filter expression
	Select    []string // property names for a $select projection
	RequestID string   // optional x-ms-client-request-id header value
}
|
|
||||||
|
|
||||||
// getParameters translates the options into OData query values and request
// headers. A nil receiver is valid and yields empty query values and headers.
func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
	query := url.Values{}
	headers := map[string]string{}
	if options != nil {
		if options.Top > 0 {
			query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
		}
		if options.Filter != "" {
			query.Add(OdataFilter, options.Filter)
		}
		if len(options.Select) > 0 {
			query.Add(OdataSelect, strings.Join(options.Select, ","))
		}
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
	}
	return query, headers
}
|
|
||||||
|
|
||||||
// QueryEntities returns the entities in the table.
// You can use query options defined by the OData Protocol specification.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
	// The response must carry some OData metadata to be useful.
	if ml == EmptyPayload {
		return nil, errEmptyPayload
	}
	query, headers := options.getParameters()
	query = addTimeout(query, timeout)
	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
	return t.queryEntities(uri, headers, ml)
}
|
|
||||||
|
|
||||||
// NextResults returns the next page of results
// from a QueryEntities or NextResults operation.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
	if eqr == nil {
		return nil, errNilPreviousResult
	}
	// No continuation link means there are no further pages.
	if eqr.NextLink == nil {
		return nil, errNilNextLink
	}
	headers := options.addToHeaders(map[string]string{})
	// Re-issue the query against the continuation URL with the same
	// metadata level (eqr.ml) as the original request.
	return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
}
|
|
||||||
|
|
||||||
// SetPermissions sets up table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
	// "comp=acl" addresses the table's access-control-list sub-resource.
	params := url.Values{"comp": {"acl"},
		"timeout": {strconv.Itoa(int(timeout))},
	}

	uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
	headers := t.tsc.client.getStandardHeaders()
	headers = options.addToHeaders(headers)

	// Serialize the policies into the XML request body the service expects.
	body, length, err := generateTableACLPayload(tap)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(length)

	resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
	if err != nil {
		return err
	}
	// Drain and close the body so the connection can be reused.
	defer readAndCloseBody(resp.body)

	// The service answers 204 No Content on success.
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
|
|
||||||
|
|
||||||
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
|
|
||||||
sil := SignedIdentifiers{
|
|
||||||
SignedIdentifiers: []SignedIdentifier{},
|
|
||||||
}
|
|
||||||
for _, tap := range policies {
|
|
||||||
permission := generateTablePermissions(&tap)
|
|
||||||
signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
|
|
||||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
|
||||||
}
|
|
||||||
return xmlMarshal(sil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPermissions gets the table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) {
	// "comp=acl" addresses the table's access-control-list sub-resource.
	params := url.Values{"comp": {"acl"},
		"timeout": {strconv.Itoa(int(timeout))},
	}

	uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
	headers := t.tsc.client.getStandardHeaders()
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	// Decode the XML signed-identifier list and convert it to the
	// public TableAccessPolicy representation.
	var ap AccessPolicy
	err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	return updateTableAccessPolicy(ap), nil
}
|
|
||||||
|
|
||||||
// queryEntities issues the GET for an entity query (initial page or a
// continuation link), decodes the JSON payload and wires up pagination.
func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
	headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
	if ml != EmptyPayload {
		headers[headerAccept] = string(ml)
	}

	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	data, err := ioutil.ReadAll(resp.body)
	if err != nil {
		return nil, err
	}
	var entities EntityQueryResult
	err = json.Unmarshal(data, &entities)
	if err != nil {
		return nil, err
	}

	// Link each returned entity (and the result itself) back to this table
	// so follow-up operations know their target.
	for i := range entities.Entities {
		entities.Entities[i].Table = t
	}
	entities.table = t

	// Pagination: the service signals further pages via continuation headers.
	contToken := extractContinuationTokenFromHeaders(resp.headers)
	if contToken == nil {
		entities.NextLink = nil
	} else {
		// Rebuild the query URI with the continuation keys appended.
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
		v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
		newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
		entities.NextLink = &newURI
		entities.ml = ml
	}

	return &entities, nil
}
|
|
||||||
|
|
||||||
func extractContinuationTokenFromHeaders(h http.Header) *continuationToken {
|
|
||||||
ct := continuationToken{
|
|
||||||
NextPartitionKey: h.Get(headerNextPartitionKey),
|
|
||||||
NextRowKey: h.Get(headerNextRowKey),
|
|
||||||
}
|
|
||||||
|
|
||||||
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
|
|
||||||
return &ct
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
|
||||||
taps := []TableAccessPolicy{}
|
|
||||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
|
||||||
tap := TableAccessPolicy{
|
|
||||||
ID: policy.ID,
|
|
||||||
StartTime: policy.AccessPolicy.StartTime,
|
|
||||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
|
||||||
}
|
|
||||||
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
|
||||||
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
|
|
||||||
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
|
||||||
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
|
||||||
|
|
||||||
taps = append(taps, tap)
|
|
||||||
}
|
|
||||||
return taps
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
|
|
||||||
// generate the permissions string (raud).
|
|
||||||
// still want the end user API to have bool flags.
|
|
||||||
permissions = ""
|
|
||||||
|
|
||||||
if tap.CanRead {
|
|
||||||
permissions += "r"
|
|
||||||
}
|
|
||||||
|
|
||||||
if tap.CanAppend {
|
|
||||||
permissions += "a"
|
|
||||||
}
|
|
||||||
|
|
||||||
if tap.CanUpdate {
|
|
||||||
permissions += "u"
|
|
||||||
}
|
|
||||||
|
|
||||||
if tap.CanDelete {
|
|
||||||
permissions += "d"
|
|
||||||
}
|
|
||||||
return permissions
|
|
||||||
}
|
|
||||||
328
vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
generated
vendored
328
vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
generated
vendored
|
|
@ -1,328 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"net/textproto"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/marstr/guid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Operation type. Insert, Delete, Replace etc.
type Operation int

// consts for batch operations.
const (
	InsertOp          = Operation(1)
	DeleteOp          = Operation(2)
	ReplaceOp         = Operation(3)
	MergeOp           = Operation(4)
	InsertOrReplaceOp = Operation(5)
	InsertOrMergeOp   = Operation(6)
)
|
|
||||||
|
|
||||||
// BatchEntity used for tracking Entities to operate on and
// whether operations (replace/merge etc) should be forced.
// Wrapper for regular Entity with additional data specific for the entity.
type BatchEntity struct {
	*Entity
	Force bool      // when true, the changeset sends If-Match: * instead of the entity's ETag
	Op    Operation // which batch operation to perform for this entity
}
|
|
||||||
|
|
||||||
// TableBatch stores all the entities that will be operated on during a batch process.
// Entities can be inserted, replaced or deleted.
type TableBatch struct {
	BatchEntitySlice []BatchEntity // operations accumulated for this batch, executed in order

	// reference to table we're operating on.
	Table *Table
}
|
|
||||||
|
|
||||||
// defaultChangesetHeaders for changeSets: each changeset part sends JSON,
// asks for minimal-metadata JSON back, and suppresses response bodies.
var defaultChangesetHeaders = map[string]string{
	"Accept":       "application/json;odata=minimalmetadata",
	"Content-Type": "application/json",
	"Prefer":       "return-no-content",
}
|
|
||||||
|
|
||||||
// NewBatch return new TableBatch for populating.
|
|
||||||
func (t *Table) NewBatch() *TableBatch {
|
|
||||||
return &TableBatch{
|
|
||||||
Table: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertEntity adds an entity in preparation for a batch insert.
|
|
||||||
func (t *TableBatch) InsertEntity(entity *Entity) {
|
|
||||||
be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
|
|
||||||
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
|
|
||||||
func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
|
|
||||||
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp}
|
|
||||||
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
|
|
||||||
func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
|
|
||||||
t.InsertOrReplaceEntity(entity, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
|
|
||||||
func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
|
|
||||||
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp}
|
|
||||||
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
|
|
||||||
func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
|
|
||||||
t.InsertOrMergeEntity(entity, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceEntity adds an entity in preparation for a batch replace.
|
|
||||||
func (t *TableBatch) ReplaceEntity(entity *Entity) {
|
|
||||||
be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
|
|
||||||
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteEntity adds an entity in preparation for a batch delete
|
|
||||||
func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
|
|
||||||
be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp}
|
|
||||||
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
|
|
||||||
func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
|
|
||||||
t.DeleteEntity(entity, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeEntity adds an entity in preparation for a batch merge
|
|
||||||
func (t *TableBatch) MergeEntity(entity *Entity) {
|
|
||||||
be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
|
|
||||||
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecuteBatch executes many table operations in one request to Azure.
// The operations can be combinations of Insert, Delete, Replace and Merge
// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
// the changesets.
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
func (t *TableBatch) ExecuteBatch() error {

	// Using `github.com/marstr/guid` is in response to issue #947 (https://github.com/Azure/azure-sdk-for-go/issues/947).
	id, err := guid.NewGUIDs(guid.CreationStrategyVersion1)
	if err != nil {
		return err
	}

	// Inner multipart changeset: one MIME part per entity operation.
	changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
	changesetBody, err := t.generateChangesetBody(changesetBoundary)
	if err != nil {
		return err
	}

	// A second GUID gives the outer batch envelope its own distinct boundary.
	id, err = guid.NewGUIDs(guid.CreationStrategyVersion1)
	if err != nil {
		return err
	}

	boundary := fmt.Sprintf("batch_%s", id.String())
	body, err := generateBody(changesetBody, changesetBoundary, boundary)
	if err != nil {
		return err
	}

	headers := t.Table.tsc.client.getStandardHeaders()
	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)

	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	// The service answers 202 Accepted on success; otherwise map the OData
	// error into a storage service error naming the failing operation.
	if err = checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil {

		// check which batch failed.
		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
		requestID, date, version := getDebugHeaders(resp.headers)
		return AzureStorageServiceError{
			StatusCode: resp.statusCode,
			Code:       resp.odata.Err.Code,
			RequestID:  requestID,
			Date:       date,
			APIVersion: version,
			Message:    operationFailedMessage,
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// getFailedOperation parses the original Azure error string and determines which operation failed
|
|
||||||
// and generates appropriate message.
|
|
||||||
func (t *TableBatch) getFailedOperation(errorMessage string) string {
|
|
||||||
// errorMessage consists of "number:string" we just need the number.
|
|
||||||
sp := strings.Split(errorMessage, ":")
|
|
||||||
if len(sp) > 1 {
|
|
||||||
msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// cant parse the message, just return the original message to client
|
|
||||||
return errorMessage
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateBody generates the complete body for the batch request.
|
|
||||||
func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
|
|
||||||
|
|
||||||
body := new(bytes.Buffer)
|
|
||||||
writer := multipart.NewWriter(body)
|
|
||||||
writer.SetBoundary(boundary)
|
|
||||||
h := make(textproto.MIMEHeader)
|
|
||||||
h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
|
|
||||||
batchWriter, err := writer.CreatePart(h)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
batchWriter.Write(changeSetBody.Bytes())
|
|
||||||
writer.Close()
|
|
||||||
return body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateChangesetBody generates the individual changesets for the various operations within the batch request.
|
|
||||||
// There is a changeset for Insert, Delete, Merge etc.
|
|
||||||
func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
|
|
||||||
|
|
||||||
body := new(bytes.Buffer)
|
|
||||||
writer := multipart.NewWriter(body)
|
|
||||||
writer.SetBoundary(changesetBoundary)
|
|
||||||
|
|
||||||
for _, be := range t.BatchEntitySlice {
|
|
||||||
t.generateEntitySubset(&be, writer)
|
|
||||||
}
|
|
||||||
|
|
||||||
writer.Close()
|
|
||||||
return body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateVerb generates the HTTP request VERB required for each changeset.
|
|
||||||
func generateVerb(op Operation) (string, error) {
|
|
||||||
switch op {
|
|
||||||
case InsertOp:
|
|
||||||
return http.MethodPost, nil
|
|
||||||
case DeleteOp:
|
|
||||||
return http.MethodDelete, nil
|
|
||||||
case ReplaceOp, InsertOrReplaceOp:
|
|
||||||
return http.MethodPut, nil
|
|
||||||
case MergeOp, InsertOrMergeOp:
|
|
||||||
return "MERGE", nil
|
|
||||||
default:
|
|
||||||
return "", errors.New("Unable to detect operation")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateQueryPath generates the query path for within the changesets
|
|
||||||
// For inserts it will just be a table query path (table name)
|
|
||||||
// but for other operations (modifying an existing entity) then
|
|
||||||
// the partition/row keys need to be generated.
|
|
||||||
func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
|
|
||||||
if op == InsertOp {
|
|
||||||
return entity.Table.buildPath()
|
|
||||||
}
|
|
||||||
return entity.buildPath()
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateGenericOperationHeaders generates common headers for a given operation.
|
|
||||||
func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
|
|
||||||
retval := map[string]string{}
|
|
||||||
|
|
||||||
for k, v := range defaultChangesetHeaders {
|
|
||||||
retval[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
|
|
||||||
if be.Force || be.Entity.OdataEtag == "" {
|
|
||||||
retval["If-Match"] = "*"
|
|
||||||
} else {
|
|
||||||
retval["If-Match"] = be.Entity.OdataEtag
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return retval
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateEntitySubset generates body payload for particular batch entity
// Each changeset part is an embedded HTTP request: request line, headers,
// a blank line, then (except for deletes) the JSON-encoded entity.
func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {

	h := make(textproto.MIMEHeader)
	h.Set(headerContentType, "application/http")
	h.Set(headerContentTransferEncoding, "binary")

	verb, err := generateVerb(batchEntity.Op)
	if err != nil {
		return err
	}

	genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
	queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)

	operationWriter, err := writer.CreatePart(h)
	if err != nil {
		return err
	}

	// Request line, e.g. "PUT https://... HTTP/1.1".
	urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
	operationWriter.Write([]byte(urlAndVerb))
	writeHeaders(genericOpHeadersMap, &operationWriter)
	operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.

	// delete operation doesn't need a body.
	if batchEntity.Op != DeleteOp {
		//var e Entity = batchEntity.Entity
		body, err := json.Marshal(batchEntity.Entity)
		if err != nil {
			return err
		}
		operationWriter.Write(body)
	}

	return nil
}
|
|
||||||
|
|
||||||
func writeHeaders(h map[string]string, writer *io.Writer) {
|
|
||||||
// This way it is guaranteed the headers will be written in a sorted order
|
|
||||||
var keys []string
|
|
||||||
for k := range h {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
(*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
204
vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
generated
vendored
204
vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
generated
vendored
|
|
@ -1,204 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Header names used by the table service client.
const (
	headerAccept          = "Accept"
	headerEtag            = "Etag"
	headerPrefer          = "Prefer"
	headerXmsContinuation = "x-ms-Continuation-NextTableName" // pagination continuation header
)
|
|
||||||
|
|
||||||
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client         // underlying storage client used for all requests
	auth   authentication // authentication scheme applied to each request
}
|
|
||||||
|
|
||||||
// TableOptions includes options for some table operations
type TableOptions struct {
	RequestID string // optional x-ms-client-request-id for request tracing
}
|
|
||||||
|
|
||||||
func (options *TableOptions) addToHeaders(h map[string]string) map[string]string {
|
|
||||||
if options != nil {
|
|
||||||
h = addToHeaders(h, "x-ms-client-request-id", options.RequestID)
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryNextLink includes information for getting the next page of
// results in query operations
type QueryNextLink struct {
	NextLink *string       // URI for the next page of results; nil when there are no more pages
	ml       MetadataLevel // metadata level to request on subsequent pages
}
|
|
||||||
|
|
||||||
// GetServiceProperties gets the properties of your storage account's table service.
// The call is delegated to the shared storage Client.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return t.client.getServiceProperties(tableServiceName, t.auth)
}
|
|
||||||
|
|
||||||
// SetServiceProperties sets the properties of your storage account's table service.
// The call is delegated to the shared storage Client.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	return t.client.setServiceProperties(props, tableServiceName, t.auth)
}
|
|
||||||
|
|
||||||
// GetTableReference returns a Table object for the specified table name.
|
|
||||||
func (t *TableServiceClient) GetTableReference(name string) *Table {
|
|
||||||
return &Table{
|
|
||||||
tsc: t,
|
|
||||||
Name: name,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryTablesOptions includes options for some table operations
type QueryTablesOptions struct {
	Top       uint   // maximum number of tables to return ($top); 0 means unlimited
	Filter    string // OData $filter expression applied server-side
	RequestID string // optional x-ms-client-request-id for request tracing
}
|
|
||||||
|
|
||||||
func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) {
|
|
||||||
query := url.Values{}
|
|
||||||
headers := map[string]string{}
|
|
||||||
if options != nil {
|
|
||||||
if options.Top > 0 {
|
|
||||||
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
|
|
||||||
}
|
|
||||||
if options.Filter != "" {
|
|
||||||
query.Add(OdataFilter, options.Filter)
|
|
||||||
}
|
|
||||||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
|
||||||
}
|
|
||||||
return query, headers
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryTables returns the tables in the storage account.
|
|
||||||
// You can use query options defined by the OData Protocol specification.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
|
|
||||||
func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
|
|
||||||
query, headers := options.getParameters()
|
|
||||||
uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
|
|
||||||
return t.queryTables(uri, headers, ml)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextResults returns the next page of results
|
|
||||||
// from a QueryTables or a NextResults operation.
|
|
||||||
//
|
|
||||||
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
|
|
||||||
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
|
|
||||||
func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
|
|
||||||
if tqr == nil {
|
|
||||||
return nil, errNilPreviousResult
|
|
||||||
}
|
|
||||||
if tqr.NextLink == nil {
|
|
||||||
return nil, errNilNextLink
|
|
||||||
}
|
|
||||||
headers := options.addToHeaders(map[string]string{})
|
|
||||||
|
|
||||||
return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TableQueryResult contains the response from
// QueryTables and QueryTablesNextResults functions.
type TableQueryResult struct {
	OdataMetadata string  `json:"odata.metadata"` // OData metadata URI returned by the service
	Tables        []Table `json:"value"`          // tables in this page of results
	QueryNextLink         // pagination state for fetching the next page
	tsc *TableServiceClient
}
|
|
||||||
|
|
||||||
// queryTables issues the GET for a table query (initial page or a
// continuation link), decodes the JSON payload and wires up pagination.
func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
	// A table query must request at least minimal metadata.
	if ml == EmptyPayload {
		return nil, errEmptyPayload
	}
	headers = mergeHeaders(headers, t.client.getStandardHeaders())
	headers[headerAccept] = string(ml)

	resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	respBody, err := ioutil.ReadAll(resp.body)
	if err != nil {
		return nil, err
	}
	var out TableQueryResult
	err = json.Unmarshal(respBody, &out)
	if err != nil {
		return nil, err
	}

	// Attach this client to every table (and the result) so follow-up calls work.
	for i := range out.Tables {
		out.Tables[i].tsc = t
	}
	out.tsc = t

	// Pagination: a continuation header names the next table to start from.
	nextLink := resp.headers.Get(http.CanonicalHeaderKey(headerXmsContinuation))
	if nextLink == "" {
		out.NextLink = nil
	} else {
		// Rebuild the query URI with the continuation parameter appended.
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		v.Set(nextTableQueryParameter, nextLink)
		newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
		out.NextLink = &newURI
		out.ml = ml
	}

	return &out, nil
}
|
|
||||||
|
|
||||||
func addBodyRelatedHeaders(h map[string]string, length int) map[string]string {
|
|
||||||
h[headerContentType] = "application/json"
|
|
||||||
h[headerContentLength] = fmt.Sprintf("%v", length)
|
|
||||||
h[headerAcceptCharset] = "UTF-8"
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string {
|
|
||||||
if ml != EmptyPayload {
|
|
||||||
h[headerPrefer] = "return-content"
|
|
||||||
h[headerAccept] = string(ml)
|
|
||||||
} else {
|
|
||||||
h[headerPrefer] = "return-no-content"
|
|
||||||
// From API version 2015-12-11 onwards, Accept header is required
|
|
||||||
h[headerAccept] = string(NoMetadata)
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
239
vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
generated
vendored
239
vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
generated
vendored
|
|
@ -1,239 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// fixedTime is a far-future reference instant used for SAS defaults and tests.
	// NOTE(review): time.FixedZone's offset argument is in *seconds*, so this
	// zone is GMT-6s, not GMT-6h — looks suspicious but preserved; confirm intent.
	fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))

	// accountSASOptions is a default account SAS configuration: blob service,
	// all resource types, all permissions, HTTPS only, expiring at fixedTime.
	accountSASOptions = AccountSASTokenOptions{
		Services: Services{
			Blob: true,
		},
		ResourceTypes: ResourceTypes{
			Service:   true,
			Container: true,
			Object:    true,
		},
		Permissions: Permissions{
			Read:    true,
			Write:   true,
			Delete:  true,
			List:    true,
			Add:     true,
			Create:  true,
			Update:  true,
			Process: true,
		},
		Expiry:   fixedTime,
		UseHTTPS: true,
	}
)
|
|
||||||
|
|
||||||
func (c Client) computeHmac256(message string) string {
|
|
||||||
h := hmac.New(sha256.New, c.accountKey)
|
|
||||||
h.Write([]byte(message))
|
|
||||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
func currentTimeRfc1123Formatted() string {
|
|
||||||
return timeRfc1123Formatted(time.Now().UTC())
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeRfc1123Formatted(t time.Time) string {
|
|
||||||
return t.Format(http.TimeFormat)
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeRFC3339Formatted(t time.Time) string {
|
|
||||||
return t.Format("2006-01-02T15:04:05.0000000Z")
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeParams(v1, v2 url.Values) url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
for k, v := range v1 {
|
|
||||||
out[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range v2 {
|
|
||||||
vals, ok := out[k]
|
|
||||||
if ok {
|
|
||||||
vals = append(vals, v...)
|
|
||||||
out[k] = vals
|
|
||||||
} else {
|
|
||||||
out[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareBlockListRequest(blocks []Block) string {
|
|
||||||
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
|
|
||||||
for _, v := range blocks {
|
|
||||||
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
|
|
||||||
}
|
|
||||||
s += `</BlockList>`
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func xmlUnmarshal(body io.Reader, v interface{}) error {
|
|
||||||
data, err := ioutil.ReadAll(body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return xml.Unmarshal(data, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func xmlMarshal(v interface{}) (io.Reader, int, error) {
|
|
||||||
b, err := xml.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
return bytes.NewReader(b), len(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func headersFromStruct(v interface{}) map[string]string {
|
|
||||||
headers := make(map[string]string)
|
|
||||||
value := reflect.ValueOf(v)
|
|
||||||
for i := 0; i < value.NumField(); i++ {
|
|
||||||
key := value.Type().Field(i).Tag.Get("header")
|
|
||||||
if key != "" {
|
|
||||||
reflectedValue := reflect.Indirect(value.Field(i))
|
|
||||||
var val string
|
|
||||||
if reflectedValue.IsValid() {
|
|
||||||
switch reflectedValue.Type() {
|
|
||||||
case reflect.TypeOf(fixedTime):
|
|
||||||
val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
|
|
||||||
case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
|
|
||||||
val = strconv.FormatUint(reflectedValue.Uint(), 10)
|
|
||||||
case reflect.TypeOf(int(0)):
|
|
||||||
val = strconv.FormatInt(reflectedValue.Int(), 10)
|
|
||||||
default:
|
|
||||||
val = reflectedValue.String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if val != "" {
|
|
||||||
headers[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
// merges extraHeaders into headers and returns headers
|
|
||||||
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
|
|
||||||
for k, v := range extraHeaders {
|
|
||||||
headers[k] = v
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
func addToHeaders(h map[string]string, key, value string) map[string]string {
|
|
||||||
if value != "" {
|
|
||||||
h[key] = value
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
|
|
||||||
if value != nil {
|
|
||||||
h = addToHeaders(h, key, timeRfc1123Formatted(*value))
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func addTimeout(params url.Values, timeout uint) url.Values {
|
|
||||||
if timeout > 0 {
|
|
||||||
params.Add("timeout", fmt.Sprintf("%v", timeout))
|
|
||||||
}
|
|
||||||
return params
|
|
||||||
}
|
|
||||||
|
|
||||||
func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
|
|
||||||
if snapshot != nil {
|
|
||||||
params.Add("snapshot", timeRFC3339Formatted(*snapshot))
|
|
||||||
}
|
|
||||||
return params
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) {
|
|
||||||
var out time.Time
|
|
||||||
var err error
|
|
||||||
outStr := h.Get(key)
|
|
||||||
if outStr != "" {
|
|
||||||
out, err = time.Parse(time.RFC1123, outStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
|
|
||||||
type TimeRFC1123 time.Time
|
|
||||||
|
|
||||||
// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
|
|
||||||
func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
|
||||||
var value string
|
|
||||||
d.DecodeElement(&value, &start)
|
|
||||||
parse, err := time.Parse(time.RFC1123, value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*t = TimeRFC1123(parse)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns a map of custom metadata values from the specified HTTP header
|
|
||||||
func getMetadataFromHeaders(header http.Header) map[string]string {
|
|
||||||
metadata := make(map[string]string)
|
|
||||||
for k, v := range header {
|
|
||||||
// Can't trust CanonicalHeaderKey() to munge case
|
|
||||||
// reliably. "_" is allowed in identifiers:
|
|
||||||
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
|
||||||
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
|
||||||
// http://tools.ietf.org/html/rfc7230#section-3.2
|
|
||||||
// ...but "_" is considered invalid by
|
|
||||||
// CanonicalMIMEHeaderKey in
|
|
||||||
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
|
||||||
// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// metadata["lol"] = content of the last X-Ms-Meta-Lol header
|
|
||||||
k = k[len(userDefinedMetadataHeaderPrefix):]
|
|
||||||
metadata[k] = v[len(v)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(metadata) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return metadata
|
|
||||||
}
|
|
||||||
19
vendor/github.com/Azure/azure-sdk-for-go/storage/version.go
generated
vendored
19
vendor/github.com/Azure/azure-sdk-for-go/storage/version.go
generated
vendored
|
|
@ -1,19 +0,0 @@
|
||||||
package storage
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
var (
|
|
||||||
sdkVersion = "v12.3.0-beta"
|
|
||||||
)
|
|
||||||
191
vendor/github.com/Azure/go-autorest/LICENSE
generated
vendored
191
vendor/github.com/Azure/go-autorest/LICENSE
generated
vendored
|
|
@ -1,191 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2015 Microsoft Corporation
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
287
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
287
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
|
|
@ -1,287 +0,0 @@
|
||||||
# Azure Active Directory library for Go
|
|
||||||
|
|
||||||
This project provides a stand alone Azure Active Directory library for Go. The code was extracted
|
|
||||||
from [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for
|
|
||||||
[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go).
|
|
||||||
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
```
|
|
||||||
go get -u github.com/Azure/go-autorest/autorest/adal
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) follow these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
|
|
||||||
|
|
||||||
### Register an Azure AD Application with secret
|
|
||||||
|
|
||||||
|
|
||||||
1. Register a new application with a `secret` credential
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad app create \
|
|
||||||
--display-name example-app \
|
|
||||||
--homepage https://example-app/home \
|
|
||||||
--identifier-uris https://example-app/app \
|
|
||||||
--password secret
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Create a service principal using the `Application ID` from previous step
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad sp create --id "Application ID"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace `Application ID` with `appId` from step 1.
|
|
||||||
|
|
||||||
### Register an Azure AD Application with certificate
|
|
||||||
|
|
||||||
1. Create a private key
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl genrsa -out "example-app.key" 2048
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Create the certificate
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
|
|
||||||
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Create the PKCS12 version of the certificate containing also the private key
|
|
||||||
|
|
||||||
```
|
|
||||||
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Register a new application with the certificate content form `example-app.crt`
|
|
||||||
|
|
||||||
```
|
|
||||||
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
|
|
||||||
|
|
||||||
az ad app create \
|
|
||||||
--display-name example-app \
|
|
||||||
--homepage https://example-app/home \
|
|
||||||
--identifier-uris https://example-app/app \
|
|
||||||
--key-usage Verify --end-date 2018-01-01 \
|
|
||||||
--key-value "${certificateContents}"
|
|
||||||
```
|
|
||||||
|
|
||||||
5. Create a service principal using the `Application ID` from previous step
|
|
||||||
|
|
||||||
```
|
|
||||||
az ad sp create --id "APPLICATION_ID"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace `APPLICATION_ID` with `appId` from step 4.
|
|
||||||
|
|
||||||
|
|
||||||
### Grant the necessary permissions
|
|
||||||
|
|
||||||
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
|
|
||||||
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
|
|
||||||
which can be assigned to a service principal of an Azure AD application depending of your needs.
|
|
||||||
|
|
||||||
```
|
|
||||||
az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
|
|
||||||
* Replace the `ROLE_NAME` with a role name of your choice.
|
|
||||||
|
|
||||||
It is also possible to define custom role definitions.
|
|
||||||
|
|
||||||
```
|
|
||||||
az role definition create --role-definition role-definition.json
|
|
||||||
```
|
|
||||||
|
|
||||||
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
|
|
||||||
|
|
||||||
|
|
||||||
### Acquire Access Token
|
|
||||||
|
|
||||||
The common configuration used by all flows:
|
|
||||||
|
|
||||||
```Go
|
|
||||||
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
|
|
||||||
tenantID := "TENANT_ID"
|
|
||||||
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
|
|
||||||
|
|
||||||
applicationID := "APPLICATION_ID"
|
|
||||||
|
|
||||||
callback := func(token adal.Token) error {
|
|
||||||
// This is called after the token is acquired
|
|
||||||
}
|
|
||||||
|
|
||||||
// The resource for which the token is acquired
|
|
||||||
resource := "https://management.core.windows.net/"
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `TENANT_ID` with your tenant ID.
|
|
||||||
* Replace the `APPLICATION_ID` with the value from previous section.
|
|
||||||
|
|
||||||
#### Client Credentials
|
|
||||||
|
|
||||||
```Go
|
|
||||||
applicationSecret := "APPLICATION_SECRET"
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalToken(
|
|
||||||
oauthConfig,
|
|
||||||
appliationID,
|
|
||||||
applicationSecret,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Acquire a new access token
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
|
|
||||||
|
|
||||||
#### Client Certificate
|
|
||||||
|
|
||||||
```Go
|
|
||||||
certificatePath := "./example-app.pfx"
|
|
||||||
|
|
||||||
certData, err := ioutil.ReadFile(certificatePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the certificate and private key from pfx file
|
|
||||||
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromCertificate(
|
|
||||||
oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
certificate,
|
|
||||||
rsaPrivateKey,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
// Acquire a new access token
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Update the certificate path to point to the example-app.pfx file which was created in previous section.
|
|
||||||
|
|
||||||
|
|
||||||
#### Device Code
|
|
||||||
|
|
||||||
```Go
|
|
||||||
oauthClient := &http.Client{}
|
|
||||||
|
|
||||||
// Acquire the device code
|
|
||||||
deviceCode, err := adal.InitiateDeviceAuth(
|
|
||||||
oauthClient,
|
|
||||||
oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
resource)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display the authentication message
|
|
||||||
fmt.Println(*deviceCode.Message)
|
|
||||||
|
|
||||||
// Wait here until the user is authenticated
|
|
||||||
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromManualToken(
|
|
||||||
oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
resource,
|
|
||||||
*token,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Username password authenticate
|
|
||||||
|
|
||||||
```Go
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
|
|
||||||
oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Authorization code authenticate
|
|
||||||
|
|
||||||
``` Go
|
|
||||||
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
|
|
||||||
oauthConfig,
|
|
||||||
applicationID,
|
|
||||||
clientSecret,
|
|
||||||
authorizationCode,
|
|
||||||
redirectURI,
|
|
||||||
resource,
|
|
||||||
callbacks...)
|
|
||||||
|
|
||||||
err = spt.Refresh()
|
|
||||||
if (err == nil) {
|
|
||||||
token := spt.Token
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Command Line Tool
|
|
||||||
|
|
||||||
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
|
|
||||||
|
|
||||||
```
|
|
||||||
adal -h
|
|
||||||
|
|
||||||
Usage of ./adal:
|
|
||||||
-applicationId string
|
|
||||||
application id
|
|
||||||
-certificatePath string
|
|
||||||
path to pk12/PFC application certificate
|
|
||||||
-mode string
|
|
||||||
authentication mode (device, secret, cert, refresh) (default "device")
|
|
||||||
-resource string
|
|
||||||
resource for which the token is requested
|
|
||||||
-secret string
|
|
||||||
application secret
|
|
||||||
-tenantId string
|
|
||||||
tenant id
|
|
||||||
-tokenCachePath string
|
|
||||||
location of oath token cache (default "/home/cgc/.adal/accessToken.json")
|
|
||||||
```
|
|
||||||
|
|
||||||
Example acquire a token for `https://management.core.windows.net/` using device code flow:
|
|
||||||
|
|
||||||
```
|
|
||||||
adal -mode device \
|
|
||||||
-applicationId "APPLICATION_ID" \
|
|
||||||
-tenantId "TENANT_ID" \
|
|
||||||
-resource https://management.core.windows.net/
|
|
||||||
|
|
||||||
```
|
|
||||||
81
vendor/github.com/Azure/go-autorest/autorest/adal/config.go
generated
vendored
81
vendor/github.com/Azure/go-autorest/autorest/adal/config.go
generated
vendored
|
|
@ -1,81 +0,0 @@
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
activeDirectoryAPIVersion = "1.0"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OAuthConfig represents the endpoints needed
|
|
||||||
// in OAuth operations
|
|
||||||
type OAuthConfig struct {
|
|
||||||
AuthorityEndpoint url.URL
|
|
||||||
AuthorizeEndpoint url.URL
|
|
||||||
TokenEndpoint url.URL
|
|
||||||
DeviceCodeEndpoint url.URL
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsZero returns true if the OAuthConfig object is zero-initialized.
|
|
||||||
func (oac OAuthConfig) IsZero() bool {
|
|
||||||
return oac == OAuthConfig{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateStringParam(param, name string) error {
|
|
||||||
if len(param) == 0 {
|
|
||||||
return fmt.Errorf("parameter '" + name + "' cannot be empty")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
|
|
||||||
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
|
|
||||||
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// it's legal for tenantID to be empty so don't validate it
|
|
||||||
const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
|
|
||||||
u, err := url.Parse(activeDirectoryEndpoint)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
authorityURL, err := u.Parse(tenantID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &OAuthConfig{
|
|
||||||
AuthorityEndpoint: *authorityURL,
|
|
||||||
AuthorizeEndpoint: *authorizeURL,
|
|
||||||
TokenEndpoint: *tokenURL,
|
|
||||||
DeviceCodeEndpoint: *deviceCodeURL,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
242
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
242
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
|
|
@ -1,242 +0,0 @@
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/*
|
|
||||||
This file is largely based on rjw57/oauth2device's code, with the follow differences:
|
|
||||||
* scope -> resource, and only allow a single one
|
|
||||||
* receive "Message" in the DeviceCode struct and show it to users as the prompt
|
|
||||||
* azure-xplat-cli has the following behavior that this emulates:
|
|
||||||
- does not send client_secret during the token exchange
|
|
||||||
- sends resource again in the token exchange request
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
logPrefix = "autorest/adal/devicetoken:"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
|
|
||||||
ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
|
|
||||||
ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
|
|
||||||
ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
|
|
||||||
ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
|
|
||||||
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
|
|
||||||
|
|
||||||
// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
|
|
||||||
ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
|
|
||||||
|
|
||||||
// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
|
|
||||||
ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
|
|
||||||
|
|
||||||
errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
|
|
||||||
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
|
|
||||||
errTokenSendingFails = "Error occurred while sending request with device code for a token"
|
|
||||||
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
|
|
||||||
errStatusNotOK = "Error HTTP status != 200"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeviceCode is the object returned by the device auth endpoint.
// It contains information to instruct the user to complete the auth flow.
type DeviceCode struct {
	DeviceCode      *string `json:"device_code,omitempty"`
	UserCode        *string `json:"user_code,omitempty"`
	VerificationURL *string `json:"verification_url,omitempty"`
	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
	Interval        *int64  `json:"interval,string,omitempty"`

	Message *string `json:"message"` // Azure specific
	// The fields below are not part of the wire format: InitiateDeviceAuth
	// stores them so CheckForUserCompletion can reuse them when exchanging
	// the device code for a token.
	Resource    string // store the following, stored when initiating, used when exchanging
	OAuthConfig OAuthConfig
	ClientID    string
}

// TokenError is the object returned by the token exchange endpoint
// when something is amiss.
type TokenError struct {
	Error            *string `json:"error,omitempty"`
	ErrorCodes       []int   `json:"error_codes,omitempty"`
	ErrorDescription *string `json:"error_description,omitempty"`
	Timestamp        *string `json:"timestamp,omitempty"`
	TraceID          *string `json:"trace_id,omitempty"`
}

// deviceToken is the object returned by the token exchange endpoint.
// It can either look like a Token or an ErrorToken, so put both here
// and check for presence of "Error" to know if we are in error state.
type deviceToken struct {
	Token
	TokenError
}
|
|
||||||
|
|
||||||
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
	// Form-encoded request body; the device endpoint takes only client and resource.
	v := url.Values{
		"client_id": []string{clientID},
		"resource":  []string{resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	// Anything other than 200 is a hard failure when requesting a device code.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
	}

	// An empty (whitespace-only) body is treated as a distinct error.
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrDeviceCodeEmpty
	}

	var code DeviceCode
	err = json.Unmarshal(rb, &code)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
	}

	// Remember the request parameters so the later token exchange
	// (CheckForUserCompletion) can reuse them.
	code.ClientID = clientID
	code.Resource = resource
	code.OAuthConfig = oauthConfig

	return &code, nil
}
|
|
||||||
|
|
||||||
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed.
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
	// Note: per the azure-xplat-cli behavior this emulates, client_secret is
	// not sent here and resource is sent again.
	v := url.Values{
		"client_id":  []string{code.ClientID},
		"code":       []string{*code.DeviceCode},
		"grant_type": []string{OAuthGrantTypeDeviceCode},
		"resource":   []string{code.Resource},
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))

	req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	resp, err := sender.Do(req)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
	}
	defer resp.Body.Close()

	rb, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// A non-200 response WITH a body is expected mid-flow: the error JSON
	// decoded below carries states like authorization_pending. Only fail
	// outright when the body is empty as well.
	if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
	}
	if len(strings.Trim(string(rb), " ")) == 0 {
		return nil, ErrOAuthTokenEmpty
	}

	var token deviceToken
	err = json.Unmarshal(rb, &token)
	if err != nil {
		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
	}

	// No "error" field means the exchange succeeded and token.Token is populated.
	if token.Error == nil {
		return &token.Token, nil
	}

	// Map OAuth error strings onto the package's sentinel errors so callers
	// (e.g. WaitForUserCompletion) can switch on them.
	switch *token.Error {
	case "authorization_pending":
		return nil, ErrDeviceAuthorizationPending
	case "slow_down":
		return nil, ErrDeviceSlowDown
	case "access_denied":
		return nil, ErrDeviceAccessDenied
	case "code_expired":
		return nil, ErrDeviceCodeExpired
	default:
		return nil, ErrDeviceGeneric
	}
}
|
|
||||||
|
|
||||||
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
|
|
||||||
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
|
|
||||||
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
|
|
||||||
intervalDuration := time.Duration(*code.Interval) * time.Second
|
|
||||||
waitDuration := intervalDuration
|
|
||||||
|
|
||||||
for {
|
|
||||||
token, err := CheckForUserCompletion(sender, code)
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
return token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch err {
|
|
||||||
case ErrDeviceSlowDown:
|
|
||||||
waitDuration += waitDuration
|
|
||||||
case ErrDeviceAuthorizationPending:
|
|
||||||
// noop
|
|
||||||
default: // everything else is "fatal" to us
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if waitDuration > (intervalDuration * 3) {
|
|
||||||
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
time.Sleep(waitDuration)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
20
vendor/github.com/Azure/go-autorest/autorest/adal/msi.go
generated
vendored
20
vendor/github.com/Azure/go-autorest/autorest/adal/msi.go
generated
vendored
|
|
@ -1,20 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// msiPath is the path to the MSI Extension settings file (to discover the endpoint).
// Non-Windows build: the Linux waagent location.
var msiPath = "/var/lib/waagent/ManagedIdentity-Settings"
|
|
||||||
25
vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go
generated
vendored
25
vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go
generated
vendored
|
|
@ -1,25 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// msiPath is the path to the MSI Extension settings file (to discover the endpoint).
// Windows build: rooted at %SystemDrive%, joined with forward slashes.
var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/")
|
|
||||||
73
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
73
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
|
|
@ -1,73 +0,0 @@
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LoadToken restores a Token object from a file located at 'path'.
|
|
||||||
func LoadToken(path string) (*Token, error) {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
var token Token
|
|
||||||
|
|
||||||
dec := json.NewDecoder(file)
|
|
||||||
if err = dec.Decode(&token); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
|
|
||||||
}
|
|
||||||
return &token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveToken persists an oauth token at the given location on disk.
|
|
||||||
// It moves the new file into place so it can safely be used to replace an existing file
|
|
||||||
// that maybe accessed by multiple processes.
|
|
||||||
func SaveToken(path string, mode os.FileMode, token Token) error {
|
|
||||||
dir := filepath.Dir(path)
|
|
||||||
err := os.MkdirAll(dir, os.ModePerm)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
newFile, err := ioutil.TempFile(dir, "token")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
|
|
||||||
}
|
|
||||||
tempPath := newFile.Name()
|
|
||||||
|
|
||||||
if err := json.NewEncoder(newFile).Encode(token); err != nil {
|
|
||||||
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
|
|
||||||
}
|
|
||||||
if err := newFile.Close(); err != nil {
|
|
||||||
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Atomic replace to avoid multi-writer file corruptions
|
|
||||||
if err := os.Rename(tempPath, path); err != nil {
|
|
||||||
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
|
|
||||||
}
|
|
||||||
if err := os.Chmod(path, mode); err != nil {
|
|
||||||
return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
60
vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
generated
vendored
60
vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
generated
vendored
|
|
@ -1,60 +0,0 @@
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// contentType is the HTTP Content-Type header name.
	contentType = "Content-Type"
	// mimeTypeFormPost is the media type for URL-encoded form bodies.
	mimeTypeFormPost = "application/x-www-form-urlencoded"
)
|
|
||||||
|
|
||||||
// Sender is the interface that wraps the Do method to send HTTP requests.
|
|
||||||
//
|
|
||||||
// The standard http.Client conforms to this interface.
|
|
||||||
type Sender interface {
|
|
||||||
Do(*http.Request) (*http.Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SenderFunc is a method that implements the Sender interface.
|
|
||||||
type SenderFunc func(*http.Request) (*http.Response, error)
|
|
||||||
|
|
||||||
// Do implements the Sender interface on SenderFunc.
|
|
||||||
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
|
|
||||||
return sf(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
|
|
||||||
// http.Request and pass it along or, first, pass the http.Request along then react to the
|
|
||||||
// http.Response result.
|
|
||||||
type SendDecorator func(Sender) Sender
|
|
||||||
|
|
||||||
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
|
|
||||||
func CreateSender(decorators ...SendDecorator) Sender {
|
|
||||||
return DecorateSender(&http.Client{}, decorators...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
|
|
||||||
// the Sender. Decorators are applied in the order received, but their affect upon the request
|
|
||||||
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
|
|
||||||
// post-decorator (pass the http.Request along and react to the results in http.Response).
|
|
||||||
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
|
|
||||||
for _, decorate := range decorators {
|
|
||||||
s = decorate(s)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
662
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
662
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
|
|
@ -1,662 +0,0 @@
|
||||||
package adal
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest/date"
|
|
||||||
"github.com/dgrijalva/jwt-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// defaultRefresh is how far before expiry a token is considered due for refresh.
	defaultRefresh = 5 * time.Minute

	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
	OAuthGrantTypeDeviceCode = "device_code"

	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
	OAuthGrantTypeClientCredentials = "client_credentials"

	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
	OAuthGrantTypeUserPass = "password"

	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
	OAuthGrantTypeRefreshToken = "refresh_token"

	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
	OAuthGrantTypeAuthorizationCode = "authorization_code"

	// metadataHeader is the header required by MSI extension
	metadataHeader = "Metadata"
)
|
|
||||||
|
|
||||||
// OAuthTokenProvider is an interface which should be implemented by an access token retriever.
type OAuthTokenProvider interface {
	OAuthToken() string
}

// TokenRefreshError is an interface used by errors returned during token refresh.
type TokenRefreshError interface {
	error
	// Response returns the HTTP response associated with the refresh failure.
	Response() *http.Response
}

// Refresher is an interface for token refresh functionality.
type Refresher interface {
	Refresh() error
	RefreshExchange(resource string) error
	EnsureFresh() error
}

// TokenRefreshCallback is the type representing callbacks that will be called after
// a successful token refresh.
type TokenRefreshCallback func(Token) error
|
|
||||||
|
|
||||||
// Token encapsulates the access token used to authorize Azure requests.
type Token struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`

	// Expiry fields arrive as strings; ExpiresOn is parsed as integer Unix
	// seconds by Expires().
	ExpiresIn string `json:"expires_in"`
	ExpiresOn string `json:"expires_on"`
	NotBefore string `json:"not_before"`

	Resource string `json:"resource"`
	Type     string `json:"token_type"`
}
|
|
||||||
|
|
||||||
// IsZero returns true if the token object is zero-initialized.
|
|
||||||
func (t Token) IsZero() bool {
|
|
||||||
return t == Token{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expires returns the time.Time when the Token expires.
|
|
||||||
func (t Token) Expires() time.Time {
|
|
||||||
s, err := strconv.Atoi(t.ExpiresOn)
|
|
||||||
if err != nil {
|
|
||||||
s = -3600
|
|
||||||
}
|
|
||||||
|
|
||||||
expiration := date.NewUnixTimeFromSeconds(float64(s))
|
|
||||||
|
|
||||||
return time.Time(expiration).UTC()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsExpired returns true if the Token is expired, false otherwise.
|
|
||||||
func (t Token) IsExpired() bool {
|
|
||||||
return t.WillExpireIn(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
|
|
||||||
// from now, false otherwise.
|
|
||||||
func (t Token) WillExpireIn(d time.Duration) bool {
|
|
||||||
return !t.Expires().After(time.Now().Add(d))
|
|
||||||
}
|
|
||||||
|
|
||||||
//OAuthToken return the current access token
|
|
||||||
func (t *Token) OAuthToken() string {
|
|
||||||
return t.AccessToken
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServicePrincipalNoSecret represents a secret type that contains no secret,
// meaning it is not valid for fetching a fresh token. This is used by Manual.
type ServicePrincipalNoSecret struct {
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It always returns an error for the ServicePrincipalNoSecret type, since no
// secret material is available to acquire a new token.
func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
}

// ServicePrincipalSecret is an interface that allows various secret mechanisms
// to fill the form that is submitted when acquiring an oAuth token.
type ServicePrincipalSecret interface {
	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
}
|
|
||||||
|
|
||||||
// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
type ServicePrincipalTokenSecret struct {
	ClientSecret string
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It populates the form submitted during oAuth Token Acquisition using the client_secret.
func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	v.Set("client_secret", tokenSecret.ClientSecret)
	return nil
}
|
|
||||||
|
|
||||||
// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
type ServicePrincipalCertificateSecret struct {
	Certificate *x509.Certificate
	PrivateKey  *rsa.PrivateKey
}

// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
// It carries no fields: the MSI endpoint authenticates the request itself.
type ServicePrincipalMSISecret struct {
}

// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
type ServicePrincipalUsernamePasswordSecret struct {
	Username string
	Password string
}

// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
type ServicePrincipalAuthorizationCodeSecret struct {
	ClientSecret      string
	AuthorizationCode string
	RedirectURI       string
}
|
|
||||||
|
|
||||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
|
||||||
func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
|
||||||
v.Set("code", secret.AuthorizationCode)
|
|
||||||
v.Set("client_secret", secret.ClientSecret)
|
|
||||||
v.Set("redirect_uri", secret.RedirectURI)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
|
||||||
func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
|
||||||
v.Set("username", secret.Username)
|
|
||||||
v.Set("password", secret.Password)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
|
||||||
func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignJwt returns the JWT signed with the certificate's private key.
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
	// x5t is the base64url-encoded SHA-1 thumbprint of the certificate, which
	// lets the server select the matching public key to verify the assertion.
	hasher := sha1.New()
	_, err := hasher.Write(secret.Certificate.Raw)
	if err != nil {
		return "", err
	}

	thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))

	// The jti (JWT ID) claim provides a unique identifier for the JWT.
	jti := make([]byte, 20)
	_, err = rand.Read(jti)
	if err != nil {
		return "", err
	}

	token := jwt.New(jwt.SigningMethodRS256)
	token.Header["x5t"] = thumbprint
	token.Claims = jwt.MapClaims{
		// Audience is the token endpoint the assertion will be presented to.
		"aud": spt.oauthConfig.TokenEndpoint.String(),
		"iss": spt.clientID,
		"sub": spt.clientID,
		"jti": base64.URLEncoding.EncodeToString(jti),
		"nbf": time.Now().Unix(),
		// The assertion is valid for 24 hours from now.
		"exp": time.Now().Add(time.Hour * 24).Unix(),
	}

	signedString, err := token.SignedString(secret.PrivateKey)
	return signedString, err
}
|
|
||||||
|
|
||||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
|
||||||
// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
|
|
||||||
func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
|
||||||
jwt, err := secret.SignJwt(spt)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set("client_assertion", jwt)
|
|
||||||
v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
	Token

	secret          ServicePrincipalSecret // credential material used when refreshing
	oauthConfig     OAuthConfig            // endpoints for the target tenant
	clientID        string
	resource        string
	autoRefresh     bool        // when true, EnsureFresh refreshes near-expiry tokens
	autoRefreshLock *sync.Mutex // serializes refreshes triggered via EnsureFresh
	refreshWithin   time.Duration
	sender          Sender // HTTP client used to call the token endpoint

	refreshCallbacks []TokenRefreshCallback // invoked after each successful refresh
}
|
|
||||||
|
|
||||||
func validateOAuthConfig(oac OAuthConfig) error {
|
|
||||||
if oac.IsZero() {
|
|
||||||
return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
|
|
||||||
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(id, "id"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if secret == nil {
|
|
||||||
return nil, fmt.Errorf("parameter 'secret' cannot be nil")
|
|
||||||
}
|
|
||||||
spt := &ServicePrincipalToken{
|
|
||||||
oauthConfig: oauthConfig,
|
|
||||||
secret: secret,
|
|
||||||
clientID: id,
|
|
||||||
resource: resource,
|
|
||||||
autoRefresh: true,
|
|
||||||
autoRefreshLock: &sync.Mutex{},
|
|
||||||
refreshWithin: defaultRefresh,
|
|
||||||
sender: &http.Client{},
|
|
||||||
refreshCallbacks: callbacks,
|
|
||||||
}
|
|
||||||
return spt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
|
|
||||||
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if token.IsZero() {
|
|
||||||
return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
|
|
||||||
}
|
|
||||||
spt, err := NewServicePrincipalTokenWithSecret(
|
|
||||||
oauthConfig,
|
|
||||||
clientID,
|
|
||||||
resource,
|
|
||||||
&ServicePrincipalNoSecret{},
|
|
||||||
callbacks...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
spt.Token = token
|
|
||||||
|
|
||||||
return spt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
|
|
||||||
// credentials scoped to the named resource.
|
|
||||||
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(secret, "secret"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return NewServicePrincipalTokenWithSecret(
|
|
||||||
oauthConfig,
|
|
||||||
clientID,
|
|
||||||
resource,
|
|
||||||
&ServicePrincipalTokenSecret{
|
|
||||||
ClientSecret: secret,
|
|
||||||
},
|
|
||||||
callbacks...,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes.
|
|
||||||
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if certificate == nil {
|
|
||||||
return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
|
|
||||||
}
|
|
||||||
if privateKey == nil {
|
|
||||||
return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
|
|
||||||
}
|
|
||||||
return NewServicePrincipalTokenWithSecret(
|
|
||||||
oauthConfig,
|
|
||||||
clientID,
|
|
||||||
resource,
|
|
||||||
&ServicePrincipalCertificateSecret{
|
|
||||||
PrivateKey: privateKey,
|
|
||||||
Certificate: certificate,
|
|
||||||
},
|
|
||||||
callbacks...,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
|
|
||||||
func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(username, "username"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(password, "password"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return NewServicePrincipalTokenWithSecret(
|
|
||||||
oauthConfig,
|
|
||||||
clientID,
|
|
||||||
resource,
|
|
||||||
&ServicePrincipalUsernamePasswordSecret{
|
|
||||||
Username: username,
|
|
||||||
Password: password,
|
|
||||||
},
|
|
||||||
callbacks...,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the
|
|
||||||
func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
|
|
||||||
if err := validateOAuthConfig(oauthConfig); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(clientID, "clientID"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewServicePrincipalTokenWithSecret(
|
|
||||||
oauthConfig,
|
|
||||||
clientID,
|
|
||||||
resource,
|
|
||||||
&ServicePrincipalAuthorizationCodeSecret{
|
|
||||||
ClientSecret: clientSecret,
|
|
||||||
AuthorizationCode: authorizationCode,
|
|
||||||
RedirectURI: redirectURI,
|
|
||||||
},
|
|
||||||
callbacks...,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
func GetMSIVMEndpoint() (string, error) {
	// msiPath is the package-level location of the MSI extension settings file.
	return getMSIVMEndpoint(msiPath)
}
|
|
||||||
|
|
||||||
func getMSIVMEndpoint(path string) (string, error) {
|
|
||||||
// Read MSI settings
|
|
||||||
bytes, err := ioutil.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
msiSettings := struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
}{}
|
|
||||||
err = json.Unmarshal(bytes, &msiSettings)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return msiSettings.URL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	// A nil userAssignedID selects the system-assigned identity.
	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
}
|
|
||||||
|
|
||||||
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the specified user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	// The user-assigned identity becomes the token's clientID.
	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
}
|
|
||||||
|
|
||||||
func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
|
||||||
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateStringParam(resource, "resource"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if userAssignedID != nil {
|
|
||||||
if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// We set the oauth config token endpoint to be MSI's endpoint
|
|
||||||
msiEndpointURL, err := url.Parse(msiEndpoint)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
spt := &ServicePrincipalToken{
|
|
||||||
oauthConfig: *oauthConfig,
|
|
||||||
secret: &ServicePrincipalMSISecret{},
|
|
||||||
resource: resource,
|
|
||||||
autoRefresh: true,
|
|
||||||
autoRefreshLock: &sync.Mutex{},
|
|
||||||
refreshWithin: defaultRefresh,
|
|
||||||
sender: &http.Client{},
|
|
||||||
refreshCallbacks: callbacks,
|
|
||||||
}
|
|
||||||
|
|
||||||
if userAssignedID != nil {
|
|
||||||
spt.clientID = *userAssignedID
|
|
||||||
}
|
|
||||||
|
|
||||||
return spt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// internal type that implements TokenRefreshError
type tokenRefreshError struct {
	message string         // human-readable failure description
	resp    *http.Response // raw response from the failed refresh; may be nil
}

// Error implements the error interface which is part of the TokenRefreshError interface.
func (tre tokenRefreshError) Error() string {
	return tre.message
}

// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
func (tre tokenRefreshError) Response() *http.Response {
	return tre.resp
}

// newTokenRefreshError wraps a message and its originating response in a TokenRefreshError.
func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
	return tokenRefreshError{message: message, resp: resp}
}
|
|
||||||
|
|
||||||
// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (spt *ServicePrincipalToken) EnsureFresh() error {
	if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) {
		// take the lock then check to see if the token was already refreshed
		// by another goroutine while this one was waiting (double-checked locking).
		spt.autoRefreshLock.Lock()
		defer spt.autoRefreshLock.Unlock()
		if spt.WillExpireIn(spt.refreshWithin) {
			return spt.Refresh()
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
|
|
||||||
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
|
|
||||||
if spt.refreshCallbacks != nil {
|
|
||||||
for _, callback := range spt.refreshCallbacks {
|
|
||||||
err := callback(spt.Token)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Refresh obtains a fresh token for the Service Principal.
// This method is not safe for concurrent use and should be synchronized.
func (spt *ServicePrincipalToken) Refresh() error {
	return spt.refreshInternal(spt.resource)
}

// RefreshExchange refreshes the token, but for a different resource.
// This method is not safe for concurrent use and should be synchronized.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
	return spt.refreshInternal(resource)
}
|
|
||||||
|
|
||||||
func (spt *ServicePrincipalToken) getGrantType() string {
|
|
||||||
switch spt.secret.(type) {
|
|
||||||
case *ServicePrincipalUsernamePasswordSecret:
|
|
||||||
return OAuthGrantTypeUserPass
|
|
||||||
case *ServicePrincipalAuthorizationCodeSecret:
|
|
||||||
return OAuthGrantTypeAuthorizationCode
|
|
||||||
default:
|
|
||||||
return OAuthGrantTypeClientCredentials
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// refreshInternal POSTs the OAuth token request form to the configured token
// endpoint and, on success, stores the new token and fires the refresh callbacks.
func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
	v := url.Values{}
	v.Set("client_id", spt.clientID)
	v.Set("resource", resource)

	// Prefer the refresh-token grant when a refresh token is already held;
	// otherwise let the secret implementation supply its credentials.
	if spt.RefreshToken != "" {
		v.Set("grant_type", OAuthGrantTypeRefreshToken)
		v.Set("refresh_token", spt.RefreshToken)
	} else {
		v.Set("grant_type", spt.getGrantType())
		err := spt.secret.SetAuthenticationValues(spt, &v)
		if err != nil {
			return err
		}
	}

	s := v.Encode()
	body := ioutil.NopCloser(strings.NewReader(s))
	req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body)
	if err != nil {
		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
	}

	req.ContentLength = int64(len(s))
	req.Header.Set(contentType, mimeTypeFormPost)
	// The MSI endpoint requires the Metadata header on every request.
	if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
		req.Header.Set(metadataHeader, "true")
	}
	resp, err := spt.sender.Do(req)
	if err != nil {
		return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
	}

	defer resp.Body.Close()
	rb, err := ioutil.ReadAll(resp.Body)

	// Non-200 responses become TokenRefreshError so callers can inspect the raw
	// response. The read error is deliberately checked after the status code so
	// the body can be included in the message when it was readable.
	if resp.StatusCode != http.StatusOK {
		if err != nil {
			return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode), resp)
		}
		return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
	}

	if err != nil {
		return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
	}
	if len(strings.Trim(string(rb), " ")) == 0 {
		return fmt.Errorf("adal: Empty service principal token received during refresh")
	}
	var token Token
	err = json.Unmarshal(rb, &token)
	if err != nil {
		return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
	}

	spt.Token = token

	return spt.InvokeRefreshCallbacks(token)
}
|
|
||||||
|
|
||||||
// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
// When disabled, callers must invoke Refresh explicitly.
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
	spt.autoRefresh = autoRefresh
}
|
|
||||||
|
|
||||||
// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
|
|
||||||
// refresh the token.
|
|
||||||
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
|
|
||||||
spt.refreshWithin = d
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSender sets the http.Client used when obtaining the Service Principal token. An
// undecorated http.Client is used by default.
// NOTE(review): this is an unsynchronized write; set the sender before issuing
// requests rather than concurrently with a refresh.
func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
|
|
||||||
254
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
254
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
|
|
@ -1,254 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest/adal"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	bearerChallengeHeader       = "Www-Authenticate"          // header carrying bearer challenges
	bearer                      = "Bearer"                    // challenge scheme prefix
	tenantID                    = "tenantID"                  // key for the parsed tenant in bearerChallenge.values
	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key" // Cognitive Services subscription-key header
	bingAPISdkHeader            = "X-BingApis-SDK-Client"     // identifies the SDK client to Bing APIs
	golangBingAPISdkHeaderValue = "Go-SDK"                    // value sent for bingAPISdkHeader
)
|
|
||||||
|
|
||||||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
// state of the formed HTTP request.
type Authorizer interface {
	// WithAuthorization returns a decorator that attaches credentials to a request.
	WithAuthorization() PrepareDecorator
}
|
|
||||||
|
|
||||||
// NullAuthorizer implements a default, "do nothing" Authorizer.
// It satisfies the Authorizer interface without modifying the request.
type NullAuthorizer struct{}

// WithAuthorization returns a PrepareDecorator that does nothing.
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
	return WithNothing()
}
|
|
||||||
|
|
||||||
// APIKeyAuthorizer implements API Key authorization.
type APIKeyAuthorizer struct {
	headers         map[string]interface{} // header name -> value added to every request
	queryParameters map[string]interface{} // query key -> value added to every request
}

// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
	return NewAPIKeyAuthorizer(headers, nil)
}

// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
	return NewAPIKeyAuthorizer(nil, queryParameters)
}

// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer from the supplied headers and
// query parameters; either map may be nil.
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters
|
|
||||||
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
type CognitiveServicesAuthorizer struct {
	subscriptionKey string // value sent in the Ocp-Apim-Subscription-Key header
}

// NewCognitiveServicesAuthorizer creates an authorizer for the given Cognitive
// Services subscription key.
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
}
|
|
||||||
|
|
||||||
// WithAuthorization is
|
|
||||||
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := make(map[string]interface{})
|
|
||||||
headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
|
|
||||||
headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
|
|
||||||
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// BearerAuthorizer implements the bearer authorization
type BearerAuthorizer struct {
	tokenProvider adal.OAuthTokenProvider // source of the OAuth token placed in the header
}

// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
	return &BearerAuthorizer{tokenProvider: tp}
}
|
|
||||||
|
|
||||||
// withBearerAuthorization returns a PrepareDecorator that sets the
// Authorization header to "Bearer <current token>".
func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator {
	return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the token.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			// Refresh first when the provider supports it, so the header below
			// always carries a current token.
			refresher, ok := ba.tokenProvider.(adal.Refresher)
			if ok {
				err := refresher.EnsureFresh()
				if err != nil {
					// Surface the raw refresh response when the error carries one.
					var resp *http.Response
					if tokError, ok := err.(adal.TokenRefreshError); ok {
						resp = tokError.Response()
					}
					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
						"Failed to refresh the Token for request to %s", r.URL)
				}
			}
			return (ba.withBearerAuthorization()(p)).Prepare(r)
		})
	}
}
|
|
||||||
|
|
||||||
// BearerAuthorizerCallbackFunc is the authentication callback signature.
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)

// BearerAuthorizerCallback implements bearer authorization via a callback.
type BearerAuthorizerCallback struct {
	sender   Sender                       // used to probe the endpoint for a bearer challenge
	callback BearerAuthorizerCallbackFunc // invoked with the tenant/resource parsed from the challenge
}
|
|
||||||
|
|
||||||
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
|
|
||||||
// is invoked when the HTTP request is submitted.
|
|
||||||
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
|
|
||||||
if sender == nil {
|
|
||||||
sender = &http.Client{}
|
|
||||||
}
|
|
||||||
return &BearerAuthorizerCallback{sender: sender, callback: callback}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
|
|
||||||
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
|
|
||||||
//
|
|
||||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
|
||||||
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
// make a copy of the request and remove the body as it's not
|
|
||||||
// required and avoids us having to create a copy of it.
|
|
||||||
rCopy := *r
|
|
||||||
removeRequestBody(&rCopy)
|
|
||||||
|
|
||||||
resp, err := bacb.sender.Do(&rCopy)
|
|
||||||
if err == nil && resp.StatusCode == 401 {
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if hasBearerChallenge(resp) {
|
|
||||||
bc, err := newBearerChallenge(resp)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if bacb.callback != nil {
|
|
||||||
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return ba.WithAuthorization()(p).Prepare(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns true if the HTTP response contains a bearer challenge
|
|
||||||
func hasBearerChallenge(resp *http.Response) bool {
|
|
||||||
authHeader := resp.Header.Get(bearerChallengeHeader)
|
|
||||||
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// bearerChallenge holds the key=value parameters parsed from a
// Www-Authenticate bearer challenge.
type bearerChallenge struct {
	values map[string]string
}
|
|
||||||
|
|
||||||
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
|
|
||||||
challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
|
|
||||||
trimmedChallenge := challenge[len(bearer)+1:]
|
|
||||||
|
|
||||||
// challenge is a set of key=value pairs that are comma delimited
|
|
||||||
pairs := strings.Split(trimmedChallenge, ",")
|
|
||||||
if len(pairs) < 1 {
|
|
||||||
err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
bc.values = make(map[string]string)
|
|
||||||
for i := range pairs {
|
|
||||||
trimmedPair := strings.TrimSpace(pairs[i])
|
|
||||||
pair := strings.Split(trimmedPair, "=")
|
|
||||||
if len(pair) == 2 {
|
|
||||||
// remove the enclosing quotes
|
|
||||||
key := strings.Trim(pair[0], "\"")
|
|
||||||
value := strings.Trim(pair[1], "\"")
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case "authorization", "authorization_uri":
|
|
||||||
// strip the tenant ID from the authorization URL
|
|
||||||
asURL, err := url.Parse(value)
|
|
||||||
if err != nil {
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
bc.values[tenantID] = asURL.Path[1:]
|
|
||||||
default:
|
|
||||||
bc.values[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return bc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
type EventGridKeyAuthorizer struct {
	topicKey string // shared access key for the target topic
}

// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
// with the specified topic key.
func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
	return EventGridKeyAuthorizer{topicKey: topicKey}
}
|
|
||||||
|
|
||||||
// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
|
|
||||||
func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
|
||||||
headers := map[string]interface{}{
|
|
||||||
"aeg-sas-key": egta.topicKey,
|
|
||||||
}
|
|
||||||
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
|
||||||
}
|
|
||||||
132
vendor/github.com/Azure/go-autorest/autorest/autorest.go
generated
vendored
132
vendor/github.com/Azure/go-autorest/autorest/autorest.go
generated
vendored
|
|
@ -1,132 +0,0 @@
|
||||||
/*
|
|
||||||
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
|
|
||||||
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
|
|
||||||
generated Go code.
|
|
||||||
|
|
||||||
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
|
|
||||||
and Responding. A typical pattern is:
|
|
||||||
|
|
||||||
req, err := Prepare(&http.Request{},
|
|
||||||
token.WithAuthorization())
|
|
||||||
|
|
||||||
resp, err := Send(req,
|
|
||||||
WithLogging(logger),
|
|
||||||
DoErrorIfStatusCode(http.StatusInternalServerError),
|
|
||||||
DoCloseIfError(),
|
|
||||||
DoRetryForAttempts(5, time.Second))
|
|
||||||
|
|
||||||
err = Respond(resp,
|
|
||||||
ByDiscardingBody(),
|
|
||||||
ByClosing())
|
|
||||||
|
|
||||||
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
|
|
||||||
and then pass the data along, pass the data first and then modify the result, or wrap themselves
|
|
||||||
around passing the data (such as a logger might do). Decorators run in the order provided. For
|
|
||||||
example, the following:
|
|
||||||
|
|
||||||
req, err := Prepare(&http.Request{},
|
|
||||||
WithBaseURL("https://microsoft.com/"),
|
|
||||||
WithPath("a"),
|
|
||||||
WithPath("b"),
|
|
||||||
WithPath("c"))
|
|
||||||
|
|
||||||
will set the URL to:
|
|
||||||
|
|
||||||
https://microsoft.com/a/b/c
|
|
||||||
|
|
||||||
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
|
|
||||||
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
|
|
||||||
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
|
|
||||||
all bound together by means of input / output channels.
|
|
||||||
|
|
||||||
Decorators hold their passed state within a closure (such as the path components in the example
|
|
||||||
above). Be careful to share Preparers and Responders only in a context where such held state
|
|
||||||
applies. For example, it may not make sense to share a Preparer that applies a query string from a
|
|
||||||
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
|
|
||||||
struct (e.g., ByUnmarshallingJson) is likely incorrect.
|
|
||||||
|
|
||||||
Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
|
|
||||||
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
|
|
||||||
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
|
|
||||||
correct parsing and formatting.
|
|
||||||
|
|
||||||
Errors raised by autorest objects and methods will conform to the autorest.Error interface.
|
|
||||||
|
|
||||||
See the included examples for more detail. For details on the suggested use of this package by
|
|
||||||
generated clients, see the Client described below.
|
|
||||||
*/
|
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// HeaderLocation specifies the HTTP Location header.
|
|
||||||
HeaderLocation = "Location"
|
|
||||||
|
|
||||||
// HeaderRetryAfter specifies the HTTP Retry-After header.
|
|
||||||
HeaderRetryAfter = "Retry-After"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
|
|
||||||
// and false otherwise.
|
|
||||||
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
|
|
||||||
if resp == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return containsInt(codes, resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLocation retrieves the URL from the Location header of the passed response.
|
|
||||||
func GetLocation(resp *http.Response) string {
|
|
||||||
return resp.Header.Get(HeaderLocation)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
|
|
||||||
// the header is absent or is malformed, it will return the supplied default delay time.Duration.
|
|
||||||
func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
|
|
||||||
retry := resp.Header.Get(HeaderRetryAfter)
|
|
||||||
if retry == "" {
|
|
||||||
return defaultDelay
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := time.ParseDuration(retry + "s")
|
|
||||||
if err != nil {
|
|
||||||
return defaultDelay
|
|
||||||
}
|
|
||||||
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
|
|
||||||
func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
|
|
||||||
location := GetLocation(resp)
|
|
||||||
if location == "" {
|
|
||||||
return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := Prepare(&http.Request{Cancel: cancel},
|
|
||||||
AsGet(),
|
|
||||||
WithBaseURL(location))
|
|
||||||
if err != nil {
|
|
||||||
return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
|
|
||||||
}
|
|
||||||
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
478
vendor/github.com/Azure/go-autorest/autorest/azure/async.go
generated
vendored
478
vendor/github.com/Azure/go-autorest/autorest/azure/async.go
generated
vendored
|
|
@ -1,478 +0,0 @@
|
||||||
package azure
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
"github.com/Azure/go-autorest/autorest/date"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
headerAsyncOperation = "Azure-AsyncOperation"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
operationInProgress string = "InProgress"
|
|
||||||
operationCanceled string = "Canceled"
|
|
||||||
operationFailed string = "Failed"
|
|
||||||
operationSucceeded string = "Succeeded"
|
|
||||||
)
|
|
||||||
|
|
||||||
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
|
|
||||||
|
|
||||||
// Future provides a mechanism to access the status and results of an asynchronous request.
|
|
||||||
// Since futures are stateful they should be passed by value to avoid race conditions.
|
|
||||||
type Future struct {
|
|
||||||
req *http.Request
|
|
||||||
resp *http.Response
|
|
||||||
ps pollingState
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFuture returns a new Future object initialized with the specified request.
|
|
||||||
func NewFuture(req *http.Request) Future {
|
|
||||||
return Future{req: req}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response returns the last HTTP response or nil if there isn't one.
|
|
||||||
func (f Future) Response() *http.Response {
|
|
||||||
return f.resp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Status returns the last status message of the operation.
|
|
||||||
func (f Future) Status() string {
|
|
||||||
if f.ps.State == "" {
|
|
||||||
return "Unknown"
|
|
||||||
}
|
|
||||||
return f.ps.State
|
|
||||||
}
|
|
||||||
|
|
||||||
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
|
|
||||||
func (f Future) PollingMethod() PollingMethodType {
|
|
||||||
return f.ps.PollingMethod
|
|
||||||
}
|
|
||||||
|
|
||||||
// Done queries the service to see if the operation has completed.
|
|
||||||
func (f *Future) Done(sender autorest.Sender) (bool, error) {
|
|
||||||
// exit early if this future has terminated
|
|
||||||
if f.ps.hasTerminated() {
|
|
||||||
return true, f.errorInfo()
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := sender.Do(f.req)
|
|
||||||
f.resp = resp
|
|
||||||
if err != nil || !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = updatePollingState(resp, &f.ps)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.ps.hasTerminated() {
|
|
||||||
return true, f.errorInfo()
|
|
||||||
}
|
|
||||||
|
|
||||||
f.req, err = newPollingRequest(f.ps)
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPollingDelay returns a duration the application should wait before checking
|
|
||||||
// the status of the asynchronous request and true; this value is returned from
|
|
||||||
// the service via the Retry-After response header. If the header wasn't returned
|
|
||||||
// then the function returns the zero-value time.Duration and false.
|
|
||||||
func (f Future) GetPollingDelay() (time.Duration, bool) {
|
|
||||||
if f.resp == nil {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
retry := f.resp.Header.Get(autorest.HeaderRetryAfter)
|
|
||||||
if retry == "" {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := time.ParseDuration(retry + "s")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return d, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitForCompletion will return when one of the following conditions is met: the long
|
|
||||||
// running operation has completed, the provided context is cancelled, or the client's
|
|
||||||
// polling duration has been exceeded. It will retry failed polling attempts based on
|
|
||||||
// the retry value defined in the client up to the maximum retry attempts.
|
|
||||||
func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error {
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
done, err := f.Done(client)
|
|
||||||
for attempts := 0; !done; done, err = f.Done(client) {
|
|
||||||
if attempts >= client.RetryAttempts {
|
|
||||||
return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded")
|
|
||||||
}
|
|
||||||
// we want delayAttempt to be zero in the non-error case so
|
|
||||||
// that DelayForBackoff doesn't perform exponential back-off
|
|
||||||
var delayAttempt int
|
|
||||||
var delay time.Duration
|
|
||||||
if err == nil {
|
|
||||||
// check for Retry-After delay, if not present use the client's polling delay
|
|
||||||
var ok bool
|
|
||||||
delay, ok = f.GetPollingDelay()
|
|
||||||
if !ok {
|
|
||||||
delay = client.PollingDelay
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// there was an error polling for status so perform exponential
|
|
||||||
// back-off based on the number of attempts using the client's retry
|
|
||||||
// duration. update attempts after delayAttempt to avoid off-by-one.
|
|
||||||
delayAttempt = attempts
|
|
||||||
delay = client.RetryDuration
|
|
||||||
attempts++
|
|
||||||
}
|
|
||||||
// wait until the delay elapses or the context is cancelled
|
|
||||||
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
|
|
||||||
if !delayElapsed {
|
|
||||||
return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the operation failed the polling state will contain
|
|
||||||
// error information and implements the error interface
|
|
||||||
func (f *Future) errorInfo() error {
|
|
||||||
if !f.ps.hasSucceeded() {
|
|
||||||
return f.ps
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON implements the json.Marshaler interface.
|
|
||||||
func (f Future) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(&f.ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
|
||||||
func (f *Future) UnmarshalJSON(data []byte) error {
|
|
||||||
err := json.Unmarshal(data, &f.ps)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f.req, err = newPollingRequest(f.ps)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
|
|
||||||
// long-running operation. It will delay between requests for the duration specified in the
|
|
||||||
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
|
|
||||||
// closing the optional channel on the http.Request.
|
|
||||||
func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
|
|
||||||
return func(s autorest.Sender) autorest.Sender {
|
|
||||||
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
|
||||||
resp, err = s.Do(r)
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ps := pollingState{}
|
|
||||||
for err == nil {
|
|
||||||
err = updatePollingState(resp, &ps)
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if ps.hasTerminated() {
|
|
||||||
if !ps.hasSucceeded() {
|
|
||||||
err = ps
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err = newPollingRequest(ps)
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
r.Cancel = resp.Request.Cancel
|
|
||||||
|
|
||||||
delay = autorest.GetRetryAfter(resp, delay)
|
|
||||||
resp, err = autorest.SendWithSender(s, r,
|
|
||||||
autorest.AfterDelay(delay))
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getAsyncOperation(resp *http.Response) string {
|
|
||||||
return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasSucceeded(state string) bool {
|
|
||||||
return strings.EqualFold(state, operationSucceeded)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasTerminated(state string) bool {
|
|
||||||
return strings.EqualFold(state, operationCanceled) || strings.EqualFold(state, operationFailed) || strings.EqualFold(state, operationSucceeded)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasFailed(state string) bool {
|
|
||||||
return strings.EqualFold(state, operationFailed)
|
|
||||||
}
|
|
||||||
|
|
||||||
type provisioningTracker interface {
|
|
||||||
state() string
|
|
||||||
hasSucceeded() bool
|
|
||||||
hasTerminated() bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type operationResource struct {
|
|
||||||
// Note:
|
|
||||||
// The specification states services should return the "id" field. However some return it as
|
|
||||||
// "operationId".
|
|
||||||
ID string `json:"id"`
|
|
||||||
OperationID string `json:"operationId"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
Properties map[string]interface{} `json:"properties"`
|
|
||||||
OperationError ServiceError `json:"error"`
|
|
||||||
StartTime date.Time `json:"startTime"`
|
|
||||||
EndTime date.Time `json:"endTime"`
|
|
||||||
PercentComplete float64 `json:"percentComplete"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (or operationResource) state() string {
|
|
||||||
return or.Status
|
|
||||||
}
|
|
||||||
|
|
||||||
func (or operationResource) hasSucceeded() bool {
|
|
||||||
return hasSucceeded(or.state())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (or operationResource) hasTerminated() bool {
|
|
||||||
return hasTerminated(or.state())
|
|
||||||
}
|
|
||||||
|
|
||||||
type provisioningProperties struct {
|
|
||||||
ProvisioningState string `json:"provisioningState"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type provisioningStatus struct {
|
|
||||||
Properties provisioningProperties `json:"properties,omitempty"`
|
|
||||||
ProvisioningError ServiceError `json:"error,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps provisioningStatus) state() string {
|
|
||||||
return ps.Properties.ProvisioningState
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps provisioningStatus) hasSucceeded() bool {
|
|
||||||
return hasSucceeded(ps.state())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps provisioningStatus) hasTerminated() bool {
|
|
||||||
return hasTerminated(ps.state())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps provisioningStatus) hasProvisioningError() bool {
|
|
||||||
return ps.ProvisioningError != ServiceError{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PollingMethodType defines a type used for enumerating polling mechanisms.
|
|
||||||
type PollingMethodType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
|
|
||||||
PollingAsyncOperation PollingMethodType = "AsyncOperation"
|
|
||||||
|
|
||||||
// PollingLocation indicates the polling method uses the Location header.
|
|
||||||
PollingLocation PollingMethodType = "Location"
|
|
||||||
|
|
||||||
// PollingUnknown indicates an unknown polling method and is the default value.
|
|
||||||
PollingUnknown PollingMethodType = ""
|
|
||||||
)
|
|
||||||
|
|
||||||
type pollingState struct {
|
|
||||||
PollingMethod PollingMethodType `json:"pollingMethod"`
|
|
||||||
URI string `json:"uri"`
|
|
||||||
State string `json:"state"`
|
|
||||||
Code string `json:"code"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps pollingState) hasSucceeded() bool {
|
|
||||||
return hasSucceeded(ps.State)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps pollingState) hasTerminated() bool {
|
|
||||||
return hasTerminated(ps.State)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps pollingState) hasFailed() bool {
|
|
||||||
return hasFailed(ps.State)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps pollingState) Error() string {
|
|
||||||
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.State, ps.Code, ps.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// updatePollingState maps the operation status -- retrieved from either a provisioningState
|
|
||||||
// field, the status field of an OperationResource, or inferred from the HTTP status code --
|
|
||||||
// into a well-known states. Since the process begins from the initial request, the state
|
|
||||||
// always comes from either a the provisioningState returned or is inferred from the HTTP
|
|
||||||
// status code. Subsequent requests will read an Azure OperationResource object if the
|
|
||||||
// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
|
|
||||||
// the expected response format.
|
|
||||||
func updatePollingState(resp *http.Response, ps *pollingState) error {
|
|
||||||
// Determine the response shape
|
|
||||||
// -- The first response will always be a provisioningStatus response; only the polling requests,
|
|
||||||
// depending on the header returned, may be something otherwise.
|
|
||||||
var pt provisioningTracker
|
|
||||||
if ps.PollingMethod == PollingAsyncOperation {
|
|
||||||
pt = &operationResource{}
|
|
||||||
} else {
|
|
||||||
pt = &provisioningStatus{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this is the first request (that is, the polling response shape is unknown), determine how
|
|
||||||
// to poll and what to expect
|
|
||||||
if ps.PollingMethod == PollingUnknown {
|
|
||||||
req := resp.Request
|
|
||||||
if req == nil {
|
|
||||||
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prefer the Azure-AsyncOperation header
|
|
||||||
ps.URI = getAsyncOperation(resp)
|
|
||||||
if ps.URI != "" {
|
|
||||||
ps.PollingMethod = PollingAsyncOperation
|
|
||||||
} else {
|
|
||||||
ps.PollingMethod = PollingLocation
|
|
||||||
}
|
|
||||||
|
|
||||||
// Else, use the Location header
|
|
||||||
if ps.URI == "" {
|
|
||||||
ps.URI = autorest.GetLocation(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lastly, requests against an existing resource, use the last request URI
|
|
||||||
if ps.URI == "" {
|
|
||||||
m := strings.ToUpper(req.Method)
|
|
||||||
if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
|
|
||||||
ps.URI = req.URL.String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read and interpret the response (saving the Body in case no polling is necessary)
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
err := autorest.Respond(resp,
|
|
||||||
autorest.ByCopying(b),
|
|
||||||
autorest.ByUnmarshallingJSON(pt),
|
|
||||||
autorest.ByClosing())
|
|
||||||
resp.Body = ioutil.NopCloser(b)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interpret the results
|
|
||||||
// -- Terminal states apply regardless
|
|
||||||
// -- Unknown states are per-service inprogress states
|
|
||||||
// -- Otherwise, infer state from HTTP status code
|
|
||||||
if pt.hasTerminated() {
|
|
||||||
ps.State = pt.state()
|
|
||||||
} else if pt.state() != "" {
|
|
||||||
ps.State = operationInProgress
|
|
||||||
} else {
|
|
||||||
switch resp.StatusCode {
|
|
||||||
case http.StatusAccepted:
|
|
||||||
ps.State = operationInProgress
|
|
||||||
|
|
||||||
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
|
|
||||||
ps.State = operationSucceeded
|
|
||||||
|
|
||||||
default:
|
|
||||||
ps.State = operationFailed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.EqualFold(ps.State, operationInProgress) && ps.URI == "" {
|
|
||||||
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For failed operation, check for error code and message in
|
|
||||||
// -- Operation resource
|
|
||||||
// -- Response
|
|
||||||
// -- Otherwise, Unknown
|
|
||||||
if ps.hasFailed() {
|
|
||||||
if ps.PollingMethod == PollingAsyncOperation {
|
|
||||||
or := pt.(*operationResource)
|
|
||||||
ps.Code = or.OperationError.Code
|
|
||||||
ps.Message = or.OperationError.Message
|
|
||||||
} else {
|
|
||||||
p := pt.(*provisioningStatus)
|
|
||||||
if p.hasProvisioningError() {
|
|
||||||
ps.Code = p.ProvisioningError.Code
|
|
||||||
ps.Message = p.ProvisioningError.Message
|
|
||||||
} else {
|
|
||||||
ps.Code = "Unknown"
|
|
||||||
ps.Message = "None"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPollingRequest(ps pollingState) (*http.Request, error) {
|
|
||||||
reqPoll, err := autorest.Prepare(&http.Request{},
|
|
||||||
autorest.AsGet(),
|
|
||||||
autorest.WithBaseURL(ps.URI))
|
|
||||||
if err != nil {
|
|
||||||
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI)
|
|
||||||
}
|
|
||||||
|
|
||||||
return reqPoll, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
|
|
||||||
type AsyncOpIncompleteError struct {
|
|
||||||
// FutureType is the name of the type composed of a azure.Future.
|
|
||||||
FutureType string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns an error message including the originating type name of the error.
|
|
||||||
func (e AsyncOpIncompleteError) Error() string {
|
|
||||||
return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
|
|
||||||
func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
|
|
||||||
return AsyncOpIncompleteError{
|
|
||||||
FutureType: futureType,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
200
vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
generated
vendored
200
vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
generated
vendored
|
|
@ -1,200 +0,0 @@
|
||||||
/*
|
|
||||||
Package azure provides Azure-specific implementations used with AutoRest.
|
|
||||||
|
|
||||||
See the included examples for more detail.
|
|
||||||
*/
|
|
||||||
package azure
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// HeaderClientID is the Azure extension header to set a user-specified request ID.
|
|
||||||
HeaderClientID = "x-ms-client-request-id"
|
|
||||||
|
|
||||||
// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
|
|
||||||
// should be included in the response.
|
|
||||||
HeaderReturnClientID = "x-ms-return-client-request-id"
|
|
||||||
|
|
||||||
// HeaderRequestID is the Azure extension header of the service generated request ID returned
|
|
||||||
// in the response.
|
|
||||||
HeaderRequestID = "x-ms-request-id"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServiceError encapsulates the error response from an Azure service.
|
|
||||||
type ServiceError struct {
|
|
||||||
Code string `json:"code"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
Details *[]interface{} `json:"details"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (se ServiceError) Error() string {
|
|
||||||
if se.Details != nil {
|
|
||||||
d, err := json.Marshal(*(se.Details))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestError describes an error response returned by Azure service.
|
|
||||||
type RequestError struct {
|
|
||||||
autorest.DetailedError
|
|
||||||
|
|
||||||
// The error returned by the Azure service.
|
|
||||||
ServiceError *ServiceError `json:"error"`
|
|
||||||
|
|
||||||
// The request id (from the x-ms-request-id-header) of the request.
|
|
||||||
RequestID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a human-friendly error message from service error.
|
|
||||||
func (e RequestError) Error() string {
|
|
||||||
return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
|
|
||||||
e.StatusCode, e.ServiceError)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
|
|
||||||
func IsAzureError(e error) bool {
|
|
||||||
_, ok := e.(*RequestError)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrorWithError creates a new Error conforming object from the
|
|
||||||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
|
||||||
// if resp is nil), message, and original error. message is treated as a format
|
|
||||||
// string to which the optional args apply.
|
|
||||||
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
|
|
||||||
if v, ok := original.(*RequestError); ok {
|
|
||||||
return *v
|
|
||||||
}
|
|
||||||
|
|
||||||
statusCode := autorest.UndefinedStatusCode
|
|
||||||
if resp != nil {
|
|
||||||
statusCode = resp.StatusCode
|
|
||||||
}
|
|
||||||
return RequestError{
|
|
||||||
DetailedError: autorest.DetailedError{
|
|
||||||
Original: original,
|
|
||||||
PackageType: packageType,
|
|
||||||
Method: method,
|
|
||||||
StatusCode: statusCode,
|
|
||||||
Message: fmt.Sprintf(message, args...),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|
||||||
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
|
|
||||||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
|
|
||||||
// header to true such that UUID accompanies the http.Response.
|
|
||||||
func WithReturningClientID(uuid string) autorest.PrepareDecorator {
|
|
||||||
preparer := autorest.CreatePreparer(
|
|
||||||
WithClientID(uuid),
|
|
||||||
WithReturnClientID(true))
|
|
||||||
|
|
||||||
return func(p autorest.Preparer) autorest.Preparer {
|
|
||||||
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return preparer.Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|
||||||
// x-ms-client-request-id whose value is passed, undecorated UUID (e.g.,
|
|
||||||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
|
|
||||||
func WithClientID(uuid string) autorest.PrepareDecorator {
|
|
||||||
return autorest.WithHeader(HeaderClientID, uuid)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
|
|
||||||
// x-ms-return-client-request-id whose boolean value indicates if the value of the
|
|
||||||
// x-ms-client-request-id header should be included in the http.Response.
|
|
||||||
func WithReturnClientID(b bool) autorest.PrepareDecorator {
|
|
||||||
return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
|
|
||||||
// http.Request sent to the service (and returned in the http.Response)
|
|
||||||
func ExtractClientID(resp *http.Response) string {
|
|
||||||
return autorest.ExtractHeaderValue(HeaderClientID, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractRequestID extracts the Azure server generated request identifier from the
|
|
||||||
// x-ms-request-id header.
|
|
||||||
func ExtractRequestID(resp *http.Response) string {
|
|
||||||
return autorest.ExtractHeaderValue(HeaderRequestID, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
|
|
||||||
// azure.RequestError by reading the response body unless the response HTTP status code
|
|
||||||
// is among the set passed.
|
|
||||||
//
|
|
||||||
// If there is a chance service may return responses other than the Azure error
|
|
||||||
// format and the response cannot be parsed into an error, a decoding error will
|
|
||||||
// be returned containing the response body. In any case, the Responder will
|
|
||||||
// return an error if the status code is not satisfied.
|
|
||||||
//
|
|
||||||
// If this Responder returns an error, the response body will be replaced with
|
|
||||||
// an in-memory reader, which needs no further closing.
|
|
||||||
func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
|
|
||||||
return func(r autorest.Responder) autorest.Responder {
|
|
||||||
return autorest.ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
|
|
||||||
var e RequestError
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
// Copy and replace the Body in case it does not contain an error object.
|
|
||||||
// This will leave the Body available to the caller.
|
|
||||||
b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
|
|
||||||
resp.Body = ioutil.NopCloser(&b)
|
|
||||||
if decodeErr != nil {
|
|
||||||
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
|
|
||||||
} else if e.ServiceError == nil {
|
|
||||||
// Check if error is unwrapped ServiceError
|
|
||||||
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" {
|
|
||||||
e.ServiceError = &ServiceError{
|
|
||||||
Code: "Unknown",
|
|
||||||
Message: "Unknown service error",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
e.RequestID = ExtractRequestID(resp)
|
|
||||||
if e.StatusCode == nil {
|
|
||||||
e.StatusCode = resp.StatusCode
|
|
||||||
}
|
|
||||||
err = &e
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
176
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
176
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
|
|
@ -1,176 +0,0 @@
|
||||||
package azure
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
|
|
||||||
// to be used while populating the Azure Environment.
|
|
||||||
const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
|
|
||||||
|
|
||||||
var environments = map[string]Environment{
|
|
||||||
"AZURECHINACLOUD": ChinaCloud,
|
|
||||||
"AZUREGERMANCLOUD": GermanCloud,
|
|
||||||
"AZUREPUBLICCLOUD": PublicCloud,
|
|
||||||
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
|
||||||
type Environment struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
ManagementPortalURL string `json:"managementPortalURL"`
|
|
||||||
PublishSettingsURL string `json:"publishSettingsURL"`
|
|
||||||
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
|
|
||||||
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
|
|
||||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
|
|
||||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
|
||||||
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
|
||||||
GraphEndpoint string `json:"graphEndpoint"`
|
|
||||||
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
|
||||||
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
|
||||||
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
|
||||||
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
|
|
||||||
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
|
|
||||||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
|
||||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
|
||||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// PublicCloud is the default public Azure cloud environment
|
|
||||||
PublicCloud = Environment{
|
|
||||||
Name: "AzurePublicCloud",
|
|
||||||
ManagementPortalURL: "https://manage.windowsazure.com/",
|
|
||||||
PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.windows.net/",
|
|
||||||
ResourceManagerEndpoint: "https://management.azure.com/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
|
|
||||||
GalleryEndpoint: "https://gallery.azure.com/",
|
|
||||||
KeyVaultEndpoint: "https://vault.azure.net/",
|
|
||||||
GraphEndpoint: "https://graph.windows.net/",
|
|
||||||
StorageEndpointSuffix: "core.windows.net",
|
|
||||||
SQLDatabaseDNSSuffix: "database.windows.net",
|
|
||||||
TrafficManagerDNSSuffix: "trafficmanager.net",
|
|
||||||
KeyVaultDNSSuffix: "vault.azure.net",
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.azure.com",
|
|
||||||
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
|
||||||
}
|
|
||||||
|
|
||||||
// USGovernmentCloud is the cloud environment for the US Government
|
|
||||||
USGovernmentCloud = Environment{
|
|
||||||
Name: "AzureUSGovernmentCloud",
|
|
||||||
ManagementPortalURL: "https://manage.windowsazure.us/",
|
|
||||||
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
|
|
||||||
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.us/",
|
|
||||||
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
|
|
||||||
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
|
|
||||||
GraphEndpoint: "https://graph.windows.net/",
|
|
||||||
StorageEndpointSuffix: "core.usgovcloudapi.net",
|
|
||||||
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
|
|
||||||
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
|
|
||||||
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
|
|
||||||
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChinaCloud is the cloud environment operated in China
|
|
||||||
ChinaCloud = Environment{
|
|
||||||
Name: "AzureChinaCloud",
|
|
||||||
ManagementPortalURL: "https://manage.chinacloudapi.com/",
|
|
||||||
PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/",
|
|
||||||
ResourceManagerEndpoint: "https://management.chinacloudapi.cn/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/",
|
|
||||||
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
|
|
||||||
KeyVaultEndpoint: "https://vault.azure.cn/",
|
|
||||||
GraphEndpoint: "https://graph.chinacloudapi.cn/",
|
|
||||||
StorageEndpointSuffix: "core.chinacloudapi.cn",
|
|
||||||
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
|
|
||||||
TrafficManagerDNSSuffix: "trafficmanager.cn",
|
|
||||||
KeyVaultDNSSuffix: "vault.azure.cn",
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net",
|
|
||||||
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
|
||||||
}
|
|
||||||
|
|
||||||
// GermanCloud is the cloud environment operated in Germany
|
|
||||||
GermanCloud = Environment{
|
|
||||||
Name: "AzureGermanCloud",
|
|
||||||
ManagementPortalURL: "http://portal.microsoftazure.de/",
|
|
||||||
PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index",
|
|
||||||
ServiceManagementEndpoint: "https://management.core.cloudapi.de/",
|
|
||||||
ResourceManagerEndpoint: "https://management.microsoftazure.de/",
|
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.de/",
|
|
||||||
GalleryEndpoint: "https://gallery.cloudapi.de/",
|
|
||||||
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
|
|
||||||
GraphEndpoint: "https://graph.cloudapi.de/",
|
|
||||||
StorageEndpointSuffix: "core.cloudapi.de",
|
|
||||||
SQLDatabaseDNSSuffix: "database.cloudapi.de",
|
|
||||||
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
|
|
||||||
KeyVaultDNSSuffix: "vault.microsoftazure.de",
|
|
||||||
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
|
|
||||||
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// EnvironmentFromName returns an Environment based on the common name specified.
|
|
||||||
func EnvironmentFromName(name string) (Environment, error) {
|
|
||||||
// IMPORTANT
|
|
||||||
// As per @radhikagupta5:
|
|
||||||
// This is technical debt, fundamentally here because Kubernetes is not currently accepting
|
|
||||||
// contributions to the providers. Once that is an option, the provider should be updated to
|
|
||||||
// directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
|
|
||||||
// from this method based on the name that is provided to us.
|
|
||||||
if strings.EqualFold(name, "AZURESTACKCLOUD") {
|
|
||||||
return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName))
|
|
||||||
}
|
|
||||||
|
|
||||||
name = strings.ToUpper(name)
|
|
||||||
env, ok := environments[name]
|
|
||||||
if !ok {
|
|
||||||
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return env, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnvironmentFromFile loads an Environment from a configuration file available on disk.
|
|
||||||
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
|
|
||||||
// endpoints.
|
|
||||||
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
|
|
||||||
fileContents, err := ioutil.ReadFile(location)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = json.Unmarshal(fileContents, &unmarshaled)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
203
vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
generated
vendored
203
vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
generated
vendored
|
|
@ -1,203 +0,0 @@
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package azure
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
|
|
||||||
// It also handles request retries
|
|
||||||
func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
|
|
||||||
return func(s autorest.Sender) autorest.Sender {
|
|
||||||
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
|
||||||
rr := autorest.NewRetriableRequest(r)
|
|
||||||
for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
|
|
||||||
err = rr.Prepare()
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err = autorest.SendWithSender(s, rr.Request(),
|
|
||||||
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
var re RequestError
|
|
||||||
err = autorest.Respond(
|
|
||||||
resp,
|
|
||||||
autorest.ByUnmarshallingJSON(&re),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
err = re
|
|
||||||
|
|
||||||
if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
|
|
||||||
regErr := register(client, r, re)
|
|
||||||
if regErr != nil {
|
|
||||||
return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return resp, fmt.Errorf("failed request: %s", err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getProvider(re RequestError) (string, error) {
|
|
||||||
if re.ServiceError != nil {
|
|
||||||
if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 {
|
|
||||||
detail := (*re.ServiceError.Details)[0].(map[string]interface{})
|
|
||||||
return detail["target"].(string), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", errors.New("provider was not found in the response")
|
|
||||||
}
|
|
||||||
|
|
||||||
func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
|
|
||||||
subID := getSubscription(originalReq.URL.Path)
|
|
||||||
if subID == "" {
|
|
||||||
return errors.New("missing parameter subscriptionID to register resource provider")
|
|
||||||
}
|
|
||||||
providerName, err := getProvider(re)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
|
|
||||||
}
|
|
||||||
newURL := url.URL{
|
|
||||||
Scheme: originalReq.URL.Scheme,
|
|
||||||
Host: originalReq.URL.Host,
|
|
||||||
}
|
|
||||||
|
|
||||||
// taken from the resources SDK
|
|
||||||
// with almost identical code, this sections are easier to mantain
|
|
||||||
// It is also not a good idea to import the SDK here
|
|
||||||
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
|
|
||||||
pathParameters := map[string]interface{}{
|
|
||||||
"resourceProviderNamespace": autorest.Encode("path", providerName),
|
|
||||||
"subscriptionId": autorest.Encode("path", subID),
|
|
||||||
}
|
|
||||||
|
|
||||||
const APIVersion = "2016-09-01"
|
|
||||||
queryParameters := map[string]interface{}{
|
|
||||||
"api-version": APIVersion,
|
|
||||||
}
|
|
||||||
|
|
||||||
preparer := autorest.CreatePreparer(
|
|
||||||
autorest.AsPost(),
|
|
||||||
autorest.WithBaseURL(newURL.String()),
|
|
||||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
|
|
||||||
autorest.WithQueryParameters(queryParameters),
|
|
||||||
)
|
|
||||||
|
|
||||||
req, err := preparer.Prepare(&http.Request{})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Cancel = originalReq.Cancel
|
|
||||||
|
|
||||||
resp, err := autorest.SendWithSender(client, req,
|
|
||||||
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type Provider struct {
|
|
||||||
RegistrationState *string `json:"registrationState,omitempty"`
|
|
||||||
}
|
|
||||||
var provider Provider
|
|
||||||
|
|
||||||
err = autorest.Respond(
|
|
||||||
resp,
|
|
||||||
WithErrorUnlessStatusCode(http.StatusOK),
|
|
||||||
autorest.ByUnmarshallingJSON(&provider),
|
|
||||||
autorest.ByClosing(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// poll for registered provisioning state
|
|
||||||
now := time.Now()
|
|
||||||
for err == nil && time.Since(now) < client.PollingDuration {
|
|
||||||
// taken from the resources SDK
|
|
||||||
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
|
|
||||||
preparer := autorest.CreatePreparer(
|
|
||||||
autorest.AsGet(),
|
|
||||||
autorest.WithBaseURL(newURL.String()),
|
|
||||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
|
|
||||||
autorest.WithQueryParameters(queryParameters),
|
|
||||||
)
|
|
||||||
req, err = preparer.Prepare(&http.Request{})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Cancel = originalReq.Cancel
|
|
||||||
|
|
||||||
resp, err := autorest.SendWithSender(client, req,
|
|
||||||
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = autorest.Respond(
|
|
||||||
resp,
|
|
||||||
WithErrorUnlessStatusCode(http.StatusOK),
|
|
||||||
autorest.ByUnmarshallingJSON(&provider),
|
|
||||||
autorest.ByClosing(),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if provider.RegistrationState != nil &&
|
|
||||||
*provider.RegistrationState == "Registered" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel)
|
|
||||||
if !delayed {
|
|
||||||
autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !(time.Since(now) < client.PollingDuration) {
|
|
||||||
return errors.New("polling for resource provider registration has exceeded the polling duration")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func getSubscription(path string) string {
|
|
||||||
parts := strings.Split(path, "/")
|
|
||||||
for i, v := range parts {
|
|
||||||
if v == "subscriptions" && (i+1) < len(parts) {
|
|
||||||
return parts[i+1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
263
vendor/github.com/Azure/go-autorest/autorest/client.go
generated
vendored
263
vendor/github.com/Azure/go-autorest/autorest/client.go
generated
vendored
|
|
@ -1,263 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/http/cookiejar"
|
|
||||||
"runtime"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultPollingDelay is a reasonable delay between polling requests.
|
|
||||||
DefaultPollingDelay = 60 * time.Second
|
|
||||||
|
|
||||||
// DefaultPollingDuration is a reasonable total polling duration.
|
|
||||||
DefaultPollingDuration = 15 * time.Minute
|
|
||||||
|
|
||||||
// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
|
|
||||||
DefaultRetryAttempts = 3
|
|
||||||
|
|
||||||
// DefaultRetryDuration is the duration to wait between retries.
|
|
||||||
DefaultRetryDuration = 30 * time.Second
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// defaultUserAgent builds a string containing the Go version, system archityecture and OS,
|
|
||||||
// and the go-autorest version.
|
|
||||||
defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
|
|
||||||
runtime.Version(),
|
|
||||||
runtime.GOARCH,
|
|
||||||
runtime.GOOS,
|
|
||||||
Version(),
|
|
||||||
)
|
|
||||||
|
|
||||||
// StatusCodesForRetry are a defined group of status code for which the client will retry
|
|
||||||
StatusCodesForRetry = []int{
|
|
||||||
http.StatusRequestTimeout, // 408
|
|
||||||
http.StatusTooManyRequests, // 429
|
|
||||||
http.StatusInternalServerError, // 500
|
|
||||||
http.StatusBadGateway, // 502
|
|
||||||
http.StatusServiceUnavailable, // 503
|
|
||||||
http.StatusGatewayTimeout, // 504
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
requestFormat = `HTTP Request Begin ===================================================
|
|
||||||
%s
|
|
||||||
===================================================== HTTP Request End
|
|
||||||
`
|
|
||||||
responseFormat = `HTTP Response Begin ===================================================
|
|
||||||
%s
|
|
||||||
===================================================== HTTP Response End
|
|
||||||
`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Response serves as the base for all responses from generated clients. It provides access to the
|
|
||||||
// last http.Response.
|
|
||||||
type Response struct {
|
|
||||||
*http.Response `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoggingInspector implements request and response inspectors that log the full request and
|
|
||||||
// response to a supplied log.
|
|
||||||
type LoggingInspector struct {
|
|
||||||
Logger *log.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
|
|
||||||
// body is restored after being emitted.
|
|
||||||
//
|
|
||||||
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
|
|
||||||
// important. It is best used to trace JSON or similar body values.
|
|
||||||
func (li LoggingInspector) WithInspection() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
var body, b bytes.Buffer
|
|
||||||
|
|
||||||
defer r.Body.Close()
|
|
||||||
|
|
||||||
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
|
|
||||||
if err := r.Write(&b); err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
li.Logger.Printf(requestFormat, b.String())
|
|
||||||
|
|
||||||
r.Body = ioutil.NopCloser(&body)
|
|
||||||
return p.Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
|
|
||||||
// body is restored after being emitted.
|
|
||||||
//
|
|
||||||
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
|
|
||||||
// important. It is best used to trace JSON or similar body values.
|
|
||||||
func (li LoggingInspector) ByInspecting() RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
var body, b bytes.Buffer
|
|
||||||
defer resp.Body.Close()
|
|
||||||
resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
|
|
||||||
if err := resp.Write(&b); err != nil {
|
|
||||||
return fmt.Errorf("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
li.Logger.Printf(responseFormat, b.String())
|
|
||||||
|
|
||||||
resp.Body = ioutil.NopCloser(&body)
|
|
||||||
return r.Respond(resp)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client is the base for autorest generated clients. It provides default, "do nothing"
|
|
||||||
// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
|
|
||||||
// standard, undecorated http.Client as a default Sender.
|
|
||||||
//
|
|
||||||
// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
|
|
||||||
// return responses that compose with Response.
|
|
||||||
//
|
|
||||||
// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
|
|
||||||
// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
|
|
||||||
// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
|
|
||||||
// sending the request by providing a decorated Sender.
|
|
||||||
type Client struct {
|
|
||||||
Authorizer Authorizer
|
|
||||||
Sender Sender
|
|
||||||
RequestInspector PrepareDecorator
|
|
||||||
ResponseInspector RespondDecorator
|
|
||||||
|
|
||||||
// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
|
|
||||||
PollingDelay time.Duration
|
|
||||||
|
|
||||||
// PollingDuration sets the maximum polling time after which an error is returned.
|
|
||||||
PollingDuration time.Duration
|
|
||||||
|
|
||||||
// RetryAttempts sets the default number of retry attempts for client.
|
|
||||||
RetryAttempts int
|
|
||||||
|
|
||||||
// RetryDuration sets the delay duration for retries.
|
|
||||||
RetryDuration time.Duration
|
|
||||||
|
|
||||||
// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
|
|
||||||
// through the Do method.
|
|
||||||
UserAgent string
|
|
||||||
|
|
||||||
Jar http.CookieJar
|
|
||||||
|
|
||||||
// Set to true to skip attempted registration of resource providers (false by default).
|
|
||||||
SkipResourceProviderRegistration bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
|
|
||||||
// string.
|
|
||||||
func NewClientWithUserAgent(ua string) Client {
|
|
||||||
c := Client{
|
|
||||||
PollingDelay: DefaultPollingDelay,
|
|
||||||
PollingDuration: DefaultPollingDuration,
|
|
||||||
RetryAttempts: DefaultRetryAttempts,
|
|
||||||
RetryDuration: DefaultRetryDuration,
|
|
||||||
UserAgent: defaultUserAgent,
|
|
||||||
}
|
|
||||||
c.Sender = c.sender()
|
|
||||||
c.AddToUserAgent(ua)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddToUserAgent adds an extension to the current user agent
|
|
||||||
func (c *Client) AddToUserAgent(extension string) error {
|
|
||||||
if extension != "" {
|
|
||||||
c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do implements the Sender interface by invoking the active Sender after applying authorization.
|
|
||||||
// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
|
|
||||||
// is set, apply set the User-Agent header.
|
|
||||||
func (c Client) Do(r *http.Request) (*http.Response, error) {
|
|
||||||
if r.UserAgent() == "" {
|
|
||||||
r, _ = Prepare(r,
|
|
||||||
WithUserAgent(c.UserAgent))
|
|
||||||
}
|
|
||||||
r, err := Prepare(r,
|
|
||||||
c.WithInspection(),
|
|
||||||
c.WithAuthorization())
|
|
||||||
if err != nil {
|
|
||||||
var resp *http.Response
|
|
||||||
if detErr, ok := err.(DetailedError); ok {
|
|
||||||
// if the authorization failed (e.g. invalid credentials) there will
|
|
||||||
// be a response associated with the error, be sure to return it.
|
|
||||||
resp = detErr.Response
|
|
||||||
}
|
|
||||||
return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := SendWithSender(c.sender(), r)
|
|
||||||
Respond(resp, c.ByInspecting())
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// sender returns the Sender to which to send requests.
|
|
||||||
func (c Client) sender() Sender {
|
|
||||||
if c.Sender == nil {
|
|
||||||
j, _ := cookiejar.New(nil)
|
|
||||||
return &http.Client{Jar: j}
|
|
||||||
}
|
|
||||||
return c.Sender
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
|
|
||||||
// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer.
|
|
||||||
func (c Client) WithAuthorization() PrepareDecorator {
|
|
||||||
return c.authorizer().WithAuthorization()
|
|
||||||
}
|
|
||||||
|
|
||||||
// authorizer returns the Authorizer to use.
|
|
||||||
func (c Client) authorizer() Authorizer {
|
|
||||||
if c.Authorizer == nil {
|
|
||||||
return NullAuthorizer{}
|
|
||||||
}
|
|
||||||
return c.Authorizer
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
|
|
||||||
// if present, or returns the WithNothing PrepareDecorator otherwise.
|
|
||||||
func (c Client) WithInspection() PrepareDecorator {
|
|
||||||
if c.RequestInspector == nil {
|
|
||||||
return WithNothing()
|
|
||||||
}
|
|
||||||
return c.RequestInspector
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
|
|
||||||
// if present, or returns the ByIgnoring RespondDecorator otherwise.
|
|
||||||
func (c Client) ByInspecting() RespondDecorator {
|
|
||||||
if c.ResponseInspector == nil {
|
|
||||||
return ByIgnoring()
|
|
||||||
}
|
|
||||||
return c.ResponseInspector
|
|
||||||
}
|
|
||||||
96
vendor/github.com/Azure/go-autorest/autorest/date/date.go
generated
vendored
96
vendor/github.com/Azure/go-autorest/autorest/date/date.go
generated
vendored
|
|
@ -1,96 +0,0 @@
|
||||||
/*
|
|
||||||
Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
|
|
||||||
defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
|
|
||||||
time.Time types. And both convert to time.Time through a ToTime method.
|
|
||||||
*/
|
|
||||||
package date
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
fullDate = "2006-01-02"
|
|
||||||
fullDateJSON = `"2006-01-02"`
|
|
||||||
dateFormat = "%04d-%02d-%02d"
|
|
||||||
jsonFormat = `"%04d-%02d-%02d"`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
type Date struct {
|
|
||||||
time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseDate create a new Date from the passed string.
|
|
||||||
func ParseDate(date string) (d Date, err error) {
|
|
||||||
return parseDate(date, fullDate)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDate(date string, format string) (Date, error) {
|
|
||||||
d, err := time.Parse(format, date)
|
|
||||||
return Date{Time: d}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
func (d Date) MarshalBinary() ([]byte, error) {
|
|
||||||
return d.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
func (d *Date) UnmarshalBinary(data []byte) error {
|
|
||||||
return d.UnmarshalText(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
func (d Date) MarshalJSON() (json []byte, err error) {
|
|
||||||
return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
func (d *Date) UnmarshalJSON(data []byte) (err error) {
|
|
||||||
d.Time, err = time.Parse(fullDateJSON, string(data))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
func (d Date) MarshalText() (text []byte, err error) {
|
|
||||||
return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
|
|
||||||
// 2006-01-02).
|
|
||||||
func (d *Date) UnmarshalText(data []byte) (err error) {
|
|
||||||
d.Time, err = time.Parse(fullDate, string(data))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
|
|
||||||
func (d Date) String() string {
|
|
||||||
return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToTime returns a Date as a time.Time
|
|
||||||
func (d Date) ToTime() time.Time {
|
|
||||||
return d.Time
|
|
||||||
}
|
|
||||||
103
vendor/github.com/Azure/go-autorest/autorest/date/time.go
generated
vendored
103
vendor/github.com/Azure/go-autorest/autorest/date/time.go
generated
vendored
|
|
@ -1,103 +0,0 @@
|
||||||
package date
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
|
|
||||||
const (
|
|
||||||
azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"`
|
|
||||||
azureUtcFormat = "2006-01-02T15:04:05.999999999"
|
|
||||||
rfc3339JSON = `"` + time.RFC3339Nano + `"`
|
|
||||||
rfc3339 = time.RFC3339Nano
|
|
||||||
tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
|
|
||||||
// 2006-01-02T15:04:05Z).
|
|
||||||
type Time struct {
|
|
||||||
time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
|
|
||||||
// 2006-01-02T15:04:05Z).
|
|
||||||
func (t Time) MarshalBinary() ([]byte, error) {
|
|
||||||
return t.Time.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
|
|
||||||
// (i.e., 2006-01-02T15:04:05Z).
|
|
||||||
func (t *Time) UnmarshalBinary(data []byte) error {
|
|
||||||
return t.UnmarshalText(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
|
|
||||||
// 2006-01-02T15:04:05Z).
|
|
||||||
func (t Time) MarshalJSON() (json []byte, err error) {
|
|
||||||
return t.Time.MarshalJSON()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
|
|
||||||
// (i.e., 2006-01-02T15:04:05Z).
|
|
||||||
func (t *Time) UnmarshalJSON(data []byte) (err error) {
|
|
||||||
timeFormat := azureUtcFormatJSON
|
|
||||||
match, err := regexp.Match(tzOffsetRegex, data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if match {
|
|
||||||
timeFormat = rfc3339JSON
|
|
||||||
}
|
|
||||||
t.Time, err = ParseTime(timeFormat, string(data))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
|
|
||||||
// 2006-01-02T15:04:05Z).
|
|
||||||
func (t Time) MarshalText() (text []byte, err error) {
|
|
||||||
return t.Time.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
|
|
||||||
// (i.e., 2006-01-02T15:04:05Z).
|
|
||||||
func (t *Time) UnmarshalText(data []byte) (err error) {
|
|
||||||
timeFormat := azureUtcFormat
|
|
||||||
match, err := regexp.Match(tzOffsetRegex, data)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if match {
|
|
||||||
timeFormat = rfc3339
|
|
||||||
}
|
|
||||||
t.Time, err = ParseTime(timeFormat, string(data))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the Time formatted as an RFC3339 date-time string (i.e.,
|
|
||||||
// 2006-01-02T15:04:05Z).
|
|
||||||
func (t Time) String() string {
|
|
||||||
// Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
|
|
||||||
b, err := t.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToTime returns a Time as a time.Time
|
|
||||||
func (t Time) ToTime() time.Time {
|
|
||||||
return t.Time
|
|
||||||
}
|
|
||||||
100
vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
generated
vendored
100
vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
generated
vendored
|
|
@ -1,100 +0,0 @@
|
||||||
package date
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
rfc1123JSON = `"` + time.RFC1123 + `"`
|
|
||||||
rfc1123 = time.RFC1123
|
|
||||||
)
|
|
||||||
|
|
||||||
// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
|
|
||||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
type TimeRFC1123 struct {
|
|
||||||
time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
|
|
||||||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) {
|
|
||||||
t.Time, err = ParseTime(rfc1123JSON, string(data))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
|
|
||||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t TimeRFC1123) MarshalJSON() ([]byte, error) {
|
|
||||||
if y := t.Year(); y < 0 || y >= 10000 {
|
|
||||||
return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
|
|
||||||
}
|
|
||||||
b := []byte(t.Format(rfc1123JSON))
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
|
|
||||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t TimeRFC1123) MarshalText() ([]byte, error) {
|
|
||||||
if y := t.Year(); y < 0 || y >= 10000 {
|
|
||||||
return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
|
|
||||||
}
|
|
||||||
|
|
||||||
b := []byte(t.Format(rfc1123))
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
|
|
||||||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
|
|
||||||
t.Time, err = ParseTime(rfc1123, string(data))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
|
|
||||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
|
|
||||||
return t.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
|
|
||||||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
|
|
||||||
return t.UnmarshalText(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToTime returns a Time as a time.Time
|
|
||||||
func (t TimeRFC1123) ToTime() time.Time {
|
|
||||||
return t.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the Time formatted as an RFC1123 date-time string (i.e.,
|
|
||||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
|
||||||
func (t TimeRFC1123) String() string {
|
|
||||||
// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
|
|
||||||
b, err := t.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
123
vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
generated
vendored
123
vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
generated
vendored
|
|
@ -1,123 +0,0 @@
|
||||||
package date
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// unixEpoch is the moment in time that should be treated as timestamp 0.
|
|
||||||
var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
|
|
||||||
|
|
||||||
// UnixTime marshals and unmarshals a time that is represented as the number
|
|
||||||
// of seconds (ignoring skip-seconds) since the Unix Epoch.
|
|
||||||
type UnixTime time.Time
|
|
||||||
|
|
||||||
// Duration returns the time as a Duration since the UnixEpoch.
|
|
||||||
func (t UnixTime) Duration() time.Duration {
|
|
||||||
return time.Time(t).Sub(unixEpoch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
|
|
||||||
func NewUnixTimeFromSeconds(seconds float64) UnixTime {
|
|
||||||
return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
|
|
||||||
func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
|
|
||||||
return NewUnixTimeFromDuration(time.Duration(nanoseconds))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
|
|
||||||
func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
|
|
||||||
return UnixTime(unixEpoch.Add(dur))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0'
|
|
||||||
func UnixEpoch() time.Time {
|
|
||||||
return unixEpoch
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
|
|
||||||
// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
|
|
||||||
func (t UnixTime) MarshalJSON() ([]byte, error) {
|
|
||||||
buffer := &bytes.Buffer{}
|
|
||||||
enc := json.NewEncoder(buffer)
|
|
||||||
err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return buffer.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since
|
|
||||||
// midnight January 1st, 1970.
|
|
||||||
func (t *UnixTime) UnmarshalJSON(text []byte) error {
|
|
||||||
dec := json.NewDecoder(bytes.NewReader(text))
|
|
||||||
|
|
||||||
var secondsSinceEpoch float64
|
|
||||||
if err := dec.Decode(&secondsSinceEpoch); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number.
|
|
||||||
func (t UnixTime) MarshalText() ([]byte, error) {
|
|
||||||
cast := time.Time(t)
|
|
||||||
return cast.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch.
|
|
||||||
func (t *UnixTime) UnmarshalText(raw []byte) error {
|
|
||||||
var unmarshaled time.Time
|
|
||||||
|
|
||||||
if err := unmarshaled.UnmarshalText(raw); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
*t = UnixTime(unmarshaled)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch.
|
|
||||||
func (t UnixTime) MarshalBinary() ([]byte, error) {
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
|
|
||||||
payload := int64(t.Duration())
|
|
||||||
|
|
||||||
if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime.
|
|
||||||
func (t *UnixTime) UnmarshalBinary(raw []byte) error {
|
|
||||||
var nanosecondsSinceEpoch int64
|
|
||||||
|
|
||||||
if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
25
vendor/github.com/Azure/go-autorest/autorest/date/utility.go
generated
vendored
25
vendor/github.com/Azure/go-autorest/autorest/date/utility.go
generated
vendored
|
|
@ -1,25 +0,0 @@
|
||||||
package date
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ParseTime to parse Time string to specified format.
|
|
||||||
func ParseTime(format string, t string) (d time.Time, err error) {
|
|
||||||
return time.Parse(format, strings.ToUpper(t))
|
|
||||||
}
|
|
||||||
98
vendor/github.com/Azure/go-autorest/autorest/error.go
generated
vendored
98
vendor/github.com/Azure/go-autorest/autorest/error.go
generated
vendored
|
|
@ -1,98 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UndefinedStatusCode is used when HTTP status code is not available for an error.
|
|
||||||
UndefinedStatusCode = 0
|
|
||||||
)
|
|
||||||
|
|
||||||
// DetailedError encloses a error with details of the package, method, and associated HTTP
|
|
||||||
// status code (if any).
|
|
||||||
type DetailedError struct {
|
|
||||||
Original error
|
|
||||||
|
|
||||||
// PackageType is the package type of the object emitting the error. For types, the value
|
|
||||||
// matches that produced the the '%T' format specifier of the fmt package. For other elements,
|
|
||||||
// such as functions, it is just the package name (e.g., "autorest").
|
|
||||||
PackageType string
|
|
||||||
|
|
||||||
// Method is the name of the method raising the error.
|
|
||||||
Method string
|
|
||||||
|
|
||||||
// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
|
|
||||||
StatusCode interface{}
|
|
||||||
|
|
||||||
// Message is the error message.
|
|
||||||
Message string
|
|
||||||
|
|
||||||
// Service Error is the response body of failed API in bytes
|
|
||||||
ServiceError []byte
|
|
||||||
|
|
||||||
// Response is the response object that was returned during failure if applicable.
|
|
||||||
Response *http.Response
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewError creates a new Error conforming object from the passed packageType, method, and
|
|
||||||
// message. message is treated as a format string to which the optional args apply.
|
|
||||||
func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
|
|
||||||
return NewErrorWithError(nil, packageType, method, nil, message, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrorWithResponse creates a new Error conforming object from the passed
|
|
||||||
// packageType, method, statusCode of the given resp (UndefinedStatusCode if
|
|
||||||
// resp is nil), and message. message is treated as a format string to which the
|
|
||||||
// optional args apply.
|
|
||||||
func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
|
|
||||||
return NewErrorWithError(nil, packageType, method, resp, message, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrorWithError creates a new Error conforming object from the
|
|
||||||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
|
||||||
// if resp is nil), message, and original error. message is treated as a format
|
|
||||||
// string to which the optional args apply.
|
|
||||||
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
|
|
||||||
if v, ok := original.(DetailedError); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
statusCode := UndefinedStatusCode
|
|
||||||
if resp != nil {
|
|
||||||
statusCode = resp.StatusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
return DetailedError{
|
|
||||||
Original: original,
|
|
||||||
PackageType: packageType,
|
|
||||||
Method: method,
|
|
||||||
StatusCode: statusCode,
|
|
||||||
Message: fmt.Sprintf(message, args...),
|
|
||||||
Response: resp,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a formatted containing all available details (i.e., PackageType, Method,
|
|
||||||
// StatusCode, Message, and original error (if any)).
|
|
||||||
func (e DetailedError) Error() string {
|
|
||||||
if e.Original == nil {
|
|
||||||
return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
|
|
||||||
}
|
|
||||||
480
vendor/github.com/Azure/go-autorest/autorest/preparer.go
generated
vendored
480
vendor/github.com/Azure/go-autorest/autorest/preparer.go
generated
vendored
|
|
@ -1,480 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
mimeTypeJSON = "application/json"
|
|
||||||
mimeTypeOctetStream = "application/octet-stream"
|
|
||||||
mimeTypeFormPost = "application/x-www-form-urlencoded"
|
|
||||||
|
|
||||||
headerAuthorization = "Authorization"
|
|
||||||
headerContentType = "Content-Type"
|
|
||||||
headerUserAgent = "User-Agent"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Preparer is the interface that wraps the Prepare method.
|
|
||||||
//
|
|
||||||
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
|
|
||||||
// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used.
|
|
||||||
type Preparer interface {
|
|
||||||
Prepare(*http.Request) (*http.Request, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PreparerFunc is a method that implements the Preparer interface.
|
|
||||||
type PreparerFunc func(*http.Request) (*http.Request, error)
|
|
||||||
|
|
||||||
// Prepare implements the Preparer interface on PreparerFunc.
|
|
||||||
func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
|
|
||||||
return pf(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
|
|
||||||
// http.Request and pass it along or, first, pass the http.Request along then affect the result.
|
|
||||||
type PrepareDecorator func(Preparer) Preparer
|
|
||||||
|
|
||||||
// CreatePreparer creates, decorates, and returns a Preparer.
|
|
||||||
// Without decorators, the returned Preparer returns the passed http.Request unmodified.
|
|
||||||
// Preparers are safe to share and re-use.
|
|
||||||
func CreatePreparer(decorators ...PrepareDecorator) Preparer {
|
|
||||||
return DecoratePreparer(
|
|
||||||
Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
|
|
||||||
decorators...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
|
|
||||||
// applies to the Preparer. Decorators are applied in the order received, but their affect upon the
|
|
||||||
// request depends on whether they are a pre-decorator (change the http.Request and then pass it
|
|
||||||
// along) or a post-decorator (pass the http.Request along and alter it on return).
|
|
||||||
func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
|
|
||||||
for _, decorate := range decorators {
|
|
||||||
p = decorate(p)
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
|
|
||||||
// It creates a Preparer from the decorators which it then applies to the passed http.Request.
|
|
||||||
func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
|
|
||||||
if r == nil {
|
|
||||||
return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
|
|
||||||
}
|
|
||||||
return CreatePreparer(decorators...).Prepare(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
|
|
||||||
// http.Request.
|
|
||||||
func WithNothing() PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
return p.Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
|
|
||||||
// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
|
|
||||||
// adding the header.
|
|
||||||
func WithHeader(header string, value string) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
if r.Header == nil {
|
|
||||||
r.Header = make(http.Header)
|
|
||||||
}
|
|
||||||
r.Header.Set(http.CanonicalHeaderKey(header), value)
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
|
|
||||||
// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before
|
|
||||||
// adding them.
|
|
||||||
func WithHeaders(headers map[string]interface{}) PrepareDecorator {
|
|
||||||
h := ensureValueStrings(headers)
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
if r.Header == nil {
|
|
||||||
r.Header = make(http.Header)
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, value := range h {
|
|
||||||
r.Header.Set(http.CanonicalHeaderKey(name), value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
|
||||||
// value is "Bearer " followed by the supplied token.
|
|
||||||
func WithBearerAuthorization(token string) PrepareDecorator {
|
|
||||||
return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
|
|
||||||
// is the passed contentType.
|
|
||||||
func AsContentType(contentType string) PrepareDecorator {
|
|
||||||
return WithHeader(headerContentType, contentType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
|
|
||||||
// passed string.
|
|
||||||
func WithUserAgent(ua string) PrepareDecorator {
|
|
||||||
return WithHeader(headerUserAgent, ua)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
|
|
||||||
// "application/x-www-form-urlencoded".
|
|
||||||
func AsFormURLEncoded() PrepareDecorator {
|
|
||||||
return AsContentType(mimeTypeFormPost)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
|
|
||||||
// "application/json".
|
|
||||||
func AsJSON() PrepareDecorator {
|
|
||||||
return AsContentType(mimeTypeJSON)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
|
|
||||||
func AsOctetStream() PrepareDecorator {
|
|
||||||
return AsContentType(mimeTypeOctetStream)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
|
|
||||||
// decorator does not validate that the passed method string is a known HTTP method.
|
|
||||||
func WithMethod(method string) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r.Method = method
|
|
||||||
return p.Prepare(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
|
|
||||||
func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
|
|
||||||
|
|
||||||
// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
|
|
||||||
func AsGet() PrepareDecorator { return WithMethod("GET") }
|
|
||||||
|
|
||||||
// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
|
|
||||||
func AsHead() PrepareDecorator { return WithMethod("HEAD") }
|
|
||||||
|
|
||||||
// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
|
|
||||||
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
|
|
||||||
|
|
||||||
// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
|
|
||||||
func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
|
|
||||||
|
|
||||||
// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
|
|
||||||
func AsPost() PrepareDecorator { return WithMethod("POST") }
|
|
||||||
|
|
||||||
// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
|
|
||||||
func AsPut() PrepareDecorator { return WithMethod("PUT") }
|
|
||||||
|
|
||||||
// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
|
|
||||||
// from the supplied baseUrl.
|
|
||||||
func WithBaseURL(baseURL string) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
var u *url.URL
|
|
||||||
if u, err = url.Parse(baseURL); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if u.Scheme == "" {
|
|
||||||
err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
r.URL = u
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
|
|
||||||
// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
|
|
||||||
func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
|
|
||||||
parameters := ensureValueStrings(urlParameters)
|
|
||||||
for key, value := range parameters {
|
|
||||||
baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
|
|
||||||
}
|
|
||||||
return WithBaseURL(baseURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the
|
|
||||||
// http.Request body.
|
|
||||||
func WithFormData(v url.Values) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
s := v.Encode()
|
|
||||||
|
|
||||||
if r.Header == nil {
|
|
||||||
r.Header = make(http.Header)
|
|
||||||
}
|
|
||||||
r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
|
|
||||||
r.ContentLength = int64(len(s))
|
|
||||||
r.Body = ioutil.NopCloser(strings.NewReader(s))
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters
|
|
||||||
// into the http.Request body.
|
|
||||||
func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
var body bytes.Buffer
|
|
||||||
writer := multipart.NewWriter(&body)
|
|
||||||
for key, value := range formDataParameters {
|
|
||||||
if rc, ok := value.(io.ReadCloser); ok {
|
|
||||||
var fd io.Writer
|
|
||||||
if fd, err = writer.CreateFormFile(key, key); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if _, err = io.Copy(fd, rc); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err = writer.WriteField(key, ensureValueString(value)); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err = writer.Close(); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
if r.Header == nil {
|
|
||||||
r.Header = make(http.Header)
|
|
||||||
}
|
|
||||||
r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
|
|
||||||
r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
|
|
||||||
r.ContentLength = int64(body.Len())
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithFile returns a PrepareDecorator that sends file in request body.
|
|
||||||
func WithFile(f io.ReadCloser) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
b, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
r.Body = ioutil.NopCloser(bytes.NewReader(b))
|
|
||||||
r.ContentLength = int64(len(b))
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
|
|
||||||
// and sets the Content-Length header.
|
|
||||||
func WithBool(v bool) PrepareDecorator {
|
|
||||||
return WithString(fmt.Sprintf("%v", v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
|
|
||||||
// request and sets the Content-Length header.
|
|
||||||
func WithFloat32(v float32) PrepareDecorator {
|
|
||||||
return WithString(fmt.Sprintf("%v", v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
|
|
||||||
// request and sets the Content-Length header.
|
|
||||||
func WithFloat64(v float64) PrepareDecorator {
|
|
||||||
return WithString(fmt.Sprintf("%v", v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
|
|
||||||
// and sets the Content-Length header.
|
|
||||||
func WithInt32(v int32) PrepareDecorator {
|
|
||||||
return WithString(fmt.Sprintf("%v", v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
|
|
||||||
// and sets the Content-Length header.
|
|
||||||
func WithInt64(v int64) PrepareDecorator {
|
|
||||||
return WithString(fmt.Sprintf("%v", v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
|
|
||||||
// and sets the Content-Length header.
|
|
||||||
func WithString(v string) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
r.ContentLength = int64(len(v))
|
|
||||||
r.Body = ioutil.NopCloser(strings.NewReader(v))
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
|
|
||||||
// request and sets the Content-Length header.
|
|
||||||
func WithJSON(v interface{}) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
b, err := json.Marshal(v)
|
|
||||||
if err == nil {
|
|
||||||
r.ContentLength = int64(len(b))
|
|
||||||
r.Body = ioutil.NopCloser(bytes.NewReader(b))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
|
|
||||||
// is absolute (that is, it begins with a "/"), it replaces the existing path.
|
|
||||||
func WithPath(path string) PrepareDecorator {
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
if r.URL == nil {
|
|
||||||
return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
|
|
||||||
}
|
|
||||||
if r.URL, err = parseURL(r.URL, path); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
|
|
||||||
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
|
|
||||||
// values will be escaped (aka URL encoded) before insertion into the path.
|
|
||||||
func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
|
|
||||||
parameters := escapeValueStrings(ensureValueStrings(pathParameters))
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
if r.URL == nil {
|
|
||||||
return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
|
|
||||||
}
|
|
||||||
for key, value := range parameters {
|
|
||||||
path = strings.Replace(path, "{"+key+"}", value, -1)
|
|
||||||
}
|
|
||||||
if r.URL, err = parseURL(r.URL, path); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
|
|
||||||
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
|
|
||||||
func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
|
|
||||||
parameters := ensureValueStrings(pathParameters)
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
if r.URL == nil {
|
|
||||||
return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
|
|
||||||
}
|
|
||||||
for key, value := range parameters {
|
|
||||||
path = strings.Replace(path, "{"+key+"}", value, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.URL, err = parseURL(r.URL, path); err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseURL(u *url.URL, path string) (*url.URL, error) {
|
|
||||||
p := strings.TrimRight(u.String(), "/")
|
|
||||||
if !strings.HasPrefix(path, "/") {
|
|
||||||
path = "/" + path
|
|
||||||
}
|
|
||||||
return url.Parse(p + path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters
|
|
||||||
// given in the supplied map (i.e., key=value).
|
|
||||||
func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
|
|
||||||
parameters := ensureValueStrings(queryParameters)
|
|
||||||
return func(p Preparer) Preparer {
|
|
||||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
|
||||||
r, err := p.Prepare(r)
|
|
||||||
if err == nil {
|
|
||||||
if r.URL == nil {
|
|
||||||
return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
|
|
||||||
}
|
|
||||||
|
|
||||||
v := r.URL.Query()
|
|
||||||
for key, value := range parameters {
|
|
||||||
d, err := url.QueryUnescape(value)
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
v.Add(key, d)
|
|
||||||
}
|
|
||||||
r.URL.RawQuery = v.Encode()
|
|
||||||
}
|
|
||||||
return r, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
250
vendor/github.com/Azure/go-autorest/autorest/responder.go
generated
vendored
250
vendor/github.com/Azure/go-autorest/autorest/responder.go
generated
vendored
|
|
@ -1,250 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Responder is the interface that wraps the Respond method.
|
|
||||||
//
|
|
||||||
// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold
|
|
||||||
// state since Responders may be shared and re-used.
|
|
||||||
type Responder interface {
|
|
||||||
Respond(*http.Response) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResponderFunc is a method that implements the Responder interface.
|
|
||||||
type ResponderFunc func(*http.Response) error
|
|
||||||
|
|
||||||
// Respond implements the Responder interface on ResponderFunc.
|
|
||||||
func (rf ResponderFunc) Respond(r *http.Response) error {
|
|
||||||
return rf(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
|
|
||||||
// the http.Response and pass it along or, first, pass the http.Response along then react.
|
|
||||||
type RespondDecorator func(Responder) Responder
|
|
||||||
|
|
||||||
// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
|
|
||||||
// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
|
|
||||||
// and re-used: It depends on the applied decorators. For example, a standard decorator that closes
|
|
||||||
// the response body is fine to share whereas a decorator that reads the body into a passed struct
|
|
||||||
// is not.
|
|
||||||
//
|
|
||||||
// To prevent memory leaks, ensure that at least one Responder closes the response body.
|
|
||||||
func CreateResponder(decorators ...RespondDecorator) Responder {
|
|
||||||
return DecorateResponder(
|
|
||||||
Responder(ResponderFunc(func(r *http.Response) error { return nil })),
|
|
||||||
decorators...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
|
|
||||||
// applies to the Responder. Decorators are applied in the order received, but their affect upon the
|
|
||||||
// request depends on whether they are a pre-decorator (react to the http.Response and then pass it
|
|
||||||
// along) or a post-decorator (pass the http.Response along and then react).
|
|
||||||
func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
|
|
||||||
for _, decorate := range decorators {
|
|
||||||
r = decorate(r)
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
|
|
||||||
// It creates a Responder from the decorators it then applies to the passed http.Response.
|
|
||||||
func Respond(r *http.Response, decorators ...RespondDecorator) error {
|
|
||||||
if r == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return CreateResponder(decorators...).Respond(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined
|
|
||||||
// to the next RespondDecorator.
|
|
||||||
func ByIgnoring() RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
return r.Respond(resp)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
|
|
||||||
// the Body is read.
|
|
||||||
func ByCopying(b *bytes.Buffer) RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err == nil && resp != nil && resp.Body != nil {
|
|
||||||
resp.Body = TeeReadCloser(resp.Body, b)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
|
|
||||||
// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
|
|
||||||
// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
|
|
||||||
// within the set.
|
|
||||||
func ByDiscardingBody() RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err == nil && resp != nil && resp.Body != nil {
|
|
||||||
if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
|
|
||||||
return fmt.Errorf("Error discarding the response body: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
|
|
||||||
// closes the response body. Since the passed Responder is invoked prior to closing the response
|
|
||||||
// body, the decorator may occur anywhere within the set.
|
|
||||||
func ByClosing() RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if resp != nil && resp.Body != nil {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
return fmt.Errorf("Error closing the response body: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
|
|
||||||
// it closes the response if the passed Responder returns an error and the response body exists.
|
|
||||||
func ByClosingIfError() RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err != nil && resp != nil && resp.Body != nil {
|
|
||||||
if err := resp.Body.Close(); err != nil {
|
|
||||||
return fmt.Errorf("Error closing the response body: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
|
|
||||||
// response Body into the value pointed to by v.
|
|
||||||
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err == nil {
|
|
||||||
b, errInner := ioutil.ReadAll(resp.Body)
|
|
||||||
// Some responses might include a BOM, remove for successful unmarshalling
|
|
||||||
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
|
||||||
if errInner != nil {
|
|
||||||
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
|
|
||||||
} else if len(strings.Trim(string(b), " ")) > 0 {
|
|
||||||
errInner = json.Unmarshal(b, v)
|
|
||||||
if errInner != nil {
|
|
||||||
err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the
|
|
||||||
// response Body into the value pointed to by v.
|
|
||||||
func ByUnmarshallingXML(v interface{}) RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err == nil {
|
|
||||||
b, errInner := ioutil.ReadAll(resp.Body)
|
|
||||||
if errInner != nil {
|
|
||||||
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
|
|
||||||
} else {
|
|
||||||
errInner = xml.Unmarshal(b, v)
|
|
||||||
if errInner != nil {
|
|
||||||
err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
|
|
||||||
// StatusCode is among the set passed. On error, response body is fully read into a buffer and
|
|
||||||
// presented in the returned error, as well as in the response body.
|
|
||||||
func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
|
|
||||||
return func(r Responder) Responder {
|
|
||||||
return ResponderFunc(func(resp *http.Response) error {
|
|
||||||
err := r.Respond(resp)
|
|
||||||
if err == nil && !ResponseHasStatusCode(resp, codes...) {
|
|
||||||
derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
|
|
||||||
resp.Request.Method,
|
|
||||||
resp.Request.URL,
|
|
||||||
resp.Status)
|
|
||||||
if resp.Body != nil {
|
|
||||||
defer resp.Body.Close()
|
|
||||||
b, _ := ioutil.ReadAll(resp.Body)
|
|
||||||
derr.ServiceError = b
|
|
||||||
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
|
||||||
}
|
|
||||||
err = derr
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
|
|
||||||
// anything other than HTTP 200.
|
|
||||||
func WithErrorUnlessOK() RespondDecorator {
|
|
||||||
return WithErrorUnlessStatusCode(http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
|
|
||||||
// empty string slice if the passed http.Response is nil or the header does not exist.
|
|
||||||
func ExtractHeader(header string, resp *http.Response) []string {
|
|
||||||
if resp != nil && resp.Header != nil {
|
|
||||||
return resp.Header[http.CanonicalHeaderKey(header)]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
|
|
||||||
// returns an empty string if the passed http.Response is nil or the header does not exist.
|
|
||||||
func ExtractHeaderValue(header string, resp *http.Response) string {
|
|
||||||
h := ExtractHeader(header, resp)
|
|
||||||
if len(h) > 0 {
|
|
||||||
return h[0]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
52
vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
generated
vendored
52
vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
generated
vendored
|
|
@ -1,52 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic.
|
|
||||||
func NewRetriableRequest(req *http.Request) *RetriableRequest {
|
|
||||||
return &RetriableRequest{req: req}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request returns the wrapped HTTP request.
|
|
||||||
func (rr *RetriableRequest) Request() *http.Request {
|
|
||||||
return rr.req
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *RetriableRequest) prepareFromByteReader() (err error) {
|
|
||||||
// fall back to making a copy (only do this once)
|
|
||||||
b := []byte{}
|
|
||||||
if rr.req.ContentLength > 0 {
|
|
||||||
b = make([]byte, rr.req.ContentLength)
|
|
||||||
_, err = io.ReadFull(rr.req.Body, b)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
b, err = ioutil.ReadAll(rr.req.Body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rr.br = bytes.NewReader(b)
|
|
||||||
rr.req.Body = ioutil.NopCloser(rr.br)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
54
vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
generated
vendored
54
vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
generated
vendored
|
|
@ -1,54 +0,0 @@
|
||||||
// +build !go1.8
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package autorest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RetriableRequest provides facilities for retrying an HTTP request.
|
|
||||||
type RetriableRequest struct {
|
|
||||||
req *http.Request
|
|
||||||
br *bytes.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare signals that the request is about to be sent.
|
|
||||||
func (rr *RetriableRequest) Prepare() (err error) {
|
|
||||||
// preserve the request body; this is to support retry logic as
|
|
||||||
// the underlying transport will always close the reqeust body
|
|
||||||
if rr.req.Body != nil {
|
|
||||||
if rr.br != nil {
|
|
||||||
_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
|
|
||||||
rr.req.Body = ioutil.NopCloser(rr.br)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if rr.br == nil {
|
|
||||||
// fall back to making a copy (only do this once)
|
|
||||||
err = rr.prepareFromByteReader()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeRequestBody(req *http.Request) {
|
|
||||||
req.Body = nil
|
|
||||||
req.ContentLength = 0
|
|
||||||
}
|
|
||||||
66
vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
generated
vendored
66
vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
generated
vendored
|
|
@ -1,66 +0,0 @@
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package autorest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RetriableRequest provides facilities for retrying an HTTP request.
|
|
||||||
type RetriableRequest struct {
|
|
||||||
req *http.Request
|
|
||||||
rc io.ReadCloser
|
|
||||||
br *bytes.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare signals that the request is about to be sent.
|
|
||||||
func (rr *RetriableRequest) Prepare() (err error) {
|
|
||||||
// preserve the request body; this is to support retry logic as
|
|
||||||
// the underlying transport will always close the reqeust body
|
|
||||||
if rr.req.Body != nil {
|
|
||||||
if rr.rc != nil {
|
|
||||||
rr.req.Body = rr.rc
|
|
||||||
} else if rr.br != nil {
|
|
||||||
_, err = rr.br.Seek(0, io.SeekStart)
|
|
||||||
rr.req.Body = ioutil.NopCloser(rr.br)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if rr.req.GetBody != nil {
|
|
||||||
// this will allow us to preserve the body without having to
|
|
||||||
// make a copy. note we need to do this on each iteration
|
|
||||||
rr.rc, err = rr.req.GetBody()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if rr.br == nil {
|
|
||||||
// fall back to making a copy (only do this once)
|
|
||||||
err = rr.prepareFromByteReader()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeRequestBody(req *http.Request) {
|
|
||||||
req.Body = nil
|
|
||||||
req.GetBody = nil
|
|
||||||
req.ContentLength = 0
|
|
||||||
}
|
|
||||||
317
vendor/github.com/Azure/go-autorest/autorest/sender.go
generated
vendored
317
vendor/github.com/Azure/go-autorest/autorest/sender.go
generated
vendored
|
|
@ -1,317 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Sender is the interface that wraps the Do method to send HTTP requests.
|
|
||||||
//
|
|
||||||
// The standard http.Client conforms to this interface.
|
|
||||||
type Sender interface {
|
|
||||||
Do(*http.Request) (*http.Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SenderFunc is a method that implements the Sender interface.
|
|
||||||
type SenderFunc func(*http.Request) (*http.Response, error)
|
|
||||||
|
|
||||||
// Do implements the Sender interface on SenderFunc.
|
|
||||||
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
|
|
||||||
return sf(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
|
|
||||||
// http.Request and pass it along or, first, pass the http.Request along then react to the
|
|
||||||
// http.Response result.
|
|
||||||
type SendDecorator func(Sender) Sender
|
|
||||||
|
|
||||||
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
|
|
||||||
func CreateSender(decorators ...SendDecorator) Sender {
|
|
||||||
return DecorateSender(&http.Client{}, decorators...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
|
|
||||||
// the Sender. Decorators are applied in the order received, but their affect upon the request
|
|
||||||
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
|
|
||||||
// post-decorator (pass the http.Request along and react to the results in http.Response).
|
|
||||||
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
|
|
||||||
for _, decorate := range decorators {
|
|
||||||
s = decorate(s)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send sends, by means of the default http.Client, the passed http.Request, returning the
|
|
||||||
// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
|
|
||||||
// it will apply the http.Client before invoking the Do method.
|
|
||||||
//
|
|
||||||
// Send is a convenience method and not recommended for production. Advanced users should use
|
|
||||||
// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
|
|
||||||
//
|
|
||||||
// Send will not poll or retry requests.
|
|
||||||
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
|
|
||||||
return SendWithSender(&http.Client{}, r, decorators...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
|
|
||||||
// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
|
|
||||||
// it will apply the http.Client before invoking the Do method.
|
|
||||||
//
|
|
||||||
// SendWithSender will not poll or retry requests.
|
|
||||||
func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
|
|
||||||
return DecorateSender(s, decorators...).Do(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
|
|
||||||
// invoking the Sender. The delay may be terminated by closing the optional channel on the
|
|
||||||
// http.Request. If canceled, no further Senders are invoked.
|
|
||||||
func AfterDelay(d time.Duration) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
if !DelayForBackoff(d, 0, r.Cancel) {
|
|
||||||
return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
|
|
||||||
}
|
|
||||||
return s.Do(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
|
|
||||||
func AsIs() SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
return s.Do(r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
|
|
||||||
// it closes the response if the passed Sender returns an error and the response body exists.
|
|
||||||
func DoCloseIfError() SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
resp, err := s.Do(r)
|
|
||||||
if err != nil {
|
|
||||||
Respond(resp, ByDiscardingBody(), ByClosing())
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
|
|
||||||
// among the set passed. Since these are artificial errors, the response body may still require
|
|
||||||
// closing.
|
|
||||||
func DoErrorIfStatusCode(codes ...int) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
resp, err := s.Do(r)
|
|
||||||
if err == nil && ResponseHasStatusCode(resp, codes...) {
|
|
||||||
err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
|
|
||||||
resp.Request.Method,
|
|
||||||
resp.Request.URL,
|
|
||||||
resp.Status)
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
|
|
||||||
// StatusCode is among the set passed. Since these are artificial errors, the response body
|
|
||||||
// may still require closing.
|
|
||||||
func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
resp, err := s.Do(r)
|
|
||||||
if err == nil && !ResponseHasStatusCode(resp, codes...) {
|
|
||||||
err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
|
|
||||||
resp.Request.Method,
|
|
||||||
resp.Request.URL,
|
|
||||||
resp.Status)
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
|
|
||||||
// passed status codes. It expects the http.Response to contain a Location header providing the
|
|
||||||
// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
|
|
||||||
// the supplied duration. It will delay between requests for the duration specified in the
|
|
||||||
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
|
|
||||||
// closing the optional channel on the http.Request.
|
|
||||||
func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
|
||||||
resp, err = s.Do(r)
|
|
||||||
|
|
||||||
if err == nil && ResponseHasStatusCode(resp, codes...) {
|
|
||||||
r, err = NewPollingRequest(resp, r.Cancel)
|
|
||||||
|
|
||||||
for err == nil && ResponseHasStatusCode(resp, codes...) {
|
|
||||||
Respond(resp,
|
|
||||||
ByDiscardingBody(),
|
|
||||||
ByClosing())
|
|
||||||
resp, err = SendWithSender(s, r,
|
|
||||||
AfterDelay(GetRetryAfter(resp, delay)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
|
|
||||||
// number of attempts, exponentially backing off between requests using the supplied backoff
|
|
||||||
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
|
|
||||||
// the http.Request.
|
|
||||||
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
|
||||||
rr := NewRetriableRequest(r)
|
|
||||||
for attempt := 0; attempt < attempts; attempt++ {
|
|
||||||
err = rr.Prepare()
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
resp, err = s.Do(rr.Request())
|
|
||||||
if err == nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
DelayForBackoff(backoff, attempt, r.Cancel)
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
|
|
||||||
// number of attempts, exponentially backing off between requests using the supplied backoff
|
|
||||||
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
|
|
||||||
// the http.Request.
|
|
||||||
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
|
||||||
rr := NewRetriableRequest(r)
|
|
||||||
// Increment to add the first call (attempts denotes number of retries)
|
|
||||||
attempts++
|
|
||||||
for attempt := 0; attempt < attempts; {
|
|
||||||
err = rr.Prepare()
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
resp, err = s.Do(rr.Request())
|
|
||||||
// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
|
|
||||||
// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
|
|
||||||
if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
delayed := DelayWithRetryAfter(resp, r.Cancel)
|
|
||||||
if !delayed {
|
|
||||||
DelayForBackoff(backoff, attempt, r.Cancel)
|
|
||||||
}
|
|
||||||
// don't count a 429 against the number of attempts
|
|
||||||
// so that we continue to retry until it succeeds
|
|
||||||
if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
|
|
||||||
attempt++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
|
|
||||||
// responses with status code 429
|
|
||||||
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
|
|
||||||
if resp == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
|
|
||||||
if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
|
|
||||||
select {
|
|
||||||
case <-time.After(time.Duration(retryAfter) * time.Second):
|
|
||||||
return true
|
|
||||||
case <-cancel:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
|
|
||||||
// to or greater than the specified duration, exponentially backing off between requests using the
|
|
||||||
// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
|
|
||||||
// optional channel on the http.Request.
|
|
||||||
func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
|
||||||
rr := NewRetriableRequest(r)
|
|
||||||
end := time.Now().Add(d)
|
|
||||||
for attempt := 0; time.Now().Before(end); attempt++ {
|
|
||||||
err = rr.Prepare()
|
|
||||||
if err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
resp, err = s.Do(rr.Request())
|
|
||||||
if err == nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
DelayForBackoff(backoff, attempt, r.Cancel)
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLogging returns a SendDecorator that implements simple before and after logging of the
|
|
||||||
// request.
|
|
||||||
func WithLogging(logger *log.Logger) SendDecorator {
|
|
||||||
return func(s Sender) Sender {
|
|
||||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
|
||||||
logger.Printf("Sending %s %s", r.Method, r.URL)
|
|
||||||
resp, err := s.Do(r)
|
|
||||||
if err != nil {
|
|
||||||
logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
|
|
||||||
} else {
|
|
||||||
logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set
// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
// returns false.
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
// count.
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
	// wait = backoff * 2^attempt, truncated to whole seconds.
	wait := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
	select {
	case <-time.After(wait):
		return true
	case <-cancel:
		return false
	}
}
|
|
||||||
218
vendor/github.com/Azure/go-autorest/autorest/utility.go
generated
vendored
218
vendor/github.com/Azure/go-autorest/autorest/utility.go
generated
vendored
|
|
@ -1,218 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest/adal"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EncodedAs is a series of constants specifying various data encodings
type EncodedAs string

const (
	// EncodedAsJSON states that data is encoded as JSON
	EncodedAsJSON EncodedAs = "JSON"

	// EncodedAsXML states that data is encoded as Xml
	EncodedAsXML EncodedAs = "XML"
)

// Decoder defines the decoding method json.Decoder and xml.Decoder share
type Decoder interface {
	Decode(v interface{}) error
}

// NewDecoder creates a new decoder appropriate to the passed encoding.
// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
// encoded data. An unrecognized encoding yields nil.
func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
	switch encodedAs {
	case EncodedAsJSON:
		return json.NewDecoder(r)
	case EncodedAsXML:
		return xml.NewDecoder(r)
	default:
		return nil
	}
}
|
|
||||||
|
|
||||||
// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
|
|
||||||
// is especially useful if there is a chance the data will fail to decode.
|
|
||||||
// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
|
|
||||||
// is the decoding destination.
|
|
||||||
func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
// Further, when it is closed, it ensures that rc is closed as well.
func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
	return &teeReadCloser{
		rc: rc,
		r:  io.TeeReader(rc, w),
	}
}

// teeReadCloser pairs the original ReadCloser (for closing) with a TeeReader (for reading).
type teeReadCloser struct {
	rc io.ReadCloser // underlying source; closed by Close
	r  io.Reader     // TeeReader that mirrors reads from rc into the writer
}

// Read delegates to the tee reader so every byte read is also written out.
func (t *teeReadCloser) Read(p []byte) (int, error) {
	return t.r.Read(p)
}

// Close closes the underlying ReadCloser.
func (t *teeReadCloser) Close() error {
	return t.rc.Close()
}
|
|
||||||
|
|
||||||
// containsInt reports whether n is present in ints.
func containsInt(ints []int, n int) bool {
	for _, candidate := range ints {
		if candidate == n {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// escapeValueStrings query-escapes every value of m in place and returns m.
func escapeValueStrings(m map[string]string) map[string]string {
	for k := range m {
		m[k] = url.QueryEscape(m[k])
	}
	return m
}
|
|
||||||
|
|
||||||
// ensureValueStrings converts every value of mapOfInterface to a string via ensureValueString.
func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
	out := make(map[string]string, len(mapOfInterface))
	for k, v := range mapOfInterface {
		out[k] = ensureValueString(v)
	}
	return out
}

// ensureValueString renders value as a string: nil becomes "", strings and byte
// slices pass through, everything else is formatted with %v.
func ensureValueString(value interface{}) string {
	switch v := value.(type) {
	case nil:
		return ""
	case string:
		return v
	case []byte:
		return string(v)
	default:
		return fmt.Sprintf("%v", v)
	}
}
|
|
||||||
|
|
||||||
// MapToValues method converts map[string]interface{} to url.Values.
|
|
||||||
func MapToValues(m map[string]interface{}) url.Values {
|
|
||||||
v := url.Values{}
|
|
||||||
for key, value := range m {
|
|
||||||
x := reflect.ValueOf(value)
|
|
||||||
if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
|
|
||||||
for i := 0; i < x.Len(); i++ {
|
|
||||||
v.Add(key, ensureValueString(x.Index(i)))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
v.Add(key, ensureValueString(value))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsStringSlice method converts interface{} to []string. This expects a
|
|
||||||
//that the parameter passed to be a slice or array of a type that has the underlying
|
|
||||||
//type a string.
|
|
||||||
func AsStringSlice(s interface{}) ([]string, error) {
|
|
||||||
v := reflect.ValueOf(s)
|
|
||||||
if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
|
|
||||||
return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
|
|
||||||
}
|
|
||||||
stringSlice := make([]string, 0, v.Len())
|
|
||||||
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
stringSlice = append(stringSlice, v.Index(i).String())
|
|
||||||
}
|
|
||||||
return stringSlice, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// String method converts interface v to string. If interface is a list, it
|
|
||||||
// joins list elements using the seperator. Note that only sep[0] will be used for
|
|
||||||
// joining if any separator is specified.
|
|
||||||
func String(v interface{}, sep ...string) string {
|
|
||||||
if len(sep) == 0 {
|
|
||||||
return ensureValueString(v)
|
|
||||||
}
|
|
||||||
stringSlice, ok := v.([]string)
|
|
||||||
if ok == false {
|
|
||||||
var err error
|
|
||||||
stringSlice, err = AsStringSlice(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ensureValueString(strings.Join(stringSlice, sep[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode method encodes url path and query parameters.
|
|
||||||
func Encode(location string, v interface{}, sep ...string) string {
|
|
||||||
s := String(v, sep...)
|
|
||||||
switch strings.ToLower(location) {
|
|
||||||
case "path":
|
|
||||||
return pathEscape(s)
|
|
||||||
case "query":
|
|
||||||
return queryEscape(s)
|
|
||||||
default:
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// pathEscape query-escapes s for use in a URL path, rendering spaces as %20
// (QueryEscape produces '+', which is not valid in a path segment).
func pathEscape(s string) string {
	escaped := url.QueryEscape(s)
	return strings.Replace(escaped, "+", "%20", -1)
}

// queryEscape escapes s for safe placement in a URL query string.
func queryEscape(s string) string {
	return url.QueryEscape(s)
}
|
|
||||||
|
|
||||||
// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
|
|
||||||
// This is mainly useful for long-running operations that use the Azure-AsyncOperation
|
|
||||||
// header, so we change the initial PUT into a GET to retrieve the final result.
|
|
||||||
func ChangeToGet(req *http.Request) *http.Request {
|
|
||||||
req.Method = "GET"
|
|
||||||
req.Body = nil
|
|
||||||
req.ContentLength = 0
|
|
||||||
req.Header.Del("Content-Length")
|
|
||||||
return req
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
|
|
||||||
// interface. If err is a DetailedError it will walk the chain of Original errors.
|
|
||||||
func IsTokenRefreshError(err error) bool {
|
|
||||||
if _, ok := err.(adal.TokenRefreshError); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if de, ok := err.(DetailedError); ok {
|
|
||||||
return IsTokenRefreshError(de.Original)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
49
vendor/github.com/Azure/go-autorest/autorest/version.go
generated
vendored
49
vendor/github.com/Azure/go-autorest/autorest/version.go
generated
vendored
|
|
@ -1,49 +0,0 @@
|
||||||
package autorest
|
|
||||||
|
|
||||||
// Copyright 2017 Microsoft Corporation
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	major = 9
	minor = 8
	patch = 1
	tag   = ""
)

// once guards the lazy, one-time construction of version.
var once sync.Once
var version string

// Version returns the semantic version (see http://semver.org).
func Version() string {
	once.Do(func() {
		semver := fmt.Sprintf("%d.%d.%d", major, minor, patch)
		verBuilder := bytes.NewBufferString(semver)
		if tag != "" && tag != "-" {
			updated := strings.TrimPrefix(tag, "-")
			_, err := verBuilder.WriteString("-" + updated)
			// BUG FIX: the original reset the builder when the write SUCCEEDED
			// (err == nil), which always dropped the tag suffix, since
			// bytes.Buffer.WriteString never returns a non-nil error. Fall back
			// to the bare semver only when the write fails.
			if err != nil {
				verBuilder = bytes.NewBufferString(semver)
			}
		}
		version = verBuilder.String()
	})
	return version
}
|
|
||||||
201
vendor/github.com/PaulARoy/azurestoragecache/LICENSE
generated
vendored
201
vendor/github.com/PaulARoy/azurestoragecache/LICENSE
generated
vendored
|
|
@ -1,201 +0,0 @@
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright {yyyy} {name of copyright owner}
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
2
vendor/github.com/PaulARoy/azurestoragecache/README.md
generated
vendored
2
vendor/github.com/PaulARoy/azurestoragecache/README.md
generated
vendored
|
|
@ -1,2 +0,0 @@
|
||||||
# azurestoragecache
|
|
||||||
Azure Storage interface for a Go cache
|
|
||||||
136
vendor/github.com/PaulARoy/azurestoragecache/azurestoragecache.go
generated
vendored
136
vendor/github.com/PaulARoy/azurestoragecache/azurestoragecache.go
generated
vendored
|
|
@ -1,136 +0,0 @@
|
||||||
// Copyright 2017 Paul Roy All rights reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package azurestoragecache provides an implementation of httpcache.Cache that
|
|
||||||
// stores and retrieves data using Azure Storage.
|
|
||||||
package azurestoragecache // import "github.com/PaulARoy/azurestoragecache"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cache stores and retrieves data using Azure Storage.
|
|
||||||
type Cache struct {
|
|
||||||
// The Azure Blob Storage Client
|
|
||||||
client storage.BlobStorageClient
|
|
||||||
|
|
||||||
// container name to use to store blobs
|
|
||||||
container string
|
|
||||||
}
|
|
||||||
|
|
||||||
var noLogErrors, _ = strconv.ParseBool(os.Getenv("NO_LOG_AZUREBSCACHE_ERRORS"))
|
|
||||||
|
|
||||||
func keyToFilename(key string) string {
|
|
||||||
h := md5.New()
|
|
||||||
io.WriteString(h, key)
|
|
||||||
return hex.EncodeToString(h.Sum(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// blob retrieves a storage.Blob reference for the specified key.
|
|
||||||
func (c *Cache) blob(key string) *storage.Blob {
|
|
||||||
return c.client.GetContainerReference(c.container).GetBlobReference(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the cached value with the specified key.
|
|
||||||
func (c *Cache) Get(key string) (resp []byte, ok bool) {
|
|
||||||
key = keyToFilename(key)
|
|
||||||
rdr, err := c.blob(key).Get(nil)
|
|
||||||
if err != nil {
|
|
||||||
return []byte{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err = ioutil.ReadAll(rdr)
|
|
||||||
if err != nil {
|
|
||||||
if !noLogErrors {
|
|
||||||
log.Printf("azurestoragecache.Get failed: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rdr.Close()
|
|
||||||
return resp, err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the cached value with the specified key.
|
|
||||||
func (c *Cache) Set(key string, value []byte) {
|
|
||||||
key = keyToFilename(key)
|
|
||||||
err := c.blob(key).CreateBlockBlobFromReader(bytes.NewReader(value), nil)
|
|
||||||
if err != nil {
|
|
||||||
if !noLogErrors {
|
|
||||||
log.Printf("azurestoragecache.Set failed: %s", err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete the cached value with the specified key.
|
|
||||||
func (c *Cache) Delete(key string) {
|
|
||||||
key = keyToFilename(key)
|
|
||||||
res, err := c.blob(key).DeleteIfExists(nil)
|
|
||||||
if !noLogErrors {
|
|
||||||
log.Printf("azurestoragecache.Delete result: %s", res)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if !noLogErrors {
|
|
||||||
log.Printf("azurestoragecache.Delete failed: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns a new Cache with underlying client for Azure Storage.
|
|
||||||
//
|
|
||||||
// accountName and accountKey are the Azure Storage credentials. If either are
|
|
||||||
// empty, the contents of the environment variables AZURESTORAGE_ACCOUNT_NAME
|
|
||||||
// and AZURESTORAGE_ACCESS_KEY will be used.
|
|
||||||
//
|
|
||||||
// containerName is the container name in which cached values will be stored.
|
|
||||||
// If not specified, "cache" will be used.
|
|
||||||
func New(accountName string, accountKey string, containerName string) (*Cache, error) {
|
|
||||||
if accountName == "" {
|
|
||||||
accountName = os.Getenv("AZURESTORAGE_ACCOUNT_NAME")
|
|
||||||
}
|
|
||||||
|
|
||||||
if accountKey == "" {
|
|
||||||
accountKey = os.Getenv("AZURESTORAGE_ACCESS_KEY")
|
|
||||||
}
|
|
||||||
|
|
||||||
if containerName == "" {
|
|
||||||
containerName = "cache"
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := storage.NewBasicClient(accountName, accountKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cache := Cache{
|
|
||||||
client: client.GetBlobService(),
|
|
||||||
container: containerName,
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = cache.client.GetContainerReference(cache.container).CreateIfNotExists(&storage.CreateContainerOptions{Access: storage.ContainerAccessTypeBlob})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &cache, nil
|
|
||||||
}
|
|
||||||
4
vendor/github.com/PaulARoy/azurestoragecache/glide.yaml
generated
vendored
4
vendor/github.com/PaulARoy/azurestoragecache/glide.yaml
generated
vendored
|
|
@ -1,4 +0,0 @@
|
||||||
package: .
|
|
||||||
import:
|
|
||||||
- package: github.com/Azure/azure-sdk-for-go
|
|
||||||
version: v7.0.1-beta
|
|
||||||
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
|
|
@ -1,3 +0,0 @@
|
||||||
AWS SDK for Go
|
|
||||||
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
||||||
Copyright 2014-2015 Stripe, Inc.
|
|
||||||
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
|
|
@ -1,145 +0,0 @@
|
||||||
// Package awserr represents API error interface accessors for the SDK.
|
|
||||||
package awserr
|
|
||||||
|
|
||||||
// An Error wraps lower level errors with code, message and an original error.
|
|
||||||
// The underlying concrete error type may also satisfy other interfaces which
|
|
||||||
// can be to used to obtain more specific information about the error.
|
|
||||||
//
|
|
||||||
// Calling Error() or String() will always include the full information about
|
|
||||||
// an error based on its underlying type.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// output, err := s3manage.Upload(svc, input, opts)
|
|
||||||
// if err != nil {
|
|
||||||
// if awsErr, ok := err.(awserr.Error); ok {
|
|
||||||
// // Get error details
|
|
||||||
// log.Println("Error:", awsErr.Code(), awsErr.Message())
|
|
||||||
//
|
|
||||||
// // Prints out full error message, including original error if there was one.
|
|
||||||
// log.Println("Error:", awsErr.Error())
|
|
||||||
//
|
|
||||||
// // Get original error
|
|
||||||
// if origErr := awsErr.OrigErr(); origErr != nil {
|
|
||||||
// // operate on original error.
|
|
||||||
// }
|
|
||||||
// } else {
|
|
||||||
// fmt.Println(err.Error())
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type Error interface {
|
|
||||||
// Satisfy the generic error interface.
|
|
||||||
error
|
|
||||||
|
|
||||||
// Returns the short phrase depicting the classification of the error.
|
|
||||||
Code() string
|
|
||||||
|
|
||||||
// Returns the error details message.
|
|
||||||
Message() string
|
|
||||||
|
|
||||||
// Returns the original error if one was set. Nil is returned if not set.
|
|
||||||
OrigErr() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// BatchError is a batch of errors which also wraps lower level errors with
|
|
||||||
// code, message, and original errors. Calling Error() will include all errors
|
|
||||||
// that occurred in the batch.
|
|
||||||
//
|
|
||||||
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
|
|
||||||
// compatibility.
|
|
||||||
type BatchError interface {
|
|
||||||
// Satisfy the generic error interface.
|
|
||||||
error
|
|
||||||
|
|
||||||
// Returns the short phrase depicting the classification of the error.
|
|
||||||
Code() string
|
|
||||||
|
|
||||||
// Returns the error details message.
|
|
||||||
Message() string
|
|
||||||
|
|
||||||
// Returns the original error if one was set. Nil is returned if not set.
|
|
||||||
OrigErrs() []error
|
|
||||||
}
|
|
||||||
|
|
||||||
// BatchedErrors is a batch of errors which also wraps lower level errors with
|
|
||||||
// code, message, and original errors. Calling Error() will include all errors
|
|
||||||
// that occurred in the batch.
|
|
||||||
//
|
|
||||||
// Replaces BatchError
|
|
||||||
type BatchedErrors interface {
|
|
||||||
// Satisfy the base Error interface.
|
|
||||||
Error
|
|
||||||
|
|
||||||
// Returns the original error if one was set. Nil is returned if not set.
|
|
||||||
OrigErrs() []error
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns an Error object described by the code, message, and origErr.
|
|
||||||
//
|
|
||||||
// If origErr satisfies the Error interface it will not be wrapped within a new
|
|
||||||
// Error object and will instead be returned.
|
|
||||||
func New(code, message string, origErr error) Error {
|
|
||||||
var errs []error
|
|
||||||
if origErr != nil {
|
|
||||||
errs = append(errs, origErr)
|
|
||||||
}
|
|
||||||
return newBaseError(code, message, errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBatchError returns an BatchedErrors with a collection of errors as an
|
|
||||||
// array of errors.
|
|
||||||
func NewBatchError(code, message string, errs []error) BatchedErrors {
|
|
||||||
return newBaseError(code, message, errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A RequestFailure is an interface to extract request failure information from
|
|
||||||
// an Error such as the request ID of the failed request returned by a service.
|
|
||||||
// RequestFailures may not always have a requestID value if the request failed
|
|
||||||
// prior to reaching the service such as a connection error.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// output, err := s3manage.Upload(svc, input, opts)
|
|
||||||
// if err != nil {
|
|
||||||
// if reqerr, ok := err.(RequestFailure); ok {
|
|
||||||
// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
|
|
||||||
// } else {
|
|
||||||
// log.Println("Error:", err.Error())
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Combined with awserr.Error:
|
|
||||||
//
|
|
||||||
// output, err := s3manage.Upload(svc, input, opts)
|
|
||||||
// if err != nil {
|
|
||||||
// if awsErr, ok := err.(awserr.Error); ok {
|
|
||||||
// // Generic AWS Error with Code, Message, and original error (if any)
|
|
||||||
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
|
||||||
//
|
|
||||||
// if reqErr, ok := err.(awserr.RequestFailure); ok {
|
|
||||||
// // A service error occurred
|
|
||||||
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
|
|
||||||
// }
|
|
||||||
// } else {
|
|
||||||
// fmt.Println(err.Error())
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type RequestFailure interface {
|
|
||||||
Error
|
|
||||||
|
|
||||||
// The status code of the HTTP response.
|
|
||||||
StatusCode() int
|
|
||||||
|
|
||||||
// The request ID returned by the service for a request failure. This will
|
|
||||||
// be empty if no request ID is available such as the request failed due
|
|
||||||
// to a connection error.
|
|
||||||
RequestID() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequestFailure returns a new request error wrapper for the given Error
|
|
||||||
// provided.
|
|
||||||
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
|
|
||||||
return newRequestError(err, statusCode, reqID)
|
|
||||||
}
|
|
||||||
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
|
|
@ -1,194 +0,0 @@
|
||||||
package awserr
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// SprintError returns a string of the formatted error code.
|
|
||||||
//
|
|
||||||
// Both extra and origErr are optional. If they are included their lines
|
|
||||||
// will be added, but if they are not included their lines will be ignored.
|
|
||||||
func SprintError(code, message, extra string, origErr error) string {
|
|
||||||
msg := fmt.Sprintf("%s: %s", code, message)
|
|
||||||
if extra != "" {
|
|
||||||
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
|
|
||||||
}
|
|
||||||
if origErr != nil {
|
|
||||||
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// A baseError wraps the code and message which defines an error. It also
|
|
||||||
// can be used to wrap an original error object.
|
|
||||||
//
|
|
||||||
// Should be used as the root for errors satisfying the awserr.Error. Also
|
|
||||||
// for any error which does not fit into a specific error wrapper type.
|
|
||||||
type baseError struct {
|
|
||||||
// Classification of error
|
|
||||||
code string
|
|
||||||
|
|
||||||
// Detailed information about error
|
|
||||||
message string
|
|
||||||
|
|
||||||
// Optional original error this error is based off of. Allows building
|
|
||||||
// chained errors.
|
|
||||||
errs []error
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBaseError returns an error object for the code, message, and errors.
|
|
||||||
//
|
|
||||||
// code is a short no whitespace phrase depicting the classification of
|
|
||||||
// the error that is being created.
|
|
||||||
//
|
|
||||||
// message is the free flow string containing detailed information about the
|
|
||||||
// error.
|
|
||||||
//
|
|
||||||
// origErrs is the error objects which will be nested under the new errors to
|
|
||||||
// be returned.
|
|
||||||
func newBaseError(code, message string, origErrs []error) *baseError {
|
|
||||||
b := &baseError{
|
|
||||||
code: code,
|
|
||||||
message: message,
|
|
||||||
errs: origErrs,
|
|
||||||
}
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the string representation of the error.
|
|
||||||
//
|
|
||||||
// See ErrorWithExtra for formatting.
|
|
||||||
//
|
|
||||||
// Satisfies the error interface.
|
|
||||||
func (b baseError) Error() string {
|
|
||||||
size := len(b.errs)
|
|
||||||
if size > 0 {
|
|
||||||
return SprintError(b.code, b.message, "", errorList(b.errs))
|
|
||||||
}
|
|
||||||
|
|
||||||
return SprintError(b.code, b.message, "", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string representation of the error.
|
|
||||||
// Alias for Error to satisfy the stringer interface.
|
|
||||||
func (b baseError) String() string {
|
|
||||||
return b.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code returns the short phrase depicting the classification of the error.
|
|
||||||
func (b baseError) Code() string {
|
|
||||||
return b.code
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message returns the error details message.
|
|
||||||
func (b baseError) Message() string {
|
|
||||||
return b.message
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr returns the original error if one was set. Nil is returned if no
|
|
||||||
// error was set. This only returns the first element in the list. If the full
|
|
||||||
// list is needed, use BatchedErrors.
|
|
||||||
func (b baseError) OrigErr() error {
|
|
||||||
switch len(b.errs) {
|
|
||||||
case 0:
|
|
||||||
return nil
|
|
||||||
case 1:
|
|
||||||
return b.errs[0]
|
|
||||||
default:
|
|
||||||
if err, ok := b.errs[0].(Error); ok {
|
|
||||||
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
|
|
||||||
}
|
|
||||||
return NewBatchError("BatchedErrors",
|
|
||||||
"multiple errors occurred", b.errs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
|
||||||
// returned if no error was set.
|
|
||||||
func (b baseError) OrigErrs() []error {
|
|
||||||
return b.errs
|
|
||||||
}
|
|
||||||
|
|
||||||
// So that the Error interface type can be included as an anonymous field
|
|
||||||
// in the requestError struct and not conflict with the error.Error() method.
|
|
||||||
type awsError Error
|
|
||||||
|
|
||||||
// A requestError wraps a request or service error.
|
|
||||||
//
|
|
||||||
// Composed of baseError for code, message, and original error.
|
|
||||||
type requestError struct {
|
|
||||||
awsError
|
|
||||||
statusCode int
|
|
||||||
requestID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRequestError returns a wrapped error with additional information for
|
|
||||||
// request status code, and service requestID.
|
|
||||||
//
|
|
||||||
// Should be used to wrap all request which involve service requests. Even if
|
|
||||||
// the request failed without a service response, but had an HTTP status code
|
|
||||||
// that may be meaningful.
|
|
||||||
//
|
|
||||||
// Also wraps original errors via the baseError.
|
|
||||||
func newRequestError(err Error, statusCode int, requestID string) *requestError {
|
|
||||||
return &requestError{
|
|
||||||
awsError: err,
|
|
||||||
statusCode: statusCode,
|
|
||||||
requestID: requestID,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the string representation of the error.
|
|
||||||
// Satisfies the error interface.
|
|
||||||
func (r requestError) Error() string {
|
|
||||||
extra := fmt.Sprintf("status code: %d, request id: %s",
|
|
||||||
r.statusCode, r.requestID)
|
|
||||||
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string representation of the error.
|
|
||||||
// Alias for Error to satisfy the stringer interface.
|
|
||||||
func (r requestError) String() string {
|
|
||||||
return r.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// StatusCode returns the wrapped status code for the error
|
|
||||||
func (r requestError) StatusCode() int {
|
|
||||||
return r.statusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestID returns the wrapped requestID
|
|
||||||
func (r requestError) RequestID() string {
|
|
||||||
return r.requestID
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
|
||||||
// returned if no error was set.
|
|
||||||
func (r requestError) OrigErrs() []error {
|
|
||||||
if b, ok := r.awsError.(BatchedErrors); ok {
|
|
||||||
return b.OrigErrs()
|
|
||||||
}
|
|
||||||
return []error{r.OrigErr()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// An error list that satisfies the golang interface
|
|
||||||
type errorList []error
|
|
||||||
|
|
||||||
// Error returns the string representation of the error.
|
|
||||||
//
|
|
||||||
// Satisfies the error interface.
|
|
||||||
func (e errorList) Error() string {
|
|
||||||
msg := ""
|
|
||||||
// How do we want to handle the array size being zero
|
|
||||||
if size := len(e); size > 0 {
|
|
||||||
for i := 0; i < size; i++ {
|
|
||||||
msg += fmt.Sprintf("%s", e[i].Error())
|
|
||||||
// We check the next index to see if it is within the slice.
|
|
||||||
// If it is, then we append a newline. We do this, because unit tests
|
|
||||||
// could be broken with the additional '\n'
|
|
||||||
if i+1 < size {
|
|
||||||
msg += "\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
108
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
108
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
|
|
@ -1,108 +0,0 @@
|
||||||
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copy deeply copies a src structure to dst. Useful for copying request and
|
|
||||||
// response structures.
|
|
||||||
//
|
|
||||||
// Can copy between structs of different type, but will only copy fields which
|
|
||||||
// are assignable, and exist in both structs. Fields which are not assignable,
|
|
||||||
// or do not exist in both structs are ignored.
|
|
||||||
func Copy(dst, src interface{}) {
|
|
||||||
dstval := reflect.ValueOf(dst)
|
|
||||||
if !dstval.IsValid() {
|
|
||||||
panic("Copy dst cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
rcopy(dstval, reflect.ValueOf(src), true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyOf returns a copy of src while also allocating the memory for dst.
|
|
||||||
// src must be a pointer type or this operation will fail.
|
|
||||||
func CopyOf(src interface{}) (dst interface{}) {
|
|
||||||
dsti := reflect.New(reflect.TypeOf(src).Elem())
|
|
||||||
dst = dsti.Interface()
|
|
||||||
rcopy(dsti, reflect.ValueOf(src), true)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// rcopy performs a recursive copy of values from the source to destination.
|
|
||||||
//
|
|
||||||
// root is used to skip certain aspects of the copy which are not valid
|
|
||||||
// for the root node of a object.
|
|
||||||
func rcopy(dst, src reflect.Value, root bool) {
|
|
||||||
if !src.IsValid() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch src.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if _, ok := src.Interface().(io.Reader); ok {
|
|
||||||
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
|
|
||||||
dst.Elem().Set(src)
|
|
||||||
} else if dst.CanSet() {
|
|
||||||
dst.Set(src)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
e := src.Type().Elem()
|
|
||||||
if dst.CanSet() && !src.IsNil() {
|
|
||||||
if _, ok := src.Interface().(*time.Time); !ok {
|
|
||||||
dst.Set(reflect.New(e))
|
|
||||||
} else {
|
|
||||||
tempValue := reflect.New(e)
|
|
||||||
tempValue.Elem().Set(src.Elem())
|
|
||||||
// Sets time.Time's unexported values
|
|
||||||
dst.Set(tempValue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if src.Elem().IsValid() {
|
|
||||||
// Keep the current root state since the depth hasn't changed
|
|
||||||
rcopy(dst.Elem(), src.Elem(), root)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
t := dst.Type()
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
name := t.Field(i).Name
|
|
||||||
srcVal := src.FieldByName(name)
|
|
||||||
dstVal := dst.FieldByName(name)
|
|
||||||
if srcVal.IsValid() && dstVal.CanSet() {
|
|
||||||
rcopy(dstVal, srcVal, false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
if src.IsNil() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
|
|
||||||
dst.Set(s)
|
|
||||||
for i := 0; i < src.Len(); i++ {
|
|
||||||
rcopy(dst.Index(i), src.Index(i), false)
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
if src.IsNil() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
s := reflect.MakeMap(src.Type())
|
|
||||||
dst.Set(s)
|
|
||||||
for _, k := range src.MapKeys() {
|
|
||||||
v := src.MapIndex(k)
|
|
||||||
v2 := reflect.New(v.Type()).Elem()
|
|
||||||
rcopy(v2, v, false)
|
|
||||||
dst.SetMapIndex(k, v2)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// Assign the value if possible. If its not assignable, the value would
|
|
||||||
// need to be converted and the impact of that may be unexpected, or is
|
|
||||||
// not compatible with the dst type.
|
|
||||||
if src.Type().AssignableTo(dst.Type()) {
|
|
||||||
dst.Set(src)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
generated
vendored
27
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
generated
vendored
|
|
@ -1,27 +0,0 @@
|
||||||
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
|
|
||||||
// In addition to this, this method will also dereference the input values if
|
|
||||||
// possible so the DeepEqual performed will not fail if one parameter is a
|
|
||||||
// pointer and the other is not.
|
|
||||||
//
|
|
||||||
// DeepEqual will not perform indirection of nested values of the input parameters.
|
|
||||||
func DeepEqual(a, b interface{}) bool {
|
|
||||||
ra := reflect.Indirect(reflect.ValueOf(a))
|
|
||||||
rb := reflect.Indirect(reflect.ValueOf(b))
|
|
||||||
|
|
||||||
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
|
|
||||||
// If the elements are both nil, and of the same type the are equal
|
|
||||||
// If they are of different types they are not equal
|
|
||||||
return reflect.TypeOf(a) == reflect.TypeOf(b)
|
|
||||||
} else if raValid != rbValid {
|
|
||||||
// Both values must be valid to be equal
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return reflect.DeepEqual(ra.Interface(), rb.Interface())
|
|
||||||
}
|
|
||||||
222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
222
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
|
|
@ -1,222 +0,0 @@
|
||||||
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/jmespath/go-jmespath"
|
|
||||||
)
|
|
||||||
|
|
||||||
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
|
|
||||||
|
|
||||||
// rValuesAtPath returns a slice of values found in value v. The values
|
|
||||||
// in v are explored recursively so all nested values are collected.
|
|
||||||
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
|
|
||||||
pathparts := strings.Split(path, "||")
|
|
||||||
if len(pathparts) > 1 {
|
|
||||||
for _, pathpart := range pathparts {
|
|
||||||
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
|
|
||||||
if len(vals) > 0 {
|
|
||||||
return vals
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
|
|
||||||
components := strings.Split(path, ".")
|
|
||||||
for len(values) > 0 && len(components) > 0 {
|
|
||||||
var index *int64
|
|
||||||
var indexStar bool
|
|
||||||
c := strings.TrimSpace(components[0])
|
|
||||||
if c == "" { // no actual component, illegal syntax
|
|
||||||
return nil
|
|
||||||
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
|
|
||||||
// TODO normalize case for user
|
|
||||||
return nil // don't support unexported fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse this component
|
|
||||||
if m := indexRe.FindStringSubmatch(c); m != nil {
|
|
||||||
c = m[1]
|
|
||||||
if m[2] == "" {
|
|
||||||
index = nil
|
|
||||||
indexStar = true
|
|
||||||
} else {
|
|
||||||
i, _ := strconv.ParseInt(m[2], 10, 32)
|
|
||||||
index = &i
|
|
||||||
indexStar = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nextvals := []reflect.Value{}
|
|
||||||
for _, value := range values {
|
|
||||||
// pull component name out of struct member
|
|
||||||
if value.Kind() != reflect.Struct {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if c == "*" { // pull all members
|
|
||||||
for i := 0; i < value.NumField(); i++ {
|
|
||||||
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
|
|
||||||
nextvals = append(nextvals, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
value = value.FieldByNameFunc(func(name string) bool {
|
|
||||||
if c == name {
|
|
||||||
return true
|
|
||||||
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
|
|
||||||
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
|
|
||||||
if !value.IsNil() {
|
|
||||||
value.Set(reflect.Zero(value.Type()))
|
|
||||||
}
|
|
||||||
return []reflect.Value{value}
|
|
||||||
}
|
|
||||||
|
|
||||||
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
|
|
||||||
// TODO if the value is the terminus it should not be created
|
|
||||||
// if the value to be set to its position is nil.
|
|
||||||
value.Set(reflect.New(value.Type().Elem()))
|
|
||||||
value = value.Elem()
|
|
||||||
} else {
|
|
||||||
value = reflect.Indirect(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
|
||||||
if !createPath && value.IsNil() {
|
|
||||||
value = reflect.ValueOf(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.IsValid() {
|
|
||||||
nextvals = append(nextvals, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
values = nextvals
|
|
||||||
|
|
||||||
if indexStar || index != nil {
|
|
||||||
nextvals = []reflect.Value{}
|
|
||||||
for _, valItem := range values {
|
|
||||||
value := reflect.Indirect(valItem)
|
|
||||||
if value.Kind() != reflect.Slice {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if indexStar { // grab all indices
|
|
||||||
for i := 0; i < value.Len(); i++ {
|
|
||||||
idx := reflect.Indirect(value.Index(i))
|
|
||||||
if idx.IsValid() {
|
|
||||||
nextvals = append(nextvals, idx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// pull out index
|
|
||||||
i := int(*index)
|
|
||||||
if i >= value.Len() { // check out of bounds
|
|
||||||
if createPath {
|
|
||||||
// TODO resize slice
|
|
||||||
} else {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if i < 0 { // support negative indexing
|
|
||||||
i = value.Len() + i
|
|
||||||
}
|
|
||||||
value = reflect.Indirect(value.Index(i))
|
|
||||||
|
|
||||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
|
||||||
if !createPath && value.IsNil() {
|
|
||||||
value = reflect.ValueOf(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.IsValid() {
|
|
||||||
nextvals = append(nextvals, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
values = nextvals
|
|
||||||
}
|
|
||||||
|
|
||||||
components = components[1:]
|
|
||||||
}
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValuesAtPath returns a list of values at the case insensitive lexical
|
|
||||||
// path inside of a structure.
|
|
||||||
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
|
|
||||||
result, err := jmespath.Search(path, i)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
v := reflect.ValueOf(result)
|
|
||||||
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if s, ok := result.([]interface{}); ok {
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
if v.Kind() == reflect.Map && v.Len() == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if v.Kind() == reflect.Slice {
|
|
||||||
out := make([]interface{}, v.Len())
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
out[i] = v.Index(i).Interface()
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return []interface{}{result}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetValueAtPath sets a value at the case insensitive lexical path inside
|
|
||||||
// of a structure.
|
|
||||||
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
|
||||||
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
|
|
||||||
for _, rval := range rvals {
|
|
||||||
if rval.Kind() == reflect.Ptr && rval.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setValue(rval, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func setValue(dstVal reflect.Value, src interface{}) {
|
|
||||||
if dstVal.Kind() == reflect.Ptr {
|
|
||||||
dstVal = reflect.Indirect(dstVal)
|
|
||||||
}
|
|
||||||
srcVal := reflect.ValueOf(src)
|
|
||||||
|
|
||||||
if !srcVal.IsValid() { // src is literal nil
|
|
||||||
if dstVal.CanAddr() {
|
|
||||||
// Convert to pointer so that pointer's value can be nil'ed
|
|
||||||
// dstVal = dstVal.Addr()
|
|
||||||
}
|
|
||||||
dstVal.Set(reflect.Zero(dstVal.Type()))
|
|
||||||
|
|
||||||
} else if srcVal.Kind() == reflect.Ptr {
|
|
||||||
if srcVal.IsNil() {
|
|
||||||
srcVal = reflect.Zero(dstVal.Type())
|
|
||||||
} else {
|
|
||||||
srcVal = reflect.ValueOf(src).Elem()
|
|
||||||
}
|
|
||||||
dstVal.Set(srcVal)
|
|
||||||
} else {
|
|
||||||
dstVal.Set(srcVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
113
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
113
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
|
|
@ -1,113 +0,0 @@
|
||||||
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Prettify returns the string representation of a value.
|
|
||||||
func Prettify(i interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
prettify(reflect.ValueOf(i), 0, &buf)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// prettify will recursively walk value v to build a textual
|
|
||||||
// representation of the value.
|
|
||||||
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
|
|
||||||
for v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
strtype := v.Type().String()
|
|
||||||
if strtype == "time.Time" {
|
|
||||||
fmt.Fprintf(buf, "%s", v.Interface())
|
|
||||||
break
|
|
||||||
} else if strings.HasPrefix(strtype, "io.") {
|
|
||||||
buf.WriteString("<buffer>")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
names := []string{}
|
|
||||||
for i := 0; i < v.Type().NumField(); i++ {
|
|
||||||
name := v.Type().Field(i).Name
|
|
||||||
f := v.Field(i)
|
|
||||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
|
||||||
continue // ignore unexported fields
|
|
||||||
}
|
|
||||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
|
|
||||||
continue // ignore unset fields
|
|
||||||
}
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, n := range names {
|
|
||||||
val := v.FieldByName(n)
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(n + ": ")
|
|
||||||
prettify(val, indent+2, buf)
|
|
||||||
|
|
||||||
if i < len(names)-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
case reflect.Slice:
|
|
||||||
strtype := v.Type().String()
|
|
||||||
if strtype == "[]uint8" {
|
|
||||||
fmt.Fprintf(buf, "<binary> len %d", v.Len())
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
nl, id, id2 := "", "", ""
|
|
||||||
if v.Len() > 3 {
|
|
||||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
|
||||||
}
|
|
||||||
buf.WriteString("[" + nl)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
buf.WriteString(id2)
|
|
||||||
prettify(v.Index(i), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString("," + nl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString(nl + id + "]")
|
|
||||||
case reflect.Map:
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
for i, k := range v.MapKeys() {
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(k.String() + ": ")
|
|
||||||
prettify(v.MapIndex(k), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
default:
|
|
||||||
if !v.IsValid() {
|
|
||||||
fmt.Fprint(buf, "<invalid value>")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
format := "%v"
|
|
||||||
switch v.Interface().(type) {
|
|
||||||
case string:
|
|
||||||
format = "%q"
|
|
||||||
case io.ReadSeeker, io.Reader:
|
|
||||||
format = "buffer(%p)"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(buf, format, v.Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
89
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
generated
vendored
|
|
@ -1,89 +0,0 @@
|
||||||
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StringValue returns the string representation of a value.
|
|
||||||
func StringValue(i interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
stringValue(reflect.ValueOf(i), 0, &buf)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
|
|
||||||
for v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
names := []string{}
|
|
||||||
for i := 0; i < v.Type().NumField(); i++ {
|
|
||||||
name := v.Type().Field(i).Name
|
|
||||||
f := v.Field(i)
|
|
||||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
|
||||||
continue // ignore unexported fields
|
|
||||||
}
|
|
||||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
|
|
||||||
continue // ignore unset fields
|
|
||||||
}
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, n := range names {
|
|
||||||
val := v.FieldByName(n)
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(n + ": ")
|
|
||||||
stringValue(val, indent+2, buf)
|
|
||||||
|
|
||||||
if i < len(names)-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
case reflect.Slice:
|
|
||||||
nl, id, id2 := "", "", ""
|
|
||||||
if v.Len() > 3 {
|
|
||||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
|
||||||
}
|
|
||||||
buf.WriteString("[" + nl)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
buf.WriteString(id2)
|
|
||||||
stringValue(v.Index(i), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString("," + nl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString(nl + id + "]")
|
|
||||||
case reflect.Map:
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
for i, k := range v.MapKeys() {
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(k.String() + ": ")
|
|
||||||
stringValue(v.MapIndex(k), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
default:
|
|
||||||
format := "%v"
|
|
||||||
switch v.Interface().(type) {
|
|
||||||
case string:
|
|
||||||
format = "%q"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(buf, format, v.Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
96
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
96
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
|
|
@ -1,96 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Config provides configuration to a service client instance.
|
|
||||||
type Config struct {
|
|
||||||
Config *aws.Config
|
|
||||||
Handlers request.Handlers
|
|
||||||
Endpoint string
|
|
||||||
SigningRegion string
|
|
||||||
SigningName string
|
|
||||||
|
|
||||||
// States that the signing name did not come from a modeled source but
|
|
||||||
// was derived based on other data. Used by service client constructors
|
|
||||||
// to determine if the signin name can be overriden based on metadata the
|
|
||||||
// service has.
|
|
||||||
SigningNameDerived bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigProvider provides a generic way for a service client to receive
|
|
||||||
// the ClientConfig without circular dependencies.
|
|
||||||
type ConfigProvider interface {
|
|
||||||
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
|
|
||||||
// resolve the endpoint automatically. The service client's endpoint must be
|
|
||||||
// provided via the aws.Config.Endpoint field.
|
|
||||||
type ConfigNoResolveEndpointProvider interface {
|
|
||||||
ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Client implements the base client request and response handling
|
|
||||||
// used by all service clients.
|
|
||||||
type Client struct {
|
|
||||||
request.Retryer
|
|
||||||
metadata.ClientInfo
|
|
||||||
|
|
||||||
Config aws.Config
|
|
||||||
Handlers request.Handlers
|
|
||||||
}
|
|
||||||
|
|
||||||
// New will return a pointer to a new initialized service client.
|
|
||||||
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
|
|
||||||
svc := &Client{
|
|
||||||
Config: cfg,
|
|
||||||
ClientInfo: info,
|
|
||||||
Handlers: handlers.Copy(),
|
|
||||||
}
|
|
||||||
|
|
||||||
switch retryer, ok := cfg.Retryer.(request.Retryer); {
|
|
||||||
case ok:
|
|
||||||
svc.Retryer = retryer
|
|
||||||
case cfg.Retryer != nil && cfg.Logger != nil:
|
|
||||||
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
|
|
||||||
cfg.Logger.Log(s)
|
|
||||||
fallthrough
|
|
||||||
default:
|
|
||||||
maxRetries := aws.IntValue(cfg.MaxRetries)
|
|
||||||
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
|
|
||||||
maxRetries = 3
|
|
||||||
}
|
|
||||||
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
|
|
||||||
}
|
|
||||||
|
|
||||||
svc.AddDebugHandlers()
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(svc)
|
|
||||||
}
|
|
||||||
|
|
||||||
return svc
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequest returns a new Request pointer for the service API
|
|
||||||
// operation and parameters.
|
|
||||||
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
|
|
||||||
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddDebugHandlers injects debug logging handlers into the service to log request
|
|
||||||
// debug information.
|
|
||||||
func (c *Client) AddDebugHandlers() {
|
|
||||||
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
|
|
||||||
c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
|
|
||||||
}
|
|
||||||
116
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
116
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
|
|
@ -1,116 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultRetryer implements basic retry logic using exponential backoff for
|
|
||||||
// most services. If you want to implement custom retry logic, implement the
|
|
||||||
// request.Retryer interface or create a structure type that composes this
|
|
||||||
// struct and override the specific methods. For example, to override only
|
|
||||||
// the MaxRetries method:
|
|
||||||
//
|
|
||||||
// type retryer struct {
|
|
||||||
// client.DefaultRetryer
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// // This implementation always has 100 max retries
|
|
||||||
// func (d retryer) MaxRetries() int { return 100 }
|
|
||||||
type DefaultRetryer struct {
|
|
||||||
NumMaxRetries int
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxRetries returns the number of maximum returns the service will use to make
|
|
||||||
// an individual API request.
|
|
||||||
func (d DefaultRetryer) MaxRetries() int {
|
|
||||||
return d.NumMaxRetries
|
|
||||||
}
|
|
||||||
|
|
||||||
// RetryRules returns the delay duration before retrying this request again
|
|
||||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
|
||||||
// Set the upper limit of delay in retrying at ~five minutes
|
|
||||||
minTime := 30
|
|
||||||
throttle := d.shouldThrottle(r)
|
|
||||||
if throttle {
|
|
||||||
if delay, ok := getRetryDelay(r); ok {
|
|
||||||
return delay
|
|
||||||
}
|
|
||||||
|
|
||||||
minTime = 500
|
|
||||||
}
|
|
||||||
|
|
||||||
retryCount := r.RetryCount
|
|
||||||
if throttle && retryCount > 8 {
|
|
||||||
retryCount = 8
|
|
||||||
} else if retryCount > 13 {
|
|
||||||
retryCount = 13
|
|
||||||
}
|
|
||||||
|
|
||||||
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
|
|
||||||
return time.Duration(delay) * time.Millisecond
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldRetry returns true if the request should be retried.
|
|
||||||
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
|
||||||
// If one of the other handlers already set the retry state
|
|
||||||
// we don't want to override it based on the service's state
|
|
||||||
if r.Retryable != nil {
|
|
||||||
return *r.Retryable
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldThrottle returns true if the request should be throttled.
|
|
||||||
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
|
||||||
switch r.HTTPResponse.StatusCode {
|
|
||||||
case 429:
|
|
||||||
case 502:
|
|
||||||
case 503:
|
|
||||||
case 504:
|
|
||||||
default:
|
|
||||||
return r.IsErrorThrottle()
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// This will look in the Retry-After header, RFC 7231, for how long
|
|
||||||
// it will wait before attempting another request
|
|
||||||
func getRetryDelay(r *request.Request) (time.Duration, bool) {
|
|
||||||
if !canUseRetryAfterHeader(r) {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
delayStr := r.HTTPResponse.Header.Get("Retry-After")
|
|
||||||
if len(delayStr) == 0 {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
delay, err := strconv.Atoi(delayStr)
|
|
||||||
if err != nil {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
return time.Duration(delay) * time.Second, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Will look at the status code to see if the retry header pertains to
|
|
||||||
// the status code.
|
|
||||||
func canUseRetryAfterHeader(r *request.Request) bool {
|
|
||||||
switch r.HTTPResponse.StatusCode {
|
|
||||||
case 429:
|
|
||||||
case 503:
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
184
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
generated
vendored
184
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
generated
vendored
|
|
@ -1,184 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http/httputil"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
const logReqMsg = `DEBUG: Request %s/%s Details:
|
|
||||||
---[ REQUEST POST-SIGN ]-----------------------------
|
|
||||||
%s
|
|
||||||
-----------------------------------------------------`
|
|
||||||
|
|
||||||
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
|
|
||||||
---[ REQUEST DUMP ERROR ]-----------------------------
|
|
||||||
%s
|
|
||||||
------------------------------------------------------`
|
|
||||||
|
|
||||||
type logWriter struct {
|
|
||||||
// Logger is what we will use to log the payload of a response.
|
|
||||||
Logger aws.Logger
|
|
||||||
// buf stores the contents of what has been read
|
|
||||||
buf *bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *logWriter) Write(b []byte) (int, error) {
|
|
||||||
return logger.buf.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
type teeReaderCloser struct {
|
|
||||||
// io.Reader will be a tee reader that is used during logging.
|
|
||||||
// This structure will read from a body and write the contents to a logger.
|
|
||||||
io.Reader
|
|
||||||
// Source is used just to close when we are done reading.
|
|
||||||
Source io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (reader *teeReaderCloser) Close() error {
|
|
||||||
return reader.Source.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
|
|
||||||
// to a service. Will include the HTTP request body if the LogLevel of the
|
|
||||||
// request matches LogDebugWithHTTPBody.
|
|
||||||
var LogHTTPRequestHandler = request.NamedHandler{
|
|
||||||
Name: "awssdk.client.LogRequest",
|
|
||||||
Fn: logRequest,
|
|
||||||
}
|
|
||||||
|
|
||||||
func logRequest(r *request.Request) {
|
|
||||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
|
||||||
bodySeekable := aws.IsReaderSeekable(r.Body)
|
|
||||||
|
|
||||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
|
||||||
if err != nil {
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if logBody {
|
|
||||||
if !bodySeekable {
|
|
||||||
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
|
|
||||||
}
|
|
||||||
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
|
|
||||||
// Body as a NoOpCloser and will not be reset after read by the HTTP
|
|
||||||
// client reader.
|
|
||||||
r.ResetBody()
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
|
|
||||||
// to a service. Will only log the HTTP request's headers. The request payload
|
|
||||||
// will not be read.
|
|
||||||
var LogHTTPRequestHeaderHandler = request.NamedHandler{
|
|
||||||
Name: "awssdk.client.LogRequestHeader",
|
|
||||||
Fn: logRequestHeader,
|
|
||||||
}
|
|
||||||
|
|
||||||
func logRequestHeader(r *request.Request) {
|
|
||||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
|
|
||||||
if err != nil {
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// logRespMsg is the format template for a successfully dumped HTTP response.
// The verbs are, in order: service name, operation name, raw response dump.
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`

// logRespErrMsg is the format template logged when dumping an HTTP response
// fails. The verbs are, in order: service name, operation name, dump error.
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
---[ RESPONSE DUMP ERROR ]-----------------------------
%s
-----------------------------------------------------`
// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
// received from a service. Will include the HTTP response body if the LogLevel
// of the request matches LogDebugWithHTTPBody.
var LogHTTPResponseHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponse", // registered handler name; used to add/remove/replace this handler by name
	Fn:   logResponse,                 // tees/dumps the received response to the configured logger
}
func logResponse(r *request.Request) {
|
|
||||||
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
|
|
||||||
|
|
||||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
|
||||||
if logBody {
|
|
||||||
r.HTTPResponse.Body = &teeReaderCloser{
|
|
||||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
|
||||||
Source: r.HTTPResponse.Body,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
handlerFn := func(req *request.Request) {
|
|
||||||
b, err := httputil.DumpResponse(req.HTTPResponse, false)
|
|
||||||
if err != nil {
|
|
||||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
|
||||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
lw.Logger.Log(fmt.Sprintf(logRespMsg,
|
|
||||||
req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
|
|
||||||
|
|
||||||
if logBody {
|
|
||||||
b, err := ioutil.ReadAll(lw.buf)
|
|
||||||
if err != nil {
|
|
||||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
|
||||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
lw.Logger.Log(string(b))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const handlerName = "awsdk.client.LogResponse.ResponseBody"
|
|
||||||
|
|
||||||
r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
|
|
||||||
Name: handlerName, Fn: handlerFn,
|
|
||||||
})
|
|
||||||
r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
|
|
||||||
Name: handlerName, Fn: handlerFn,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
// response received from a service. Will only log the HTTP response's headers.
// The response payload will not be read.
var LogHTTPResponseHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponseHeader", // registered handler name; used to add/remove/replace this handler by name
	Fn:   logResponseHeader,                 // dumps only the status line and headers to the configured logger
}
func logResponseHeader(r *request.Request) {
|
|
||||||
if r.Config.Logger == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := httputil.DumpResponse(r.HTTPResponse, false)
|
|
||||||
if err != nil {
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
|
||||||
}
|
|
||||||
13
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
generated
vendored
13
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
generated
vendored
|
|
@ -1,13 +0,0 @@
|
||||||
package metadata
|
|
||||||
|
|
||||||
// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
	ServiceName   string // name of the service, as used in log/error messages
	ServiceID     string // service identifier
	APIVersion    string // API version string of the service model
	Endpoint      string // endpoint URL requests are sent to
	SigningName   string // name used when signing requests
	SigningRegion string // region used when signing requests
	JSONVersion   string // JSON protocol version, where applicable
	TargetPrefix  string // target prefix for JSON-RPC style protocols, where applicable
}
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue